Diffstat (limited to 'arch/parisc/include/asm')
-rw-r--r--  arch/parisc/include/asm/Kbuild            |  18
-rw-r--r--  arch/parisc/include/asm/futex.h           |   2
-rw-r--r--  arch/parisc/include/asm/page.h            |   3
-rw-r--r--  arch/parisc/include/asm/pgtable.h         |   2
-rw-r--r--  arch/parisc/include/asm/spinlock.h        | 160
-rw-r--r--  arch/parisc/include/asm/spinlock_types.h  |  14
6 files changed, 84 insertions(+), 115 deletions(-)
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index 9ceedf6393c4..e3ee5c0bfe80 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -2,26 +2,8 @@
 generated-y += syscall_table_32.h
 generated-y += syscall_table_64.h
 generated-y += syscall_table_c32.h
-generic-y += current.h
-generic-y += device.h
-generic-y += div64.h
-generic-y += emergency-restart.h
-generic-y += exec.h
-generic-y += hw_irq.h
-generic-y += irq_regs.h
-generic-y += irq_work.h
-generic-y += kdebug.h
 generic-y += kvm_para.h
-generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
-generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
-generic-y += percpu.h
-generic-y += preempt.h
 generic-y += seccomp.h
-generic-y += trace_clock.h
 generic-y += user.h
-generic-y += vga.h
-generic-y += word-at-a-time.h
-generic-y += xor.h
diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h
index d2c3e4106851..c459f656c8c3 100644
--- a/arch/parisc/include/asm/futex.h
+++ b/arch/parisc/include/asm/futex.h
@@ -40,7 +40,6 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
 	u32 tmp;
 
 	_futex_spin_lock_irqsave(uaddr, &flags);
-	pagefault_disable();
 
 	ret = -EFAULT;
 	if (unlikely(get_user(oldval, uaddr) != 0))
@@ -73,7 +72,6 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
 		ret = -EFAULT;
 
 out_pagefault_enable:
-	pagefault_enable();
 	_futex_spin_unlock_irqrestore(uaddr, &flags);
 
 	if (!ret)
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index 796ae29e9b9a..6b3f6740a6a6 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -180,9 +180,6 @@ extern int npmem_ranges;
 #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 
-#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
-				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
 #include <asm-generic/memory_model.h>
 #include <asm-generic/getorder.h>
 #include <asm/pdc.h>
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index f0a365950536..9832c73a7021 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -377,7 +377,6 @@ static inline void pud_clear(pud_t *pud) {
 static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
 static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_WRITE; }
-static inline int pte_special(pte_t pte)	{ return 0; }
 
 static inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
 static inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
@@ -385,7 +384,6 @@ static inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) &= ~_PAGE_WRITE; ret
 static inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
 static inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
 static inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) |= _PAGE_WRITE; return pte; }
-static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
 
 /*
  * Huge pte definitions.
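A note on the Kbuild hunk above: each "generic-y" entry makes the build emit a one-line generated wrapper header that simply includes the asm-generic version. As a sketch, the removed "generic-y += percpu.h" line corresponds to a generated file like the one below (path and contents as produced by scripts/Makefile.asm-generic; shown here for illustration only):

/* arch/parisc/include/generated/asm/percpu.h -- build-generated wrapper */
#include <asm-generic/percpu.h>

Dropping the entries is possible because these headers moved to the shared mandatory-y list in include/asm-generic/Kbuild, so every architecture now gets the fallback by default and the per-arch lines are redundant.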
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 197d2247e4db..70fecb8dc4e2 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -10,25 +10,34 @@ static inline int arch_spin_is_locked(arch_spinlock_t *x)
 {
 	volatile unsigned int *a = __ldcw_align(x);
+	smp_mb();
 	return *a == 0;
 }
 
-#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
+static inline void arch_spin_lock(arch_spinlock_t *x)
+{
+	volatile unsigned int *a;
+
+	a = __ldcw_align(x);
+	while (__ldcw(a) == 0)
+		while (*a == 0)
+			cpu_relax();
+}
 
 static inline void arch_spin_lock_flags(arch_spinlock_t *x,
 					unsigned long flags)
 {
 	volatile unsigned int *a;
+	unsigned long flags_dis;
 
 	a = __ldcw_align(x);
-	while (__ldcw(a) == 0)
+	while (__ldcw(a) == 0) {
+		local_save_flags(flags_dis);
+		local_irq_restore(flags);
 		while (*a == 0)
-			if (flags & PSW_SM_I) {
-				local_irq_enable();
-				cpu_relax();
-				local_irq_disable();
-			} else
-				cpu_relax();
+			cpu_relax();
+		local_irq_restore(flags_dis);
+	}
 }
 #define arch_spin_lock_flags arch_spin_lock_flags
@@ -58,116 +67,93 @@ static inline int arch_spin_trylock(arch_spinlock_t *x)
 
 /*
  * Read-write spinlocks, allowing multiple readers but only one writer.
- * Linux rwlocks are unfair to writers; they can be starved for an indefinite
- * time by readers.  With care, they can also be taken in interrupt context.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
  *
- * In the PA-RISC implementation, we have a spinlock and a counter.
- * Readers use the lock to serialise their access to the counter (which
- * records how many readers currently hold the lock).
- * Writers hold the spinlock, preventing any readers or other writers from
- * grabbing the rwlock.
+ * The spinlock itself is contained in @counter and access to it is
+ * serialized with @lock_mutex.
  */
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static  __inline__ void arch_read_lock(arch_rwlock_t *rw)
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
+	int ret = 0;
 	unsigned long flags;
-	local_irq_save(flags);
-	arch_spin_lock_flags(&rw->lock, flags);
-	rw->counter++;
-	arch_spin_unlock(&rw->lock);
-	local_irq_restore(flags);
-}
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static  __inline__ void arch_read_unlock(arch_rwlock_t *rw)
-{
-	unsigned long flags;
 	local_irq_save(flags);
-	arch_spin_lock_flags(&rw->lock, flags);
-	rw->counter--;
-	arch_spin_unlock(&rw->lock);
+	arch_spin_lock(&(rw->lock_mutex));
+
+	/*
+	 * zero means writer holds the lock exclusively, deny Reader.
+	 * Otherwise grant lock to first/subseq reader
+	 */
+	if (rw->counter > 0) {
+		rw->counter--;
+		ret = 1;
+	}
+
+	arch_spin_unlock(&(rw->lock_mutex));
 	local_irq_restore(flags);
+
+	return ret;
 }
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
+	int ret = 0;
 	unsigned long flags;
- retry:
+
 	local_irq_save(flags);
-	if (arch_spin_trylock(&rw->lock)) {
-		rw->counter++;
-		arch_spin_unlock(&rw->lock);
-		local_irq_restore(flags);
-		return 1;
+	arch_spin_lock(&(rw->lock_mutex));
+
+	/*
+	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+	 * deny writer. Otherwise if unlocked grant to writer
+	 * Hence the claim that Linux rwlocks are unfair to writers.
+	 * (can be starved for an indefinite time by readers).
+	 */
+	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+		rw->counter = 0;
+		ret = 1;
 	}
-
+	arch_spin_unlock(&(rw->lock_mutex));
 	local_irq_restore(flags);
-	/* If write-locked, we fail to acquire the lock */
-	if (rw->counter < 0)
-		return 0;
 
-	/* Wait until we have a realistic chance at the lock */
-	while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
+	return ret;
+}
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+	while (!arch_read_trylock(rw))
 		cpu_relax();
+}
 
-	goto retry;
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+	while (!arch_write_trylock(rw))
+		cpu_relax();
 }
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ void arch_write_lock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned long flags;
-retry:
-	local_irq_save(flags);
-	arch_spin_lock_flags(&rw->lock, flags);
 
-	if (rw->counter != 0) {
-		arch_spin_unlock(&rw->lock);
-		local_irq_restore(flags);
-
-		while (rw->counter != 0)
-			cpu_relax();
-
-		goto retry;
-	}
-
-	rw->counter = -1; /* mark as write-locked */
-	mb();
+	local_irq_save(flags);
+	arch_spin_lock(&(rw->lock_mutex));
+	rw->counter++;
+	arch_spin_unlock(&(rw->lock_mutex));
 	local_irq_restore(flags);
 }
 
-static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
-{
-	rw->counter = 0;
-	arch_spin_unlock(&rw->lock);
-}
-
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	unsigned long flags;
-	int result = 0;
 
 	local_irq_save(flags);
-	if (arch_spin_trylock(&rw->lock)) {
-		if (rw->counter == 0) {
-			rw->counter = -1;
-			result = 1;
-		} else {
-			/* Read-locked.  Oh well. */
-			arch_spin_unlock(&rw->lock);
-		}
-	}
+	arch_spin_lock(&(rw->lock_mutex));
+	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+	arch_spin_unlock(&(rw->lock_mutex));
 	local_irq_restore(flags);
-
-	return result;
 }
 
 #endif /* __ASM_SPINLOCK_H */
diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h
index 42979c5704dc..ca39ee350c3f 100644
--- a/arch/parisc/include/asm/spinlock_types.h
+++ b/arch/parisc/include/asm/spinlock_types.h
@@ -12,11 +12,19 @@ typedef struct {
 #endif
 } arch_spinlock_t;
 
+
+/* counter:
+ * Unlocked     : 0x0100_0000
+ * Read lock(s) : 0x00FF_FFFF to 0x01 (Multiple Readers decrement it)
+ * Write lock   : 0x0, but only if prior value is "unlocked" 0x0100_0000
+ */
 typedef struct {
-	arch_spinlock_t lock;
-	volatile int counter;
+	arch_spinlock_t		lock_mutex;
+	volatile unsigned int	counter;
 } arch_rwlock_t;
 
-#define __ARCH_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 }
+#define __ARCH_RW_LOCK_UNLOCKED__	0x01000000
+#define __ARCH_RW_LOCK_UNLOCKED	{ .lock_mutex = __ARCH_SPIN_LOCK_UNLOCKED, \
+					.counter = __ARCH_RW_LOCK_UNLOCKED__ }
 
 #endif
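To illustrate the counter scheme the spinlock.h and spinlock_types.h hunks implement: the rwlock starts at 0x0100_0000 (unlocked), each reader decrements it, and a writer may only swing it from that unlocked value to 0. Below is a minimal user-space model of just that counter logic (an illustrative sketch, not kernel code: a pthread mutex stands in for @lock_mutex, and the try_read()/try_write() names are invented for the example):

/* Illustrative model of the rwlock counter scheme -- not part of the patch. */
#include <pthread.h>
#include <stdio.h>

#define RW_UNLOCKED 0x01000000	/* mirrors __ARCH_RW_LOCK_UNLOCKED__ */

static pthread_mutex_t lock_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned int counter = RW_UNLOCKED;

/* 1 - lock taken successfully */
static int try_read(void)
{
	int ret = 0;

	pthread_mutex_lock(&lock_mutex);
	if (counter > 0) {		/* 0 means a writer holds it */
		counter--;		/* each reader claims one slot */
		ret = 1;
	}
	pthread_mutex_unlock(&lock_mutex);
	return ret;
}

/* 1 - lock taken successfully */
static int try_write(void)
{
	int ret = 0;

	pthread_mutex_lock(&lock_mutex);
	if (counter == RW_UNLOCKED) {	/* only when no readers and no writer */
		counter = 0;
		ret = 1;
	}
	pthread_mutex_unlock(&lock_mutex);
	return ret;
}

int main(void)
{
	printf("read : %d\n", try_read());	/* 1 -> counter 0x00FFFFFF */
	printf("write: %d\n", try_write());	/* 0 -> a reader holds it */
	counter++;				/* model of read_unlock */
	printf("write: %d\n", try_write());	/* 1 -> counter now 0 */
	return 0;
}

The same shape shows the unfairness the new comment calls out: a steady stream of readers keeps counter below RW_UNLOCKED, so try_write() can fail indefinitely, while arch_read_lock()/arch_write_lock() are just trylock loops around cpu_relax().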