Diffstat (limited to 'arch/sh')

 arch/sh/configs/ecovec24_defconfig |  2 --
 arch/sh/include/asm/bitops-op32.h  | 34 ++++++++++++++++++++++++------------
 arch/sh/include/asm/dma.h          |  6 ------
 arch/sh/include/asm/io.h           |  8 ++++++--
 arch/sh/include/asm/pci.h          |  6 ------
 arch/sh/include/asm/pgtable.h      | 17 -----------------
 arch/sh/kernel/irq.c               |  9 ++++++---
 arch/sh/mm/fault.c                 |  4 ++++
 arch/sh/mm/mmap.c                  | 20 ++++++++++++++++++++
 9 files changed, 58 insertions(+), 48 deletions(-)
diff --git a/arch/sh/configs/ecovec24_defconfig b/arch/sh/configs/ecovec24_defconfig
index d90d29d44469..e699e2e04128 100644
--- a/arch/sh/configs/ecovec24_defconfig
+++ b/arch/sh/configs/ecovec24_defconfig
@@ -29,8 +29,6 @@ CONFIG_IP_PNP_DHCP=y
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
 # CONFIG_IPV6 is not set
-CONFIG_IRDA=y
-CONFIG_SH_SIR=y
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_BLOCK=y
diff --git a/arch/sh/include/asm/bitops-op32.h b/arch/sh/include/asm/bitops-op32.h
index cfe5465acce7..565a85d8b7fb 100644
--- a/arch/sh/include/asm/bitops-op32.h
+++ b/arch/sh/include/asm/bitops-op32.h
@@ -2,6 +2,8 @@
 #ifndef __ASM_SH_BITOPS_OP32_H
 #define __ASM_SH_BITOPS_OP32_H
 
+#include <linux/bits.h>
+
 /*
  * The bit modifying instructions on SH-2A are only capable of working
  * with a 3-bit immediate, which signifies the shift position for the bit
@@ -16,7 +18,8 @@
 #define BYTE_OFFSET(nr)	((nr) % BITS_PER_BYTE)
 #endif
 
-static inline void __set_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	if (__builtin_constant_p(nr)) {
 		__asm__ __volatile__ (
@@ -33,7 +36,8 @@ static inline void __set_bit(int nr, volatile unsigned long *addr)
 	}
 }
 
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	if (__builtin_constant_p(nr)) {
 		__asm__ __volatile__ (
@@ -52,7 +56,7 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
 }
 
 /**
- * __change_bit - Toggle a bit in memory
+ * arch___change_bit - Toggle a bit in memory
  * @nr: the bit to change
  * @addr: the address to start counting from
  *
@@ -60,7 +64,8 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __change_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	if (__builtin_constant_p(nr)) {
 		__asm__ __volatile__ (
@@ -79,7 +84,7 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
 }
 
 /**
- * __test_and_set_bit - Set a bit and return its old value
+ * arch___test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
  *
@@ -87,7 +92,8 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -98,7 +104,7 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
 }
 
 /**
- * __test_and_clear_bit - Clear a bit and return its old value
+ * arch___test_and_clear_bit - Clear a bit and return its old value
  * @nr: Bit to clear
  * @addr: Address to count from
  *
@@ -106,7 +112,8 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
 
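 */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -117,8 +124,8 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
 }
 
 /* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr,
-					volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -129,13 +136,16 @@ static inline int __test_and_change_bit(int nr,
 }
 
 /**
- * test_bit - Determine whether a bit is set
+ * arch_test_bit - Determine whether a bit is set
  * @nr: bit number to test
  * @addr: Address to start counting from
  */
-static inline int test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
 	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
 }
 
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
+
 #endif /* __ASM_SH_BITOPS_OP32_H */

The rename above wires SH's non-atomic helpers into the generic arch_*() bitops layer via asm-generic/bitops/non-instrumented-non-atomic.h. As a rough illustration of the semantics involved, here is a standalone userspace sketch of the generic read-modify-write fallback behind these helpers; the function name and the main() harness are illustrative, not kernel code.

/*
 * Standalone userspace sketch (not kernel code) mirroring the generic
 * fallback logic behind the arch___*_bit() helpers: BIT_MASK() selects
 * a bit within one word, BIT_WORD() selects the word in the array.
 * Build with: cc -o bitops bitops.c
 */
#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)

/* Non-atomic: a plain read-modify-write, callers must serialize. */
static bool test_and_set_bit_nonatomic(unsigned long nr, unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = addr + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old | mask;
	return (old & mask) != 0;
}

int main(void)
{
	/* Bit 40 lands in word 0 on 64-bit, word 1 on 32-bit. */
	unsigned long bitmap[2] = { 0, 0 };

	/* First call sets the bit and reports it was clear... */
	printf("was set: %d\n", test_and_set_bit_nonatomic(40, bitmap));
	/* ...the second call finds it already set. */
	printf("was set: %d\n", test_and_set_bit_nonatomic(40, bitmap));
	return 0;
}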
diff --git a/arch/sh/include/asm/dma.h b/arch/sh/include/asm/dma.h
index 17d23ae98c77..c8bee3f985a2 100644
--- a/arch/sh/include/asm/dma.h
+++ b/arch/sh/include/asm/dma.h
@@ -137,10 +137,4 @@ extern int register_chan_caps(const char *dmac, struct dma_chan_caps *capslist);
 extern int dma_create_sysfs_files(struct dma_channel *, struct dma_info *);
 extern void dma_remove_sysfs_files(struct dma_channel *, struct dma_info *);
 
-#ifdef CONFIG_PCI
-extern int isa_dma_bridge_buggy;
-#else
-#define isa_dma_bridge_buggy	(0)
-#endif
-
 #endif /* __ASM_SH_DMA_H */
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index cf9a3ec32406..fba90e670ed4 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -271,8 +271,12 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
 #endif /* CONFIG_HAVE_IOREMAP_PROT */
 
 #else /* CONFIG_MMU */
-#define iounmap(addr)		do { } while (0)
-#define ioremap(offset, size)	((void __iomem *)(unsigned long)(offset))
+static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
+{
+	return (void __iomem *)(unsigned long)offset;
+}
+
+static inline void iounmap(volatile void __iomem *addr) { }
 #endif /* CONFIG_MMU */
 
 #define ioremap_uc	ioremap
diff --git a/arch/sh/include/asm/pci.h b/arch/sh/include/asm/pci.h
index ad22e88c6657..54c30126ea17 100644
--- a/arch/sh/include/asm/pci.h
+++ b/arch/sh/include/asm/pci.h
@@ -88,10 +88,4 @@ static inline int pci_proc_domain(struct pci_bus *bus)
 	return hose->need_domain_info;
 }
 
-/* Chances are this interrupt is wired PC-style ... */
-static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
-{
-	return channel ? 15 : 14;
-}
-
 #endif /* __ASM_SH_PCI_H */
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index d7ddb1ec86a0..6fb9ec54cf9b 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -89,23 +89,6 @@ static inline unsigned long phys_addr_mask(void)
 * completely separate permission bits for user and kernel space.
 
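 */
		/*xwr*/
-#define __P000	PAGE_NONE
-#define __P001	PAGE_READONLY
-#define __P010	PAGE_COPY
-#define __P011	PAGE_COPY
-#define __P100	PAGE_EXECREAD
-#define __P101	PAGE_EXECREAD
-#define __P110	PAGE_COPY
-#define __P111	PAGE_COPY
-
-#define __S000	PAGE_NONE
-#define __S001	PAGE_READONLY
-#define __S010	PAGE_WRITEONLY
-#define __S011	PAGE_SHARED
-#define __S100	PAGE_EXECREAD
-#define __S101	PAGE_EXECREAD
-#define __S110	PAGE_RWX
-#define __S111	PAGE_RWX
 
 typedef pte_t *pte_addr_t;

The io.h hunk above replaces the !CONFIG_MMU ioremap()/iounmap() object-like macros with static inline functions. A minimal userspace sketch of why that matters (the compiler now type-checks the arguments and evaluates them exactly once); the typedef and harness here are stand-ins, not the kernel's definitions.

/*
 * Userspace sketch (assumed types, not the kernel's) contrasting the
 * old macro stubs with the new static inline versions.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;	/* stand-in for the kernel typedef */

/* Old style: no type checking at all; any argument "works". */
#define ioremap_macro(offset, size)	((void *)(unsigned long)(offset))

/* New style: argument types are verified by the compiler. */
static inline void *ioremap_inline(phys_addr_t offset, size_t size)
{
	(void)size;		/* identity mapping: size is unused */
	return (void *)(unsigned long)offset;
}

static inline void iounmap_inline(volatile void *addr)
{
	(void)addr;		/* nothing to undo without an MMU */
}

int main(void)
{
	void *p = ioremap_inline(0xA0000000ULL, 0x1000);

	printf("mapped at %p\n", p);
	iounmap_inline(p);
	return 0;
}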
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index ef0f0827cf57..909276738078 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -149,6 +149,7 @@ void irq_ctx_exit(int cpu)
 	hardirq_ctx[cpu] = NULL;
 }
 
+#ifndef CONFIG_PREEMPT_RT
 void do_softirq_own_stack(void)
 {
 	struct thread_info *curctx;
@@ -176,6 +177,7 @@ void do_softirq_own_stack(void)
 		"r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
 	);
 }
+#endif
 #else
 static inline void handle_one_irq(unsigned int irq)
 {
@@ -230,16 +232,17 @@ void migrate_irqs(void)
 		struct irq_data *data = irq_get_irq_data(irq);
 
 		if (irq_data_get_node(data) == cpu) {
-			struct cpumask *mask = irq_data_get_affinity_mask(data);
+			const struct cpumask *mask = irq_data_get_affinity_mask(data);
 			unsigned int newcpu = cpumask_any_and(mask,
 							      cpu_online_mask);
 
 			if (newcpu >= nr_cpu_ids) {
 				pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
 						    irq, cpu);
-				cpumask_setall(mask);
+				irq_set_affinity(irq, cpu_all_mask);
+			} else {
+				irq_set_affinity(irq, mask);
 			}
-			irq_set_affinity(irq, mask);
 		}
 	}
 }
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index e175667b1363..acd2f5e50bfc 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -485,6 +485,10 @@ good_area:
 	if (mm_fault_error(regs, error_code, address, fault))
 		return;
 
+	/* The fault is fully completed (including releasing mmap lock) */
+	if (fault & VM_FAULT_COMPLETED)
+		return;
+
 	if (fault & VM_FAULT_RETRY) {
 		flags |= FAULT_FLAG_TRIED;
 
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 6a1a1297baae..b82199878b45 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -19,6 +19,26 @@ unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
 EXPORT_SYMBOL(shm_align_mask);
 
 #ifdef CONFIG_MMU
+static const pgprot_t protection_map[16] = {
+	[VM_NONE]					= PAGE_NONE,
+	[VM_READ]					= PAGE_READONLY,
+	[VM_WRITE]					= PAGE_COPY,
+	[VM_WRITE | VM_READ]				= PAGE_COPY,
+	[VM_EXEC]					= PAGE_EXECREAD,
+	[VM_EXEC | VM_READ]				= PAGE_EXECREAD,
+	[VM_EXEC | VM_WRITE]				= PAGE_COPY,
+	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY,
+	[VM_SHARED]					= PAGE_NONE,
+	[VM_SHARED | VM_READ]				= PAGE_READONLY,
+	[VM_SHARED | VM_WRITE]				= PAGE_WRITEONLY,
+	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
+	[VM_SHARED | VM_EXEC]				= PAGE_EXECREAD,
+	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_EXECREAD,
+	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_RWX,
+	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_RWX
+};
+DECLARE_VM_GET_PAGE_PROT
+
 /*
  * To avoid cache aliases, we map the shared page with same color.
  */
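The pgtable.h and mmap.c hunks together move SH from the global __P000..__S111 macro tables to a per-arch protection_map[] indexed directly by vm_flags, with DECLARE_VM_GET_PAGE_PROT generating the accessor. A hedged userspace sketch of the resulting lookup follows; the string values and the vm_get_page_prot() shown here are illustrative stand-ins, not SH's real pgprot encodings.

/*
 * Userspace sketch of the lookup that DECLARE_VM_GET_PAGE_PROT expands
 * to: the low VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits of vm_flags index
 * a 16-entry table.
 */
#include <stdio.h>

#define VM_NONE		0x0UL
#define VM_READ		0x1UL
#define VM_WRITE	0x2UL
#define VM_EXEC		0x4UL
#define VM_SHARED	0x8UL

static const char *protection_map[16] = {
	[VM_NONE]					= "PAGE_NONE",
	[VM_READ]					= "PAGE_READONLY",
	[VM_WRITE]					= "PAGE_COPY",
	[VM_WRITE | VM_READ]				= "PAGE_COPY",
	[VM_EXEC]					= "PAGE_EXECREAD",
	[VM_EXEC | VM_READ]				= "PAGE_EXECREAD",
	[VM_EXEC | VM_WRITE]				= "PAGE_COPY",
	[VM_EXEC | VM_WRITE | VM_READ]			= "PAGE_COPY",
	[VM_SHARED]					= "PAGE_NONE",
	[VM_SHARED | VM_READ]				= "PAGE_READONLY",
	[VM_SHARED | VM_WRITE]				= "PAGE_WRITEONLY",
	[VM_SHARED | VM_WRITE | VM_READ]		= "PAGE_SHARED",
	[VM_SHARED | VM_EXEC]				= "PAGE_EXECREAD",
	[VM_SHARED | VM_EXEC | VM_READ]			= "PAGE_EXECREAD",
	[VM_SHARED | VM_EXEC | VM_WRITE]		= "PAGE_RWX",
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= "PAGE_RWX",
};

/* Roughly what the generated vm_get_page_prot() does after this change. */
static const char *vm_get_page_prot(unsigned long vm_flags)
{
	return protection_map[vm_flags &
			      (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}

int main(void)
{
	/* A private writable mapping gets copy-on-write semantics... */
	printf("%s\n", vm_get_page_prot(VM_READ | VM_WRITE));
	/* ...while a shared writable one gets real write permission. */
	printf("%s\n", vm_get_page_prot(VM_SHARED | VM_READ | VM_WRITE));
	return 0;
}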