Diffstat (limited to 'arch/powerpc/include/asm')
35 files changed, 406 insertions, 165 deletions
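Among the changes below, checksum.h gains an inline csum_add() helper: on 64-bit it folds the carry of a 32-bit one's-complement sum in plain C, while 32-bit keeps an addc/addze asm pair. The following user-space sketch shows the same end-around-carry fold; csum_add_fold(), the test value, and the main() harness are illustrative stand-ins, not kernel code.

#include <stdint.h>
#include <stdio.h>

/*
 * End-around-carry addition of two 32-bit one's-complement partial
 * checksums, mirroring the __powerpc64__ branch of the new csum_add():
 * widen to 64 bits, add, then fold the carry back into the low word.
 * csum_add_fold() is a hypothetical stand-in used for illustration.
 */
static uint32_t csum_add_fold(uint32_t csum, uint32_t addend)
{
	uint64_t res = (uint64_t)csum + addend;

	return (uint32_t)res + (uint32_t)(res >> 32);
}

int main(void)
{
	/* 0xffffffff + 0x1 overflows 32 bits; the carry is folded back in. */
	printf("%#x\n", (unsigned)csum_add_fold(0xffffffffu, 0x1u)); /* 0x1 */
	return 0;
}

On 32-bit the addc/addze pair in the diff achieves the same result in two instructions: addc produces the carry and addze adds it back into the running sum.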
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild index 050712e1ce41..ab9f4e0ed4cf 100644 --- a/arch/powerpc/include/asm/Kbuild +++ b/arch/powerpc/include/asm/Kbuild @@ -6,5 +6,4 @@ generic-y += local64.h  generic-y += mcs_spinlock.h  generic-y += preempt.h  generic-y += rwsem.h -generic-y += trace_clock.h  generic-y += vtime.h diff --git a/arch/powerpc/include/asm/archrandom.h b/arch/powerpc/include/asm/archrandom.h index 0cc6eedc4780..85e88f7a59c0 100644 --- a/arch/powerpc/include/asm/archrandom.h +++ b/arch/powerpc/include/asm/archrandom.h @@ -7,14 +7,23 @@  static inline int arch_get_random_long(unsigned long *v)  { -	if (ppc_md.get_random_long) -		return ppc_md.get_random_long(v); -  	return 0;  }  static inline int arch_get_random_int(unsigned int *v)  { +	return 0; +} + +static inline int arch_get_random_seed_long(unsigned long *v) +{ +	if (ppc_md.get_random_seed) +		return ppc_md.get_random_seed(v); + +	return 0; +} +static inline int arch_get_random_seed_int(unsigned int *v) +{  	unsigned long val;  	int rc; @@ -27,22 +36,13 @@ static inline int arch_get_random_int(unsigned int *v)  static inline int arch_has_random(void)  { -	return !!ppc_md.get_random_long; -} - -static inline int arch_get_random_seed_long(unsigned long *v) -{ -	return 0; -} -static inline int arch_get_random_seed_int(unsigned int *v) -{  	return 0;  } +  static inline int arch_has_random_seed(void)  { -	return 0; +	return !!ppc_md.get_random_seed;  } -  #endif /* CONFIG_ARCH_RANDOM */  #ifdef CONFIG_PPC_POWERNV diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index 512d2782b043..55f106ed12bf 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h @@ -67,6 +67,10 @@ static __inline__ int atomic_##op##_return(int a, atomic_t *v)		\  ATOMIC_OPS(add, add)  ATOMIC_OPS(sub, subf) +ATOMIC_OP(and, and) +ATOMIC_OP(or, or) +ATOMIC_OP(xor, xor) +  #undef ATOMIC_OPS  #undef ATOMIC_OP_RETURN  #undef ATOMIC_OP @@ -304,6 +308,9 @@ static __inline__ long atomic64_##op##_return(long a, atomic64_t *v)	\  ATOMIC64_OPS(add, add)  ATOMIC64_OPS(sub, subf) +ATOMIC64_OP(and, and) +ATOMIC64_OP(or, or) +ATOMIC64_OP(xor, xor)  #undef ATOMIC64_OPS  #undef ATOMIC64_OP_RETURN diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h index 51ccc7232042..0eca6efc0631 100644 --- a/arch/powerpc/include/asm/barrier.h +++ b/arch/powerpc/include/asm/barrier.h @@ -76,12 +76,12 @@  do {									\  	compiletime_assert_atomic_type(*p);				\  	smp_lwsync();							\ -	ACCESS_ONCE(*p) = (v);						\ +	WRITE_ONCE(*p, v);						\  } while (0)  #define smp_load_acquire(p)						\  ({									\ -	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\ +	typeof(*p) ___p1 = READ_ONCE(*p);				\  	compiletime_assert_atomic_type(*p);				\  	smp_lwsync();							\  	___p1;								\ diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h index 30b35fff2dea..6229e6b6037b 100644 --- a/arch/powerpc/include/asm/cacheflush.h +++ b/arch/powerpc/include/asm/cacheflush.h @@ -40,7 +40,12 @@ extern void __flush_dcache_icache(void *page_va);  extern void flush_dcache_icache_page(struct page *page);  #if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE)  extern void __flush_dcache_icache_phys(unsigned long physaddr); -#endif /* CONFIG_PPC32 && !CONFIG_BOOKE */ +#else +static inline void __flush_dcache_icache_phys(unsigned long physaddr) +{ +	BUG(); +} +#endif  extern void flush_dcache_range(unsigned long start, unsigned long stop);  #ifdef 
CONFIG_PPC32 diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h index 8251a3ba870f..e8d9ef4755a4 100644 --- a/arch/powerpc/include/asm/checksum.h +++ b/arch/powerpc/include/asm/checksum.h @@ -20,15 +20,6 @@  extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);  /* - * computes the checksum of the TCP/UDP pseudo-header - * returns a 16-bit checksum, already complemented - */ -extern __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, -					unsigned short len, -					unsigned short proto, -					__wsum sum); - -/*   * computes the checksum of a memory block at buff, length len,   * and adds in "sum" (32-bit)   * @@ -127,6 +118,34 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,  #endif  } +/* + * computes the checksum of the TCP/UDP pseudo-header + * returns a 16-bit checksum, already complemented + */ +static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, +					unsigned short len, +					unsigned short proto, +					__wsum sum) +{ +	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); +} + +#define HAVE_ARCH_CSUM_ADD +static inline __wsum csum_add(__wsum csum, __wsum addend) +{ +#ifdef __powerpc64__ +	u64 res = (__force u64)csum; + +	res += (__force u64)addend; +	return (__force __wsum)((u32)res + (res >> 32)); +#else +	asm("addc %0,%0,%1;" +	    "addze %0,%0;" +	    : "+r" (csum) : "r" (addend)); +	return csum; +#endif +} +  #endif  #endif /* __KERNEL__ */  #endif diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h index b142b8e0ed9e..4f2df589ec1d 100644 --- a/arch/powerpc/include/asm/compat.h +++ b/arch/powerpc/include/asm/compat.h @@ -174,6 +174,13 @@ typedef struct compat_siginfo {  			int _band;	/* POLL_IN, POLL_OUT, POLL_MSG */  			int _fd;  		} _sigpoll; + +		/* SIGSYS */ +		struct { +			unsigned int _call_addr; /* calling insn */ +			int _syscall;		 /* triggering system call number */ +			unsigned int _arch;	 /* AUDIT_ARCH_* of syscall */ +		} _sigsys;  	} _sifields;  } compat_siginfo_t; diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h index e9bdda88f1fb..406c2b1ff82d 100644 --- a/arch/powerpc/include/asm/device.h +++ b/arch/powerpc/include/asm/device.h @@ -10,6 +10,7 @@ struct dma_map_ops;  struct device_node;  #ifdef CONFIG_PPC64  struct pci_dn; +struct iommu_table;  #endif  /* @@ -23,13 +24,15 @@ struct dev_archdata {  	struct dma_map_ops	*dma_ops;  	/* -	 * When an iommu is in use, dma_data is used as a ptr to the base of the -	 * iommu_table.  Otherwise, it is a simple numerical offset. +	 * These two used to be a union. However, with the hybrid ops we need +	 * both so here we store both a DMA offset for direct mappings and +	 * an iommu_table for remapped DMA.  	 
*/ -	union { -		dma_addr_t	dma_offset; -		void		*iommu_table_base; -	} dma_data; +	dma_addr_t		dma_offset; + +#ifdef CONFIG_PPC64 +	struct iommu_table	*iommu_table_base; +#endif  #ifdef CONFIG_IOMMU_API  	void			*iommu_domain; diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index 9103687b0436..7f522c021dc3 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -18,15 +18,17 @@  #include <asm/io.h>  #include <asm/swiotlb.h> +#ifdef CONFIG_PPC64  #define DMA_ERROR_CODE		(~(dma_addr_t)0x0) +#endif  /* Some dma direct funcs must be visible for use in other dma_ops */ -extern void *dma_direct_alloc_coherent(struct device *dev, size_t size, -				       dma_addr_t *dma_handle, gfp_t flag, +extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size, +					 dma_addr_t *dma_handle, gfp_t flag, +					 struct dma_attrs *attrs); +extern void __dma_direct_free_coherent(struct device *dev, size_t size, +				       void *vaddr, dma_addr_t dma_handle,  				       struct dma_attrs *attrs); -extern void dma_direct_free_coherent(struct device *dev, size_t size, -				     void *vaddr, dma_addr_t dma_handle, -				     struct dma_attrs *attrs);  extern int dma_direct_mmap_coherent(struct device *dev,  				    struct vm_area_struct *vma,  				    void *cpu_addr, dma_addr_t handle, @@ -106,7 +108,7 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)  static inline dma_addr_t get_dma_offset(struct device *dev)  {  	if (dev) -		return dev->archdata.dma_data.dma_offset; +		return dev->archdata.dma_offset;  	return PCI_DRAM_OFFSET;  } @@ -114,77 +116,20 @@ static inline dma_addr_t get_dma_offset(struct device *dev)  static inline void set_dma_offset(struct device *dev, dma_addr_t off)  {  	if (dev) -		dev->archdata.dma_data.dma_offset = off; +		dev->archdata.dma_offset = off;  }  /* this will be removed soon */  #define flush_write_buffers() -#include <asm-generic/dma-mapping-common.h> - -static inline int dma_supported(struct device *dev, u64 mask) -{ -	struct dma_map_ops *dma_ops = get_dma_ops(dev); +#define HAVE_ARCH_DMA_SET_MASK 1 +extern int dma_set_mask(struct device *dev, u64 dma_mask); -	if (unlikely(dma_ops == NULL)) -		return 0; -	if (dma_ops->dma_supported == NULL) -		return 1; -	return dma_ops->dma_supported(dev, mask); -} +#include <asm-generic/dma-mapping-common.h> -extern int dma_set_mask(struct device *dev, u64 dma_mask);  extern int __dma_set_mask(struct device *dev, u64 dma_mask);  extern u64 __dma_get_required_mask(struct device *dev); -#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, -				    dma_addr_t *dma_handle, gfp_t flag, -				    struct dma_attrs *attrs) -{ -	struct dma_map_ops *dma_ops = get_dma_ops(dev); -	void *cpu_addr; - -	BUG_ON(!dma_ops); - -	cpu_addr = dma_ops->alloc(dev, size, dma_handle, flag, attrs); - -	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); - -	return cpu_addr; -} - -#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) - -static inline void dma_free_attrs(struct device *dev, size_t size, -				  void *cpu_addr, dma_addr_t dma_handle, -				  struct dma_attrs *attrs) -{ -	struct dma_map_ops *dma_ops = get_dma_ops(dev); - -	BUG_ON(!dma_ops); - -	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); - -	dma_ops->free(dev, size, cpu_addr, dma_handle, attrs); -} - -static inline int dma_mapping_error(struct device *dev, dma_addr_t 
dma_addr) -{ -	struct dma_map_ops *dma_ops = get_dma_ops(dev); - -	debug_dma_mapping_error(dev, dma_addr); -	if (dma_ops->mapping_error) -		return dma_ops->mapping_error(dev, dma_addr); - -#ifdef CONFIG_PPC64 -	return (dma_addr == DMA_ERROR_CODE); -#else -	return 0; -#endif -} -  static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)  {  #ifdef CONFIG_SWIOTLB @@ -210,9 +155,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)  	return daddr - get_dma_offset(dev);  } -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) -  #define ARCH_HAS_DMA_MMAP_COHERENT  static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h index e3661872fbea..ef89b1465573 100644 --- a/arch/powerpc/include/asm/ftrace.h +++ b/arch/powerpc/include/asm/ftrace.h @@ -2,7 +2,7 @@  #define _ASM_POWERPC_FTRACE  #ifdef CONFIG_FUNCTION_TRACER -#define MCOUNT_ADDR		((long)(_mcount)) +#define MCOUNT_ADDR		((unsigned long)(_mcount))  #define MCOUNT_INSN_SIZE	4 /* sizeof mcount call */  #ifdef __ASSEMBLY__ diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index a8d2ef30d473..5879fde56f3c 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -721,6 +721,7 @@ extern void __iomem *ioremap_prot(phys_addr_t address, unsigned long size,  				  unsigned long flags);  extern void __iomem *ioremap_wc(phys_addr_t address, unsigned long size);  #define ioremap_nocache(addr, size)	ioremap((addr), (size)) +#define ioremap_uc(addr, size)		ioremap((addr), (size))  extern void iounmap(volatile void __iomem *addr); diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index ca18cff90900..7b87bab09564 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -2,17 +2,17 @@   * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation   * Rewrite, cleanup:   * Copyright (C) 2004 Olof Johansson <[email protected]>, IBM Corporation - *  + *   * This program is free software; you can redistribute it and/or modify   * it under the terms of the GNU General Public License as published by   * the Free Software Foundation; either version 2 of the License, or   * (at your option) any later version. - *  + *   * This program is distributed in the hope that it will be useful,   * but WITHOUT ANY WARRANTY; without even the implied warranty of   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   * GNU General Public License for more details. 
- *  + *   * You should have received a copy of the GNU General Public License   * along with this program; if not, write to the Free Software   * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA @@ -131,16 +131,21 @@ int get_iommu_order(unsigned long size, struct iommu_table *tbl)  struct scatterlist; -static inline void set_iommu_table_base(struct device *dev, void *base) +#ifdef CONFIG_PPC64 + +static inline void set_iommu_table_base(struct device *dev, +					struct iommu_table *base)  { -	dev->archdata.dma_data.iommu_table_base = base; +	dev->archdata.iommu_table_base = base;  }  static inline void *get_iommu_table_base(struct device *dev)  { -	return dev->archdata.dma_data.iommu_table_base; +	return dev->archdata.iommu_table_base;  } +extern int dma_iommu_dma_supported(struct device *dev, u64 mask); +  /* Frees table for an individual device node */  extern void iommu_free_table(struct iommu_table *tbl, const char *node_name); @@ -225,6 +230,20 @@ static inline int __init tce_iommu_bus_notifier_init(void)  }  #endif /* !CONFIG_IOMMU_API */ +#else + +static inline void *get_iommu_table_base(struct device *dev) +{ +	return NULL; +} + +static inline int dma_iommu_dma_supported(struct device *dev, u64 mask) +{ +	return 0; +} + +#endif /* CONFIG_PPC64 */ +  extern int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,  			    struct scatterlist *sglist, int nelems,  			    unsigned long mask, diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h index efbf9a322a23..47e155f15433 100644 --- a/arch/powerpc/include/asm/jump_label.h +++ b/arch/powerpc/include/asm/jump_label.h @@ -18,14 +18,29 @@  #define JUMP_ENTRY_TYPE		stringify_in_c(FTR_ENTRY_LONG)  #define JUMP_LABEL_NOP_SIZE	4 -static __always_inline bool arch_static_branch(struct static_key *key) +static __always_inline bool arch_static_branch(struct static_key *key, bool branch)  {  	asm_volatile_goto("1:\n\t"  		 "nop\n\t"  		 ".pushsection __jump_table,  \"aw\"\n\t"  		 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"  		 ".popsection \n\t" -		 : :  "i" (key) : : l_yes); +		 : :  "i" (&((char *)key)[branch]) : : l_yes); + +	return false; +l_yes: +	return true; +} + +static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) +{ +	asm_volatile_goto("1:\n\t" +		 "b %l[l_yes]\n\t" +		 ".pushsection __jump_table,  \"aw\"\n\t" +		 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t" +		 ".popsection \n\t" +		 : :  "i" (&((char *)key)[branch]) : : l_yes); +  	return false;  l_yes:  	return true; diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index b91e74a817d8..9fac01cb89c1 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -158,6 +158,7 @@ extern pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,  			bool *writable);  extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,  			unsigned long *rmap, long pte_index, int realmode); +extern void kvmppc_update_rmap_change(unsigned long *rmap, unsigned long psize);  extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,  			unsigned long pte_index);  void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep, @@ -225,12 +226,12 @@ static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)  	return vcpu->arch.cr;  } -static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val) +static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)  {  	vcpu->arch.xer = val;  } 
-static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu) +static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)  {  	return vcpu->arch.xer;  } diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index 5bdfb5dd3400..72b6225aca73 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h @@ -25,6 +25,12 @@  #define XICS_MFRR		0xc  #define XICS_IPI		2	/* interrupt source # for IPIs */ +/* Maximum number of threads per physical core */ +#define MAX_SMT_THREADS		8 + +/* Maximum number of subcores per physical core */ +#define MAX_SUBCORES		4 +  #ifdef __ASSEMBLY__  #ifdef CONFIG_KVM_BOOK3S_HANDLER @@ -65,6 +71,19 @@ kvmppc_resume_\intno:  #else  /*__ASSEMBLY__ */ +struct kvmppc_vcore; + +/* Struct used for coordinating micro-threading (split-core) mode changes */ +struct kvm_split_mode { +	unsigned long	rpr; +	unsigned long	pmmar; +	unsigned long	ldbar; +	u8		subcore_size; +	u8		do_nap; +	u8		napped[MAX_SMT_THREADS]; +	struct kvmppc_vcore *master_vcs[MAX_SUBCORES]; +}; +  /*   * This struct goes in the PACA on 64-bit processors.  It is used   * to store host state that needs to be saved when we enter a guest @@ -100,6 +119,7 @@ struct kvmppc_host_state {  	u64 host_spurr;  	u64 host_dscr;  	u64 dec_expires; +	struct kvm_split_mode *kvm_split_mode;  #endif  #ifdef CONFIG_PPC_BOOK3S_64  	u64 cfar; @@ -112,7 +132,7 @@ struct kvmppc_book3s_shadow_vcpu {  	bool in_use;  	ulong gpr[14];  	u32 cr; -	u32 xer; +	ulong xer;  	ulong ctr;  	ulong lr;  	ulong pc; diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h index 3286f0d6a86c..bc6e29e4dfd4 100644 --- a/arch/powerpc/include/asm/kvm_booke.h +++ b/arch/powerpc/include/asm/kvm_booke.h @@ -54,12 +54,12 @@ static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)  	return vcpu->arch.cr;  } -static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val) +static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)  {  	vcpu->arch.xer = val;  } -static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu) +static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)  {  	return vcpu->arch.xer;  } diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index d91f65b28e32..98eebbf66340 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -205,8 +205,10 @@ struct revmap_entry {   */  #define KVMPPC_RMAP_LOCK_BIT	63  #define KVMPPC_RMAP_RC_SHIFT	32 +#define KVMPPC_RMAP_CHG_SHIFT	48  #define KVMPPC_RMAP_REFERENCED	(HPTE_R_R << KVMPPC_RMAP_RC_SHIFT)  #define KVMPPC_RMAP_CHANGED	(HPTE_R_C << KVMPPC_RMAP_RC_SHIFT) +#define KVMPPC_RMAP_CHG_ORDER	(0x3ful << KVMPPC_RMAP_CHG_SHIFT)  #define KVMPPC_RMAP_PRESENT	0x100000000ul  #define KVMPPC_RMAP_INDEX	0xfffffffful @@ -278,7 +280,9 @@ struct kvmppc_vcore {  	u16 last_cpu;  	u8 vcore_state;  	u8 in_guest; +	struct kvmppc_vcore *master_vcore;  	struct list_head runnable_threads; +	struct list_head preempt_list;  	spinlock_t lock;  	wait_queue_head_t wq;  	spinlock_t stoltb_lock;	/* protects stolen_tb and preempt_tb */ @@ -300,12 +304,21 @@ struct kvmppc_vcore {  #define VCORE_EXIT_MAP(vc)	((vc)->entry_exit_map >> 8)  #define VCORE_IS_EXITING(vc)	(VCORE_EXIT_MAP(vc) != 0) -/* Values for vcore_state */ +/* This bit is used when a vcore exit is triggered from outside the vcore */ +#define VCORE_EXIT_REQ		0x10000 + +/* + * Values for vcore_state. 
+ * Note that these are arranged such that lower values + * (< VCORE_SLEEPING) don't require stolen time accounting + * on load/unload, and higher values do. + */  #define VCORE_INACTIVE	0 -#define VCORE_SLEEPING	1 -#define VCORE_PREEMPT	2 -#define VCORE_RUNNING	3 -#define VCORE_EXITING	4 +#define VCORE_PREEMPT	1 +#define VCORE_PIGGYBACK	2 +#define VCORE_SLEEPING	3 +#define VCORE_RUNNING	4 +#define VCORE_EXITING	5  /*   * Struct used to manage memory for a virtual processor area @@ -473,7 +486,7 @@ struct kvm_vcpu_arch {  	ulong ciabr;  	ulong cfar;  	ulong ppr; -	ulong pspb; +	u32 pspb;  	ulong fscr;  	ulong shadow_fscr;  	ulong ebbhr; @@ -619,6 +632,7 @@ struct kvm_vcpu_arch {  	int trap;  	int state;  	int ptid; +	int thread_cpu;  	bool timer_running;  	wait_queue_head_t cpu_run; diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index 952579f5e79a..cab6753f1be5 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -249,7 +249,7 @@ struct machdep_calls {  #endif  #ifdef CONFIG_ARCH_RANDOM -	int (*get_random_long)(unsigned long *v); +	int (*get_random_seed)(unsigned long *v);  #endif  }; diff --git a/arch/powerpc/include/asm/mpc52xx_psc.h b/arch/powerpc/include/asm/mpc52xx_psc.h index d0ece257d310..04c7e8fc24c2 100644 --- a/arch/powerpc/include/asm/mpc52xx_psc.h +++ b/arch/powerpc/include/asm/mpc52xx_psc.h @@ -150,7 +150,10 @@  /* Structure of the hardware registers */  struct mpc52xx_psc { -	u8		mode;		/* PSC + 0x00 */ +	union { +		u8	mode;		/* PSC + 0x00 */ +		u8	mr2; +	};  	u8		reserved0[3];  	union {				/* PSC + 0x04 */  		u16	status; diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h index e9e4c52f3685..8374afed9d0a 100644 --- a/arch/powerpc/include/asm/opal-api.h +++ b/arch/powerpc/include/asm/opal-api.h @@ -154,7 +154,10 @@  #define OPAL_FLASH_WRITE			111  #define OPAL_FLASH_ERASE			112  #define OPAL_PRD_MSG				113 -#define OPAL_LAST				113 +#define OPAL_LEDS_GET_INDICATOR			114 +#define OPAL_LEDS_SET_INDICATOR			115 +#define OPAL_CEC_REBOOT2			116 +#define OPAL_LAST				116  /* Device tree flags */ @@ -340,6 +343,18 @@ enum OpalPciResetState {  	OPAL_ASSERT_RESET   = 1  }; +enum OpalSlotLedType { +	OPAL_SLOT_LED_TYPE_ID = 0,	/* IDENTIFY LED */ +	OPAL_SLOT_LED_TYPE_FAULT = 1,	/* FAULT LED */ +	OPAL_SLOT_LED_TYPE_ATTN = 2,	/* System Attention LED */ +	OPAL_SLOT_LED_TYPE_MAX = 3 +}; + +enum OpalSlotLedState { +	OPAL_SLOT_LED_STATE_OFF = 0,	/* LED is OFF */ +	OPAL_SLOT_LED_STATE_ON = 1	/* LED is ON */ +}; +  /*   * Address cycle types for LPC accesses. 
These also correspond   * to the content of the first cell of the "reg" property for @@ -361,6 +376,7 @@ enum opal_msg_type {  	OPAL_MSG_HMI_EVT,  	OPAL_MSG_DPO,  	OPAL_MSG_PRD, +	OPAL_MSG_OCC,  	OPAL_MSG_TYPE_MAX,  }; @@ -437,6 +453,7 @@ struct OpalMemoryErrorData {  /* HMI interrupt event */  enum OpalHMI_Version {  	OpalHMIEvt_V1 = 1, +	OpalHMIEvt_V2 = 2,  };  enum OpalHMI_Severity { @@ -467,6 +484,49 @@ enum OpalHMI_ErrType {  	OpalHMI_ERROR_CAPP_RECOVERY,  }; +enum OpalHMI_XstopType { +	CHECKSTOP_TYPE_UNKNOWN	=	0, +	CHECKSTOP_TYPE_CORE	=	1, +	CHECKSTOP_TYPE_NX	=	2, +}; + +enum OpalHMI_CoreXstopReason { +	CORE_CHECKSTOP_IFU_REGFILE		= 0x00000001, +	CORE_CHECKSTOP_IFU_LOGIC		= 0x00000002, +	CORE_CHECKSTOP_PC_DURING_RECOV		= 0x00000004, +	CORE_CHECKSTOP_ISU_REGFILE		= 0x00000008, +	CORE_CHECKSTOP_ISU_LOGIC		= 0x00000010, +	CORE_CHECKSTOP_FXU_LOGIC		= 0x00000020, +	CORE_CHECKSTOP_VSU_LOGIC		= 0x00000040, +	CORE_CHECKSTOP_PC_RECOV_IN_MAINT_MODE	= 0x00000080, +	CORE_CHECKSTOP_LSU_REGFILE		= 0x00000100, +	CORE_CHECKSTOP_PC_FWD_PROGRESS		= 0x00000200, +	CORE_CHECKSTOP_LSU_LOGIC		= 0x00000400, +	CORE_CHECKSTOP_PC_LOGIC			= 0x00000800, +	CORE_CHECKSTOP_PC_HYP_RESOURCE		= 0x00001000, +	CORE_CHECKSTOP_PC_HANG_RECOV_FAILED	= 0x00002000, +	CORE_CHECKSTOP_PC_AMBI_HANG_DETECTED	= 0x00004000, +	CORE_CHECKSTOP_PC_DEBUG_TRIG_ERR_INJ	= 0x00008000, +	CORE_CHECKSTOP_PC_SPRD_HYP_ERR_INJ	= 0x00010000, +}; + +enum OpalHMI_NestAccelXstopReason { +	NX_CHECKSTOP_SHM_INVAL_STATE_ERR	= 0x00000001, +	NX_CHECKSTOP_DMA_INVAL_STATE_ERR_1	= 0x00000002, +	NX_CHECKSTOP_DMA_INVAL_STATE_ERR_2	= 0x00000004, +	NX_CHECKSTOP_DMA_CH0_INVAL_STATE_ERR	= 0x00000008, +	NX_CHECKSTOP_DMA_CH1_INVAL_STATE_ERR	= 0x00000010, +	NX_CHECKSTOP_DMA_CH2_INVAL_STATE_ERR	= 0x00000020, +	NX_CHECKSTOP_DMA_CH3_INVAL_STATE_ERR	= 0x00000040, +	NX_CHECKSTOP_DMA_CH4_INVAL_STATE_ERR	= 0x00000080, +	NX_CHECKSTOP_DMA_CH5_INVAL_STATE_ERR	= 0x00000100, +	NX_CHECKSTOP_DMA_CH6_INVAL_STATE_ERR	= 0x00000200, +	NX_CHECKSTOP_DMA_CH7_INVAL_STATE_ERR	= 0x00000400, +	NX_CHECKSTOP_DMA_CRB_UE			= 0x00000800, +	NX_CHECKSTOP_DMA_CRB_SUE		= 0x00001000, +	NX_CHECKSTOP_PBI_ISN_UE			= 0x00002000, +}; +  struct OpalHMIEvent {  	uint8_t		version;	/* 0x00 */  	uint8_t		severity;	/* 0x01 */ @@ -477,6 +537,23 @@ struct OpalHMIEvent {  	__be64		hmer;  	/* TFMR register. Valid only for TFAC and TFMR_PARITY error type. */  	__be64		tfmr; + +	/* version 2 and later */ +	union { +		/* +		 * checkstop info (Core/NX). +		 * Valid for OpalHMI_ERROR_MALFUNC_ALERT. +		 */ +		struct { +			uint8_t	xstop_type;	/* enum OpalHMI_XstopType */ +			uint8_t reserved_1[3]; +			__be32  xstop_reason; +			union { +				__be32 pir;	/* for CHECKSTOP_TYPE_CORE */ +				__be32 chip_id;	/* for CHECKSTOP_TYPE_NX */ +			} u; +		} xstop_error; +	} u;  };  enum { @@ -700,6 +777,17 @@ struct opal_prd_msg_header {  struct opal_prd_msg; +#define OCC_RESET                       0 +#define OCC_LOAD                        1 +#define OCC_THROTTLE                    2 +#define OCC_MAX_THROTTLE_STATUS         5 + +struct opal_occ_msg { +	__be64 type; +	__be64 chip; +	__be64 throttle_status; +}; +  /*   * SG entries   * @@ -756,6 +844,52 @@ struct opal_i2c_request {  	__be64 buffer_ra;		/* Buffer real address */  }; +/* + * EPOW status sharing (OPAL and the host) + * + * The host will pass on OPAL, a buffer of length OPAL_SYSEPOW_MAX + * with individual elements being 16 bits wide to fetch the system + * wide EPOW status. 
Each element in the buffer will contain the + * EPOW status in it's bit representation for a particular EPOW sub + * class as defiend here. So multiple detailed EPOW status bits + * specific for any sub class can be represented in a single buffer + * element as it's bit representation. + */ + +/* System EPOW type */ +enum OpalSysEpow { +	OPAL_SYSEPOW_POWER	= 0,	/* Power EPOW */ +	OPAL_SYSEPOW_TEMP	= 1,	/* Temperature EPOW */ +	OPAL_SYSEPOW_COOLING	= 2,	/* Cooling EPOW */ +	OPAL_SYSEPOW_MAX	= 3,	/* Max EPOW categories */ +}; + +/* Power EPOW */ +enum OpalSysPower { +	OPAL_SYSPOWER_UPS	= 0x0001, /* System on UPS power */ +	OPAL_SYSPOWER_CHNG	= 0x0002, /* System power config change */ +	OPAL_SYSPOWER_FAIL	= 0x0004, /* System impending power failure */ +	OPAL_SYSPOWER_INCL	= 0x0008, /* System incomplete power */ +}; + +/* Temperature EPOW */ +enum OpalSysTemp { +	OPAL_SYSTEMP_AMB	= 0x0001, /* System over ambient temperature */ +	OPAL_SYSTEMP_INT	= 0x0002, /* System over internal temperature */ +	OPAL_SYSTEMP_HMD	= 0x0004, /* System over ambient humidity */ +}; + +/* Cooling EPOW */ +enum OpalSysCooling { +	OPAL_SYSCOOL_INSF	= 0x0001, /* System insufficient cooling */ +}; + +/* Argument to OPAL_CEC_REBOOT2() */ +enum { +	OPAL_REBOOT_NORMAL		= 0, +	OPAL_REBOOT_PLATFORM_ERROR	= 1, +}; +  #endif /* __ASSEMBLY__ */  #endif /* __OPAL_API_H */ diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index 958e941c0cda..800115910e43 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h @@ -44,6 +44,7 @@ int64_t opal_tpo_write(uint64_t token, uint32_t year_mon_day,  		       uint32_t hour_min);  int64_t opal_cec_power_down(uint64_t request);  int64_t opal_cec_reboot(void); +int64_t opal_cec_reboot2(uint32_t reboot_type, char *diag);  int64_t opal_read_nvram(uint64_t buffer, uint64_t size, uint64_t offset);  int64_t opal_write_nvram(uint64_t buffer, uint64_t size, uint64_t offset);  int64_t opal_handle_interrupt(uint64_t isn, __be64 *outstanding_event_mask); @@ -141,7 +142,8 @@ int64_t opal_pci_fence_phb(uint64_t phb_id);  int64_t opal_pci_reinit(uint64_t phb_id, uint64_t reinit_scope, uint64_t data);  int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t error_type, uint8_t mask_action);  int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action); -int64_t opal_get_epow_status(__be64 *status); +int64_t opal_get_epow_status(__be16 *epow_status, __be16 *num_epow_classes); +int64_t opal_get_dpo_status(__be64 *dpo_timeout);  int64_t opal_set_system_attention_led(uint8_t led_action);  int64_t opal_pci_next_error(uint64_t phb_id, __be64 *first_frozen_pe,  			    __be16 *pci_error_type, __be16 *severity); @@ -195,6 +197,10 @@ int64_t opal_ipmi_recv(uint64_t interface, struct opal_ipmi_msg *msg,  int64_t opal_i2c_request(uint64_t async_token, uint32_t bus_id,  			 struct opal_i2c_request *oreq);  int64_t opal_prd_msg(struct opal_prd_msg *msg); +int64_t opal_leds_get_ind(char *loc_code, __be64 *led_mask, +			  __be64 *led_value, __be64 *max_led_type); +int64_t opal_leds_set_ind(uint64_t token, char *loc_code, const u64 led_mask, +			  const u64 led_value, __be64 *max_led_type);  int64_t opal_flash_read(uint64_t id, uint64_t offset, uint64_t buf,  		uint64_t size, uint64_t token); diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index 712add590445..37fc53587bb4 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ 
b/arch/powerpc/include/asm/pci-bridge.h @@ -42,6 +42,7 @@ struct pci_controller_ops {  #endif  	int             (*dma_set_mask)(struct pci_dev *dev, u64 dma_mask); +	u64		(*dma_get_required_mask)(struct pci_dev *dev);  	void		(*shutdown)(struct pci_controller *);  }; diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h index 3bb7488bd24b..fa1dfb7f7b48 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64.h +++ b/arch/powerpc/include/asm/pgtable-ppc64.h @@ -134,11 +134,11 @@  #define pte_iterate_hashed_end() } while(0) -#ifdef CONFIG_PPC_HAS_HASH_64K -#define pte_pagesize_index(mm, addr, pte)	get_slice_psize(mm, addr) -#else +/* + * We expect this to be called only for user addresses or kernel virtual + * addresses other than the linear mapping. + */  #define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K -#endif  #endif /* __real_pte */ diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index 11a38635dd65..0717693c8428 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -169,6 +169,17 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,  	 * cases, and 32-bit non-hash with 32-bit PTEs.  	 */  	*ptep = pte; + +#ifdef CONFIG_PPC_BOOK3E_64 +	/* +	 * With hardware tablewalk, a sync is needed to ensure that +	 * subsequent accesses see the PTE we just wrote.  Unlike userspace +	 * mappings, we can't tolerate spurious faults, so make sure +	 * the new PTE will be seen the first time. +	 */ +	if (is_kernel_addr(addr)) +		mb(); +#endif  #endif  } diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 8452335661a5..790f5d1d9a46 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -287,7 +287,7 @@  /* POWER8 Micro Partition Prefetch (MPP) parameters */  /* Address mask is common for LOGMPP instruction and MPPR SPR */ -#define PPC_MPPE_ADDRESS_MASK 0xffffffffc000 +#define PPC_MPPE_ADDRESS_MASK 0xffffffffc000ULL  /* Bits 60 and 61 of MPP SPR should be set to one of the following */  /* Aborting the fetch is indeed setting 00 in the table size bits */ diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h index 4122a86d6858..ca0c5bff7849 100644 --- a/arch/powerpc/include/asm/ppc-pci.h +++ b/arch/powerpc/include/asm/ppc-pci.h @@ -61,6 +61,7 @@ int rtas_write_config(struct pci_dn *, int where, int size, u32 val);  int rtas_read_config(struct pci_dn *, int where, int size, u32 *val);  void eeh_pe_state_mark(struct eeh_pe *pe, int state);  void eeh_pe_state_clear(struct eeh_pe *pe, int state); +void eeh_pe_state_mark_with_cfg(struct eeh_pe *pe, int state);  void eeh_pe_dev_mode_mark(struct eeh_pe *pe, int mode);  void eeh_sysfs_add_device(struct pci_dev *pdev); diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 28ded5d9b579..5afea361beaa 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -264,7 +264,6 @@ struct thread_struct {  	u64		tm_tfhar;	/* Transaction fail handler addr */  	u64		tm_texasr;	/* Transaction exception & summary */  	u64		tm_tfiar;	/* Transaction fail instr address reg */ -	unsigned long	tm_orig_msr;	/* Thread's MSR on ctx switch */  	struct pt_regs	ckpt_regs;	/* Checkpointed registers */  	unsigned long	tm_tar; diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h index b7c8d079c121..71537a319fc8 100644 --- 
a/arch/powerpc/include/asm/pte-common.h +++ b/arch/powerpc/include/asm/pte-common.h @@ -109,7 +109,8 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);   * the processor might need it for DMA coherency.   */  #define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE) -#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU) +#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU) || \ +	defined(CONFIG_PPC_E500MC)  #define _PAGE_BASE	(_PAGE_BASE_NC | _PAGE_COHERENT)  #else  #define _PAGE_BASE	(_PAGE_BASE_NC) diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index af56b5c6c81a..aa1cc5f015ee 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -1193,8 +1193,7 @@  #ifdef CONFIG_PPC_BOOK3S_64  #define __mtmsrd(v, l)	asm volatile("mtmsrd %0," __stringify(l) \  				     : : "r" (v) : "memory") -#define mtmsrd(v)	__mtmsrd((v), 0) -#define mtmsr(v)	mtmsrd(v) +#define mtmsr(v)	__mtmsrd((v), 0)  #else  #define mtmsr(v)	asm volatile("mtmsr %0" : \  				     : "r" ((unsigned long)(v)) \ @@ -1281,6 +1280,15 @@ struct pt_regs;  extern void ppc_save_regs(struct pt_regs *regs); +static inline void update_power8_hid0(unsigned long hid0) +{ +	/* +	 *  The HID0 update on Power8 should at the very least be +	 *  preceded by a a SYNC instruction followed by an ISYNC +	 *  instruction +	 */ +	asm volatile("sync; mtspr %0,%1; isync":: "i"(SPRN_HID0), "r"(hid0)); +}  #endif /* __ASSEMBLY__ */  #endif /* __KERNEL__ */  #endif /* _ASM_POWERPC_REG_H */ diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 7a4ede16b283..b77ef369c0f0 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -343,6 +343,7 @@ extern void rtas_power_off(void);  extern void rtas_halt(void);  extern void rtas_os_term(char *str);  extern int rtas_get_sensor(int sensor, int index, int *state); +extern int rtas_get_sensor_fast(int sensor, int index, int *state);  extern int rtas_get_power_level(int powerdomain, int *level);  extern int rtas_set_power_level(int powerdomain, int level, int *setlevel);  extern bool rtas_indicator_present(int token, int *maxindex); diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 4dbe072eecbe..523673d7583c 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -28,8 +28,6 @@  #include <asm/synch.h>  #include <asm/ppc-opcode.h> -#define smp_mb__after_unlock_lock()	smp_mb()  /* Full ordering for lock. 
*/ -  #ifdef CONFIG_PPC64  /* use 0x800000yy when locked, where yy == CPU number */  #ifdef __BIG_ENDIAN__ diff --git a/arch/powerpc/include/asm/spu_csa.h b/arch/powerpc/include/asm/spu_csa.h index a40fd491250c..51f80b41cda3 100644 --- a/arch/powerpc/include/asm/spu_csa.h +++ b/arch/powerpc/include/asm/spu_csa.h @@ -241,12 +241,6 @@ struct spu_priv2_collapsed {   */  struct spu_state {  	struct spu_lscsa *lscsa; -#ifdef CONFIG_SPU_FS_64K_LS -	int		use_big_pages; -	/* One struct page per 64k page */ -#define SPU_LSCSA_NUM_BIG_PAGES	(sizeof(struct spu_lscsa) / 0x10000) -	struct page	*lscsa_pages[SPU_LSCSA_NUM_BIG_PAGES]; -#endif  	struct spu_problem_collapsed prob;  	struct spu_priv1_collapsed priv1;  	struct spu_priv2_collapsed priv2; diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h index 58abeda64cb7..15cca17cba4b 100644 --- a/arch/powerpc/include/asm/switch_to.h +++ b/arch/powerpc/include/asm/switch_to.h @@ -29,6 +29,7 @@ static inline void save_early_sprs(struct thread_struct *prev) {}  extern void enable_kernel_fp(void);  extern void enable_kernel_altivec(void); +extern void enable_kernel_vsx(void);  extern int emulate_altivec(struct pt_regs *);  extern void __giveup_vsx(struct task_struct *);  extern void giveup_vsx(struct task_struct *); diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h index ff21b7a2f0cc..ab9f3f0a8637 100644 --- a/arch/powerpc/include/asm/syscall.h +++ b/arch/powerpc/include/asm/syscall.h @@ -22,10 +22,15 @@  extern const unsigned long sys_call_table[];  #endif /* CONFIG_FTRACE_SYSCALLS */ -static inline long syscall_get_nr(struct task_struct *task, -				  struct pt_regs *regs) +static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)  { -	return TRAP(regs) == 0xc00 ? regs->gpr[0] : -1L; +	/* +	 * Note that we are returning an int here. That means 0xffffffff, ie. +	 * 32-bit negative 1, will be interpreted as -1 on a 64-bit kernel. +	 * This is important for seccomp so that compat tasks can set r0 = -1 +	 * to reject the syscall. +	 */ +	return TRAP(regs) == 0xc00 ? regs->gpr[0] : -1;  }  static inline void syscall_rollback(struct task_struct *task, @@ -34,12 +39,6 @@ static inline void syscall_rollback(struct task_struct *task,  	regs->gpr[3] = regs->orig_gpr3;  } -static inline long syscall_get_error(struct task_struct *task, -				     struct pt_regs *regs) -{ -	return (regs->ccr & 0x10000000) ? -regs->gpr[3] : 0; -} -  static inline long syscall_get_return_value(struct task_struct *task,  					    struct pt_regs *regs)  { @@ -50,9 +49,15 @@ static inline void syscall_set_return_value(struct task_struct *task,  					    struct pt_regs *regs,  					    int error, long val)  { +	/* +	 * In the general case it's not obvious that we must deal with CCR +	 * here, as the syscall exit path will also do that for us. However +	 * there are some places, eg. the signal code, which check ccr to +	 * decide if the value in r3 is actually an error. +	 */  	if (error) {  		regs->ccr |= 0x10000000L; -		regs->gpr[3] = -error; +		regs->gpr[3] = error;  	} else {  		regs->ccr &= ~0x10000000L;  		regs->gpr[3] = val; @@ -64,19 +69,22 @@ static inline void syscall_get_arguments(struct task_struct *task,  					 unsigned int i, unsigned int n,  					 unsigned long *args)  { +	unsigned long val, mask = -1UL; +  	BUG_ON(i + n > 6); -#ifdef CONFIG_PPC64 -	if (test_tsk_thread_flag(task, TIF_32BIT)) { -		/* -		 * Zero-extend 32-bit argument values.  
The high bits are -		 * garbage ignored by the actual syscall dispatch. -		 */ -		while (n-- > 0) -			args[n] = (u32) regs->gpr[3 + i + n]; -		return; -	} + +#ifdef CONFIG_COMPAT +	if (test_tsk_thread_flag(task, TIF_32BIT)) +		mask = 0xffffffff;  #endif -	memcpy(args, ®s->gpr[3 + i], n * sizeof(args[0])); +	while (n--) { +		if (n == 0 && i == 0) +			val = regs->orig_gpr3; +		else +			val = regs->gpr[3 + i + n]; + +		args[n] = val & mask; +	}  }  static inline void syscall_set_arguments(struct task_struct *task, @@ -86,6 +94,10 @@ static inline void syscall_set_arguments(struct task_struct *task,  {  	BUG_ON(i + n > 6);  	memcpy(®s->gpr[3 + i], args, n * sizeof(args[0])); + +	/* Also copy the first argument into orig_gpr3 */ +	if (i == 0 && n > 0) +		regs->orig_gpr3 = args[0];  }  static inline int syscall_get_arch(void) diff --git a/arch/powerpc/include/asm/trace_clock.h b/arch/powerpc/include/asm/trace_clock.h new file mode 100644 index 000000000000..cf1ee75ca069 --- /dev/null +++ b/arch/powerpc/include/asm/trace_clock.h @@ -0,0 +1,19 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * Copyright (C) 2015 Naveen N. Rao, IBM Corporation + */ + +#ifndef _ASM_PPC_TRACE_CLOCK_H +#define _ASM_PPC_TRACE_CLOCK_H + +#include <linux/compiler.h> +#include <linux/types.h> + +extern u64 notrace trace_clock_ppc_tb(void); + +#define ARCH_TRACE_CLOCKS { trace_clock_ppc_tb, "ppc-tb", 0 }, + +#endif  /* _ASM_PPC_TRACE_CLOCK_H */  |