Diffstat (limited to 'include/linux')
64 files changed, 424 insertions, 407 deletions
| diff --git a/include/linux/acpi.h b/include/linux/acpi.h index dc1ebfeeb5ec..d918f1ea84e6 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -640,6 +640,12 @@ static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)  	return false;  } +static inline const char * +acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv) +{ +	return NULL; +} +  static inline bool is_acpi_node(struct fwnode_handle *fwnode)  {  	return false; diff --git a/include/linux/bio.h b/include/linux/bio.h index 82f0c8fd7be8..23d29b39f71e 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -492,6 +492,8 @@ extern unsigned int bvec_nr_vecs(unsigned short idx);  #define bio_set_dev(bio, bdev) 			\  do {						\ +	if ((bio)->bi_disk != (bdev)->bd_disk)	\ +		bio_clear_flag(bio, BIO_THROTTLED);\  	(bio)->bi_disk = (bdev)->bd_disk;	\  	(bio)->bi_partno = (bdev)->bd_partno;	\  } while (0) diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index a1e628e032da..9e7d8bd776d2 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -50,8 +50,6 @@ struct blk_issue_stat {  struct bio {  	struct bio		*bi_next;	/* request queue link */  	struct gendisk		*bi_disk; -	u8			bi_partno; -	blk_status_t		bi_status;  	unsigned int		bi_opf;		/* bottom bits req flags,  						 * top bits REQ_OP. Use  						 * accessors. @@ -59,8 +57,8 @@ struct bio {  	unsigned short		bi_flags;	/* status, etc and bvec pool number */  	unsigned short		bi_ioprio;  	unsigned short		bi_write_hint; - -	struct bvec_iter	bi_iter; +	blk_status_t		bi_status; +	u8			bi_partno;  	/* Number of segments in this BIO after  	 * physical address coalescing is performed. @@ -74,8 +72,9 @@ struct bio {  	unsigned int		bi_seg_front_size;  	unsigned int		bi_seg_back_size; -	atomic_t		__bi_remaining; +	struct bvec_iter	bi_iter; +	atomic_t		__bi_remaining;  	bio_end_io_t		*bi_end_io;  	void			*bi_private; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 8089ca17db9a..0ce8a372d506 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -135,7 +135,7 @@ typedef __u32 __bitwise req_flags_t;  struct request {  	struct list_head queuelist;  	union { -		call_single_data_t csd; +		struct __call_single_data csd;  		u64 fifo_time;  	}; @@ -241,14 +241,24 @@ struct request {  	struct request *next_rq;  }; +static inline bool blk_op_is_scsi(unsigned int op) +{ +	return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT; +} + +static inline bool blk_op_is_private(unsigned int op) +{ +	return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT; +} +  static inline bool blk_rq_is_scsi(struct request *rq)  { -	return req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT; +	return blk_op_is_scsi(req_op(rq));  }  static inline bool blk_rq_is_private(struct request *rq)  { -	return req_op(rq) == REQ_OP_DRV_IN || req_op(rq) == REQ_OP_DRV_OUT; +	return blk_op_is_private(req_op(rq));  }  static inline bool blk_rq_is_passthrough(struct request *rq) @@ -256,6 +266,13 @@ static inline bool blk_rq_is_passthrough(struct request *rq)  	return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);  } +static inline bool bio_is_passthrough(struct bio *bio) +{ +	unsigned op = bio_op(bio); + +	return blk_op_is_scsi(op) || blk_op_is_private(op); +} +  static inline unsigned short req_get_ioprio(struct request *req)  {  	return req->ioprio; @@ -948,7 +965,7 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,  extern void blk_rq_unprep_clone(struct request *rq);  extern blk_status_t 
blk_insert_cloned_request(struct request_queue *q,  				     struct request *rq); -extern int blk_rq_append_bio(struct request *rq, struct bio *bio); +extern int blk_rq_append_bio(struct request *rq, struct bio **bio);  extern void blk_delay_queue(struct request_queue *, unsigned long);  extern void blk_queue_split(struct request_queue *, struct bio **);  extern void blk_recount_segments(struct request_queue *, struct bio *); diff --git a/include/linux/bpf.h b/include/linux/bpf.h index e55e4255a210..b63a592ad29d 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -419,6 +419,8 @@ static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)  		attr->numa_node : NUMA_NO_NODE;  } +struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type); +  #else /* !CONFIG_BPF_SYSCALL */  static inline struct bpf_prog *bpf_prog_get(u32 ufd)  { @@ -506,6 +508,12 @@ static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,  {  	return 0;  } + +static inline struct bpf_prog *bpf_prog_get_type_path(const char *name, +				enum bpf_prog_type type) +{ +	return ERR_PTR(-EOPNOTSUPP); +}  #endif /* CONFIG_BPF_SYSCALL */  static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, @@ -514,6 +522,8 @@ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,  	return bpf_prog_get_type_dev(ufd, type, false);  } +bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool); +  int bpf_prog_offload_compile(struct bpf_prog *prog);  void bpf_prog_offload_destroy(struct bpf_prog *prog); diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index c561b986bab0..1632bb13ad8a 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -15,11 +15,11 @@   * In practice this is far bigger than any realistic pointer offset; this limit   * ensures that umax_value + (int)off + (int)size cannot overflow a u64.   */ -#define BPF_MAX_VAR_OFF	(1ULL << 31) +#define BPF_MAX_VAR_OFF	(1 << 29)  /* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO].  This ensures   * that converting umax_value to int cannot overflow.   */ -#define BPF_MAX_VAR_SIZ	INT_MAX +#define BPF_MAX_VAR_SIZ	(1 << 29)  /* Liveness marks, used for registers and spilled-regs (in stack slots).   * Read marks propagate upwards until they find a write mark; they record that diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 188ed9f65517..52e611ab9a6c 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -220,21 +220,21 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s  /*   * Prevent the compiler from merging or refetching reads or writes. The   * compiler is also forbidden from reordering successive instances of - * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the - * compiler is aware of some particular ordering.  One way to make the - * compiler aware of ordering is to put the two invocations of READ_ONCE, - * WRITE_ONCE or ACCESS_ONCE() in different C statements. + * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some + * particular ordering. One way to make the compiler aware of ordering is to + * put the two invocations of READ_ONCE or WRITE_ONCE in different C + * statements.   * - * In contrast to ACCESS_ONCE these two macros will also work on aggregate - * data types like structs or unions. If the size of the accessed data - * type exceeds the word size of the machine (e.g., 32 bits or 64 bits) - * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). 
There's at - * least two memcpy()s: one for the __builtin_memcpy() and then one for - * the macro doing the copy of variable - '__u' allocated on the stack. + * These two macros will also work on aggregate data types like structs or + * unions. If the size of the accessed data type exceeds the word size of + * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will + * fall back to memcpy(). There's at least two memcpy()s: one for the + * __builtin_memcpy() and then one for the macro doing the copy of variable + * - '__u' allocated on the stack.   *   * Their two major use cases are: (1) Mediating communication between   * process-level code and irq/NMI handlers, all running on the same CPU, - * and (2) Ensuring that the compiler does not  fold, spindle, or otherwise + * and (2) Ensuring that the compiler does not fold, spindle, or otherwise   * mutilate accesses that either do not require ordering or that interact   * with an explicit memory barrier or atomic instruction that provides the   * required ordering. @@ -327,29 +327,4 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s  	compiletime_assert(__native_word(t),				\  		"Need native word sized stores/loads for atomicity.") -/* - * Prevent the compiler from merging or refetching accesses.  The compiler - * is also forbidden from reordering successive instances of ACCESS_ONCE(), - * but only when the compiler is aware of some particular ordering.  One way - * to make the compiler aware of ordering is to put the two invocations of - * ACCESS_ONCE() in different C statements. - * - * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE - * on a union member will work as long as the size of the member matches the - * size of the union and the size is smaller than word size. - * - * The major use cases of ACCESS_ONCE used to be (1) Mediating communication - * between process-level code and irq/NMI handlers, all running on the same CPU, - * and (2) Ensuring that the compiler does not  fold, spindle, or otherwise - * mutilate accesses that either do not require ordering or that interact - * with an explicit memory barrier or atomic instruction that provides the - * required ordering. - * - * If possible use READ_ONCE()/WRITE_ONCE() instead. 
- */ -#define __ACCESS_ONCE(x) ({ \ -	 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \ -	(volatile typeof(x) *)&(x); }) -#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x)) -  #endif /* __LINUX_COMPILER_H */ diff --git a/include/linux/completion.h b/include/linux/completion.h index 0662a417febe..94a59ba7d422 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h @@ -10,9 +10,6 @@   */  #include <linux/wait.h> -#ifdef CONFIG_LOCKDEP_COMPLETIONS -#include <linux/lockdep.h> -#endif  /*   * struct completion - structure used to maintain state for a "completion" @@ -29,58 +26,16 @@  struct completion {  	unsigned int done;  	wait_queue_head_t wait; -#ifdef CONFIG_LOCKDEP_COMPLETIONS -	struct lockdep_map_cross map; -#endif  }; -#ifdef CONFIG_LOCKDEP_COMPLETIONS -static inline void complete_acquire(struct completion *x) -{ -	lock_acquire_exclusive((struct lockdep_map *)&x->map, 0, 0, NULL, _RET_IP_); -} - -static inline void complete_release(struct completion *x) -{ -	lock_release((struct lockdep_map *)&x->map, 0, _RET_IP_); -} - -static inline void complete_release_commit(struct completion *x) -{ -	lock_commit_crosslock((struct lockdep_map *)&x->map); -} - -#define init_completion_map(x, m)					\ -do {									\ -	lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map,	\ -			(m)->name, (m)->key, 0);				\ -	__init_completion(x);						\ -} while (0) - -#define init_completion(x)						\ -do {									\ -	static struct lock_class_key __key;				\ -	lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map,	\ -			"(completion)" #x,				\ -			&__key, 0);					\ -	__init_completion(x);						\ -} while (0) -#else  #define init_completion_map(x, m) __init_completion(x)  #define init_completion(x) __init_completion(x)  static inline void complete_acquire(struct completion *x) {}  static inline void complete_release(struct completion *x) {}  static inline void complete_release_commit(struct completion *x) {} -#endif -#ifdef CONFIG_LOCKDEP_COMPLETIONS -#define COMPLETION_INITIALIZER(work) \ -	{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait), \ -	STATIC_CROSS_LOCKDEP_MAP_INIT("(completion)" #work, &(work)) } -#else  #define COMPLETION_INITIALIZER(work) \  	{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } -#endif  #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \  	(*({ init_completion_map(&(work), &(map)); &(work); })) diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 201ab7267986..1a32e558eb11 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -86,7 +86,7 @@ enum cpuhp_state {  	CPUHP_MM_ZSWP_POOL_PREPARE,  	CPUHP_KVM_PPC_BOOK3S_PREPARE,  	CPUHP_ZCOMP_PREPARE, -	CPUHP_TIMERS_DEAD, +	CPUHP_TIMERS_PREPARE,  	CPUHP_MIPS_SOC_PREPARE,  	CPUHP_BP_PREPARE_DYN,  	CPUHP_BP_PREPARE_DYN_END		= CPUHP_BP_PREPARE_DYN + 20, diff --git a/include/linux/cred.h b/include/linux/cred.h index 099058e1178b..631286535d0f 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -83,6 +83,7 @@ extern int set_current_groups(struct group_info *);  extern void set_groups(struct cred *, struct group_info *);  extern int groups_search(const struct group_info *, kgid_t);  extern bool may_setgroups(void); +extern void groups_sort(struct group_info *);  /*   * The security context of a task diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index f36ecc2a5712..3b0ba54cc4d5 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -216,6 +216,8 @@ static inline void debugfs_remove(struct dentry *dentry)  static inline void 
debugfs_remove_recursive(struct dentry *dentry)  { } +const struct file_operations *debugfs_real_fops(const struct file *filp); +  static inline int debugfs_file_get(struct dentry *dentry)  {  	return 0; diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index e8f8e8fb244d..81ed9b2d84dc 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -704,7 +704,6 @@ static inline void *dma_zalloc_coherent(struct device *dev, size_t size,  	return ret;  } -#ifdef CONFIG_HAS_DMA  static inline int dma_get_cache_alignment(void)  {  #ifdef ARCH_DMA_MINALIGN @@ -712,7 +711,6 @@ static inline int dma_get_cache_alignment(void)  #endif  	return 1;  } -#endif  /* flags for the coherent memory api */  #define DMA_MEMORY_EXCLUSIVE		0x01 diff --git a/include/linux/efi.h b/include/linux/efi.h index d813f7b04da7..29fdf8029cf6 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -140,11 +140,13 @@ struct efi_boot_memmap {  struct capsule_info {  	efi_capsule_header_t	header; +	efi_capsule_header_t	*capsule;  	int			reset_type;  	long			index;  	size_t			count;  	size_t			total_size; -	phys_addr_t		*pages; +	struct page		**pages; +	phys_addr_t		*phys;  	size_t			page_bytes_remain;  }; diff --git a/include/linux/fs.h b/include/linux/fs.h index 2995a271ec46..511fbaabf624 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1872,7 +1872,7 @@ struct super_operations {   */  #define __IS_FLG(inode, flg)	((inode)->i_sb->s_flags & (flg)) -static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags & MS_RDONLY; } +static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags & SB_RDONLY; }  #define IS_RDONLY(inode)	sb_rdonly((inode)->i_sb)  #define IS_SYNC(inode)		(__IS_FLG(inode, SB_SYNCHRONOUS) || \  					((inode)->i_flags & S_SYNC)) @@ -3088,7 +3088,8 @@ static inline int vfs_lstat(const char __user *name, struct kstat *stat)  static inline int vfs_fstatat(int dfd, const char __user *filename,  			      struct kstat *stat, int flags)  { -	return vfs_statx(dfd, filename, flags, stat, STATX_BASIC_STATS); +	return vfs_statx(dfd, filename, flags | AT_NO_AUTOMOUNT, +			 stat, STATX_BASIC_STATS);  }  static inline int vfs_fstat(int fd, struct kstat *stat)  { @@ -3194,6 +3195,20 @@ static inline bool vma_is_dax(struct vm_area_struct *vma)  	return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);  } +static inline bool vma_is_fsdax(struct vm_area_struct *vma) +{ +	struct inode *inode; + +	if (!vma->vm_file) +		return false; +	if (!vma_is_dax(vma)) +		return false; +	inode = file_inode(vma->vm_file); +	if (inode->i_mode == S_IFCHR) +		return false; /* device-dax */ +	return true; +} +  static inline int iocb_flags(struct file *file)  {  	int res = 0; diff --git a/include/linux/fscache.h b/include/linux/fscache.h index f4ff47d4a893..fe0c349684fa 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -755,7 +755,7 @@ bool fscache_maybe_release_page(struct fscache_cookie *cookie,  {  	if (fscache_cookie_valid(cookie) && PageFsCache(page))  		return __fscache_maybe_release_page(cookie, page, gfp); -	return false; +	return true;  }  /** diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 55e672592fa9..7258cd676df4 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -66,9 +66,10 @@ struct gpio_irq_chip {  	/**  	 * @lock_key:  	 * -	 * Per GPIO IRQ chip lockdep class. +	 * Per GPIO IRQ chip lockdep classes.  	 
*/  	struct lock_class_key *lock_key; +	struct lock_class_key *request_key;  	/**  	 * @parent_handler: @@ -323,7 +324,8 @@ extern const char *gpiochip_is_requested(struct gpio_chip *chip,  /* add/remove chips */  extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, -				      struct lock_class_key *lock_key); +				      struct lock_class_key *lock_key, +				      struct lock_class_key *request_key);  /**   * gpiochip_add_data() - register a gpio_chip @@ -350,11 +352,13 @@ extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,   */  #ifdef CONFIG_LOCKDEP  #define gpiochip_add_data(chip, data) ({		\ -		static struct lock_class_key key;	\ -		gpiochip_add_data_with_key(chip, data, &key);	\ +		static struct lock_class_key lock_key;	\ +		static struct lock_class_key request_key;	  \ +		gpiochip_add_data_with_key(chip, data, &lock_key, \ +					   &request_key);	  \  	})  #else -#define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL) +#define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL, NULL)  #endif  static inline int gpiochip_add(struct gpio_chip *chip) @@ -429,7 +433,8 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,  			     irq_flow_handler_t handler,  			     unsigned int type,  			     bool threaded, -			     struct lock_class_key *lock_key); +			     struct lock_class_key *lock_key, +			     struct lock_class_key *request_key);  #ifdef CONFIG_LOCKDEP @@ -445,10 +450,12 @@ static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,  				       irq_flow_handler_t handler,  				       unsigned int type)  { -	static struct lock_class_key key; +	static struct lock_class_key lock_key; +	static struct lock_class_key request_key;  	return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, -					handler, type, false, &key); +					handler, type, false, +					&lock_key, &request_key);  }  static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, @@ -458,10 +465,12 @@ static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,  			  unsigned int type)  { -	static struct lock_class_key key; +	static struct lock_class_key lock_key; +	static struct lock_class_key request_key;  	return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, -					handler, type, true, &key); +					handler, type, true, +					&lock_key, &request_key);  }  #else  static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip, @@ -471,7 +480,7 @@ static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,  				       unsigned int type)  {  	return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, -					handler, type, false, NULL); +					handler, type, false, NULL, NULL);  }  static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, @@ -481,7 +490,7 @@ static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,  			  unsigned int type)  {  	return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, -					handler, type, true, NULL); +					handler, type, true, NULL, NULL);  }  #endif /* CONFIG_LOCKDEP */ diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index fbf5b31d47ee..82a25880714a 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -239,14 +239,6 @@ static inline int pgd_write(pgd_t pgd)  }  #endif -#ifndef pud_write -static inline int pud_write(pud_t pud) -{ -	BUG(); -	return 0; -} -#endif -  #define HUGETLB_ANON_FILE "anon_hugepage"  enum { diff --git a/include/linux/hyperv.h 
b/include/linux/hyperv.h index f3e97c5f94c9..6c9336626592 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -708,6 +708,7 @@ struct vmbus_channel {  	u8 monitor_bit;  	bool rescind; /* got rescind msg */ +	struct completion rescind_event;  	u32 ringbuffer_gpadlhandle; diff --git a/include/linux/idr.h b/include/linux/idr.h index 7c3a365f7e12..fa14f834e4ed 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -15,6 +15,7 @@  #include <linux/radix-tree.h>  #include <linux/gfp.h>  #include <linux/percpu.h> +#include <linux/bug.h>  struct idr {  	struct radix_tree_root	idr_rt; diff --git a/include/linux/iio/adc/stm32-dfsdm-adc.h b/include/linux/iio/adc/stm32-dfsdm-adc.h new file mode 100644 index 000000000000..e7dc7a542a4e --- /dev/null +++ b/include/linux/iio/adc/stm32-dfsdm-adc.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This file discribe the STM32 DFSDM IIO driver API for audio part + * + * Copyright (C) 2017, STMicroelectronics - All Rights Reserved + * Author(s): Arnaud Pouliquen <[email protected]>. + */ + +#ifndef STM32_DFSDM_ADC_H +#define STM32_DFSDM_ADC_H + +int stm32_dfsdm_get_buff_cb(struct iio_dev *iio_dev, +			    int (*cb)(const void *data, size_t size, +				      void *private), +			    void *private); +int stm32_dfsdm_release_buff_cb(struct iio_dev *iio_dev); + +#endif diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h index 5e347a9805fd..9887f4f8e2a8 100644 --- a/include/linux/iio/consumer.h +++ b/include/linux/iio/consumer.h @@ -134,6 +134,17 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,  						       void *private),  					     void *private);  /** + * iio_channel_cb_set_buffer_watermark() - set the buffer watermark. + * @cb_buffer:		The callback buffer from whom we want the channel + *			information. + * @watermark: buffer watermark in bytes. + * + * This function allows to configure the buffer watermark. + */ +int iio_channel_cb_set_buffer_watermark(struct iio_cb_buffer *cb_buffer, +					size_t watermark); + +/**   * iio_channel_release_all_cb() - release and unregister the callback.   * @cb_buffer:		The callback buffer that was allocated.   */ @@ -216,6 +227,32 @@ int iio_read_channel_average_raw(struct iio_channel *chan, int *val);  int iio_read_channel_processed(struct iio_channel *chan, int *val);  /** + * iio_write_channel_attribute() - Write values to the device attribute. + * @chan:	The channel being queried. + * @val:	Value being written. + * @val2:	Value being written.val2 use depends on attribute type. + * @attribute:	info attribute to be read. + * + * Returns an error code or 0. + */ +int iio_write_channel_attribute(struct iio_channel *chan, int val, +				int val2, enum iio_chan_info_enum attribute); + +/** + * iio_read_channel_attribute() - Read values from the device attribute. + * @chan:	The channel being queried. + * @val:	Value being written. + * @val2:	Value being written.Val2 use depends on attribute type. + * @attribute:	info attribute to be written. + * + * Returns an error code if failed. Else returns a description of what is in val + * and val2, such as IIO_VAL_INT_PLUS_MICRO telling us we have a value of val + * + val2/1e6 + */ +int iio_read_channel_attribute(struct iio_channel *chan, int *val, +			       int *val2, enum iio_chan_info_enum attribute); + +/**   * iio_write_channel_raw() - write to a given channel   * @chan:		The channel being queried.   * @val:		Value being written. 
diff --git a/include/linux/iio/hw-consumer.h b/include/linux/iio/hw-consumer.h new file mode 100644 index 000000000000..44d48bb1d39f --- /dev/null +++ b/include/linux/iio/hw-consumer.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Industrial I/O in kernel hardware consumer interface + * + * Copyright 2017 Analog Devices Inc. + *  Author: Lars-Peter Clausen <[email protected]> + */ + +#ifndef LINUX_IIO_HW_CONSUMER_H +#define LINUX_IIO_HW_CONSUMER_H + +struct iio_hw_consumer; + +struct iio_hw_consumer *iio_hw_consumer_alloc(struct device *dev); +void iio_hw_consumer_free(struct iio_hw_consumer *hwc); +struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev); +void devm_iio_hw_consumer_free(struct device *dev, struct iio_hw_consumer *hwc); +int iio_hw_consumer_enable(struct iio_hw_consumer *hwc); +void iio_hw_consumer_disable(struct iio_hw_consumer *hwc); + +#endif diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 20b61347ea58..f12a61be1ede 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -20,34 +20,6 @@   * Currently assumes nano seconds.   */ -enum iio_chan_info_enum { -	IIO_CHAN_INFO_RAW = 0, -	IIO_CHAN_INFO_PROCESSED, -	IIO_CHAN_INFO_SCALE, -	IIO_CHAN_INFO_OFFSET, -	IIO_CHAN_INFO_CALIBSCALE, -	IIO_CHAN_INFO_CALIBBIAS, -	IIO_CHAN_INFO_PEAK, -	IIO_CHAN_INFO_PEAK_SCALE, -	IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW, -	IIO_CHAN_INFO_AVERAGE_RAW, -	IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY, -	IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY, -	IIO_CHAN_INFO_SAMP_FREQ, -	IIO_CHAN_INFO_FREQUENCY, -	IIO_CHAN_INFO_PHASE, -	IIO_CHAN_INFO_HARDWAREGAIN, -	IIO_CHAN_INFO_HYSTERESIS, -	IIO_CHAN_INFO_INT_TIME, -	IIO_CHAN_INFO_ENABLE, -	IIO_CHAN_INFO_CALIBHEIGHT, -	IIO_CHAN_INFO_CALIBWEIGHT, -	IIO_CHAN_INFO_DEBOUNCE_COUNT, -	IIO_CHAN_INFO_DEBOUNCE_TIME, -	IIO_CHAN_INFO_CALIBEMISSIVITY, -	IIO_CHAN_INFO_OVERSAMPLING_RATIO, -}; -  enum iio_shared_by {  	IIO_SEPARATE,  	IIO_SHARED_BY_TYPE, diff --git a/include/linux/iio/timer/stm32-lptim-trigger.h b/include/linux/iio/timer/stm32-lptim-trigger.h index 34d59bfdce2d..464458d20b16 100644 --- a/include/linux/iio/timer/stm32-lptim-trigger.h +++ b/include/linux/iio/timer/stm32-lptim-trigger.h @@ -16,11 +16,14 @@  #define LPTIM2_OUT	"lptim2_out"  #define LPTIM3_OUT	"lptim3_out" -#if IS_ENABLED(CONFIG_IIO_STM32_LPTIMER_TRIGGER) +#if IS_REACHABLE(CONFIG_IIO_STM32_LPTIMER_TRIGGER)  bool is_stm32_lptim_trigger(struct iio_trigger *trig);  #else  static inline bool is_stm32_lptim_trigger(struct iio_trigger *trig)  { +#if IS_ENABLED(CONFIG_IIO_STM32_LPTIMER_TRIGGER) +	pr_warn_once("stm32 lptim_trigger not linked in\n"); +#endif  	return false;  }  #endif diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h index 2aa7b6384d64..6eb3d683ef62 100644 --- a/include/linux/iio/types.h +++ b/include/linux/iio/types.h @@ -34,4 +34,32 @@ enum iio_available_type {  	IIO_AVAIL_RANGE,  }; +enum iio_chan_info_enum { +	IIO_CHAN_INFO_RAW = 0, +	IIO_CHAN_INFO_PROCESSED, +	IIO_CHAN_INFO_SCALE, +	IIO_CHAN_INFO_OFFSET, +	IIO_CHAN_INFO_CALIBSCALE, +	IIO_CHAN_INFO_CALIBBIAS, +	IIO_CHAN_INFO_PEAK, +	IIO_CHAN_INFO_PEAK_SCALE, +	IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW, +	IIO_CHAN_INFO_AVERAGE_RAW, +	IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY, +	IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY, +	IIO_CHAN_INFO_SAMP_FREQ, +	IIO_CHAN_INFO_FREQUENCY, +	IIO_CHAN_INFO_PHASE, +	IIO_CHAN_INFO_HARDWAREGAIN, +	IIO_CHAN_INFO_HYSTERESIS, +	IIO_CHAN_INFO_INT_TIME, +	IIO_CHAN_INFO_ENABLE, +	IIO_CHAN_INFO_CALIBHEIGHT, +	
IIO_CHAN_INFO_CALIBWEIGHT, +	IIO_CHAN_INFO_DEBOUNCE_COUNT, +	IIO_CHAN_INFO_DEBOUNCE_TIME, +	IIO_CHAN_INFO_CALIBEMISSIVITY, +	IIO_CHAN_INFO_OVERSAMPLING_RATIO, +}; +  #endif /* _IIO_TYPES_H_ */ diff --git a/include/linux/intel-pti.h b/include/linux/intel-pti.h new file mode 100644 index 000000000000..2710d72de3c9 --- /dev/null +++ b/include/linux/intel-pti.h @@ -0,0 +1,43 @@ +/* + *  Copyright (C) Intel 2011 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * The PTI (Parallel Trace Interface) driver directs trace data routed from + * various parts in the system out through the Intel Penwell PTI port and + * out of the mobile device for analysis with a debugging tool + * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7, + * compact JTAG, standard. + * + * This header file will allow other parts of the OS to use the + * interface to write out it's contents for debugging a mobile system. + */ + +#ifndef LINUX_INTEL_PTI_H_ +#define LINUX_INTEL_PTI_H_ + +/* offset for last dword of any PTI message. Part of MIPI P1149.7 */ +#define PTI_LASTDWORD_DTS	0x30 + +/* basic structure used as a write address to the PTI HW */ +struct pti_masterchannel { +	u8 master; +	u8 channel; +}; + +/* the following functions are defined in misc/pti.c */ +void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count); +struct pti_masterchannel *pti_request_masterchannel(u8 type, +						    const char *thread_name); +void pti_release_masterchannel(struct pti_masterchannel *mc); + +#endif /* LINUX_INTEL_PTI_H_ */ diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index cb18c6290ca8..8415bf1a9776 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -273,7 +273,8 @@ struct ipv6_pinfo {  						 * 100: prefer care-of address  						 */  				dontfrag:1, -				autoflowlabel:1; +				autoflowlabel:1, +				autoflowlabel_set:1;  	__u8			min_hopcount;  	__u8			tclass;  	__be32			rcv_flowinfo; diff --git a/include/linux/irq.h b/include/linux/irq.h index e140f69163b6..a0231e96a578 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -212,6 +212,7 @@ struct irq_data {   *				  mask. Applies only to affinity managed irqs.   
* IRQD_SINGLE_TARGET		- IRQ allows only a single affinity target   * IRQD_DEFAULT_TRIGGER_SET	- Expected trigger already been set + * IRQD_CAN_RESERVE		- Can use reservation mode   */  enum {  	IRQD_TRIGGER_MASK		= 0xf, @@ -233,6 +234,7 @@ enum {  	IRQD_MANAGED_SHUTDOWN		= (1 << 23),  	IRQD_SINGLE_TARGET		= (1 << 24),  	IRQD_DEFAULT_TRIGGER_SET	= (1 << 25), +	IRQD_CAN_RESERVE		= (1 << 26),  };  #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) @@ -377,6 +379,21 @@ static inline bool irqd_is_managed_and_shutdown(struct irq_data *d)  	return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN;  } +static inline void irqd_set_can_reserve(struct irq_data *d) +{ +	__irqd_to_state(d) |= IRQD_CAN_RESERVE; +} + +static inline void irqd_clr_can_reserve(struct irq_data *d) +{ +	__irqd_to_state(d) &= ~IRQD_CAN_RESERVE; +} + +static inline bool irqd_can_reserve(struct irq_data *d) +{ +	return __irqd_to_state(d) & IRQD_CAN_RESERVE; +} +  #undef __irqd_to_state  static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index dd418955962b..25b33b664537 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -230,7 +230,7 @@ irq_set_chip_handler_name_locked(struct irq_data *data, struct irq_chip *chip,  	data->chip = chip;  } -static inline int irq_balancing_disabled(unsigned int irq) +static inline bool irq_balancing_disabled(unsigned int irq)  {  	struct irq_desc *desc; @@ -238,7 +238,7 @@ static inline int irq_balancing_disabled(unsigned int irq)  	return desc->status_use_accessors & IRQ_NO_BALANCING_MASK;  } -static inline int irq_is_percpu(unsigned int irq) +static inline bool irq_is_percpu(unsigned int irq)  {  	struct irq_desc *desc; @@ -246,7 +246,7 @@ static inline int irq_is_percpu(unsigned int irq)  	return desc->status_use_accessors & IRQ_PER_CPU;  } -static inline int irq_is_percpu_devid(unsigned int irq) +static inline bool irq_is_percpu_devid(unsigned int irq)  {  	struct irq_desc *desc; @@ -255,12 +255,15 @@ static inline int irq_is_percpu_devid(unsigned int irq)  }  static inline void -irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class) +irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class, +		      struct lock_class_key *request_class)  {  	struct irq_desc *desc = irq_to_desc(irq); -	if (desc) -		lockdep_set_class(&desc->lock, class); +	if (desc) { +		lockdep_set_class(&desc->lock, lock_class); +		lockdep_set_class(&desc->request_mutex, request_class); +	}  }  #ifdef CONFIG_IRQ_PREFLOW_FASTEOI diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index a34355d19546..48c7e86bb556 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -113,7 +113,7 @@ struct irq_domain_ops {  		     unsigned int nr_irqs, void *arg);  	void (*free)(struct irq_domain *d, unsigned int virq,  		     unsigned int nr_irqs); -	int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool early); +	int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool reserve);  	void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);  	int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec,  			 unsigned long *out_hwirq, unsigned int *out_type); diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index 708f337d780b..bd118a6c60cb 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -14,12 +14,6 @@  #define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \  			 
2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1) -#ifndef CONFIG_64BIT -# define KALLSYM_FMT "%08lx" -#else -# define KALLSYM_FMT "%016lx" -#endif -  struct module;  #ifdef CONFIG_KALLSYMS diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h deleted file mode 100644 index ea32a7d3cf1b..000000000000 --- a/include/linux/kmemcheck.h +++ /dev/null @@ -1 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 2e754b7c282c..6bdd4b9f6611 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -232,7 +232,7 @@ struct kvm_vcpu {  	struct mutex mutex;  	struct kvm_run *run; -	int guest_fpu_loaded, guest_xcr0_loaded; +	int guest_xcr0_loaded;  	struct swait_queue_head wq;  	struct pid __rcu *pid;  	int sigset_active; @@ -715,6 +715,9 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,  			 unsigned long len);  void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); +void kvm_sigset_activate(struct kvm_vcpu *vcpu); +void kvm_sigset_deactivate(struct kvm_vcpu *vcpu); +  void kvm_vcpu_block(struct kvm_vcpu *vcpu);  void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);  void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu); diff --git a/include/linux/libgcc.h b/include/linux/libgcc.h new file mode 100644 index 000000000000..32e1e0f4b2d0 --- /dev/null +++ b/include/linux/libgcc.h @@ -0,0 +1,43 @@ +/* + * include/lib/libgcc.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc. + */ + +#ifndef __LIB_LIBGCC_H +#define __LIB_LIBGCC_H + +#include <asm/byteorder.h> + +typedef int word_type __attribute__ ((mode (__word__))); + +#ifdef __BIG_ENDIAN +struct DWstruct { +	int high, low; +}; +#elif defined(__LITTLE_ENDIAN) +struct DWstruct { +	int low, high; +}; +#else +#error I feel sick. +#endif + +typedef union { +	struct DWstruct s; +	long long ll; +} DWunion; + +#endif /* __ASM_LIBGCC_H */ diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index a842551fe044..2e75dc34bff5 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -158,12 +158,6 @@ struct lockdep_map {  	int				cpu;  	unsigned long			ip;  #endif -#ifdef CONFIG_LOCKDEP_CROSSRELEASE -	/* -	 * Whether it's a crosslock. -	 */ -	int				cross; -#endif  };  static inline void lockdep_copy_map(struct lockdep_map *to, @@ -267,96 +261,9 @@ struct held_lock {  	unsigned int hardirqs_off:1;  	unsigned int references:12;					/* 32 bits */  	unsigned int pin_count; -#ifdef CONFIG_LOCKDEP_CROSSRELEASE -	/* -	 * Generation id. -	 * -	 * A value of cross_gen_id will be stored when holding this, -	 * which is globally increased whenever each crosslock is held. 
-	 */ -	unsigned int gen_id; -#endif -}; - -#ifdef CONFIG_LOCKDEP_CROSSRELEASE -#define MAX_XHLOCK_TRACE_ENTRIES 5 - -/* - * This is for keeping locks waiting for commit so that true dependencies - * can be added at commit step. - */ -struct hist_lock { -	/* -	 * Id for each entry in the ring buffer. This is used to -	 * decide whether the ring buffer was overwritten or not. -	 * -	 * For example, -	 * -	 *           |<----------- hist_lock ring buffer size ------->| -	 *           pppppppppppppppppppppiiiiiiiiiiiiiiiiiiiiiiiiiiiii -	 * wrapped > iiiiiiiiiiiiiiiiiiiiiiiiiii....................... -	 * -	 *           where 'p' represents an acquisition in process -	 *           context, 'i' represents an acquisition in irq -	 *           context. -	 * -	 * In this example, the ring buffer was overwritten by -	 * acquisitions in irq context, that should be detected on -	 * rollback or commit. -	 */ -	unsigned int hist_id; - -	/* -	 * Seperate stack_trace data. This will be used at commit step. -	 */ -	struct stack_trace	trace; -	unsigned long		trace_entries[MAX_XHLOCK_TRACE_ENTRIES]; - -	/* -	 * Seperate hlock instance. This will be used at commit step. -	 * -	 * TODO: Use a smaller data structure containing only necessary -	 * data. However, we should make lockdep code able to handle the -	 * smaller one first. -	 */ -	struct held_lock	hlock;  };  /* - * To initialize a lock as crosslock, lockdep_init_map_crosslock() should - * be called instead of lockdep_init_map(). - */ -struct cross_lock { -	/* -	 * When more than one acquisition of crosslocks are overlapped, -	 * we have to perform commit for them based on cross_gen_id of -	 * the first acquisition, which allows us to add more true -	 * dependencies. -	 * -	 * Moreover, when no acquisition of a crosslock is in progress, -	 * we should not perform commit because the lock might not exist -	 * any more, which might cause incorrect memory access. So we -	 * have to track the number of acquisitions of a crosslock. -	 */ -	int nr_acquire; - -	/* -	 * Seperate hlock instance. This will be used at commit step. -	 * -	 * TODO: Use a smaller data structure containing only necessary -	 * data. However, we should make lockdep code able to handle the -	 * smaller one first. -	 */ -	struct held_lock	hlock; -}; - -struct lockdep_map_cross { -	struct lockdep_map map; -	struct cross_lock xlock; -}; -#endif - -/*   * Initialization, self-test and debugging-output methods:   */  extern void lockdep_info(void); @@ -560,37 +467,6 @@ enum xhlock_context_t {  	XHLOCK_CTX_NR,  }; -#ifdef CONFIG_LOCKDEP_CROSSRELEASE -extern void lockdep_init_map_crosslock(struct lockdep_map *lock, -				       const char *name, -				       struct lock_class_key *key, -				       int subclass); -extern void lock_commit_crosslock(struct lockdep_map *lock); - -/* - * What we essencially have to initialize is 'nr_acquire'. Other members - * will be initialized in add_xlock(). - */ -#define STATIC_CROSS_LOCK_INIT() \ -	{ .nr_acquire = 0,} - -#define STATIC_CROSS_LOCKDEP_MAP_INIT(_name, _key) \ -	{ .map.name = (_name), .map.key = (void *)(_key), \ -	  .map.cross = 1, .xlock = STATIC_CROSS_LOCK_INIT(), } - -/* - * To initialize a lockdep_map statically use this macro. - * Note that _name must not be NULL. 
- */ -#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \ -	{ .name = (_name), .key = (void *)(_key), .cross = 0, } - -extern void crossrelease_hist_start(enum xhlock_context_t c); -extern void crossrelease_hist_end(enum xhlock_context_t c); -extern void lockdep_invariant_state(bool force); -extern void lockdep_init_task(struct task_struct *task); -extern void lockdep_free_task(struct task_struct *task); -#else /* !CROSSRELEASE */  #define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)  /*   * To initialize a lockdep_map statically use this macro. @@ -604,7 +480,6 @@ static inline void crossrelease_hist_end(enum xhlock_context_t c) {}  static inline void lockdep_invariant_state(bool force) {}  static inline void lockdep_init_task(struct task_struct *task) {}  static inline void lockdep_free_task(struct task_struct *task) {} -#endif /* CROSSRELEASE */  #ifdef CONFIG_LOCK_STAT diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h index a2a1318a3d0c..c3d3f04d8cc6 100644 --- a/include/linux/mfd/rtsx_pci.h +++ b/include/linux/mfd/rtsx_pci.h @@ -915,10 +915,10 @@ enum PDEV_STAT  {PDEV_STAT_IDLE, PDEV_STAT_RUN};  #define LTR_L1SS_PWR_GATE_CHECK_CARD_EN	BIT(6)  enum dev_aspm_mode { -	DEV_ASPM_DISABLE = 0,  	DEV_ASPM_DYNAMIC,  	DEV_ASPM_BACKDOOR,  	DEV_ASPM_STATIC, +	DEV_ASPM_DISABLE,  };  /* diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 895ec0c4942e..a2246cf670ba 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -54,7 +54,7 @@ static inline struct page *new_page_nodemask(struct page *page,  	new_page = __alloc_pages_nodemask(gfp_mask, order,  				preferred_nid, nodemask); -	if (new_page && PageTransHuge(page)) +	if (new_page && PageTransHuge(new_page))  		prep_transhuge_page(new_page);  	return new_page; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index a886b51511ab..1f509d072026 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -556,6 +556,7 @@ struct mlx5_core_sriov {  };  struct mlx5_irq_info { +	cpumask_var_t mask;  	char name[MLX5_MAX_IRQ_NAME];  }; @@ -1048,7 +1049,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,  		       enum mlx5_eq_type type);  int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);  int mlx5_start_eqs(struct mlx5_core_dev *dev); -int mlx5_stop_eqs(struct mlx5_core_dev *dev); +void mlx5_stop_eqs(struct mlx5_core_dev *dev);  int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,  		    unsigned int *irqn);  int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); @@ -1164,6 +1165,10 @@ int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);  int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);  bool mlx5_lag_is_active(struct mlx5_core_dev *dev);  struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev); +int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, +				 u64 *values, +				 int num_counters, +				 size_t *offsets);  struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);  void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up); diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 38a7577a9ce7..d44ec5f41d4a 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -147,7 +147,7 @@ enum {  	MLX5_CMD_OP_ALLOC_Q_COUNTER               = 0x771,  	MLX5_CMD_OP_DEALLOC_Q_COUNTER             = 0x772,  	MLX5_CMD_OP_QUERY_Q_COUNTER               = 
0x773, -	MLX5_CMD_OP_SET_RATE_LIMIT                = 0x780, +	MLX5_CMD_OP_SET_PP_RATE_LIMIT             = 0x780,  	MLX5_CMD_OP_QUERY_RATE_LIMIT              = 0x781,  	MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT      = 0x782,  	MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT     = 0x783, @@ -7239,7 +7239,7 @@ struct mlx5_ifc_add_vxlan_udp_dport_in_bits {  	u8         vxlan_udp_port[0x10];  }; -struct mlx5_ifc_set_rate_limit_out_bits { +struct mlx5_ifc_set_pp_rate_limit_out_bits {  	u8         status[0x8];  	u8         reserved_at_8[0x18]; @@ -7248,7 +7248,7 @@ struct mlx5_ifc_set_rate_limit_out_bits {  	u8         reserved_at_40[0x40];  }; -struct mlx5_ifc_set_rate_limit_in_bits { +struct mlx5_ifc_set_pp_rate_limit_in_bits {  	u8         opcode[0x10];  	u8         reserved_at_10[0x10]; @@ -7261,6 +7261,8 @@ struct mlx5_ifc_set_rate_limit_in_bits {  	u8         reserved_at_60[0x20];  	u8         rate_limit[0x20]; + +	u8         reserved_at_a0[0x160];  };  struct mlx5_ifc_access_register_out_bits { diff --git a/include/linux/mm.h b/include/linux/mm.h index ee073146aaa7..ea818ff739cd 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -377,6 +377,7 @@ enum page_entry_size {  struct vm_operations_struct {  	void (*open)(struct vm_area_struct * area);  	void (*close)(struct vm_area_struct * area); +	int (*split)(struct vm_area_struct * area, unsigned long addr);  	int (*mremap)(struct vm_area_struct * area);  	int (*fault)(struct vm_fault *vmf);  	int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size); @@ -1379,6 +1380,19 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages,  		    unsigned int gup_flags, struct page **pages, int *locked);  long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,  		    struct page **pages, unsigned int gup_flags); +#ifdef CONFIG_FS_DAX +long get_user_pages_longterm(unsigned long start, unsigned long nr_pages, +			    unsigned int gup_flags, struct page **pages, +			    struct vm_area_struct **vmas); +#else +static inline long get_user_pages_longterm(unsigned long start, +		unsigned long nr_pages, unsigned int gup_flags, +		struct page **pages, struct vm_area_struct **vmas) +{ +	return get_user_pages(start, nr_pages, gup_flags, pages, vmas); +} +#endif /* CONFIG_FS_DAX */ +  int get_user_pages_fast(unsigned long start, int nr_pages, int write,  			struct page **pages); diff --git a/include/linux/oom.h b/include/linux/oom.h index 01c91d874a57..5bad038ac012 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -67,6 +67,15 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk)  }  /* + * Use this helper if tsk->mm != mm and the victim mm needs a special + * handling. This is guaranteed to stay true after once set. + */ +static inline bool mm_is_oom_victim(struct mm_struct *mm) +{ +	return test_bit(MMF_OOM_VICTIM, &mm->flags); +} + +/*   * Checks whether a page fault on the given mm is still reliable.   
* This is no longer true if the oom reaper started to reap the   * address space which is reflected by MMF_UNSTABLE flag set in diff --git a/include/linux/pci.h b/include/linux/pci.h index 0403894147a3..c170c9250c8b 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1674,6 +1674,9 @@ static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,  static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,  						unsigned int devfn)  { return NULL; } +static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain, +					unsigned int bus, unsigned int devfn) +{ return NULL; }  static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }  static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; } diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 2c9c87d8a0c1..7546822a1d74 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -15,6 +15,7 @@  #define _LINUX_PERF_EVENT_H  #include <uapi/linux/perf_event.h> +#include <uapi/linux/bpf_perf_event.h>  /*   * Kernel-internal data types and definitions: @@ -787,7 +788,7 @@ struct perf_output_handle {  };  struct bpf_perf_event_data_kern { -	struct pt_regs *regs; +	bpf_user_pt_regs_t *regs;  	struct perf_sample_data *data;  	struct perf_event *event;  }; @@ -1177,6 +1178,9 @@ extern void perf_bp_event(struct perf_event *event, void *data);  		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)  # define perf_instruction_pointer(regs)	instruction_pointer(regs)  #endif +#ifndef perf_arch_bpf_user_pt_regs +# define perf_arch_bpf_user_pt_regs(regs) regs +#endif  static inline bool has_branch_stack(struct perf_event *event)  { diff --git a/include/linux/pm.h b/include/linux/pm.h index 65d39115f06d..492ed473ba7e 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -765,6 +765,7 @@ extern int pm_generic_poweroff_late(struct device *dev);  extern int pm_generic_poweroff(struct device *dev);  extern void pm_generic_complete(struct device *dev); +extern void dev_pm_skip_next_resume_phases(struct device *dev);  extern bool dev_pm_smart_suspend_and_suspended(struct device *dev);  #else /* !CONFIG_PM_SLEEP */ diff --git a/include/linux/pti.h b/include/linux/pti.h index b3ea01a3197e..0174883a935a 100644 --- a/include/linux/pti.h +++ b/include/linux/pti.h @@ -1,43 +1,11 @@ -/* - *  Copyright (C) Intel 2011 - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the - * GNU General Public License for more details. - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * The PTI (Parallel Trace Interface) driver directs trace data routed from - * various parts in the system out through the Intel Penwell PTI port and - * out of the mobile device for analysis with a debugging tool - * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7, - * compact JTAG, standard. - * - * This header file will allow other parts of the OS to use the - * interface to write out it's contents for debugging a mobile system. 
- */ +// SPDX-License-Identifier: GPL-2.0 +#ifndef _INCLUDE_PTI_H +#define _INCLUDE_PTI_H -#ifndef PTI_H_ -#define PTI_H_ +#ifdef CONFIG_PAGE_TABLE_ISOLATION +#include <asm/pti.h> +#else +static inline void pti_init(void) { } +#endif -/* offset for last dword of any PTI message. Part of MIPI P1149.7 */ -#define PTI_LASTDWORD_DTS	0x30 - -/* basic structure used as a write address to the PTI HW */ -struct pti_masterchannel { -	u8 master; -	u8 channel; -}; - -/* the following functions are defined in misc/pti.c */ -void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count); -struct pti_masterchannel *pti_request_masterchannel(u8 type, -						    const char *thread_name); -void pti_release_masterchannel(struct pti_masterchannel *mc); - -#endif /*PTI_H_*/ +#endif diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 37b4bb2545b3..6866df4f31b5 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -101,12 +101,18 @@ static inline bool ptr_ring_full_bh(struct ptr_ring *r)  /* Note: callers invoking this in a loop must use a compiler barrier,   * for example cpu_relax(). Callers must hold producer_lock. + * Callers are responsible for making sure pointer that is being queued + * points to a valid data.   */  static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)  {  	if (unlikely(!r->size) || r->queue[r->producer])  		return -ENOSPC; +	/* Make sure the pointer we are storing points to a valid data. */ +	/* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */ +	smp_wmb(); +  	r->queue[r->producer++] = ptr;  	if (unlikely(r->producer >= r->size))  		r->producer = 0; @@ -275,6 +281,9 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r)  	if (ptr)  		__ptr_ring_discard_one(r); +	/* Make sure anyone accessing data through the pointer is up to date. */ +	/* Pairs with smp_wmb in __ptr_ring_produce. */ +	smp_read_barrier_depends();  	return ptr;  } diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index d574361943ea..fcbeed4053ef 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h @@ -99,6 +99,8 @@ extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,  			    struct rb_root *root);  extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,  				struct rb_root *root); +extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new, +				   struct rb_root_cached *root);  static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,  				struct rb_node **rb_link) diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h index a328e8181e49..e4b257ff881b 100644 --- a/include/linux/rculist_nulls.h +++ b/include/linux/rculist_nulls.h @@ -101,44 +101,6 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,  }  /** - * hlist_nulls_add_tail_rcu - * @n: the element to add to the hash list. - * @h: the list to add to. - * - * Description: - * Adds the specified element to the end of the specified hlist_nulls, - * while permitting racing traversals.  NOTE: tail insertion requires - * list traversal. - * - * The caller must take whatever precautions are necessary - * (such as holding appropriate locks) to avoid racing - * with another list-mutation primitive, such as hlist_nulls_add_head_rcu() - * or hlist_nulls_del_rcu(), running on this same list. 
- * However, it is perfectly legal to run concurrently with - * the _rcu list-traversal primitives, such as - * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency - * problems on Alpha CPUs.  Regardless of the type of CPU, the - * list-traversal primitive must be guarded by rcu_read_lock(). - */ -static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n, -					struct hlist_nulls_head *h) -{ -	struct hlist_nulls_node *i, *last = NULL; - -	for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i); -	     i = hlist_nulls_next_rcu(i)) -		last = i; - -	if (last) { -		n->next = last->next; -		n->pprev = &last->next; -		rcu_assign_pointer(hlist_nulls_next_rcu(last), n); -	} else { -		hlist_nulls_add_head_rcu(n, h); -	} -} - -/**   * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type   * @tpos:	the type * to use as a loop cursor.   * @pos:	the &struct hlist_nulls_node to use as a loop cursor. diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h index cc0072e93e36..857a72ceb794 100644 --- a/include/linux/rwlock_types.h +++ b/include/linux/rwlock_types.h @@ -10,9 +10,6 @@   */  typedef struct {  	arch_rwlock_t raw_lock; -#ifdef CONFIG_GENERIC_LOCKBREAK -	unsigned int break_lock; -#endif  #ifdef CONFIG_DEBUG_SPINLOCK  	unsigned int magic, owner_cpu;  	void *owner; diff --git a/include/linux/sched.h b/include/linux/sched.h index 21991d668d35..d2588263a989 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -849,17 +849,6 @@ struct task_struct {  	struct held_lock		held_locks[MAX_LOCK_DEPTH];  #endif -#ifdef CONFIG_LOCKDEP_CROSSRELEASE -#define MAX_XHLOCKS_NR 64UL -	struct hist_lock *xhlocks; /* Crossrelease history locks */ -	unsigned int xhlock_idx; -	/* For restoring at history boundaries */ -	unsigned int xhlock_idx_hist[XHLOCK_CTX_NR]; -	unsigned int hist_id; -	/* For overwrite check at each context exit */ -	unsigned int hist_id_save[XHLOCK_CTX_NR]; -#endif -  #ifdef CONFIG_UBSAN  	unsigned int			in_ubsan;  #endif @@ -1503,7 +1492,11 @@ static inline void set_task_comm(struct task_struct *tsk, const char *from)  	__set_task_comm(tsk, from, false);  } -extern char *get_task_comm(char *to, struct task_struct *tsk); +extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk); +#define get_task_comm(buf, tsk) ({			\ +	BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN);	\ +	__get_task_comm(buf, sizeof(buf), tsk);		\ +})  #ifdef CONFIG_SMP  void scheduler_ipi(void); diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h index 9c8847395b5e..ec912d01126f 100644 --- a/include/linux/sched/coredump.h +++ b/include/linux/sched/coredump.h @@ -70,6 +70,7 @@ static inline int get_dumpable(struct mm_struct *mm)  #define MMF_UNSTABLE		22	/* mm is unstable for copy_from_user */  #define MMF_HUGE_ZERO_PAGE	23      /* mm has ever used the global huge zero page */  #define MMF_DISABLE_THP		24	/* disable THP for all VMAs */ +#define MMF_OOM_VICTIM		25	/* mm is the oom victim */  #define MMF_DISABLE_THP_MASK	(1 << MMF_DISABLE_THP)  #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ diff --git a/include/linux/serdev.h b/include/linux/serdev.h index e69402d4a8ae..d609e6dc5bad 100644 --- a/include/linux/serdev.h +++ b/include/linux/serdev.h @@ -184,7 +184,7 @@ static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl,  	struct serdev_device *serdev = ctrl->serdev;  	if (!serdev || !serdev->ops->receive_buf) -		return -EINVAL; +		return 0;  	return serdev->ops->receive_buf(serdev, 
data, count);  } diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index bc486ef23f20..a38c80e9f91e 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1406,8 +1406,7 @@ static inline struct sk_buff *skb_get(struct sk_buff *skb)  }  /* - * If users == 1, we are the only owner and are can avoid redundant - * atomic change. + * If users == 1, we are the only owner and can avoid redundant atomic changes.   */  /** diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 7b2170bfd6e7..bc6bb325d1bf 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -126,7 +126,7 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats,   *	for that name.  This appears in the sysfs "modalias" attribute   *	for driver coldplugging, and in uevents used for hotplugging   * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when - *	when not using a GPIO line) + *	not using a GPIO line)   *   * @statistics: statistics for the spi_device   * diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index a39186194cd6..3bf273538840 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -107,16 +107,11 @@ do {								\  #define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock) -#ifdef CONFIG_GENERIC_LOCKBREAK -#define raw_spin_is_contended(lock) ((lock)->break_lock) -#else -  #ifdef arch_spin_is_contended  #define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)  #else  #define raw_spin_is_contended(lock)	(((void)(lock), 0))  #endif /*arch_spin_is_contended*/ -#endif  /*   * This barrier must provide two things: diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index 73548eb13a5d..24b4e6f2c1a2 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -19,9 +19,6 @@  typedef struct raw_spinlock {  	arch_spinlock_t raw_lock; -#ifdef CONFIG_GENERIC_LOCKBREAK -	unsigned int break_lock; -#endif  #ifdef CONFIG_DEBUG_SPINLOCK  	unsigned int magic, owner_cpu;  	void *owner; diff --git a/include/linux/string.h b/include/linux/string.h index 410ecf17de3c..cfd83eb2f926 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -259,7 +259,10 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p)  {  	__kernel_size_t ret;  	size_t p_size = __builtin_object_size(p, 0); -	if (p_size == (size_t)-1) + +	/* Work around gcc excess stack consumption issue */ +	if (p_size == (size_t)-1 || +	    (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0'))  		return __builtin_strlen(p);  	ret = strnlen(p, p_size);  	if (p_size <= ret) diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h index 270bad0e1bed..40d2822f0e2f 100644 --- a/include/linux/sunrpc/cache.h +++ b/include/linux/sunrpc/cache.h @@ -213,7 +213,7 @@ extern void __init cache_initialize(void);  extern int cache_register_net(struct cache_detail *cd, struct net *net);  extern void cache_unregister_net(struct cache_detail *cd, struct net *net); -extern struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net); +extern struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net);  extern void cache_destroy_net(struct cache_detail *cd, struct net *net);  extern void sunrpc_init_cache_detail(struct cache_detail *cd); diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index e32dfe098e82..40839c02d28c 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -117,6 +117,12 @@ struct 
attribute_group {  	.show	= _name##_show,						\  } +#define __ATTR_RO_MODE(_name, _mode) {					\ +	.attr	= { .name = __stringify(_name),				\ +		    .mode = VERIFY_OCTAL_PERMISSIONS(_mode) },		\ +	.show	= _name##_show,						\ +} +  #define __ATTR_WO(_name) {						\  	.attr	= { .name = __stringify(_name), .mode = S_IWUSR },	\  	.store	= _name##_store,					\ diff --git a/include/linux/tcp.h b/include/linux/tcp.h index df5d97a85e1a..ca4a6361389b 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -224,7 +224,8 @@ struct tcp_sock {  		rate_app_limited:1,  /* rate_{delivered,interval_us} limited? */  		fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */  		fastopen_no_cookie:1, /* Allow send/recv SYN+data without a cookie */ -		unused:3; +		is_sack_reneg:1,    /* in recovery from loss with SACK reneg? */ +		unused:2;  	u8	nonagle     : 4,/* Disable Nagle algorithm?             */  		thin_lto    : 1,/* Use linear timeouts for thin streams */  		unused1	    : 1, diff --git a/include/linux/tick.h b/include/linux/tick.h index f442d1a42025..7cc35921218e 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -119,6 +119,7 @@ extern void tick_nohz_idle_exit(void);  extern void tick_nohz_irq_exit(void);  extern ktime_t tick_nohz_get_sleep_length(void);  extern unsigned long tick_nohz_get_idle_calls(void); +extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);  extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);  extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);  #else /* !CONFIG_NO_HZ_COMMON */ diff --git a/include/linux/timer.h b/include/linux/timer.h index 04af640ea95b..2448f9cc48a3 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -207,9 +207,11 @@ unsigned long round_jiffies_up(unsigned long j);  unsigned long round_jiffies_up_relative(unsigned long j);  #ifdef CONFIG_HOTPLUG_CPU +int timers_prepare_cpu(unsigned int cpu);  int timers_dead_cpu(unsigned int cpu);  #else -#define timers_dead_cpu NULL +#define timers_prepare_cpu	NULL +#define timers_dead_cpu		NULL  #endif  #endif diff --git a/include/linux/trace.h b/include/linux/trace.h index d24991c1fef3..b95ffb2188ab 100644 --- a/include/linux/trace.h +++ b/include/linux/trace.h @@ -18,7 +18,7 @@   */  struct trace_export {  	struct trace_export __rcu	*next; -	void (*write)(const void *, unsigned int); +	void (*write)(struct trace_export *, const void *, unsigned int);  };  int register_ftrace_export(struct trace_export *export); diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index a69877734c4e..e2ec3582e549 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -82,6 +82,7 @@ struct usbnet {  #		define EVENT_RX_KILL	10  #		define EVENT_LINK_CHANGE	11  #		define EVENT_SET_RX_MODE	12 +#		define EVENT_NO_IP_ALIGN	13  };  static inline struct usb_driver *driver_of(struct usb_interface *intf) |
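
Several of the interface changes above alter how callers are written; minimal, hedged C sketches follow (function names prefixed demo_/show_ are illustrative and not part of the patch). First, the include/linux/sched.h hunk turns get_task_comm() into a macro that passes the destination size to __get_task_comm() and rejects, at build time, any buffer that is not exactly TASK_COMm_LEN bytes — a sketch of a caller:

#include <linux/printk.h>
#include <linux/sched.h>

/* show_comm() is a made-up example caller, not part of the patch */
static void show_comm(struct task_struct *tsk)
{
	char comm[TASK_COMM_LEN];	/* must be exactly TASK_COMM_LEN, or the BUILD_BUG_ON() fires */

	get_task_comm(comm, tsk);	/* expands to __get_task_comm(comm, sizeof(comm), tsk) */
	pr_info("comm: %s\n", comm);
}

A bare pointer or a differently sized array no longer compiles, which is what the switch away from the old char *get_task_comm(char *, struct task_struct *) prototype buys.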
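
Second, the include/linux/irqdesc.h hunk gives irq_set_lockdep_class() a second lock_class_key so that desc->request_mutex gets its own lockdep class next to desc->lock. A sketch of an adapted caller (the key and function names are illustrative):

#include <linux/irqdesc.h>

static struct lock_class_key demo_irq_lock_class;	/* class for desc->lock */
static struct lock_class_key demo_irq_request_class;	/* class for desc->request_mutex */

static void demo_set_irq_lockdep(unsigned int irq)
{
	irq_set_lockdep_class(irq, &demo_irq_lock_class, &demo_irq_request_class);
}

The gpiochip_add_data()/gpiochip_irqchip_add() changes in include/linux/gpio/driver.h follow the same pattern, passing a separate request_key down to gpiochip_add_data_with_key().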
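
Third, the include/linux/blkdev.h hunk changes blk_rq_append_bio() to take struct bio **, presumably so the helper can update the caller's pointer to the bio it actually appends; callers therefore pass the address of their bio. A minimal sketch under that assumption (demo_append() and the surrounding request/bio setup are assumed to exist elsewhere):

#include <linux/blkdev.h>

static int demo_append(struct request *rq, struct bio *bio)
{
	int ret;

	ret = blk_rq_append_bio(rq, &bio);	/* note the &bio: the pointer may be rewritten */
	if (ret)
		return ret;

	return 0;
}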