Diffstat (limited to 'include/linux')
355 files changed, 7974 insertions, 2815 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 4d5ee84c468b..6adcd1b92b20 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -40,7 +40,7 @@ struct irq_domain_ops; #include <asm/acpi.h> #ifdef CONFIG_ACPI_TABLE_LIB -#define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, ACPI) +#define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, "ACPI") #define __init_or_acpilib #define __initdata_or_acpilib #else @@ -1164,8 +1164,6 @@ int acpi_subsys_suspend_noirq(struct device *dev); int acpi_subsys_suspend(struct device *dev); int acpi_subsys_freeze(struct device *dev); int acpi_subsys_poweroff(struct device *dev); -void acpi_ec_mark_gpe_for_wake(void); -void acpi_ec_set_gpe_wake_mask(u8 action); int acpi_subsys_restore_early(struct device *dev); #else static inline int acpi_subsys_prepare(struct device *dev) { return 0; } @@ -1176,6 +1174,12 @@ static inline int acpi_subsys_suspend(struct device *dev) { return 0; } static inline int acpi_subsys_freeze(struct device *dev) { return 0; } static inline int acpi_subsys_poweroff(struct device *dev) { return 0; } static inline int acpi_subsys_restore_early(struct device *dev) { return 0; } +#endif + +#if defined(CONFIG_ACPI_EC) && defined(CONFIG_PM_SLEEP) +void acpi_ec_mark_gpe_for_wake(void); +void acpi_ec_set_gpe_wake_mask(u8 action); +#else static inline void acpi_ec_mark_gpe_for_wake(void) {} static inline void acpi_ec_set_gpe_wake_mask(u8 action) {} #endif @@ -1526,17 +1530,7 @@ static inline int find_acpi_cpu_topology_hetero_id(unsigned int cpu) } #endif -#ifdef CONFIG_ARM64 -void acpi_arm_init(void); -#else -static inline void acpi_arm_init(void) { } -#endif - -#ifdef CONFIG_RISCV -void acpi_riscv_init(void); -#else -static inline void acpi_riscv_init(void) { } -#endif +void acpi_arch_init(void); #ifdef CONFIG_ACPI_PCC void acpi_init_pcc(void); diff --git a/include/linux/acpi_dma.h b/include/linux/acpi_dma.h index 72cedb916a9c..e748b2877602 100644 --- a/include/linux/acpi_dma.h +++ b/include/linux/acpi_dma.h @@ -11,10 +11,11 @@ #ifndef __LINUX_ACPI_DMA_H #define __LINUX_ACPI_DMA_H -#include <linux/list.h> -#include <linux/device.h> #include <linux/err.h> #include <linux/dmaengine.h> +#include <linux/types.h> + +struct device; /** * struct acpi_dma_spec - slave device DMA resources @@ -65,7 +66,6 @@ int devm_acpi_dma_controller_register(struct device *dev, struct dma_chan *(*acpi_dma_xlate) (struct acpi_dma_spec *, struct acpi_dma *), void *data); -void devm_acpi_dma_controller_free(struct device *dev); struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev, size_t index); @@ -94,9 +94,6 @@ static inline int devm_acpi_dma_controller_register(struct device *dev, { return -ENODEV; } -static inline void devm_acpi_dma_controller_free(struct device *dev) -{ -} static inline struct dma_chan *acpi_dma_request_slave_chan_by_index( struct device *dev, size_t index) diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h index 05e758b8b894..3ffa5341dce2 100644 --- a/include/linux/alarmtimer.h +++ b/include/linux/alarmtimer.h @@ -20,12 +20,6 @@ enum alarmtimer_type { ALARM_BOOTTIME_FREEZER, }; -enum alarmtimer_restart { - ALARMTIMER_NORESTART, - ALARMTIMER_RESTART, -}; - - #define ALARMTIMER_STATE_INACTIVE 0x00 #define ALARMTIMER_STATE_ENQUEUED 0x01 @@ -42,14 +36,14 @@ enum alarmtimer_restart { struct alarm { struct timerqueue_node node; struct hrtimer timer; - enum alarmtimer_restart (*function)(struct alarm *, ktime_t now); + void (*function)(struct alarm *, ktime_t now); enum alarmtimer_type 
type; int state; void *data; }; void alarm_init(struct alarm *alarm, enum alarmtimer_type type, - enum alarmtimer_restart (*function)(struct alarm *, ktime_t)); + void (*function)(struct alarm *, ktime_t)); void alarm_start(struct alarm *alarm, ktime_t start); void alarm_start_relative(struct alarm *alarm, ktime_t start); void alarm_restart(struct alarm *alarm); diff --git a/include/linux/alcor_pci.h b/include/linux/alcor_pci.h index c4a0b23846d8..dcb1d37dabc2 100644 --- a/include/linux/alcor_pci.h +++ b/include/linux/alcor_pci.h @@ -11,6 +11,7 @@ #define ALCOR_SD_CARD 0 #define ALCOR_MS_CARD 1 +#define DRV_NAME_ALCOR_PCI "alcor_pci" #define DRV_NAME_ALCOR_PCI_SDMMC "alcor_sdmmc" #define DRV_NAME_ALCOR_PCI_MS "alcor_ms" diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h index 941deffc590d..0bbbe537c5f9 100644 --- a/include/linux/alloc_tag.h +++ b/include/linux/alloc_tag.h @@ -30,6 +30,21 @@ struct alloc_tag { struct alloc_tag_counters __percpu *counters; } __aligned(8); +struct alloc_tag_kernel_section { + struct alloc_tag *first_tag; + unsigned long count; +}; + +struct alloc_tag_module_section { + union { + unsigned long start_addr; + struct alloc_tag *first_tag; + }; + unsigned long end_addr; + /* used size */ + unsigned long size; +}; + #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG #define CODETAG_EMPTY ((void *)1) @@ -48,12 +63,19 @@ static inline void set_codetag_empty(union codetag_ref *ref) #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */ static inline bool is_codetag_empty(union codetag_ref *ref) { return false; } -static inline void set_codetag_empty(union codetag_ref *ref) {} + +static inline void set_codetag_empty(union codetag_ref *ref) +{ + if (ref) + ref->ct = NULL; +} #endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */ #ifdef CONFIG_MEM_ALLOC_PROFILING +#define ALLOC_TAG_SECTION_NAME "alloc_tags" + struct codetag_bytes { struct codetag *ct; s64 bytes; @@ -76,7 +98,7 @@ DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag); #define DEFINE_ALLOC_TAG(_alloc_tag) \ static struct alloc_tag _alloc_tag __used __aligned(8) \ - __section("alloc_tags") = { \ + __section(ALLOC_TAG_SECTION_NAME) = { \ .ct = CODE_TAG_INIT, \ .counters = &_shared_alloc_tag }; @@ -85,7 +107,7 @@ DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag); #define DEFINE_ALLOC_TAG(_alloc_tag) \ static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr); \ static struct alloc_tag _alloc_tag __used __aligned(8) \ - __section("alloc_tags") = { \ + __section(ALLOC_TAG_SECTION_NAME) = { \ .ct = CODE_TAG_INIT, \ .counters = &_alloc_tag_cntr }; @@ -118,7 +140,7 @@ static inline struct alloc_tag_counters alloc_tag_read(struct alloc_tag *tag) #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag) { - WARN_ONCE(ref && ref->ct, + WARN_ONCE(ref && ref->ct && !is_codetag_empty(ref), "alloc_tag was not cleared (got tag for %s:%u)\n", ref->ct->filename, ref->ct->lineno); diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index dda2f3ea89cb..9946276aff73 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -121,6 +121,7 @@ extern const struct bus_type amba_bustype; #ifdef CONFIG_ARM_AMBA int __amba_driver_register(struct amba_driver *, struct module *); void amba_driver_unregister(struct amba_driver *); +bool dev_is_amba(const struct device *dev); #else static inline int __amba_driver_register(struct amba_driver *drv, struct module *owner) @@ -130,6 +131,10 @@ static inline int 
__amba_driver_register(struct amba_driver *drv, static inline void amba_driver_unregister(struct amba_driver *drv) { } +static inline bool dev_is_amba(const struct device *dev) +{ + return false; +} #endif struct amba_device *amba_device_alloc(const char *, resource_size_t, size_t); diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h index 4a952c4885ed..2222e8b03ff4 100644 --- a/include/linux/arch_topology.h +++ b/include/linux/arch_topology.h @@ -45,6 +45,7 @@ enum scale_freq_source { SCALE_FREQ_SOURCE_CPUFREQ = 0, SCALE_FREQ_SOURCE_ARCH, SCALE_FREQ_SOURCE_CPPC, + SCALE_FREQ_SOURCE_VIRT, }; struct scale_freq_data { diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h index a28e2a6a13d0..74169dd0f659 100644 --- a/include/linux/arm_ffa.h +++ b/include/linux/arm_ffa.h @@ -166,9 +166,12 @@ static inline void *ffa_dev_get_drvdata(struct ffa_device *fdev) return dev_get_drvdata(&fdev->dev); } +struct ffa_partition_info; + #if IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT) -struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id, - const struct ffa_ops *ops); +struct ffa_device * +ffa_device_register(const struct ffa_partition_info *part_info, + const struct ffa_ops *ops); void ffa_device_unregister(struct ffa_device *ffa_dev); int ffa_driver_register(struct ffa_driver *driver, struct module *owner, const char *mod_name); @@ -176,9 +179,9 @@ void ffa_driver_unregister(struct ffa_driver *driver); bool ffa_device_is_valid(struct ffa_device *ffa_dev); #else -static inline -struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id, - const struct ffa_ops *ops) +static inline struct ffa_device * +ffa_device_register(const struct ffa_partition_info *part_info, + const struct ffa_ops *ops) { return NULL; } diff --git a/include/linux/asn1_decoder.h b/include/linux/asn1_decoder.h index 83f9c6e1e5e9..b41bce82a191 100644 --- a/include/linux/asn1_decoder.h +++ b/include/linux/asn1_decoder.h @@ -9,6 +9,7 @@ #define _LINUX_ASN1_DECODER_H #include <linux/asn1.h> +#include <linux/types.h> struct asn1_decoder; diff --git a/include/linux/asn1_encoder.h b/include/linux/asn1_encoder.h index 08cd0c2ad34f..d17484dffb74 100644 --- a/include/linux/asn1_encoder.h +++ b/include/linux/asn1_encoder.h @@ -6,7 +6,6 @@ #include <linux/types.h> #include <linux/asn1.h> #include <linux/asn1_ber_bytecode.h> -#include <linux/bug.h> #define asn1_oid_len(oid) (sizeof(oid)/sizeof(u32)) unsigned char * diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h deleted file mode 100644 index 76860a461ed2..000000000000 --- a/include/linux/ath9k_platform.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (c) 2008 Atheros Communications Inc. - * Copyright (c) 2009 Gabor Juhos <[email protected]> - * Copyright (c) 2009 Imre Kaloz <[email protected]> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -#ifndef _LINUX_ATH9K_PLATFORM_H -#define _LINUX_ATH9K_PLATFORM_H - -#define ATH9K_PLAT_EEP_MAX_WORDS 2048 - -struct ath9k_platform_data { - const char *eeprom_name; - - u16 eeprom_data[ATH9K_PLAT_EEP_MAX_WORDS]; - u8 *macaddr; - - int led_pin; - u32 gpio_mask; - u32 gpio_val; - - u32 bt_active_pin; - u32 bt_priority_pin; - u32 wlan_active_pin; - - bool endian_check; - bool is_clk_25mhz; - bool tx_gain_buffalo; - bool disable_2ghz; - bool disable_5ghz; - bool led_active_high; - - int (*get_mac_revision)(void); - int (*external_reset)(void); - - bool use_eeprom; -}; - -#endif /* _LINUX_ATH9K_PLATFORM_H */ diff --git a/include/linux/auxiliary_bus.h b/include/linux/auxiliary_bus.h index 31762324bcc9..65dd7f154374 100644 --- a/include/linux/auxiliary_bus.h +++ b/include/linux/auxiliary_bus.h @@ -269,8 +269,4 @@ void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv); #define module_auxiliary_driver(__auxiliary_driver) \ module_driver(__auxiliary_driver, auxiliary_driver_register, auxiliary_driver_unregister) -struct auxiliary_device *auxiliary_find_device(struct device *start, - const void *data, - device_match_t match); - #endif /* _AUXILIARY_BUS_H_ */ diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index f41395264dca..13a11f3c09b8 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -89,6 +89,9 @@ enum virtchnl_rx_hsplit { VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8, }; +enum virtchnl_bw_limit_type { + VIRTCHNL_BW_SHAPER = 0, +}; /* END GENERIC DEFINES */ /* Opcodes for VF-PF communication. These are placed in the v_opcode field @@ -151,6 +154,11 @@ enum virtchnl_ops { VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 = 55, VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 = 56, VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57, + /* opcode 57 - 65 are reserved */ + VIRTCHNL_OP_GET_QOS_CAPS = 66, + /* opcode 68 through 111 are reserved */ + VIRTCHNL_OP_CONFIG_QUEUE_BW = 112, + VIRTCHNL_OP_CONFIG_QUANTA = 113, VIRTCHNL_OP_MAX, }; @@ -261,6 +269,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); #define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC BIT(26) #define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF BIT(27) #define VIRTCHNL_VF_OFFLOAD_FDIR_PF BIT(28) +#define VIRTCHNL_VF_OFFLOAD_QOS BIT(29) #define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \ VIRTCHNL_VF_OFFLOAD_VLAN | \ @@ -1416,6 +1425,86 @@ struct virtchnl_fdir_del { VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del); +struct virtchnl_shaper_bw { + /* Unit is Kbps */ + u32 committed; + u32 peak; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_shaper_bw); + +/* VIRTCHNL_OP_GET_QOS_CAPS + * VF sends this message to get its QoS Caps, such as + * TC number, Arbiter and Bandwidth. 
+ */ +struct virtchnl_qos_cap_elem { + u8 tc_num; + u8 tc_prio; +#define VIRTCHNL_ABITER_STRICT 0 +#define VIRTCHNL_ABITER_ETS 2 + u8 arbiter; +#define VIRTCHNL_STRICT_WEIGHT 1 + u8 weight; + enum virtchnl_bw_limit_type type; + union { + struct virtchnl_shaper_bw shaper; + u8 pad2[32]; + }; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_qos_cap_elem); + +struct virtchnl_qos_cap_list { + u16 vsi_id; + u16 num_elem; + struct virtchnl_qos_cap_elem cap[]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_qos_cap_list); +#define virtchnl_qos_cap_list_LEGACY_SIZEOF 44 + +/* VIRTCHNL_OP_CONFIG_QUEUE_BW */ +struct virtchnl_queue_bw { + u16 queue_id; + u8 tc; + u8 pad; + struct virtchnl_shaper_bw shaper; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_bw); + +struct virtchnl_queues_bw_cfg { + u16 vsi_id; + u16 num_queues; + struct virtchnl_queue_bw cfg[]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_queues_bw_cfg); +#define virtchnl_queues_bw_cfg_LEGACY_SIZEOF 16 + +enum virtchnl_queue_type { + VIRTCHNL_QUEUE_TYPE_TX = 0, + VIRTCHNL_QUEUE_TYPE_RX = 1, +}; + +/* structure to specify a chunk of contiguous queues */ +struct virtchnl_queue_chunk { + /* see enum virtchnl_queue_type */ + s32 type; + u16 start_queue_id; + u16 num_queues; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_queue_chunk); + +struct virtchnl_quanta_cfg { + u16 quanta_size; + u16 pad; + struct virtchnl_queue_chunk queue_select; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_quanta_cfg); + #define __vss_byone(p, member, count, old) \ (struct_size(p, member, count) + (old - 1 - struct_size(p, member, 0))) @@ -1438,6 +1527,8 @@ VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del); __vss(virtchnl_vlan_filter_list_v2, __vss_byelem, p, m, c), \ __vss(virtchnl_tc_info, __vss_byelem, p, m, c), \ __vss(virtchnl_rdma_qvlist_info, __vss_byelem, p, m, c), \ + __vss(virtchnl_qos_cap_list, __vss_byelem, p, m, c), \ + __vss(virtchnl_queues_bw_cfg, __vss_byelem, p, m, c), \ __vss(virtchnl_rss_key, __vss_byone, p, m, c), \ __vss(virtchnl_rss_lut, __vss_byone, p, m, c)) @@ -1637,6 +1728,35 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2: valid_len = sizeof(struct virtchnl_vlan_setting); break; + case VIRTCHNL_OP_GET_QOS_CAPS: + break; + case VIRTCHNL_OP_CONFIG_QUEUE_BW: + valid_len = virtchnl_queues_bw_cfg_LEGACY_SIZEOF; + if (msglen >= valid_len) { + struct virtchnl_queues_bw_cfg *q_bw = + (struct virtchnl_queues_bw_cfg *)msg; + + valid_len = virtchnl_struct_size(q_bw, cfg, + q_bw->num_queues); + if (q_bw->num_queues == 0) { + err_msg_format = true; + break; + } + } + break; + case VIRTCHNL_OP_CONFIG_QUANTA: + valid_len = sizeof(struct virtchnl_quanta_cfg); + if (msglen >= valid_len) { + struct virtchnl_quanta_cfg *q_quanta = + (struct virtchnl_quanta_cfg *)msg; + + if (q_quanta->quanta_size == 0 || + q_quanta->queue_select.num_queues == 0) { + err_msg_format = true; + break; + } + } + break; /* These are always errors coming from the VF. 
*/ case VIRTCHNL_OP_EVENT: case VIRTCHNL_OP_UNKNOWN: diff --git a/include/linux/backing-file.h b/include/linux/backing-file.h index 2eed0ffb5e8f..1476a6ed1bfd 100644 --- a/include/linux/backing-file.h +++ b/include/linux/backing-file.h @@ -14,9 +14,8 @@ struct backing_file_ctx { const struct cred *cred; - struct file *user_file; - void (*accessed)(struct file *); - void (*end_write)(struct file *, loff_t, ssize_t); + void (*accessed)(struct file *file); + void (*end_write)(struct kiocb *iocb, ssize_t); }; struct file *backing_file_open(const struct path *user_path, int flags, @@ -31,13 +30,13 @@ ssize_t backing_file_read_iter(struct file *file, struct iov_iter *iter, ssize_t backing_file_write_iter(struct file *file, struct iov_iter *iter, struct kiocb *iocb, int flags, struct backing_file_ctx *ctx); -ssize_t backing_file_splice_read(struct file *in, loff_t *ppos, +ssize_t backing_file_splice_read(struct file *in, struct kiocb *iocb, struct pipe_inode_info *pipe, size_t len, unsigned int flags, struct backing_file_ctx *ctx); ssize_t backing_file_splice_write(struct pipe_inode_info *pipe, - struct file *out, loff_t *ppos, size_t len, - unsigned int flags, + struct file *out, struct kiocb *iocb, + size_t len, unsigned int flags, struct backing_file_ctx *ctx); int backing_file_mmap(struct file *file, struct vm_area_struct *vma, struct backing_file_ctx *ctx); diff --git a/include/linux/backlight.h b/include/linux/backlight.h index ea9c1bc8148e..f5652e5a9060 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -66,24 +66,6 @@ enum backlight_type { BACKLIGHT_TYPE_MAX, }; -/** - * enum backlight_notification - the type of notification - * - * The notifications that is used for notification sent to the receiver - * that registered notifications using backlight_register_notifier(). - */ -enum backlight_notification { - /** - * @BACKLIGHT_REGISTERED: The backlight device is registered. - */ - BACKLIGHT_REGISTERED, - - /** - * @BACKLIGHT_UNREGISTERED: The backlight revice is unregistered. - */ - BACKLIGHT_UNREGISTERED, -}; - /** enum backlight_scale - the type of scale used for brightness values * * The type of scale used for brightness values. 
@@ -421,8 +403,6 @@ void devm_backlight_device_unregister(struct device *dev, struct backlight_device *bd); void backlight_force_update(struct backlight_device *bd, enum backlight_update_reason reason); -int backlight_register_notifier(struct notifier_block *nb); -int backlight_unregister_notifier(struct notifier_block *nb); struct backlight_device *backlight_device_get_by_name(const char *name); struct backlight_device *backlight_device_get_by_type(enum backlight_type type); int backlight_device_set_brightness(struct backlight_device *bd, diff --git a/include/linux/bio-integrity.h b/include/linux/bio-integrity.h index dd831c269e99..dbf0f74c1529 100644 --- a/include/linux/bio-integrity.h +++ b/include/linux/bio-integrity.h @@ -72,7 +72,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, gfp_t gfp, unsigned int nr); int bio_integrity_add_page(struct bio *bio, struct page *page, unsigned int len, unsigned int offset); -int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t len, u32 seed); +int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t len); void bio_integrity_unmap_user(struct bio *bio); bool bio_integrity_prep(struct bio *bio); void bio_integrity_advance(struct bio *bio, unsigned int bytes_done); @@ -99,7 +99,7 @@ static inline void bioset_integrity_free(struct bio_set *bs) } static inline int bio_integrity_map_user(struct bio *bio, void __user *ubuf, - ssize_t len, u32 seed) + ssize_t len) { return -EINVAL; } diff --git a/include/linux/bio.h b/include/linux/bio.h index faceadb040f9..7a1b3b1a8fed 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -418,14 +418,12 @@ bool __must_check bio_add_folio(struct bio *bio, struct folio *folio, size_t len, size_t off); extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *, unsigned int, unsigned int); -int bio_add_zone_append_page(struct bio *bio, struct page *page, - unsigned int len, unsigned int offset); void __bio_add_page(struct bio *bio, struct page *page, unsigned int len, unsigned int off); void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len, size_t off); int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter); -void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter); +void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter); void __bio_release_pages(struct bio *bio, bool mark_dirty); extern void bio_set_pages_dirty(struct bio *bio); extern void bio_check_pages_dirty(struct bio *bio); @@ -677,6 +675,23 @@ static inline void bio_clear_polled(struct bio *bio) bio->bi_opf &= ~REQ_POLLED; } +/** + * bio_is_zone_append - is this a zone append bio? + * @bio: bio to check + * + * Check if @bio is a zone append operation. Core block layer code and end_io + * handlers must use this instead of an open coded REQ_OP_ZONE_APPEND check + * because the block layer can rewrite REQ_OP_ZONE_APPEND to REQ_OP_WRITE if + * it is not natively supported. 
+ */ +static inline bool bio_is_zone_append(struct bio *bio) +{ + if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) + return false; + return bio_op(bio) == REQ_OP_ZONE_APPEND || + bio_flagged(bio, BIO_EMULATES_ZONE_APPEND); +} + struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev, unsigned int nr_pages, blk_opf_t opf, gfp_t gfp); struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new); diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h index 676f8f860c47..c7eae0bfb013 100644 --- a/include/linux/blk-integrity.h +++ b/include/linux/blk-integrity.h @@ -28,7 +28,7 @@ static inline bool queue_limits_stack_integrity_bdev(struct queue_limits *t, int blk_rq_map_integrity_sg(struct request *, struct scatterlist *); int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf, - ssize_t bytes, u32 seed); + ssize_t bytes); static inline bool blk_integrity_queue_supports_integrity(struct request_queue *q) @@ -104,8 +104,7 @@ static inline int blk_rq_map_integrity_sg(struct request *q, } static inline int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf, - ssize_t bytes, - u32 seed) + ssize_t bytes) { return -EINVAL; } diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 4fecf46ef681..c596e0e4cb75 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -156,9 +156,6 @@ struct request { struct blk_crypto_keyslot *crypt_keyslot; #endif - enum rw_hint write_hint; - unsigned short ioprio; - enum mq_rq_state state; atomic_t ref; @@ -222,7 +219,9 @@ static inline bool blk_rq_is_passthrough(struct request *rq) static inline unsigned short req_get_ioprio(struct request *req) { - return req->ioprio; + if (req->bio) + return req->bio->bi_ioprio; + return 0; } #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ) @@ -230,62 +229,61 @@ static inline unsigned short req_get_ioprio(struct request *req) #define rq_dma_dir(rq) \ (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE) -#define rq_list_add(listptr, rq) do { \ - (rq)->rq_next = *(listptr); \ - *(listptr) = rq; \ -} while (0) - -#define rq_list_add_tail(lastpptr, rq) do { \ - (rq)->rq_next = NULL; \ - **(lastpptr) = rq; \ - *(lastpptr) = &rq->rq_next; \ -} while (0) - -#define rq_list_pop(listptr) \ -({ \ - struct request *__req = NULL; \ - if ((listptr) && *(listptr)) { \ - __req = *(listptr); \ - *(listptr) = __req->rq_next; \ - } \ - __req; \ -}) +static inline int rq_list_empty(const struct rq_list *rl) +{ + return rl->head == NULL; +} -#define rq_list_peek(listptr) \ -({ \ - struct request *__req = NULL; \ - if ((listptr) && *(listptr)) \ - __req = *(listptr); \ - __req; \ -}) +static inline void rq_list_init(struct rq_list *rl) +{ + rl->head = NULL; + rl->tail = NULL; +} -#define rq_list_for_each(listptr, pos) \ - for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos)) +static inline void rq_list_add_tail(struct rq_list *rl, struct request *rq) +{ + rq->rq_next = NULL; + if (rl->tail) + rl->tail->rq_next = rq; + else + rl->head = rq; + rl->tail = rq; +} -#define rq_list_for_each_safe(listptr, pos, nxt) \ - for (pos = rq_list_peek((listptr)), nxt = rq_list_next(pos); \ - pos; pos = nxt, nxt = pos ? 
rq_list_next(pos) : NULL) +static inline void rq_list_add_head(struct rq_list *rl, struct request *rq) +{ + rq->rq_next = rl->head; + rl->head = rq; + if (!rl->tail) + rl->tail = rq; +} -#define rq_list_next(rq) (rq)->rq_next -#define rq_list_empty(list) ((list) == (struct request *) NULL) +static inline struct request *rq_list_pop(struct rq_list *rl) +{ + struct request *rq = rl->head; -/** - * rq_list_move() - move a struct request from one list to another - * @src: The source list @rq is currently in - * @dst: The destination list that @rq will be appended to - * @rq: The request to move - * @prev: The request preceding @rq in @src (NULL if @rq is the head) - */ -static inline void rq_list_move(struct request **src, struct request **dst, - struct request *rq, struct request *prev) + if (rq) { + rl->head = rl->head->rq_next; + if (!rl->head) + rl->tail = NULL; + rq->rq_next = NULL; + } + + return rq; +} + +static inline struct request *rq_list_peek(struct rq_list *rl) { - if (prev) - prev->rq_next = rq->rq_next; - else - *src = rq->rq_next; - rq_list_add(dst, rq); + return rl->head; } +#define rq_list_for_each(rl, pos) \ + for (pos = rq_list_peek((rl)); (pos); pos = pos->rq_next) + +#define rq_list_for_each_safe(rl, pos, nxt) \ + for (pos = rq_list_peek((rl)), nxt = pos->rq_next; \ + pos; pos = nxt, nxt = pos ? pos->rq_next : NULL) + /** * enum blk_eh_timer_return - How the timeout handler should proceed * @BLK_EH_DONE: The block driver completed the command or will complete it at @@ -577,7 +575,7 @@ struct blk_mq_ops { * empty the @rqlist completely, then the rest will be queued * individually by the block layer upon return. */ - void (*queue_rqs)(struct request **rqlist); + void (*queue_rqs)(struct rq_list *rqlist); /** * @get_budget: Reserve budget before queue request, once .queue_rq is @@ -857,12 +855,6 @@ void blk_mq_end_request_batch(struct io_comp_batch *ib); */ static inline bool blk_mq_need_time_stamp(struct request *rq) { - /* - * passthrough io doesn't use iostat accounting, cgroup stats - * and io scheduler functionalities. 
- */ - if (blk_rq_is_passthrough(rq)) - return false; return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED)); } @@ -892,7 +884,7 @@ static inline bool blk_mq_add_to_batch(struct request *req, else if (iob->complete != complete) return false; iob->need_ts |= blk_mq_need_time_stamp(req); - rq_list_add(&iob->req_list, req); + rq_list_add_tail(&iob->req_list, req); return true; } @@ -925,6 +917,8 @@ void blk_freeze_queue_start(struct request_queue *q); void blk_mq_freeze_queue_wait(struct request_queue *q); int blk_mq_freeze_queue_wait_timeout(struct request_queue *q, unsigned long timeout); +void blk_mq_unfreeze_queue_non_owner(struct request_queue *q); +void blk_freeze_queue_start_non_owner(struct request_queue *q); void blk_mq_map_queues(struct blk_mq_queue_map *qmap); void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues); @@ -989,7 +983,6 @@ static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio, rq->nr_phys_segments = nr_segs; rq->__data_len = bio->bi_iter.bi_size; rq->bio = rq->biotail = bio; - rq->ioprio = bio_prio(bio); } void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx, diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 50c3b959da28..378d3a1a22fc 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -25,6 +25,7 @@ #include <linux/uuid.h> #include <linux/xarray.h> #include <linux/file.h> +#include <linux/lockdep.h> struct module; struct request_queue; @@ -194,13 +195,11 @@ struct gendisk { unsigned int nr_zones; unsigned int zone_capacity; unsigned int last_zone_capacity; - unsigned long *conv_zones_bitmap; + unsigned long __rcu *conv_zones_bitmap; unsigned int zone_wplugs_hash_bits; spinlock_t zone_wplugs_lock; struct mempool_s *zone_wplugs_pool; struct hlist_head *zone_wplugs_hash; - struct list_head zone_wplugs_err_list; - struct work_struct zone_wplugs_work; struct workqueue_struct *zone_wplugs_wq; #endif /* CONFIG_BLK_DEV_ZONED */ @@ -332,6 +331,10 @@ typedef unsigned int __bitwise blk_features_t; #define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE \ ((__force blk_features_t)(1u << 15)) +/* stacked device can/does support atomic writes */ +#define BLK_FEAT_ATOMIC_WRITES_STACKED \ + ((__force blk_features_t)(1u << 16)) + /* * Flags automatically inherited when stacking limits. 
*/ @@ -349,6 +352,9 @@ typedef unsigned int __bitwise blk_flags_t; /* I/O topology is misaligned */ #define BLK_FLAG_MISALIGNED ((__force blk_flags_t)(1u << 1)) +/* passthrough command IO accounting */ +#define BLK_FLAG_IOSTATS_PASSTHROUGH ((__force blk_flags_t)(1u << 2)) + struct queue_limits { blk_features_t features; blk_flags_t flags; @@ -371,6 +377,7 @@ struct queue_limits { unsigned int max_user_discard_sectors; unsigned int max_secure_erase_sectors; unsigned int max_write_zeroes_sectors; + unsigned int max_hw_zone_append_sectors; unsigned int max_zone_append_sectors; unsigned int discard_granularity; unsigned int discard_alignment; @@ -471,6 +478,11 @@ struct request_queue { struct xarray hctx_table; struct percpu_ref q_usage_counter; + struct lock_class_key io_lock_cls_key; + struct lockdep_map io_lockdep_map; + + struct lock_class_key q_lock_cls_key; + struct lockdep_map q_lockdep_map; struct request *last_merge; @@ -566,6 +578,10 @@ struct request_queue { struct throtl_data *td; #endif struct rcu_head rcu_head; +#ifdef CONFIG_LOCKDEP + struct task_struct *mq_freeze_owner; + int mq_freeze_owner_depth; +#endif wait_queue_head_t mq_freeze_wq; /* * Protect concurrent access to q_usage_counter by @@ -617,6 +633,8 @@ void blk_queue_flag_clear(unsigned int flag, struct request_queue *q); test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) #define blk_queue_nonrot(q) (!((q)->limits.features & BLK_FEAT_ROTATIONAL)) #define blk_queue_io_stat(q) ((q)->limits.features & BLK_FEAT_IO_STAT) +#define blk_queue_passthrough_stat(q) \ + ((q)->limits.flags & BLK_FLAG_IOSTATS_PASSTHROUGH) #define blk_queue_dax(q) ((q)->limits.features & BLK_FEAT_DAX) #define blk_queue_pci_p2pdma(q) ((q)->limits.features & BLK_FEAT_PCI_P2PDMA) #ifdef CONFIG_BLK_RQ_ALLOC_TIME @@ -725,6 +743,9 @@ static inline unsigned int blk_queue_depth(struct request_queue *q) #define for_each_bio(_bio) \ for (; _bio; _bio = _bio->bi_next) +int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk, + const struct attribute_group **groups, + struct fwnode_handle *fwnode); int __must_check device_add_disk(struct device *parent, struct gendisk *disk, const struct attribute_group **groups); static inline int __must_check add_disk(struct gendisk *disk) @@ -756,13 +777,13 @@ static inline void bdev_clear_flag(struct block_device *bdev, unsigned flag) atomic_andnot(flag, &bdev->__bd_flags); } -static inline int get_disk_ro(struct gendisk *disk) +static inline bool get_disk_ro(struct gendisk *disk) { return bdev_test_flag(disk->part0, BD_READ_ONLY) || test_bit(GD_READ_ONLY, &disk->state); } -static inline int bdev_read_only(struct block_device *bdev) +static inline bool bdev_read_only(struct block_device *bdev) { return bdev_test_flag(bdev, BD_READ_ONLY) || get_disk_ro(bdev->bd_disk); } @@ -929,6 +950,7 @@ queue_limits_start_update(struct request_queue *q) int queue_limits_commit_update(struct request_queue *q, struct queue_limits *lim); int queue_limits_set(struct request_queue *q, struct queue_limits *lim); +int blk_validate_limits(struct queue_limits *lim); /** * queue_limits_cancel_update - cancel an atomic update of queue limits @@ -986,6 +1008,11 @@ extern void blk_put_queue(struct request_queue *); void blk_mark_disk_dead(struct gendisk *disk); +struct rq_list { + struct request *head; + struct request *tail; +}; + #ifdef CONFIG_BLOCK /* * blk_plug permits building a queue of related requests by holding the I/O @@ -999,10 +1026,10 @@ void blk_mark_disk_dead(struct gendisk *disk); * blk_flush_plug() is called. 
*/ struct blk_plug { - struct request *mq_list; /* blk-mq requests */ + struct rq_list mq_list; /* blk-mq requests */ /* if ios_left is > 1, we can batch tag/rq allocations */ - struct request *cached_rq; + struct rq_list cached_rqs; u64 cur_ktime; unsigned short nr_ios; @@ -1145,6 +1172,11 @@ enum blk_default_limits { */ #define BLK_DEF_MAX_SECTORS_CAP 2560u +static inline struct queue_limits *bdev_limits(struct block_device *bdev) +{ + return &bdev_get_queue(bdev)->limits; +} + static inline unsigned long queue_segment_boundary(const struct request_queue *q) { return q->limits.seg_boundary_mask; @@ -1185,25 +1217,9 @@ static inline unsigned int queue_max_segment_size(const struct request_queue *q) return q->limits.max_segment_size; } -static inline unsigned int -queue_limits_max_zone_append_sectors(const struct queue_limits *l) -{ - unsigned int max_sectors = min(l->chunk_sectors, l->max_hw_sectors); - - return min_not_zero(l->max_zone_append_sectors, max_sectors); -} - -static inline unsigned int queue_max_zone_append_sectors(struct request_queue *q) -{ - if (!blk_queue_is_zoned(q)) - return 0; - - return queue_limits_max_zone_append_sectors(&q->limits); -} - static inline bool queue_emulates_zone_append(struct request_queue *q) { - return blk_queue_is_zoned(q) && !q->limits.max_zone_append_sectors; + return blk_queue_is_zoned(q) && !q->limits.max_hw_zone_append_sectors; } static inline bool bdev_emulates_zone_append(struct block_device *bdev) @@ -1214,7 +1230,7 @@ static inline bool bdev_emulates_zone_append(struct block_device *bdev) static inline unsigned int bdev_max_zone_append_sectors(struct block_device *bdev) { - return queue_max_zone_append_sectors(bdev_get_queue(bdev)); + return bdev_limits(bdev)->max_zone_append_sectors; } static inline unsigned int bdev_max_segments(struct block_device *bdev) @@ -1247,7 +1263,7 @@ static inline unsigned int queue_io_min(const struct request_queue *q) return q->limits.io_min; } -static inline int bdev_io_min(struct block_device *bdev) +static inline unsigned int bdev_io_min(struct block_device *bdev) { return queue_io_min(bdev_get_queue(bdev)); } @@ -1257,7 +1273,7 @@ static inline unsigned int queue_io_opt(const struct request_queue *q) return q->limits.io_opt; } -static inline int bdev_io_opt(struct block_device *bdev) +static inline unsigned int bdev_io_opt(struct block_device *bdev) { return queue_io_opt(bdev_get_queue(bdev)); } @@ -1279,23 +1295,23 @@ unsigned int bdev_discard_alignment(struct block_device *bdev); static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev) { - return bdev_get_queue(bdev)->limits.max_discard_sectors; + return bdev_limits(bdev)->max_discard_sectors; } static inline unsigned int bdev_discard_granularity(struct block_device *bdev) { - return bdev_get_queue(bdev)->limits.discard_granularity; + return bdev_limits(bdev)->discard_granularity; } static inline unsigned int bdev_max_secure_erase_sectors(struct block_device *bdev) { - return bdev_get_queue(bdev)->limits.max_secure_erase_sectors; + return bdev_limits(bdev)->max_secure_erase_sectors; } static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev) { - return bdev_get_queue(bdev)->limits.max_write_zeroes_sectors; + return bdev_limits(bdev)->max_write_zeroes_sectors; } static inline bool bdev_nonrot(struct block_device *bdev) @@ -1331,7 +1347,7 @@ static inline bool bdev_write_cache(struct block_device *bdev) static inline bool bdev_fua(struct block_device *bdev) { - return bdev_get_queue(bdev)->limits.features & 
BLK_FEAT_FUA; + return bdev_limits(bdev)->features & BLK_FEAT_FUA; } static inline bool bdev_nowait(struct block_device *bdev) @@ -1376,7 +1392,37 @@ static inline bool bdev_is_zone_start(struct block_device *bdev, return bdev_offset_from_zone_start(bdev, sector) == 0; } -static inline int queue_dma_alignment(const struct request_queue *q) +/** + * bdev_zone_is_seq - check if a sector belongs to a sequential write zone + * @bdev: block device to check + * @sector: sector number + * + * Check if @sector on @bdev is contained in a sequential write required zone. + */ +static inline bool bdev_zone_is_seq(struct block_device *bdev, sector_t sector) +{ + bool is_seq = false; + +#if IS_ENABLED(CONFIG_BLK_DEV_ZONED) + if (bdev_is_zoned(bdev)) { + struct gendisk *disk = bdev->bd_disk; + unsigned long *bitmap; + + rcu_read_lock(); + bitmap = rcu_dereference(disk->conv_zones_bitmap); + is_seq = !bitmap || + !test_bit(disk_zone_no(disk, sector), bitmap); + rcu_read_unlock(); + } +#endif + + return is_seq; +} + +int blk_zone_issue_zeroout(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask); + +static inline unsigned int queue_dma_alignment(const struct request_queue *q) { return q->limits.dma_alignment; } @@ -1417,12 +1463,13 @@ static inline bool bdev_iter_is_aligned(struct block_device *bdev, bdev_logical_block_size(bdev) - 1); } -static inline int blk_lim_dma_alignment_and_pad(struct queue_limits *lim) +static inline unsigned int +blk_lim_dma_alignment_and_pad(struct queue_limits *lim) { return lim->dma_alignment | lim->dma_pad_mask; } -static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, +static inline bool blk_rq_aligned(struct request_queue *q, unsigned long addr, unsigned int len) { unsigned int alignment = blk_lim_dma_alignment_and_pad(&q->limits); @@ -1540,7 +1587,6 @@ static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time) return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev); } -int bdev_read_only(struct block_device *bdev); int set_blocksize(struct file *file, int size); int lookup_bdev(const char *pathname, dev_t *dev); @@ -1648,7 +1694,7 @@ int bdev_thaw(struct block_device *bdev); void bdev_fput(struct file *bdev_file); struct io_comp_batch { - struct request *req_list; + struct rq_list req_list; bool need_ts; void (*complete)(struct io_comp_batch *); }; @@ -1674,6 +1720,22 @@ static inline bool bdev_can_atomic_write(struct block_device *bdev) return true; } +static inline unsigned int +bdev_atomic_write_unit_min_bytes(struct block_device *bdev) +{ + if (!bdev_can_atomic_write(bdev)) + return 0; + return queue_atomic_write_unit_min_bytes(bdev_get_queue(bdev)); +} + +static inline unsigned int +bdev_atomic_write_unit_max_bytes(struct block_device *bdev) +{ + if (!bdev_can_atomic_write(bdev)) + return 0; + return queue_atomic_write_unit_max_bytes(bdev_get_queue(bdev)); +} + #define DEFINE_IO_COMP_BATCH(name) struct io_comp_batch name = { } #endif /* _LINUX_BLKDEV_H */ diff --git a/include/linux/bootmem_info.h b/include/linux/bootmem_info.h index cffa38a73618..d8a8d245824a 100644 --- a/include/linux/bootmem_info.h +++ b/include/linux/bootmem_info.h @@ -6,11 +6,10 @@ #include <linux/kmemleak.h> /* - * Types for free bootmem stored in page->lru.next. These have to be in - * some random range in unsigned long space for debugging purposes. + * Types for free bootmem stored in the low bits of page->private. 
*/ -enum { - MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12, +enum bootmem_type { + MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 1, SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE, MIX_SECTION_INFO, NODE_INFO, @@ -21,9 +20,19 @@ enum { void __init register_page_bootmem_info_node(struct pglist_data *pgdat); void get_page_bootmem(unsigned long info, struct page *page, - unsigned long type); + enum bootmem_type type); void put_page_bootmem(struct page *page); +static inline enum bootmem_type bootmem_type(const struct page *page) +{ + return (unsigned long)page->private & 0xf; +} + +static inline unsigned long bootmem_info(const struct page *page) +{ + return (unsigned long)page->private >> 4; +} + /* * Any memory allocated via the memblock allocator and not via the * buddy will be marked reserved already in the memmap. For those @@ -31,7 +40,7 @@ void put_page_bootmem(struct page *page); */ static inline void free_bootmem_page(struct page *page) { - unsigned long magic = page->index; + enum bootmem_type type = bootmem_type(page); /* * The reserve_bootmem_region sets the reserved flag on bootmem @@ -39,7 +48,7 @@ static inline void free_bootmem_page(struct page *page) */ VM_BUG_ON_PAGE(page_ref_count(page) != 2, page); - if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) + if (type == SECTION_INFO || type == MIX_SECTION_INFO) put_page_bootmem(page); else VM_BUG_ON_PAGE(1, page); @@ -53,8 +62,18 @@ static inline void put_page_bootmem(struct page *page) { } +static inline enum bootmem_type bootmem_type(const struct page *page) +{ + return SECTION_INFO; +} + +static inline unsigned long bootmem_info(const struct page *page) +{ + return 0; +} + static inline void get_page_bootmem(unsigned long info, struct page *page, - unsigned long type) + enum bootmem_type type) { } diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index ce91d9b2acb9..7fc69083e745 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -138,7 +138,7 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, short access, enum cgroup_bpf_attach_type atype); int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, - struct ctl_table *table, int write, + const struct ctl_table *table, int write, char **buf, size_t *pcount, loff_t *ppos, enum cgroup_bpf_attach_type atype); @@ -209,7 +209,7 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk, int __ret = 0; \ if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk) { \ typeof(sk) __sk = sk_to_full_sk(sk); \ - if (sk_fullsock(__sk) && __sk == skb_to_full_sk(skb) && \ + if (__sk && __sk == skb_to_full_sk(skb) && \ cgroup_bpf_sock_enabled(__sk, CGROUP_INET_EGRESS)) \ __ret = __cgroup_bpf_run_filter_skb(__sk, skb, \ CGROUP_INET_EGRESS); \ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index bdadb0bb6cec..6e63dd3443b9 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -203,6 +203,7 @@ enum btf_field_type { BPF_GRAPH_ROOT = BPF_RB_ROOT | BPF_LIST_HEAD, BPF_REFCOUNT = (1 << 9), BPF_WORKQUEUE = (1 << 10), + BPF_UPTR = (1 << 11), }; typedef void (*btf_dtor_kfunc_t)(void *); @@ -322,6 +323,8 @@ static inline const char *btf_field_type_name(enum btf_field_type type) return "kptr"; case BPF_KPTR_PERCPU: return "percpu_kptr"; + case BPF_UPTR: + return "uptr"; case BPF_LIST_HEAD: return "bpf_list_head"; case BPF_LIST_NODE: @@ -350,6 +353,7 @@ static inline u32 btf_field_type_size(enum btf_field_type type) case BPF_KPTR_UNREF: case BPF_KPTR_REF: case BPF_KPTR_PERCPU: + case BPF_UPTR: return sizeof(u64); case BPF_LIST_HEAD: 
return sizeof(struct bpf_list_head); @@ -379,6 +383,7 @@ static inline u32 btf_field_type_align(enum btf_field_type type) case BPF_KPTR_UNREF: case BPF_KPTR_REF: case BPF_KPTR_PERCPU: + case BPF_UPTR: return __alignof__(u64); case BPF_LIST_HEAD: return __alignof__(struct bpf_list_head); @@ -419,6 +424,7 @@ static inline void bpf_obj_init_field(const struct btf_field *field, void *addr) case BPF_KPTR_UNREF: case BPF_KPTR_REF: case BPF_KPTR_PERCPU: + case BPF_UPTR: break; default: WARN_ON_ONCE(1); @@ -507,6 +513,25 @@ static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src bpf_obj_memcpy(map->record, dst, src, map->value_size, true); } +static inline void bpf_obj_swap_uptrs(const struct btf_record *rec, void *dst, void *src) +{ + unsigned long *src_uptr, *dst_uptr; + const struct btf_field *field; + int i; + + if (!btf_record_has_field(rec, BPF_UPTR)) + return; + + for (i = 0, field = rec->fields; i < rec->cnt; i++, field++) { + if (field->type != BPF_UPTR) + continue; + + src_uptr = src + field->offset; + dst_uptr = dst + field->offset; + swap(*src_uptr, *dst_uptr); + } +} + static inline void bpf_obj_memzero(struct btf_record *rec, void *dst, u32 size) { u32 curr_off = 0; @@ -907,10 +932,6 @@ enum bpf_reg_type { * additional context, assume the value is non-null. */ PTR_TO_BTF_ID, - /* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not - * been checked for null. Used primarily to inform the verifier - * an explicit null check is required for this struct. - */ PTR_TO_MEM, /* reg points to valid memory region */ PTR_TO_ARENA, PTR_TO_BUF, /* reg points to a read/write buffer */ @@ -923,6 +944,10 @@ enum bpf_reg_type { PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCKET, PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON, PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | PTR_TO_TCP_SOCK, + /* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not + * been checked for null. Used primarily to inform the verifier + * an explicit null check is required for this struct. + */ PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | PTR_TO_BTF_ID, /* This must be the last entry. Its purpose is to ensure the enum is @@ -1300,8 +1325,12 @@ void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len); bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr); #ifdef CONFIG_BPF_JIT -int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr); -int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr); +int bpf_trampoline_link_prog(struct bpf_tramp_link *link, + struct bpf_trampoline *tr, + struct bpf_prog *tgt_prog); +int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, + struct bpf_trampoline *tr, + struct bpf_prog *tgt_prog); struct bpf_trampoline *bpf_trampoline_get(u64 key, struct bpf_attach_target_info *tgt_info); void bpf_trampoline_put(struct bpf_trampoline *tr); @@ -1373,7 +1402,8 @@ int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_func void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from, struct bpf_prog *to); /* Called only from JIT-enabled code, so there's no need for stubs. 
*/ -void bpf_image_ksym_add(void *data, unsigned int size, struct bpf_ksym *ksym); +void bpf_image_ksym_init(void *data, unsigned int size, struct bpf_ksym *ksym); +void bpf_image_ksym_add(struct bpf_ksym *ksym); void bpf_image_ksym_del(struct bpf_ksym *ksym); void bpf_ksym_add(struct bpf_ksym *ksym); void bpf_ksym_del(struct bpf_ksym *ksym); @@ -1382,12 +1412,14 @@ void bpf_jit_uncharge_modmem(u32 size); bool bpf_prog_has_trampoline(const struct bpf_prog *prog); #else static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link, - struct bpf_trampoline *tr) + struct bpf_trampoline *tr, + struct bpf_prog *tgt_prog) { return -ENOTSUPP; } static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, - struct bpf_trampoline *tr) + struct bpf_trampoline *tr, + struct bpf_prog *tgt_prog) { return -ENOTSUPP; } @@ -1476,6 +1508,7 @@ struct bpf_prog_aux { u32 max_rdwr_access; struct btf *attach_btf; const struct bpf_ctx_arg_aux *ctx_arg_info; + void __percpu *priv_stack_ptr; struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */ struct bpf_prog *dst_prog; struct bpf_trampoline *dst_trampoline; @@ -1491,7 +1524,14 @@ struct bpf_prog_aux { bool xdp_has_frags; bool exception_cb; bool exception_boundary; + bool is_extended; /* true if extended by freplace program */ + bool jits_use_priv_stack; + bool priv_stack_requested; + bool changes_pkt_data; + u64 prog_array_member_cnt; /* counts how many times as member of prog_array */ + struct mutex ext_mutex; /* mutex for is_extended and prog_array_member_cnt */ struct bpf_arena *arena; + void (*recursion_detected)(struct bpf_prog *prog); /* callback if recursion is detected */ /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */ const struct btf_type *attach_func_proto; /* function name for valid attach_btf_id */ @@ -1606,6 +1646,11 @@ struct bpf_link { enum bpf_link_type type; const struct bpf_link_ops *ops; struct bpf_prog *prog; + /* whether BPF link itself has "sleepable" semantics, which can differ + * from underlying BPF program having a "sleepable" semantics, as BPF + * link's semantics is determined by target attach hook + */ + bool sleepable; /* rcu is used before freeing, work can be used to schedule that * RCU-based freeing before that, so they never overlap */ @@ -1622,8 +1667,10 @@ struct bpf_link_ops { */ void (*dealloc)(struct bpf_link *link); /* deallocate link resources callback, called after RCU grace period; - * if underlying BPF program is sleepable we go through tasks trace - * RCU GP and then "classic" RCU GP + * if either the underlying BPF program is sleepable or BPF link's + * target hook is sleepable, we'll go through tasks trace RCU GP and + * then "classic" RCU GP; this need for chaining tasks trace and + * classic RCU GPs is designated by setting bpf_link->sleepable flag */ void (*dealloc_deferred)(struct bpf_link *link); int (*detach)(struct bpf_link *link); @@ -2147,26 +2194,25 @@ bpf_prog_run_array(const struct bpf_prog_array *array, * rcu-protected dynamically sized maps. 
*/ static __always_inline u32 -bpf_prog_run_array_uprobe(const struct bpf_prog_array __rcu *array_rcu, +bpf_prog_run_array_uprobe(const struct bpf_prog_array *array, const void *ctx, bpf_prog_run_fn run_prog) { const struct bpf_prog_array_item *item; const struct bpf_prog *prog; - const struct bpf_prog_array *array; struct bpf_run_ctx *old_run_ctx; struct bpf_trace_run_ctx run_ctx; u32 ret = 1; might_fault(); + RCU_LOCKDEP_WARN(!rcu_read_lock_trace_held(), "no rcu lock held"); + + if (unlikely(!array)) + return ret; - rcu_read_lock_trace(); migrate_disable(); run_ctx.is_uprobe = true; - array = rcu_dereference_check(array_rcu, rcu_read_lock_trace_held()); - if (unlikely(!array)) - goto out; old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); item = &array->items[0]; while ((prog = READ_ONCE(item->prog))) { @@ -2181,9 +2227,7 @@ bpf_prog_run_array_uprobe(const struct bpf_prog_array __rcu *array_rcu, rcu_read_unlock(); } bpf_reset_run_ctx(old_run_ctx); -out: migrate_enable(); - rcu_read_unlock_trace(); return ret; } @@ -2370,6 +2414,9 @@ int bpf_prog_new_fd(struct bpf_prog *prog); void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, const struct bpf_link_ops *ops, struct bpf_prog *prog); +void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type, + const struct bpf_link_ops *ops, struct bpf_prog *prog, + bool sleepable); int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer); int bpf_link_settle(struct bpf_link_primer *primer); void bpf_link_cleanup(struct bpf_link_primer *primer); @@ -2725,6 +2772,12 @@ static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, { } +static inline void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type, + const struct bpf_link_ops *ops, struct bpf_prog *prog, + bool sleepable) +{ +} + static inline int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer) { diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h index dcddb0aef7d8..ab7244d8108f 100644 --- a/include/linux/bpf_local_storage.h +++ b/include/linux/bpf_local_storage.h @@ -77,7 +77,13 @@ struct bpf_local_storage_elem { struct hlist_node map_node; /* Linked to bpf_local_storage_map */ struct hlist_node snode; /* Linked to bpf_local_storage */ struct bpf_local_storage __rcu *local_storage; - struct rcu_head rcu; + union { + struct rcu_head rcu; + struct hlist_node free_node; /* used to postpone + * bpf_selem_free + * after raw_spin_unlock + */ + }; /* 8 bytes hole */ /* The data is stored in another cacheline to minimize * the number of cachelines access during a cache hit. 
@@ -181,7 +187,7 @@ void bpf_selem_link_map(struct bpf_local_storage_map *smap, struct bpf_local_storage_elem * bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, void *value, - bool charge_mem, gfp_t gfp_flags); + bool charge_mem, bool swap_uptrs, gfp_t gfp_flags); void bpf_selem_free(struct bpf_local_storage_elem *selem, struct bpf_local_storage_map *smap, @@ -195,7 +201,7 @@ bpf_local_storage_alloc(void *owner, struct bpf_local_storage_data * bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap, - void *value, u64 map_flags, gfp_t gfp_flags); + void *value, u64 map_flags, bool swap_uptrs, gfp_t gfp_flags); u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map); diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 4513372c5bc8..48b7b2eeb7e2 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -48,22 +48,6 @@ enum bpf_reg_liveness { REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */ }; -/* For every reg representing a map value or allocated object pointer, - * we consider the tuple of (ptr, id) for them to be unique in verifier - * context and conside them to not alias each other for the purposes of - * tracking lock state. - */ -struct bpf_active_lock { - /* This can either be reg->map_ptr or reg->btf. If ptr is NULL, - * there's no active lock held, and other fields have no - * meaning. If non-NULL, it indicates that a lock is held and - * id member has the reg->id of the register which can be >= 0. - */ - void *ptr; - /* This will be reg->id */ - u32 id; -}; - #define ITER_PREFIX "bpf_iter_" enum bpf_iter_state { @@ -266,6 +250,13 @@ struct bpf_stack_state { }; struct bpf_reference_state { + /* Each reference object has a type. Ensure REF_TYPE_PTR is zero to + * default to pointer reference on zero initialization of a state. + */ + enum ref_state_type { + REF_TYPE_PTR = 0, + REF_TYPE_LOCK, + } type; /* Track each reference created with a unique id, even if the same * instruction creates the reference multiple times (eg, via CALL). */ @@ -274,17 +265,10 @@ struct bpf_reference_state { * is used purely to inform the user of a reference leak. */ int insn_idx; - /* There can be a case like: - * main (frame 0) - * cb (frame 1) - * func (frame 3) - * cb (frame 4) - * Hence for frame 4, if callback_ref just stored boolean, it would be - * impossible to distinguish nested callback refs. Hence store the - * frameno and compare that to callback_ref in check_reference_leak when - * exiting a callback function. - */ - int callback_ref; + /* Use to keep track of the source object of a lock, to ensure + * it matches on unlock. + */ + void *ptr; }; struct bpf_retval_range { @@ -332,6 +316,7 @@ struct bpf_func_state { /* The following fields should be last. See copy_func_state() */ int acquired_refs; + int active_locks; struct bpf_reference_state *refs; /* The state of the stack. Each element of the array describes BPF_REG_SIZE * (i.e. 8) bytes worth of stack memory. 
@@ -349,7 +334,7 @@ struct bpf_func_state { #define MAX_CALL_FRAMES 8 -/* instruction history flags, used in bpf_jmp_history_entry.flags field */ +/* instruction history flags, used in bpf_insn_hist_entry.flags field */ enum { /* instruction references stack slot through PTR_TO_STACK register; * we also store stack's frame number in lower 3 bits (MAX_CALL_FRAMES is 8) @@ -367,7 +352,7 @@ enum { static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES); static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8); -struct bpf_jmp_history_entry { +struct bpf_insn_hist_entry { u32 idx; /* insn idx can't be bigger than 1 million */ u32 prev_idx : 22; @@ -434,7 +419,6 @@ struct bpf_verifier_state { u32 insn_idx; u32 curframe; - struct bpf_active_lock active_lock; bool speculative; bool active_rcu_lock; u32 active_preempt_lock; @@ -458,13 +442,14 @@ struct bpf_verifier_state { * See get_loop_entry() for more information. */ struct bpf_verifier_state *loop_entry; - /* jmp history recorded from first to last. - * backtracking is using it to go from last to first. - * For most states jmp_history_cnt is [0-3]. + /* Sub-range of env->insn_hist[] corresponding to this state's + * instruction history. + * Backtracking is using it to go from last to first. + * For most states instruction history is short, 0-3 instructions. * For loops can go up to ~40. */ - struct bpf_jmp_history_entry *jmp_history; - u32 jmp_history_cnt; + u32 insn_hist_start; + u32 insn_hist_end; u32 dfs_depth; u32 callback_unroll_depth; u32 may_goto_depth; @@ -649,6 +634,12 @@ struct bpf_subprog_arg_info { }; }; +enum priv_stack_mode { + PRIV_STACK_UNKNOWN, + NO_PRIV_STACK, + PRIV_STACK_ADAPTIVE, +}; + struct bpf_subprog_info { /* 'start' has to be the first field otherwise find_subprog() won't work */ u32 start; /* insn idx of function entry point */ @@ -668,7 +659,9 @@ struct bpf_subprog_info { bool args_cached: 1; /* true if bpf_fastcall stack region is used by functions that can't be inlined */ bool keep_fastcall_stack: 1; + bool changes_pkt_data: 1; + enum priv_stack_mode priv_stack_mode; u8 arg_cnt; struct bpf_subprog_arg_info args[MAX_BPF_FUNC_REG_ARGS]; }; @@ -747,7 +740,9 @@ struct bpf_verifier_env { int cur_stack; } cfg; struct backtrack_state bt; - struct bpf_jmp_history_entry *cur_hist_ent; + struct bpf_insn_hist_entry *insn_hist; + struct bpf_insn_hist_entry *cur_hist_ent; + u32 insn_hist_cap; u32 pass_cnt; /* number of times do_check() was called */ u32 subprog_cnt; /* number of instructions analyzed by the verifier */ @@ -888,6 +883,7 @@ static inline bool bpf_prog_check_recur(const struct bpf_prog *prog) case BPF_PROG_TYPE_TRACING: return prog->expected_attach_type != BPF_TRACE_ITER; case BPF_PROG_TYPE_STRUCT_OPS: + return prog->aux->jits_use_priv_stack; case BPF_PROG_TYPE_LSM: return false; default: diff --git a/include/linux/btf.h b/include/linux/btf.h index b8a583194c4a..4214e76c9168 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -75,6 +75,7 @@ #define KF_ITER_NEXT (1 << 9) /* kfunc implements BPF iter next method */ #define KF_ITER_DESTROY (1 << 10) /* kfunc implements BPF iter destructor */ #define KF_RCU_PROTECTED (1 << 11) /* kfunc should be protected by rcu cs when they are invoked */ +#define KF_FASTCALL (1 << 12) /* kfunc supports bpf_fastcall protocol */ /* * Tag marking a kernel function as a kfunc. 
This is meant to minimize the @@ -581,6 +582,16 @@ int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_ty bool btf_types_are_same(const struct btf *btf1, u32 id1, const struct btf *btf2, u32 id2); int btf_check_iter_arg(struct btf *btf, const struct btf_type *func, int arg_idx); + +static inline bool btf_type_is_struct_ptr(struct btf *btf, const struct btf_type *t) +{ + if (!btf_type_is_ptr(t)) + return false; + + t = btf_type_skip_modifiers(btf, t->type, NULL); + + return btf_type_is_struct(t); +} #else static inline const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id) @@ -660,15 +671,4 @@ static inline int btf_check_iter_arg(struct btf *btf, const struct btf_type *fun return -EOPNOTSUPP; } #endif - -static inline bool btf_type_is_struct_ptr(struct btf *btf, const struct btf_type *t) -{ - if (!btf_type_is_ptr(t)) - return false; - - t = btf_type_skip_modifiers(btf, t->type, NULL); - - return btf_type_is_struct(t); -} - #endif diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h index c0e3e1426a82..139bdececdcf 100644 --- a/include/linux/btf_ids.h +++ b/include/linux/btf_ids.h @@ -283,5 +283,6 @@ extern u32 btf_tracing_ids[]; extern u32 bpf_cgroup_btf_id[]; extern u32 bpf_local_storage_map_btf_id[]; extern u32 btf_bpf_map_id[]; +extern u32 bpf_kmem_cache_btf_id[]; #endif diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index 108060612bb8..7ad736538649 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h @@ -155,8 +155,14 @@ static inline int get_cpu_cacheinfo_id(int cpu, int level) #ifndef CONFIG_ARCH_HAS_CPU_CACHE_ALIASING #define cpu_dcache_is_aliasing() false +#define cpu_icache_is_aliasing() cpu_dcache_is_aliasing() #else #include <asm/cachetype.h> + +#ifndef cpu_icache_is_aliasing +#define cpu_icache_is_aliasing() cpu_dcache_is_aliasing() +#endif + #endif #endif /* _LINUX_CACHEINFO_H */ diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index ee1d0e5f9789..2d7d86f0290d 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -808,7 +808,7 @@ struct ceph_mds_caps { struct ceph_mds_cap_peer { __le64 cap_id; - __le32 seq; + __le32 issue_seq; __le32 mseq; __le32 mds; __u8 flags; @@ -822,7 +822,7 @@ struct ceph_mds_cap_release { struct ceph_mds_cap_item { __le64 ino; __le64 cap_id; - __le32 migrate_seq, seq; + __le32 migrate_seq, issue_seq; } __attribute__ ((packed)); #define CEPH_MDS_LEASE_REVOKE 1 /* mds -> client */ diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index 15fb566d3f46..733e7f93db66 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -317,12 +317,6 @@ extern void ceph_release_page_vector(struct page **pages, int num_pages); extern void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty); extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags); -extern int ceph_copy_user_to_page_vector(struct page **pages, - const void __user *data, - loff_t off, size_t len); -extern void ceph_copy_to_page_vector(struct page **pages, - const void *data, - loff_t off, size_t len); extern void ceph_copy_from_page_vector(struct page **pages, void *data, loff_t off, size_t len); diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index d7941478158c..d55b30057a45 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -626,8 +626,6 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc, u32 timeout, struct page 
***preply_pages, size_t *preply_len); -int ceph_osdc_watch_check(struct ceph_osd_client *osdc, - struct ceph_osd_linger_request *lreq); int ceph_osdc_list_watchers(struct ceph_osd_client *osdc, struct ceph_object_id *oid, struct ceph_object_locator *oloc, diff --git a/include/linux/ceph/pagelist.h b/include/linux/ceph/pagelist.h index 5dead8486fd8..879bec0863aa 100644 --- a/include/linux/ceph/pagelist.h +++ b/include/linux/ceph/pagelist.h @@ -17,12 +17,6 @@ struct ceph_pagelist { refcount_t refcnt; }; -struct ceph_pagelist_cursor { - struct ceph_pagelist *pl; /* pagelist, for error checking */ - struct list_head *page_lru; /* page in list */ - size_t room; /* room remaining to reset to */ -}; - struct ceph_pagelist *ceph_pagelist_alloc(gfp_t gfp_flags); extern void ceph_pagelist_release(struct ceph_pagelist *pl); @@ -33,12 +27,6 @@ extern int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space); extern int ceph_pagelist_free_reserve(struct ceph_pagelist *pl); -extern void ceph_pagelist_set_cursor(struct ceph_pagelist *pl, - struct ceph_pagelist_cursor *c); - -extern int ceph_pagelist_truncate(struct ceph_pagelist *pl, - struct ceph_pagelist_cursor *c); - static inline int ceph_pagelist_encode_64(struct ceph_pagelist *pl, u64 v) { __le64 ev = cpu_to_le64(v); diff --git a/include/linux/cfag12864b.h b/include/linux/cfag12864b.h index 6617d9c68d86..83e6613d12ae 100644 --- a/include/linux/cfag12864b.h +++ b/include/linux/cfag12864b.h @@ -28,13 +28,6 @@ extern unsigned char * cfag12864b_buffer; /* - * Get the refresh rate of the LCD - * - * Returns the refresh rate (hertz). - */ -extern unsigned int cfag12864b_getrate(void); - -/* * Enable refreshing * * Returns 0 if successful (anyone was using it), @@ -50,16 +43,6 @@ extern unsigned char cfag12864b_enable(void); extern void cfag12864b_disable(void); /* - * Is enabled refreshing? (is anyone using the module?) - * - * Returns 0 if refreshing is not enabled (anyone is using it), - * or != 0 if refreshing is enabled (someone is using it). - * - * Useful for buffer read-only modules. - */ -extern unsigned char cfag12864b_isenabled(void); - -/* * Is the module inited? */ extern unsigned char cfag12864b_isinited(void); diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 47ae4c4d924c..1b20d2d8ef7c 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -327,6 +327,7 @@ struct cgroup_base_stat { #ifdef CONFIG_SCHED_CORE u64 forceidle_sum; #endif + u64 ntime; }; /* @@ -397,7 +398,7 @@ struct cgroup_freezer_state { bool freeze; /* Should the cgroup actually be frozen? */ - int e_freeze; + bool e_freeze; /* Fields below are protected by css_set_lock */ diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h index 038b2d523bf8..ec00e3f7af2b 100644 --- a/include/linux/cleanup.h +++ b/include/linux/cleanup.h @@ -234,7 +234,7 @@ const volatile void * __must_check_fn(const volatile void *val) * DEFINE_CLASS(fdget, struct fd, fdput(_T), fdget(fd), int fd) * * CLASS(fdget, f)(fd); - * if (!fd_file(f)) + * if (fd_empty(f)) * return -EBADF; * * // use 'f' without concern @@ -285,14 +285,20 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \ * similar to scoped_guard(), except it does fail when the lock * acquire fails. * + * Only for conditional locks. 
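For reference, a minimal consumer-side sketch of the guard forms defined in this file, using a hypothetical driver lock (the names foo_lock/foo_value are made up):

static DEFINE_MUTEX(foo_lock);	/* hypothetical */
static int foo_value;

static int foo_set(int v)
{
	/* unconditional form: the lock is held for the scope, dropped on any exit */
	scoped_guard(mutex, &foo_lock) {
		foo_value = v;
	}

	/* conditional form: the _fail statement runs when the trylock fails */
	scoped_cond_guard(mutex_try, return -EBUSY, &foo_lock) {
		foo_value = v;
	}

	return 0;
}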
*/ +#define __DEFINE_CLASS_IS_CONDITIONAL(_name, _is_cond) \ +static __maybe_unused const bool class_##_name##_is_conditional = _is_cond + #define DEFINE_GUARD(_name, _type, _lock, _unlock) \ + __DEFINE_CLASS_IS_CONDITIONAL(_name, false); \ DEFINE_CLASS(_name, _type, if (_T) { _unlock; }, ({ _lock; _T; }), _type _T); \ static inline void * class_##_name##_lock_ptr(class_##_name##_t *_T) \ - { return *_T; } + { return (void *)(__force unsigned long)*_T; } #define DEFINE_GUARD_COND(_name, _ext, _condlock) \ + __DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \ EXTEND_CLASS(_name, _ext, \ ({ void *_t = _T; if (_T && !(_condlock)) _t = NULL; _t; }), \ class_##_name##_t _T) \ @@ -303,16 +309,40 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \ CLASS(_name, __UNIQUE_ID(guard)) #define __guard_ptr(_name) class_##_name##_lock_ptr +#define __is_cond_ptr(_name) class_##_name##_is_conditional -#define scoped_guard(_name, args...) \ - for (CLASS(_name, scope)(args), \ - *done = NULL; __guard_ptr(_name)(&scope) && !done; done = (void *)1) - -#define scoped_cond_guard(_name, _fail, args...) \ - for (CLASS(_name, scope)(args), \ - *done = NULL; !done; done = (void *)1) \ - if (!__guard_ptr(_name)(&scope)) _fail; \ - else +/* + * Helper macro for scoped_guard(). + * + * Note that the "!__is_cond_ptr(_name)" part of the condition ensures that + * compiler would be sure that for the unconditional locks the body of the + * loop (caller-provided code glued to the else clause) could not be skipped. + * It is needed because the other part - "__guard_ptr(_name)(&scope)" - is too + * hard to deduce (even if could be proven true for unconditional locks). + */ +#define __scoped_guard(_name, _label, args...) \ + for (CLASS(_name, scope)(args); \ + __guard_ptr(_name)(&scope) || !__is_cond_ptr(_name); \ + ({ goto _label; })) \ + if (0) { \ +_label: \ + break; \ + } else + +#define scoped_guard(_name, args...) \ + __scoped_guard(_name, __UNIQUE_ID(label), args) + +#define __scoped_cond_guard(_name, _fail, _label, args...) \ + for (CLASS(_name, scope)(args); true; ({ goto _label; })) \ + if (!__guard_ptr(_name)(&scope)) { \ + BUILD_BUG_ON(!__is_cond_ptr(_name)); \ + _fail; \ +_label: \ + break; \ + } else + +#define scoped_cond_guard(_name, _fail, args...) \ + __scoped_cond_guard(_name, _fail, __UNIQUE_ID(label), args) /* * Additional helper macros for generating lock guards with types, either for @@ -347,7 +377,7 @@ static inline void class_##_name##_destructor(class_##_name##_t *_T) \ \ static inline void *class_##_name##_lock_ptr(class_##_name##_t *_T) \ { \ - return _T->lock; \ + return (void *)(__force unsigned long)_T->lock; \ } @@ -369,14 +399,17 @@ static inline class_##_name##_t class_##_name##_constructor(void) \ } #define DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, ...) \ +__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \ __DEFINE_UNLOCK_GUARD(_name, _type, _unlock, __VA_ARGS__) \ __DEFINE_LOCK_GUARD_1(_name, _type, _lock) #define DEFINE_LOCK_GUARD_0(_name, _lock, _unlock, ...) 
\ +__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \ __DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__) \ __DEFINE_LOCK_GUARD_0(_name, _lock) #define DEFINE_LOCK_GUARD_1_COND(_name, _ext, _condlock) \ + __DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \ EXTEND_CLASS(_name, _ext, \ ({ class_##_name##_t _t = { .lock = l }, *_T = &_t;\ if (_T->lock && !(_condlock)) _T->lock = NULL; \ diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 7e43caabb54b..2e6e603b7493 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -623,6 +623,24 @@ struct clk *clk_register_gate(struct device *dev, const char *name, NULL, (flags), (reg), (bit_idx), \ (clk_gate_flags), (lock)) /** + * devm_clk_hw_register_gate_parent_hw - register a gate clock with the clock + * framework + * @dev: device that is registering this clock + * @name: name of this clock + * @parent_hw: pointer to parent clk + * @flags: framework-specific flags for this clock + * @reg: register address to control gating of this clock + * @bit_idx: which bit in the register controls gating of this clock + * @clk_gate_flags: gate-specific flags for this clock + * @lock: shared register lock for this clock + */ +#define devm_clk_hw_register_gate_parent_hw(dev, name, parent_hw, flags, \ + reg, bit_idx, clk_gate_flags, \ + lock) \ + __devm_clk_hw_register_gate((dev), NULL, (name), NULL, (parent_hw), \ + NULL, (flags), (reg), (bit_idx), \ + (clk_gate_flags), (lock)) +/** * devm_clk_hw_register_gate_parent_data - register a gate clock with the * clock framework * @dev: device that is registering this clock @@ -689,13 +707,15 @@ struct clk_div_table { * CLK_DIVIDER_BIG_ENDIAN - By default little endian register accesses are used * for the divider register. Setting this flag makes the register accesses * big endian. + * CLK_DIVIDER_EVEN_INTEGERS - clock divisor is 2, 4, 6, 8, 10, etc. + * Formula is 2 * (value read from hardware + 1). 
*/ struct clk_divider { struct clk_hw hw; void __iomem *reg; u8 shift; u8 width; - u8 flags; + u16 flags; const struct clk_div_table *table; spinlock_t *lock; }; @@ -711,6 +731,7 @@ struct clk_divider { #define CLK_DIVIDER_READ_ONLY BIT(5) #define CLK_DIVIDER_MAX_AT_ZERO BIT(6) #define CLK_DIVIDER_BIG_ENDIAN BIT(7) +#define CLK_DIVIDER_EVEN_INTEGERS BIT(8) extern const struct clk_ops clk_divider_ops; extern const struct clk_ops clk_divider_ro_ops; @@ -740,19 +761,21 @@ struct clk_hw *__clk_hw_register_divider(struct device *dev, struct device_node *np, const char *name, const char *parent_name, const struct clk_hw *parent_hw, const struct clk_parent_data *parent_data, unsigned long flags, - void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags, + void __iomem *reg, u8 shift, u8 width, + unsigned long clk_divider_flags, const struct clk_div_table *table, spinlock_t *lock); struct clk_hw *__devm_clk_hw_register_divider(struct device *dev, struct device_node *np, const char *name, const char *parent_name, const struct clk_hw *parent_hw, const struct clk_parent_data *parent_data, unsigned long flags, - void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags, + void __iomem *reg, u8 shift, u8 width, + unsigned long clk_divider_flags, const struct clk_div_table *table, spinlock_t *lock); struct clk *clk_register_divider_table(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 shift, u8 width, - u8 clk_divider_flags, const struct clk_div_table *table, - spinlock_t *lock); + unsigned long clk_divider_flags, + const struct clk_div_table *table, spinlock_t *lock); /** * clk_register_divider - register a divider clock with the clock framework * @dev: device registering this clock @@ -1137,6 +1160,9 @@ struct clk_hw *clk_hw_register_fixed_factor_with_accuracy_fwname(struct device * struct device_node *np, const char *name, const char *fw_name, unsigned long flags, unsigned int mult, unsigned int div, unsigned long acc); +struct clk_hw *clk_hw_register_fixed_factor_index(struct device *dev, + const char *name, unsigned int index, unsigned long flags, + unsigned int mult, unsigned int div); void clk_hw_unregister_fixed_factor(struct clk_hw *hw); struct clk_hw *devm_clk_hw_register_fixed_factor(struct device *dev, const char *name, const char *parent_name, unsigned long flags, @@ -1360,7 +1386,6 @@ unsigned long clk_hw_get_flags(const struct clk_hw *hw); (clk_hw_get_flags((hw)) & CLK_SET_RATE_PARENT) bool clk_hw_is_prepared(const struct clk_hw *hw); -bool clk_hw_rate_is_protected(const struct clk_hw *hw); bool clk_hw_is_enabled(const struct clk_hw *hw); bool __clk_is_enabled(struct clk *clk); struct clk *__clk_lookup(const char *name); diff --git a/include/linux/clk.h b/include/linux/clk.h index 851a0f2cf42c..1dcee6d701e4 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -496,11 +496,13 @@ int __must_check devm_clk_bulk_get_all(struct device *dev, struct clk_bulk_data **clks); /** - * devm_clk_bulk_get_all_enable - Get and enable all clocks of the consumer (managed) + * devm_clk_bulk_get_all_enabled - Get and enable all clocks of the consumer (managed) * @dev: device for clock "consumer" * @clks: pointer to the clk_bulk_data table of consumer * - * Returns success (0) or negative errno. + * Returns a positive value for the number of clocks obtained while the + * clock references are stored in the clk_bulk_data table in @clks field. + * Returns 0 if there're none and a negative value if something failed. 
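A short consumer sketch of the renamed helper; the probe function and message are the usual driver boilerplate and not specific to any real device:

static int foo_probe(struct platform_device *pdev)	/* hypothetical driver */
{
	struct clk_bulk_data *clks;
	int num_clks;

	num_clks = devm_clk_bulk_get_all_enabled(&pdev->dev, &clks);
	if (num_clks < 0)
		return dev_err_probe(&pdev->dev, num_clks, "failed to get clocks\n");
	/* num_clks == 0 simply means this consumer has no clocks */

	return 0;
}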
* * This helper function allows drivers to get all clocks of the * consumer and enables them in one operation with management. @@ -508,8 +510,8 @@ int __must_check devm_clk_bulk_get_all(struct device *dev, * is unbound. */ -int __must_check devm_clk_bulk_get_all_enable(struct device *dev, - struct clk_bulk_data **clks); +int __must_check devm_clk_bulk_get_all_enabled(struct device *dev, + struct clk_bulk_data **clks); /** * devm_clk_get - lookup and obtain a managed reference to a clock producer. @@ -1034,7 +1036,7 @@ static inline int __must_check devm_clk_bulk_get_all(struct device *dev, return 0; } -static inline int __must_check devm_clk_bulk_get_all_enable(struct device *dev, +static inline int __must_check devm_clk_bulk_get_all_enabled(struct device *dev, struct clk_bulk_data **clks) { return 0; @@ -1136,6 +1138,15 @@ static inline void clk_restore_context(void) {} #endif +/* Deprecated. Use devm_clk_bulk_get_all_enabled() */ +static inline int __must_check +devm_clk_bulk_get_all_enable(struct device *dev, struct clk_bulk_data **clks) +{ + int ret = devm_clk_bulk_get_all_enabled(dev, clks); + + return ret > 0 ? 0 : ret; +} + /* clk_prepare_enable helps cases using clk_enable in non-atomic context. */ static inline int clk_prepare_enable(struct clk *clk) { diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index d35b677b08fe..65b7c41471c3 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -49,6 +49,7 @@ struct module; * @archdata: Optional arch-specific data * @max_cycles: Maximum safe cycle value which won't overflow on * multiplication + * @max_raw_delta: Maximum safe delta value for negative motion detection * @name: Pointer to clocksource name * @list: List head for registration (internal) * @freq_khz: Clocksource frequency in khz. @@ -109,6 +110,7 @@ struct clocksource { struct arch_clocksource_data archdata; #endif u64 max_cycles; + u64 max_raw_delta; const char *name; struct list_head list; u32 freq_khz; @@ -215,7 +217,6 @@ static inline s64 clocksource_cyc2ns(u64 cycles, u32 mult, u32 shift) extern int clocksource_unregister(struct clocksource*); extern void clocksource_touch_watchdog(void); -extern void clocksource_change_rating(struct clocksource *cs, int rating); extern void clocksource_suspend(void); extern void clocksource_resume(void); extern struct clocksource * __init clocksource_default_clock(void); diff --git a/include/linux/clocksource_ids.h b/include/linux/clocksource_ids.h index 2bb4d8c2f1b0..c4ef4ae2eded 100644 --- a/include/linux/clocksource_ids.h +++ b/include/linux/clocksource_ids.h @@ -6,6 +6,7 @@ enum clocksource_ids { CSID_GENERIC = 0, CSID_ARM_ARCH_COUNTER, + CSID_S390_TOD, CSID_X86_TSC_EARLY, CSID_X86_TSC, CSID_X86_KVM_CLK, diff --git a/include/linux/codetag.h b/include/linux/codetag.h index c2a579ccd455..d14dbd26b370 100644 --- a/include/linux/codetag.h +++ b/include/linux/codetag.h @@ -13,6 +13,9 @@ struct codetag_module; struct seq_buf; struct module; +#define CODETAG_SECTION_START_PREFIX "__start_" +#define CODETAG_SECTION_STOP_PREFIX "__stop_" + /* * An instance of this structure is created in a special ELF section at every * code location being tagged. 
At runtime, the special section is treated as @@ -35,8 +38,15 @@ struct codetag_type_desc { size_t tag_size; void (*module_load)(struct codetag_type *cttype, struct codetag_module *cmod); - bool (*module_unload)(struct codetag_type *cttype, + void (*module_unload)(struct codetag_type *cttype, struct codetag_module *cmod); +#ifdef CONFIG_MODULES + void (*module_replaced)(struct module *mod, struct module *new_mod); + bool (*needs_section_mem)(struct module *mod, unsigned long size); + void *(*alloc_section_mem)(struct module *mod, unsigned long size, + unsigned int prepend, unsigned long align); + void (*free_section_mem)(struct module *mod, bool used); +#endif }; struct codetag_iterator { @@ -71,11 +81,31 @@ struct codetag_type * codetag_register_type(const struct codetag_type_desc *desc); #if defined(CONFIG_CODE_TAGGING) && defined(CONFIG_MODULES) + +bool codetag_needs_module_section(struct module *mod, const char *name, + unsigned long size); +void *codetag_alloc_module_section(struct module *mod, const char *name, + unsigned long size, unsigned int prepend, + unsigned long align); +void codetag_free_module_sections(struct module *mod); +void codetag_module_replaced(struct module *mod, struct module *new_mod); void codetag_load_module(struct module *mod); -bool codetag_unload_module(struct module *mod); -#else +void codetag_unload_module(struct module *mod); + +#else /* defined(CONFIG_CODE_TAGGING) && defined(CONFIG_MODULES) */ + +static inline bool +codetag_needs_module_section(struct module *mod, const char *name, + unsigned long size) { return false; } +static inline void * +codetag_alloc_module_section(struct module *mod, const char *name, + unsigned long size, unsigned int prepend, + unsigned long align) { return NULL; } +static inline void codetag_free_module_sections(struct module *mod) {} +static inline void codetag_module_replaced(struct module *mod, struct module *new_mod) {} static inline void codetag_load_module(struct module *mod) {} -static inline bool codetag_unload_module(struct module *mod) { return true; } -#endif +static inline void codetag_unload_module(struct module *mod) {} + +#endif /* defined(CONFIG_CODE_TAGGING) && defined(CONFIG_MODULES) */ #endif /* _LINUX_CODETAG_H */ diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 4c1a39dcb624..2e7c2c282f3a 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_COMPILER_TYPES_H -#error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead." +#error "Please do not include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead." #endif /* Compiler specific definitions for Clang compiler */ diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index cd6f9aae311f..d0ed9583743f 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_COMPILER_TYPES_H -#error "Please don't include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead." +#error "Please do not include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead." 
#endif /* diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 4d4e23b6e3e7..240c632c5b95 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -216,34 +216,56 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, #endif /* __KERNEL__ */ +/** + * offset_to_ptr - convert a relative memory offset to an absolute pointer + * @off: the address of the 32-bit offset value + */ +static inline void *offset_to_ptr(const int *off) +{ + return (void *)((unsigned long)off + *off); +} + +#endif /* __ASSEMBLY__ */ + +#ifdef CONFIG_64BIT +#define ARCH_SEL(a,b) a +#else +#define ARCH_SEL(a,b) b +#endif + /* * Force the compiler to emit 'sym' as a symbol, so that we can reference * it from inline assembler. Necessary in case 'sym' could be inlined * otherwise, or eliminated entirely due to lack of references that are * visible to the compiler. */ -#define ___ADDRESSABLE(sym, __attrs) \ - static void * __used __attrs \ +#define ___ADDRESSABLE(sym, __attrs) \ + static void * __used __attrs \ __UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)(uintptr_t)&sym; + #define __ADDRESSABLE(sym) \ ___ADDRESSABLE(sym, __section(".discard.addressable")) -/** - * offset_to_ptr - convert a relative memory offset to an absolute pointer - * @off: the address of the 32-bit offset value - */ -static inline void *offset_to_ptr(const int *off) -{ - return (void *)((unsigned long)off + *off); -} +#define __ADDRESSABLE_ASM(sym) \ + .pushsection .discard.addressable,"aw"; \ + .align ARCH_SEL(8,4); \ + ARCH_SEL(.quad, .long) __stringify(sym); \ + .popsection; -#endif /* __ASSEMBLY__ */ +#define __ADDRESSABLE_ASM_STR(sym) __stringify(__ADDRESSABLE_ASM(sym)) + +#ifdef __CHECKER__ +#define __BUILD_BUG_ON_ZERO_MSG(e, msg) (0) +#else /* __CHECKER__ */ +#define __BUILD_BUG_ON_ZERO_MSG(e, msg) ((int)sizeof(struct {_Static_assert(!(e), msg);})) +#endif /* __CHECKER__ */ /* &a[0] degrades to a pointer: a different type from an array */ -#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) +#define __must_be_array(a) __BUILD_BUG_ON_ZERO_MSG(__same_type((a), &(a)[0]), "must be array") /* Require C Strings (i.e. NUL-terminated) lack the "nonstring" attribute. */ -#define __must_be_cstr(p) BUILD_BUG_ON_ZERO(__annotated(p, nonstring)) +#define __must_be_cstr(p) \ + __BUILD_BUG_ON_ZERO_MSG(__annotated(p, nonstring), "must be cstr (NUL-terminated)") /* * This returns a constant expression while determining if an argument is diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index 32284cd26d52..c16d4199bf92 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h @@ -95,19 +95,6 @@ #endif /* - * Optional: only supported since gcc >= 15 - * Optional: only supported since clang >= 18 - * - * gcc: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108896 - * clang: https://github.com/llvm/llvm-project/pull/76348 - */ -#if __has_attribute(__counted_by__) -# define __counted_by(member) __attribute__((__counted_by__(member))) -#else -# define __counted_by(member) -#endif - -/* * Optional: not supported by gcc * Optional: only supported since clang >= 14.0 * diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 1a957ea2f4fe..981cc3d7e3aa 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -266,6 +266,12 @@ struct ftrace_likely_data { #define noinline_for_stack noinline /* + * Use noinline_for_tracing for functions that should not be inlined. 
+ * For tracing reasons. + */ +#define noinline_for_tracing noinline + +/* * Sanitizer helper attributes: Because using __always_inline and * __no_sanitize_* conflict, provide helper attributes that will either expand * to __no_sanitize_* in compilation units where instrumentation is enabled @@ -324,6 +330,25 @@ struct ftrace_likely_data { #endif /* + * Optional: only supported since gcc >= 15 + * Optional: only supported since clang >= 18 + * + * gcc: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108896 + * clang: https://github.com/llvm/llvm-project/pull/76348 + * + * __bdos on clang < 19.1.2 can erroneously return 0: + * https://github.com/llvm/llvm-project/pull/110497 + * + * __bdos on clang < 19.1.3 can be off by 4: + * https://github.com/llvm/llvm-project/pull/112636 + */ +#ifdef CONFIG_CC_HAS_COUNTED_BY +# define __counted_by(member) __attribute__((__counted_by__(member))) +#else +# define __counted_by(member) +#endif + +/* * Apply __counted_by() when the Endianness matches to increase test coverage. */ #ifdef __LITTLE_ENDIAN diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 2361ed4d2b15..a04b73c40173 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -147,6 +147,7 @@ enum cpuhp_state { CPUHP_AP_IRQ_EIOINTC_STARTING, CPUHP_AP_IRQ_AVECINTC_STARTING, CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, + CPUHP_AP_IRQ_THEAD_ACLINT_SSWI_STARTING, CPUHP_AP_IRQ_RISCV_IMSIC_STARTING, CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING, CPUHP_AP_ARM_MVEBU_COHERENCY, @@ -208,7 +209,6 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_UNCORE_ONLINE, CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE, CPUHP_AP_PERF_X86_AMD_POWER_ONLINE, - CPUHP_AP_PERF_X86_RAPL_ONLINE, CPUHP_AP_PERF_S390_CF_ONLINE, CPUHP_AP_PERF_S390_SF_ONLINE, CPUHP_AP_PERF_ARM_CCI_ONLINE, @@ -227,6 +227,7 @@ enum cpuhp_state { CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE, CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE, CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE, + CPUHP_AP_PERF_ARM_MRVL_PEM_ONLINE, CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE, CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE, CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE, diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 3183aeb7f5b4..a9ee4fe55dcf 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -61,7 +61,7 @@ struct cpuidle_state { struct cpuidle_driver *drv, int index); - int (*enter_dead) (struct cpuidle_device *dev, int index); + void (*enter_dead) (struct cpuidle_device *dev, int index); /* * CPUs execute ->enter_s2idle with the local tick or entire timekeeping diff --git a/include/linux/cred.h b/include/linux/cred.h index 2976f534a7a3..e4a3155fe409 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -172,6 +172,24 @@ static inline bool cap_ambient_invariant_ok(const struct cred *cred) cred->cap_inheritable)); } +/* + * Override creds without bumping reference count. Caller must ensure + * reference remains valid or has taken reference. Almost always not the + * interface you want. Use override_creds()/revert_creds() instead. 
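For contrast with the _light variants above, the usual reference-counted pattern looks roughly like this (a sketch; new_cred stands for whatever credentials the caller prepared):

const struct cred *old_cred;

/* take over 'new_cred' for the current task, remembering the old creds */
old_cred = override_creds(new_cred);
/* ... do work with the overridden credentials ... */
revert_creds(old_cred);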
+ */ +static inline const struct cred *override_creds_light(const struct cred *override_cred) +{ + const struct cred *old = current->cred; + + rcu_assign_pointer(current->cred, override_cred); + return old; +} + +static inline void revert_creds_light(const struct cred *revert_cred) +{ + rcu_assign_pointer(current->cred, revert_cred); +} + /** * get_new_cred_many - Get references on a new set of credentials * @cred: The new credentials to reference diff --git a/include/linux/dax.h b/include/linux/dax.h index 9d3e3327af4c..df41a0017b31 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -27,12 +27,6 @@ struct dax_operations { */ long (*direct_access)(struct dax_device *, pgoff_t, long, enum dax_access_mode, void **, pfn_t *); - /* - * Validate whether this device is usable as an fsdax backing - * device. - */ - bool (*dax_supported)(struct dax_device *, struct block_device *, int, - sector_t, sector_t); /* zero_page_range: required operation. Zero page range */ int (*zero_page_range)(struct dax_device *, pgoff_t, size_t); /* diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 0928a6c8ae1e..59444b495d49 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -71,9 +71,63 @@ typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *); struct dentry *debugfs_lookup(const char *name, struct dentry *parent); -struct dentry *debugfs_create_file(const char *name, umode_t mode, - struct dentry *parent, void *data, - const struct file_operations *fops); +struct debugfs_short_fops { + ssize_t (*read)(struct file *, char __user *, size_t, loff_t *); + ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *); + loff_t (*llseek) (struct file *, loff_t, int); +}; + +struct dentry *debugfs_create_file_full(const char *name, umode_t mode, + struct dentry *parent, void *data, + const struct file_operations *fops); +struct dentry *debugfs_create_file_short(const char *name, umode_t mode, + struct dentry *parent, void *data, + const struct debugfs_short_fops *fops); + +/** + * debugfs_create_file - create a file in the debugfs filesystem + * @name: a pointer to a string containing the name of the file to create. + * @mode: the permission that the file should have. + * @parent: a pointer to the parent dentry for this file. This should be a + * directory dentry if set. If this parameter is NULL, then the + * file will be created in the root of the debugfs filesystem. + * @data: a pointer to something that the caller will want to get to later + * on. The inode.i_private pointer will point to this value on + * the open() call. + * @fops: a pointer to a struct file_operations or struct debugfs_short_fops that + * should be used for this file. + * + * This is the basic "create a file" function for debugfs. It allows for a + * wide range of flexibility in creating a file, or a directory (if you want + * to create a directory, the debugfs_create_dir() function is + * recommended to be used instead.) + * + * This function will return a pointer to a dentry if it succeeds. This + * pointer must be passed to the debugfs_remove() function when the file is + * to be removed (no automatic cleanup happens if your module is unloaded, + * you are responsible here.) If an error occurs, ERR_PTR(-ERROR) will be + * returned. + * + * If debugfs is not enabled in the kernel, the value -%ENODEV will be + * returned. 
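A minimal sketch of the new short-fops form (the attribute name and backing variable are made up; as the note below explains, simple_open() is used implicitly and no module get/put proxying is needed):

static int foo_stat;	/* hypothetical value exposed via debugfs */

static ssize_t foo_stat_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	char tmp[16];
	int len = scnprintf(tmp, sizeof(tmp), "%d\n", foo_stat);

	return simple_read_from_buffer(buf, count, ppos, tmp, len);
}

static const struct debugfs_short_fops foo_stat_fops = {
	.read	= foo_stat_read,
	.llseek	= default_llseek,
};

/* e.g. from the driver's init path: */
debugfs_create_file("foo_stat", 0444, parent, NULL, &foo_stat_fops);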
+ * + * If fops points to a struct debugfs_short_fops, then simple_open() will be + * used for the open, and only read/write/llseek are supported and are proxied, + * so no module reference or release are needed. + * + * NOTE: it's expected that most callers should _ignore_ the errors returned + * by this function. Other debugfs functions handle the fact that the "dentry" + * passed to them could be an error and they don't crash in that case. + * Drivers should generally work fine even if debugfs fails to init anyway. + */ +#define debugfs_create_file(name, mode, parent, data, fops) \ + _Generic(fops, \ + const struct file_operations *: debugfs_create_file_full, \ + const struct debugfs_short_fops *: debugfs_create_file_short, \ + struct file_operations *: debugfs_create_file_full, \ + struct debugfs_short_fops *: debugfs_create_file_short) \ + (name, mode, parent, data, fops) + struct dentry *debugfs_create_file_unsafe(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops); @@ -207,7 +261,7 @@ static inline struct dentry *debugfs_lookup(const char *name, static inline struct dentry *debugfs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, - const struct file_operations *fops) + const void *fops) { return ERR_PTR(-ENODEV); } diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h index 32444686b6ff..8b95545e7924 100644 --- a/include/linux/debugobjects.h +++ b/include/linux/debugobjects.h @@ -23,13 +23,17 @@ struct debug_obj_descr; * @state: tracked object state * @astate: current active state * @object: pointer to the real object + * @batch_last: pointer to the last hlist node in a batch * @descr: pointer to an object type specific debug description structure */ struct debug_obj { - struct hlist_node node; - enum debug_obj_state state; - unsigned int astate; - void *object; + struct hlist_node node; + enum debug_obj_state state; + unsigned int astate; + union { + void *object; + struct hlist_node *batch_last; + }; const struct debug_obj_descr *descr; }; diff --git a/include/linux/delay.h b/include/linux/delay.h index ff9cda975e30..89866bab100d 100644 --- a/include/linux/delay.h +++ b/include/linux/delay.h @@ -6,21 +6,12 @@ * Copyright (C) 1993 Linus Torvalds * * Delay routines, using a pre-computed "loops_per_jiffy" value. - * - * Please note that ndelay(), udelay() and mdelay() may return early for - * several reasons: - * 1. computed loops_per_jiffy too low (due to the time taken to - * execute the timer interrupt.) - * 2. cache behaviour affecting the time it takes to execute the - * loop function. - * 3. CPU clock rate changes. - * - * Please see this thread: - * https://lists.openwall.net/linux-kernel/2011/01/09/56 + * Sleep routines using timer list timers or hrtimers. */ #include <linux/math.h> #include <linux/sched.h> +#include <linux/jiffies.h> extern unsigned long loops_per_jiffy; @@ -35,12 +26,21 @@ extern unsigned long loops_per_jiffy; * The 2nd mdelay() definition ensures GCC will optimize away the * while loop for the common cases where n <= MAX_UDELAY_MS -- Paul G. */ - #ifndef MAX_UDELAY_MS #define MAX_UDELAY_MS 5 #endif #ifndef mdelay +/** + * mdelay - Inserting a delay based on milliseconds with busy waiting + * @n: requested delay in milliseconds + * + * See udelay() for basic information about mdelay() and it's variants. 
+ * + * Please double-check whether mdelay() is the right way to go or whether a + * refactoring of the code is the better variant to be able to use msleep() + * instead. + */ #define mdelay(n) (\ (__builtin_constant_p(n) && (n)<=MAX_UDELAY_MS) ? udelay((n)*1000) : \ ({unsigned long __ms=(n); while (__ms--) udelay(1000);})) @@ -63,30 +63,75 @@ unsigned long msleep_interruptible(unsigned int msecs); void usleep_range_state(unsigned long min, unsigned long max, unsigned int state); +/** + * usleep_range - Sleep for an approximate time + * @min: Minimum time in microseconds to sleep + * @max: Maximum time in microseconds to sleep + * + * For basic information please refer to usleep_range_state(). + * + * The task will be in the state TASK_UNINTERRUPTIBLE during the sleep. + */ static inline void usleep_range(unsigned long min, unsigned long max) { usleep_range_state(min, max, TASK_UNINTERRUPTIBLE); } -static inline void usleep_idle_range(unsigned long min, unsigned long max) +/** + * usleep_range_idle - Sleep for an approximate time with idle time accounting + * @min: Minimum time in microseconds to sleep + * @max: Maximum time in microseconds to sleep + * + * For basic information please refer to usleep_range_state(). + * + * The sleeping task has the state TASK_IDLE during the sleep to prevent + * contribution to the load average. + */ +static inline void usleep_range_idle(unsigned long min, unsigned long max) { usleep_range_state(min, max, TASK_IDLE); } +/** + * ssleep - wrapper for seconds around msleep + * @seconds: Requested sleep duration in seconds + * + * Please refer to msleep() for detailed information. + */ static inline void ssleep(unsigned int seconds) { msleep(seconds * 1000); } -/* see Documentation/timers/timers-howto.rst for the thresholds */ +static const unsigned int max_slack_shift = 2; +#define USLEEP_RANGE_UPPER_BOUND ((TICK_NSEC << max_slack_shift) / NSEC_PER_USEC) + +/** + * fsleep - flexible sleep which autoselects the best mechanism + * @usecs: requested sleep duration in microseconds + * + * fsleep() selects the best mechanism that will provide at most 25% slack + * to the requested sleep duration. Therefore it uses: + * + * * udelay() loop for sleep durations <= 10 microseconds to avoid hrtimer + * overhead for really short sleep durations. + * * usleep_range() for sleep durations where using msleep() would lead to a + * slack larger than 25%. This depends on the granularity of + * jiffies. + * * msleep() for all other sleep durations. + * + * Note: When %CONFIG_HIGH_RES_TIMERS is not set, all sleeps are processed with + * the granularity of jiffies and the slack might exceed 25% especially for + * short sleep durations.
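As a usage sketch of the thresholds described above (the register base, FOO_STATUS offset and FOO_READY bit are made up):

/* Poll a hypothetical READY bit, sleeping roughly 200us per iteration; fsleep()
 * picks the underlying mechanism according to the thresholds above. */
while (!(readl(base + FOO_STATUS) & FOO_READY))
	fsleep(200);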
+ */ static inline void fsleep(unsigned long usecs) { if (usecs <= 10) udelay(usecs); - else if (usecs <= 20000) - usleep_range(usecs, 2 * usecs); + else if (usecs < USLEEP_RANGE_UPPER_BOUND) + usleep_range(usecs, usecs + (usecs >> max_slack_shift)); else - msleep(DIV_ROUND_UP(usecs, 1000)); + msleep(DIV_ROUND_UP(usecs, USEC_PER_MSEC)); } #endif /* defined(_LINUX_DELAY_H) */ diff --git a/include/linux/dev_printk.h b/include/linux/dev_printk.h index ca32b5bb28eb..eb2094e43050 100644 --- a/include/linux/dev_printk.h +++ b/include/linux/dev_printk.h @@ -276,6 +276,7 @@ do { \ dev_driver_string(dev), dev_name(dev), ## arg) __printf(3, 4) int dev_err_probe(const struct device *dev, int err, const char *fmt, ...); +__printf(3, 4) int dev_warn_probe(const struct device *dev, int err, const char *fmt, ...); /* Simple helper for dev_err_probe() when ERR_PTR() is to be returned. */ #define dev_err_ptr_probe(dev, ___err, fmt, ...) \ diff --git a/include/linux/dim.h b/include/linux/dim.h index 1b581ff25a15..06543fd40fcc 100644 --- a/include/linux/dim.h +++ b/include/linux/dim.h @@ -351,7 +351,8 @@ void dim_park_tired(struct dim *dim); * Takes into consideration counter wrap-around. * Returned boolean indicates whether curr_stats are reliable. */ -bool dim_calc_stats(struct dim_sample *start, struct dim_sample *end, +bool dim_calc_stats(const struct dim_sample *start, + const struct dim_sample *end, struct dim_stats *curr_stats); /** @@ -424,7 +425,7 @@ struct dim_cq_moder net_dim_get_def_tx_moderation(u8 cq_period_mode); * This is the main logic of the algorithm, where data is processed in order * to decide on next required action. */ -void net_dim(struct dim *dim, struct dim_sample end_sample); +void net_dim(struct dim *dim, const struct dim_sample *end_sample); /* RDMA DIM */ diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index e06bad467f55..e7ad819962e3 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -574,6 +574,12 @@ int dma_fence_get_status(struct dma_fence *fence); * rather than success. This must be set before signaling (so that the value * is visible before any waiters on the signal callback are woken). This * helper exists to help catching erroneous setting of #dma_fence.error. + * + * Examples of error codes which drivers should use: + * + * * %-ENODATA This operation produced no data, no other operation affected. + * * %-ECANCELED All operations from the same context have been canceled. + * * %-ETIME Operation caused a timeout and potentially device reset. 
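A corresponding driver-side sketch (the job/fence layout is illustrative):

/* A device reset cut this job short: record the error, then signal. The
 * error must be set before dma_fence_signal() so waiters can observe it. */
dma_fence_set_error(&job->hw_fence, -ETIME);
dma_fence_signal(&job->hw_fence);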
*/ static inline void dma_fence_set_error(struct dma_fence *fence, int error) diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index b7773201414c..e172522cd936 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -242,7 +242,7 @@ static inline bool dev_is_dma_coherent(struct device *dev) { return true; } -#endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */ +#endif static inline void dma_reset_need_sync(struct device *dev) { diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 1524da363734..b79925b1c433 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -2,15 +2,11 @@ #ifndef _LINUX_DMA_MAPPING_H #define _LINUX_DMA_MAPPING_H -#include <linux/cache.h> -#include <linux/sizes.h> -#include <linux/string.h> #include <linux/device.h> #include <linux/err.h> #include <linux/dma-direction.h> #include <linux/scatterlist.h> #include <linux/bug.h> -#include <linux/mem_encrypt.h> /** * List of possible attributes associated with a DMA mapping. The semantics diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index 8d0e34dad446..c5ab6fd9ebe8 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -105,10 +105,10 @@ enum dma_resv_usage { * This should be used by submissions which don't want to participate in * any implicit synchronization. * - * The most common case are preemption fences, page table updates, TLB - * flushes as well as explicit synced user submissions. + * The most common cases are preemption fences, page table updates, TLB + * flushes as well as explicitly synced user submissions. * - * Explicit synced user user submissions can be promoted to + * Explicitly synced user submissions can be promoted to * DMA_RESV_USAGE_READ or DMA_RESV_USAGE_WRITE as needed using * dma_buf_import_sync_file() when implicit synchronization should * become necessary after initial adding of the fence. diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index b137fdb56093..346251bf1026 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -84,7 +84,7 @@ enum dma_transfer_direction { DMA_TRANS_NONE, }; -/** +/* * Interleaved Transfer Request * ---------------------------- * A chunk is collection of contiguous bytes to be transferred. @@ -223,7 +223,7 @@ enum sum_check_bits { }; /** - * enum pq_check_flags - result of async_{xor,pq}_zero_sum operations + * enum sum_check_flags - result of async_{xor,pq}_zero_sum operations * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise */ @@ -286,7 +286,7 @@ typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; * pointer to the engine's metadata area * 4. Read out the metadata from the pointer * - * Note: the two mode is not compatible and clients must use one mode for a + * Warning: the two modes are not compatible and clients must use one mode for a * descriptor. */ enum dma_desc_metadata_mode { @@ -594,9 +594,13 @@ struct dma_descriptor_metadata_ops { * @phys: physical address of the descriptor * @chan: target channel for this operation * @tx_submit: accept the descriptor, assign ordered cookie and mark the + * @desc_free: driver's callback function to free a resusable descriptor + * after completion * descriptor pending. 
To be pushed on .issue_pending() call * @callback: routine to call after this operation is complete + * @callback_result: error result from a DMA transaction * @callback_param: general parameter to pass to the callback routine + * @unmap: hook for generic DMA unmap data * @desc_metadata_mode: core managed metadata mode to protect mixed use of * DESC_METADATA_CLIENT or DESC_METADATA_ENGINE. Otherwise * DESC_METADATA_NONE @@ -827,6 +831,9 @@ struct dma_filter { * @device_prep_dma_memset: prepares a memset operation * @device_prep_dma_memset_sg: prepares a memset operation over a scatter list * @device_prep_dma_interrupt: prepares an end of chain interrupt operation + * @device_prep_peripheral_dma_vec: prepares a scatter-gather DMA transfer, + * where the address and size of each segment is located in one entry of + * the dma_vec array. * @device_prep_slave_sg: prepares a slave dma operation * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio. * The function takes a buffer of size buf_len. The callback function will diff --git a/include/linux/dmar.h b/include/linux/dmar.h index 499bb2c63483..692b2b445761 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -292,7 +292,6 @@ static inline void dmar_copy_shared_irte(struct irte *dst, struct irte *src) struct irq_data; extern void dmar_msi_unmask(struct irq_data *data); extern void dmar_msi_mask(struct irq_data *data); -extern void dmar_msi_read(int irq, struct msi_msg *msg); extern void dmar_msi_write(int irq, struct msi_msg *msg); extern int dmar_set_interrupt(struct intel_iommu *iommu); extern irqreturn_t dmar_fault(int irq, void *dev_id); diff --git a/include/linux/dpll.h b/include/linux/dpll.h index 81f7b623d0ba..5e4f9ab1cf75 100644 --- a/include/linux/dpll.h +++ b/include/linux/dpll.h @@ -26,6 +26,10 @@ struct dpll_device_ops { struct netlink_ext_ack *extack); int (*temp_get)(const struct dpll_device *dpll, void *dpll_priv, s32 *temp, struct netlink_ext_ack *extack); + int (*clock_quality_level_get)(const struct dpll_device *dpll, + void *dpll_priv, + unsigned long *qls, + struct netlink_ext_ack *extack); }; struct dpll_pin_ops { diff --git a/include/linux/dsa/ocelot.h b/include/linux/dsa/ocelot.h index 6fbfbde68a37..620a3260fc08 100644 --- a/include/linux/dsa/ocelot.h +++ b/include/linux/dsa/ocelot.h @@ -15,6 +15,7 @@ struct ocelot_skb_cb { struct sk_buff *clone; unsigned int ptp_class; /* valid only for clones */ + unsigned long ptp_tx_time; /* valid only for clones */ u32 tstamp_lo; u8 ptp_cmd; u8 ts_id; diff --git a/include/linux/dw_apb_timer.h b/include/linux/dw_apb_timer.h index 82ebf9223948..f8811c46b89e 100644 --- a/include/linux/dw_apb_timer.h +++ b/include/linux/dw_apb_timer.h @@ -34,9 +34,6 @@ struct dw_apb_clocksource { }; void dw_apb_clockevent_register(struct dw_apb_clock_event_device *dw_ced); -void dw_apb_clockevent_pause(struct dw_apb_clock_event_device *dw_ced); -void dw_apb_clockevent_resume(struct dw_apb_clock_event_device *dw_ced); -void dw_apb_clockevent_stop(struct dw_apb_clock_event_device *dw_ced); struct dw_apb_clock_event_device * dw_apb_clockevent_init(int cpu, const char *name, unsigned rating, diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h index 281298e77a15..808b1a5102e7 100644 --- a/include/linux/dynamic_queue_limits.h +++ b/include/linux/dynamic_queue_limits.h @@ -127,7 +127,7 @@ static inline void dql_queued(struct dql *dql, unsigned int count) if (WARN_ON_ONCE(count > DQL_MAX_OBJECT)) return; - dql->last_obj_cnt = count; + 
WRITE_ONCE(dql->last_obj_cnt, count); /* We want to force a write first, so that cpu do not attempt * to get cache line containing last_obj_cnt, num_queued, adj_limit diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h index c860c72a921d..3a485cc0e0fa 100644 --- a/include/linux/eeprom_93cx6.h +++ b/include/linux/eeprom_93cx6.h @@ -11,6 +11,8 @@ Supported chipsets: 93c46, 93c56 and 93c66. */ +#include <linux/bits.h> + /* * EEPROM operation defines. */ @@ -34,6 +36,7 @@ * @register_write(struct eeprom_93cx6 *eeprom): handler to * write to the eeprom register by using all reg_* fields. * @width: eeprom width, should be one of the PCI_EEPROM_WIDTH_* defines + * @quirks: eeprom or controller quirks * @drive_data: Set if we're driving the data line. * @reg_data_in: register field to indicate data input * @reg_data_out: register field to indicate data output @@ -50,6 +53,9 @@ struct eeprom_93cx6 { void (*register_write)(struct eeprom_93cx6 *eeprom); int width; + unsigned int quirks; +/* Some EEPROMs require an extra clock cycle before reading */ +#define PCI_EEPROM_QUIRK_EXTRA_READ_CYCLE BIT(0) char drive_data; char reg_data_in; @@ -71,3 +77,8 @@ extern void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable); extern void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom, u8 addr, u16 data); + +static inline bool has_quirk_extra_read_cycle(struct eeprom_93cx6 *eeprom) +{ + return eeprom->quirks & PCI_EEPROM_QUIRK_EXTRA_READ_CYCLE; +} diff --git a/include/linux/efi.h b/include/linux/efi.h index e28d88066033..e5815867aba9 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -379,7 +379,6 @@ void efi_native_runtime_setup(void); #define EFI_SYSTEM_RESOURCE_TABLE_GUID EFI_GUID(0xb122a263, 0x3661, 0x4f68, 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80) #define EFI_FILE_SYSTEM_GUID EFI_GUID(0x964e5b22, 0x6459, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b) #define DEVICE_TREE_GUID EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0) -#define EFI_PROPERTIES_TABLE_GUID EFI_GUID(0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5) #define EFI_RNG_PROTOCOL_GUID EFI_GUID(0x3152bca5, 0xeade, 0x433d, 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44) #define EFI_RNG_ALGORITHM_RAW EFI_GUID(0xe43176d7, 0xb6e8, 0x4827, 0xb7, 0x84, 0x7f, 0xfd, 0xc4, 0xb6, 0x85, 0x61) #define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20) @@ -581,15 +580,6 @@ struct efi_mem_range { }; typedef struct { - u32 version; - u32 length; - u64 memory_protection_attribute; -} efi_properties_table_t; - -#define EFI_PROPERTIES_TABLE_VERSION 0x00010000 -#define EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA 0x1 - -typedef struct { u16 version; u16 length; u32 runtime_services_supported; @@ -871,10 +861,9 @@ static inline int efi_range_is_wc(unsigned long start, unsigned long len) #define EFI_PARAVIRT 6 /* Access is via a paravirt interface */ #define EFI_ARCH_1 7 /* First arch-specific bit */ #define EFI_DBG 8 /* Print additional debug info at runtime */ -#define EFI_NX_PE_DATA 9 /* Can runtime data regions be mapped non-executable? */ -#define EFI_MEM_ATTR 10 /* Did firmware publish an EFI_MEMORY_ATTRIBUTES table? */ -#define EFI_MEM_NO_SOFT_RESERVE 11 /* Is the kernel configured to ignore soft reservations? */ -#define EFI_PRESERVE_BS_REGIONS 12 /* Are EFI boot-services memory segments available? 
*/ +#define EFI_MEM_ATTR 9 /* Did firmware publish an EFI_MEMORY_ATTRIBUTES table? */ +#define EFI_MEM_NO_SOFT_RESERVE 10 /* Is the kernel configured to ignore soft reservations? */ +#define EFI_PRESERVE_BS_REGIONS 11 /* Are EFI boot-services memory segments available? */ #ifdef CONFIG_EFI /* diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index 1ff52020cf75..752e0b297582 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -55,6 +55,8 @@ struct em_perf_table { * struct em_perf_domain - Performance domain * @em_table: Pointer to the runtime modifiable em_perf_table * @nr_perf_states: Number of performance states + * @min_perf_state: Minimum allowed Performance State index + * @max_perf_state: Maximum allowed Performance State index * @flags: See "em_perf_domain flags" * @cpus: Cpumask covering the CPUs of the domain. It's here * for performance reasons to avoid potential cache @@ -70,6 +72,8 @@ struct em_perf_table { struct em_perf_domain { struct em_perf_table __rcu *em_table; int nr_perf_states; + int min_perf_state; + int max_perf_state; unsigned long flags; unsigned long cpus[]; }; @@ -173,13 +177,14 @@ void em_table_free(struct em_perf_table __rcu *table); int em_dev_compute_costs(struct device *dev, struct em_perf_state *table, int nr_states); int em_dev_update_chip_binning(struct device *dev); +int em_update_performance_limits(struct em_perf_domain *pd, + unsigned long freq_min_khz, unsigned long freq_max_khz); /** * em_pd_get_efficient_state() - Get an efficient performance state from the EM * @table: List of performance states, in ascending order - * @nr_perf_states: Number of performance states + * @pd: performance domain for which this must be done * @max_util: Max utilization to map with the EM - * @pd_flags: Performance Domain flags * * It is called from the scheduler code quite frequently and as a consequence * doesn't implement any check. @@ -188,13 +193,16 @@ int em_dev_update_chip_binning(struct device *dev); * requirement. */ static inline int -em_pd_get_efficient_state(struct em_perf_state *table, int nr_perf_states, - unsigned long max_util, unsigned long pd_flags) +em_pd_get_efficient_state(struct em_perf_state *table, + struct em_perf_domain *pd, unsigned long max_util) { + unsigned long pd_flags = pd->flags; + int min_ps = pd->min_perf_state; + int max_ps = pd->max_perf_state; struct em_perf_state *ps; int i; - for (i = 0; i < nr_perf_states; i++) { + for (i = min_ps; i <= max_ps; i++) { ps = &table[i]; if (ps->performance >= max_util) { if (pd_flags & EM_PERF_DOMAIN_SKIP_INEFFICIENCIES && @@ -204,7 +212,7 @@ em_pd_get_efficient_state(struct em_perf_state *table, int nr_perf_states, } } - return nr_perf_states - 1; + return max_ps; } /** @@ -253,8 +261,7 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd, * requested performance. 
*/ em_table = rcu_dereference(pd->em_table); - i = em_pd_get_efficient_state(em_table->state, pd->nr_perf_states, - max_util, pd->flags); + i = em_pd_get_efficient_state(em_table->state, pd, max_util); ps = &em_table->state[i]; /* @@ -391,6 +398,12 @@ static inline int em_dev_update_chip_binning(struct device *dev) { return -EINVAL; } +static inline +int em_update_performance_limits(struct em_perf_domain *pd, + unsigned long freq_min_khz, unsigned long freq_max_khz) +{ + return -EINVAL; +} #endif #endif diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h index 1e50cdb83ae5..fc61d0205c97 100644 --- a/include/linux/entry-common.h +++ b/include/linux/entry-common.h @@ -64,7 +64,8 @@ #define EXIT_TO_USER_MODE_WORK \ (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ - _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \ + _TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | \ + _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \ ARCH_EXIT_TO_USER_MODE_WORK) /** diff --git a/include/linux/entry-kvm.h b/include/linux/entry-kvm.h index 6813171afccb..16149f6625e4 100644 --- a/include/linux/entry-kvm.h +++ b/include/linux/entry-kvm.h @@ -17,8 +17,9 @@ #endif #define XFER_TO_GUEST_MODE_WORK \ - (_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL | \ - _TIF_NOTIFY_RESUME | ARCH_XFER_TO_GUEST_MODE_WORK) + (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | _TIF_SIGPENDING | \ + _TIF_NOTIFY_SIGNAL | _TIF_NOTIFY_RESUME | \ + ARCH_XFER_TO_GUEST_MODE_WORK) struct kvm_vcpu; diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 12f6dc567598..b8b935b52603 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -734,6 +734,9 @@ struct kernel_ethtool_ts_info { * @rxfh_per_ctx_key: device supports setting different RSS key for each * additional context. Netlink API should report hfunc, key, and input_xfrm * for every context, not just context 0. + * @cap_rss_rxnfc_adds: device supports nonzero ring_cookie in filters with + * %FLOW_RSS flag; the queue ID from the filter is added to the value from + * the indirection table to determine the delivery queue. * @rxfh_indir_space: max size of RSS indirection tables, if indirection table * size as returned by @get_rxfh_indir_size may change during lifetime * of the device. Leave as 0 if the table size is constant. @@ -956,6 +959,7 @@ struct ethtool_ops { u32 cap_rss_ctx_supported:1; u32 cap_rss_sym_xor_supported:1; u32 rxfh_per_ctx_key:1; + u32 cap_rss_rxnfc_adds:1; u32 rxfh_indir_space; u16 rxfh_key_space; u16 rxfh_priv_size; diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h index 3337745d81bd..0c0d00fcd131 100644 --- a/include/linux/eventpoll.h +++ b/include/linux/eventpoll.h @@ -42,7 +42,7 @@ static inline void eventpoll_release(struct file *file) * because the file in on the way to be removed and nobody ( but * eventpoll ) has still a reference to this file. 
*/ - if (likely(!file->f_ep)) + if (likely(!READ_ONCE(file->f_ep))) return; /* diff --git a/include/linux/execmem.h b/include/linux/execmem.h index 32cef1144117..64130ae19690 100644 --- a/include/linux/execmem.h +++ b/include/linux/execmem.h @@ -46,11 +46,27 @@ enum execmem_type { /** * enum execmem_range_flags - options for executable memory allocations * @EXECMEM_KASAN_SHADOW: allocate kasan shadow + * @EXECMEM_ROX_CACHE: allocations should use ROX cache of huge pages */ enum execmem_range_flags { EXECMEM_KASAN_SHADOW = (1 << 0), + EXECMEM_ROX_CACHE = (1 << 1), }; +#ifdef CONFIG_ARCH_HAS_EXECMEM_ROX +/** + * execmem_fill_trapping_insns - set memory to contain instructions that + * will trap + * @ptr: pointer to memory to fill + * @size: size of the range to fill + * @writable: is the memory pointed to by @ptr writable or ROX + * + * A hook for architectures to fill execmem ranges with invalid instructions. + * Architectures that use EXECMEM_ROX_CACHE must implement this. + */ +void execmem_fill_trapping_insns(void *ptr, size_t size, bool writable); +#endif + /** * struct execmem_range - definition of an address space suitable for code and * related data allocations @@ -123,6 +139,39 @@ void *execmem_alloc(enum execmem_type type, size_t size); */ void execmem_free(void *ptr); +#ifdef CONFIG_MMU +/** + * execmem_vmap - create virtual mapping for EXECMEM_MODULE_DATA memory + * @size: size of the virtual mapping in bytes + * + * Maps virtually contiguous area in the range suitable for EXECMEM_MODULE_DATA. + * + * Return: the area descriptor on success or %NULL on failure. + */ +struct vm_struct *execmem_vmap(size_t size); +#endif + +/** + * execmem_update_copy - copy an update to executable memory + * @dst: destination address to update + * @src: source address containing the data + * @size: how many bytes of memory should be copied + * + * Copy @size bytes from @src to @dst using text poking if the memory at + * @dst is read-only.
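A rough allocator-consumer sketch tying this together with execmem_is_rox(), declared just below (the module-text type, insns buffer and len are illustrative):

/* Allocate executable memory and install instructions into it. When the
 * range is ROX, a plain memcpy() would fault, so updates go through text poking. */
void *text = execmem_alloc(EXECMEM_MODULE_TEXT, len);

if (!text)
	return -ENOMEM;

if (execmem_is_rox(EXECMEM_MODULE_TEXT)) {
	if (!execmem_update_copy(text, insns, len))
		return -EFAULT;	/* sketch: real callers would also free 'text' */
} else {
	memcpy(text, insns, len);
}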
+ * + * Return: a pointer to @dst or NULL on error + */ +void *execmem_update_copy(void *dst, const void *src, size_t size); + +/** + * execmem_is_rox - check if execmem is read-only + * @type - the execmem type to check + * + * Return: %true if the @type is read-only, %false if it's writable + */ +bool execmem_is_rox(enum execmem_type type); + #if defined(CONFIG_EXECMEM) && !defined(CONFIG_ARCH_WANTS_EXECMEM_LATE) void execmem_init(void); #else diff --git a/include/linux/export.h b/include/linux/export.h index 0bbd02fd351d..2633df4d31e6 100644 --- a/include/linux/export.h +++ b/include/linux/export.h @@ -60,14 +60,14 @@ #endif #ifdef DEFAULT_SYMBOL_NAMESPACE -#define _EXPORT_SYMBOL(sym, license) __EXPORT_SYMBOL(sym, license, __stringify(DEFAULT_SYMBOL_NAMESPACE)) +#define _EXPORT_SYMBOL(sym, license) __EXPORT_SYMBOL(sym, license, DEFAULT_SYMBOL_NAMESPACE) #else #define _EXPORT_SYMBOL(sym, license) __EXPORT_SYMBOL(sym, license, "") #endif #define EXPORT_SYMBOL(sym) _EXPORT_SYMBOL(sym, "") #define EXPORT_SYMBOL_GPL(sym) _EXPORT_SYMBOL(sym, "GPL") -#define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL(sym, "", __stringify(ns)) -#define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "GPL", __stringify(ns)) +#define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL(sym, "", ns) +#define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "GPL", ns) #endif /* _LINUX_EXPORT_H */ diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index 893a1d21dc1c..4cc8801e50e3 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h @@ -160,6 +160,19 @@ struct fid { #define EXPORT_FH_FID 0x2 /* File handle may be non-decodeable */ #define EXPORT_FH_DIR_ONLY 0x4 /* Only decode file handle for a directory */ +/* + * Filesystems use only lower 8 bits of file_handle type for fid_type. + * name_to_handle_at() uses upper 16 bits of type as user flags to be + * interpreted by open_by_handle_at(). 
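Put differently, bits 0-7 of the encoded type stay with the filesystem while bits 16-31 are user-visible flags. A hedged sketch of how a consumer might split and validate the field with the macros defined just below (the helper name is illustrative):

    static int split_handle_type(int handle_type, int *fid_type)
    {
    	int uflags = FILEID_USER_FLAGS(handle_type);

    	if (uflags & ~FILEID_VALID_USER_FLAGS)
    		return -EINVAL;			/* unknown user flag bits */

    	*fid_type = handle_type & 0xff;		/* lower 8 bits go to the filesystem */
    	return uflags;
    }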
+ */ +#define FILEID_USER_FLAGS_MASK 0xffff0000 +#define FILEID_USER_FLAGS(type) ((type) & FILEID_USER_FLAGS_MASK) + +/* Flags supported in encoded handle_type that is exported to user */ +#define FILEID_IS_CONNECTABLE 0x10000 +#define FILEID_IS_DIR 0x20000 +#define FILEID_VALID_USER_FLAGS (FILEID_IS_CONNECTABLE | FILEID_IS_DIR) + /** * struct export_operations - for nfsd to communicate with file systems * @encode_fh: encode a file handle fragment from a dentry @@ -250,19 +263,6 @@ struct export_operations { unsigned long flags; }; -/** - * exportfs_lock_op_is_async() - export op supports async lock operation - * @export_ops: the nfs export operations to check - * - * Returns true if the nfs export_operations structure has - * EXPORT_OP_ASYNC_LOCK in their flags set - */ -static inline bool -exportfs_lock_op_is_async(const struct export_operations *export_ops) -{ - return export_ops->flags & EXPORT_OP_ASYNC_LOCK; -} - extern int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid, int *max_len, struct inode *parent, int flags); diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index b0b821edfd97..c24f8bc01045 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -24,10 +24,11 @@ #define NEW_ADDR ((block_t)-1) /* used as block_t addresses */ #define COMPRESS_ADDR ((block_t)-2) /* used as compressed data flag */ -#define F2FS_BYTES_TO_BLK(bytes) ((bytes) >> F2FS_BLKSIZE_BITS) -#define F2FS_BLK_TO_BYTES(blk) ((blk) << F2FS_BLKSIZE_BITS) +#define F2FS_BLKSIZE_MASK (F2FS_BLKSIZE - 1) +#define F2FS_BYTES_TO_BLK(bytes) ((unsigned long long)(bytes) >> F2FS_BLKSIZE_BITS) +#define F2FS_BLK_TO_BYTES(blk) ((unsigned long long)(blk) << F2FS_BLKSIZE_BITS) #define F2FS_BLK_END_BYTES(blk) (F2FS_BLK_TO_BYTES(blk + 1) - 1) -#define F2FS_BLK_ALIGN(x) (F2FS_BYTES_TO_BLK((x) + F2FS_BLKSIZE - 1)) +#define F2FS_BLK_ALIGN(x) (F2FS_BYTES_TO_BLK((x) + F2FS_BLKSIZE - 1)) /* 0, 1(node nid), 2(meta nid) are reserved node id */ #define F2FS_RESERVED_NODE_NUM 3 diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h index 4f1c4f603118..89ff45bd6f01 100644 --- a/include/linux/fanotify.h +++ b/include/linux/fanotify.h @@ -36,6 +36,7 @@ #define FANOTIFY_ADMIN_INIT_FLAGS (FANOTIFY_PERM_CLASSES | \ FAN_REPORT_TID | \ FAN_REPORT_PIDFD | \ + FAN_REPORT_FD_ERROR | \ FAN_UNLIMITED_QUEUE | \ FAN_UNLIMITED_MARKS) diff --git a/include/linux/fb.h b/include/linux/fb.h index 267b59ead432..5ba187e08cf7 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -21,6 +21,7 @@ struct fb_info; struct file; struct i2c_adapter; struct inode; +struct lcd_device; struct module; struct notifier_block; struct page; @@ -480,6 +481,13 @@ struct fb_info { struct mutex bl_curve_mutex; u8 bl_curve[FB_BACKLIGHT_LEVELS]; #endif + + /* + * Assigned LCD device; set before framebuffer + * registration, remove after unregister + */ + struct lcd_device *lcd_dev; + #ifdef CONFIG_FB_DEFERRED_IO struct delayed_work deferred_work; unsigned long npagerefs; @@ -754,6 +762,11 @@ static inline struct backlight_device *fb_bl_device(struct fb_info *info) } #endif +static inline struct lcd_device *fb_lcd_device(struct fb_info *info) +{ + return info->lcd_dev; +} + /* fbmon.c */ #define FB_MAXTIMINGS 0 #define FB_VSYNCTIMINGS 1 diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index b1c5722f2b3c..c45306a9f007 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h @@ -92,10 +92,6 @@ static inline struct file *files_lookup_fd_locked(struct files_struct *files, un return files_lookup_fd_raw(files, 
fd); } -struct file *lookup_fdget_rcu(unsigned int fd); -struct file *task_lookup_fdget_rcu(struct task_struct *task, unsigned int fd); -struct file *task_lookup_next_fdget_rcu(struct task_struct *task, unsigned int *fd); - static inline bool close_on_exec(unsigned int fd, const struct files_struct *files) { return test_bit(fd, files_fdtable(files)->close_on_exec); @@ -115,7 +111,6 @@ int iterate_fd(struct files_struct *, unsigned, const void *); extern int close_fd(unsigned int fd); -extern int __close_range(unsigned int fd, unsigned int max_fd, unsigned int flags); extern struct file *file_close_fd(unsigned int fd); extern struct kmem_cache *files_cachep; diff --git a/include/linux/file.h b/include/linux/file.h index f98de143245a..302f11355b10 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -30,12 +30,6 @@ extern struct file *alloc_file_pseudo_noaccount(struct inode *, struct vfsmount extern struct file *alloc_file_clone(struct file *, int flags, const struct file_operations *); -static inline void fput_light(struct file *file, int fput_needed) -{ - if (fput_needed) - fput(file); -} - /* either a reference to struct file + flags * (cloned vs. borrowed, pos locked), with * flags stored in lower bits of value, @@ -72,6 +66,7 @@ static inline void fdput(struct fd fd) extern struct file *fget(unsigned int fd); extern struct file *fget_raw(unsigned int fd); extern struct file *fget_task(struct task_struct *task, unsigned int fd); +extern struct file *fget_task_next(struct task_struct *task, unsigned int *fd); extern void __f_unlock_pos(struct file *); struct fd fdget(unsigned int fd); @@ -87,6 +82,7 @@ static inline void fdput_pos(struct fd f) DEFINE_CLASS(fd, struct fd, fdput(_T), fdget(fd), int fd) DEFINE_CLASS(fd_raw, struct fd, fdput(_T), fdget_raw(fd), int fd) +DEFINE_CLASS(fd_pos, struct fd, fdput_pos(_T), fdget_pos(fd), int fd) extern int f_dupfd(unsigned int from, struct file *file, unsigned flags); extern int replace_fd(unsigned fd, struct file *file, unsigned flags); diff --git a/include/linux/file_ref.h b/include/linux/file_ref.h new file mode 100644 index 000000000000..9b3a8d9b17ab --- /dev/null +++ b/include/linux/file_ref.h @@ -0,0 +1,177 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _LINUX_FILE_REF_H +#define _LINUX_FILE_REF_H + +#include <linux/atomic.h> +#include <linux/preempt.h> +#include <linux/types.h> + +/* + * file_ref is a reference count implementation specifically for use by + * files. It takes inspiration from rcuref but differs in key aspects + * such as support for SLAB_TYPESAFE_BY_RCU type caches. 
+ * + * FILE_REF_ONEREF FILE_REF_MAXREF + * 0x0000000000000000UL 0x7FFFFFFFFFFFFFFFUL + * <-------------------valid -------------------> + * + * FILE_REF_SATURATED + * 0x8000000000000000UL 0xA000000000000000UL 0xBFFFFFFFFFFFFFFFUL + * <-----------------------saturation zone----------------------> + * + * FILE_REF_RELEASED FILE_REF_DEAD + * 0xC000000000000000UL 0xE000000000000000UL + * <-------------------dead zone-------------------> + * + * FILE_REF_NOREF + * 0xFFFFFFFFFFFFFFFFUL + */ + +#ifdef CONFIG_64BIT +#define FILE_REF_ONEREF 0x0000000000000000UL +#define FILE_REF_MAXREF 0x7FFFFFFFFFFFFFFFUL +#define FILE_REF_SATURATED 0xA000000000000000UL +#define FILE_REF_RELEASED 0xC000000000000000UL +#define FILE_REF_DEAD 0xE000000000000000UL +#define FILE_REF_NOREF 0xFFFFFFFFFFFFFFFFUL +#else +#define FILE_REF_ONEREF 0x00000000U +#define FILE_REF_MAXREF 0x7FFFFFFFU +#define FILE_REF_SATURATED 0xA0000000U +#define FILE_REF_RELEASED 0xC0000000U +#define FILE_REF_DEAD 0xE0000000U +#define FILE_REF_NOREF 0xFFFFFFFFU +#endif + +typedef struct { +#ifdef CONFIG_64BIT + atomic64_t refcnt; +#else + atomic_t refcnt; +#endif +} file_ref_t; + +/** + * file_ref_init - Initialize a file reference count + * @ref: Pointer to the reference count + * @cnt: The initial reference count typically '1' + */ +static inline void file_ref_init(file_ref_t *ref, unsigned long cnt) +{ + atomic_long_set(&ref->refcnt, cnt - 1); +} + +bool __file_ref_put(file_ref_t *ref, unsigned long cnt); + +/** + * file_ref_get - Acquire one reference on a file + * @ref: Pointer to the reference count + * + * Similar to atomic_inc_not_zero() but saturates at FILE_REF_MAXREF. + * + * Provides full memory ordering. + * + * Return: False if the attempt to acquire a reference failed. This happens + * when the last reference has been put already. True if a reference + * was successfully acquired + */ +static __always_inline __must_check bool file_ref_get(file_ref_t *ref) +{ + /* + * Unconditionally increase the reference count with full + * ordering. The saturation and dead zones provide enough + * tolerance for this. + * + * If this indicates negative the file in question the fail can + * be freed and immediately reused due to SLAB_TYPSAFE_BY_RCU. + * Hence, unconditionally altering the file reference count to + * e.g., reset the file reference count back to the middle of + * the deadzone risk end up marking someone else's file as dead + * behind their back. + * + * It would be possible to do a careful: + * + * cnt = atomic_long_inc_return(); + * if (likely(cnt >= 0)) + * return true; + * + * and then something like: + * + * if (cnt >= FILE_REF_RELEASE) + * atomic_long_try_cmpxchg(&ref->refcnt, &cnt, FILE_REF_DEAD), + * + * to set the value back to the middle of the deadzone. But it's + * practically impossible to go from FILE_REF_DEAD to + * FILE_REF_ONEREF. It would need 2305843009213693952/2^61 + * file_ref_get()s to resurrect such a dead file. + */ + return !atomic_long_add_negative(1, &ref->refcnt); +} + +/** + * file_ref_inc - Acquire one reference on a file + * @ref: Pointer to the reference count + * + * Acquire an additional reference on a file. Warns if the caller didn't + * already hold a reference. 
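Taken together, file_ref_init()/file_ref_get()/file_ref_put() behave like a conventional refcount from the caller's point of view; a hedged usage sketch (the embedding object and its free path are assumptions, only the file_ref calls are from this header):

    struct my_obj {				/* hypothetical object embedding a file_ref_t */
    	file_ref_t ref;
    };

    static struct my_obj *my_obj_alloc(void)
    {
    	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

    	if (obj)
    		file_ref_init(&obj->ref, 1);	/* start with one reference */
    	return obj;
    }

    static bool my_obj_tryget(struct my_obj *obj)
    {
    	return file_ref_get(&obj->ref);		/* fails once the last ref was dropped */
    }

    static void my_obj_put(struct my_obj *obj)
    {
    	if (file_ref_put(&obj->ref))		/* true only for the final put */
    		kfree(obj);
    }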
+ */ +static __always_inline void file_ref_inc(file_ref_t *ref) +{ + long prior = atomic_long_fetch_inc_relaxed(&ref->refcnt); + WARN_ONCE(prior < 0, "file_ref_inc() on a released file reference"); +} + +/** + * file_ref_put -- Release a file reference + * @ref: Pointer to the reference count + * + * Provides release memory ordering, such that prior loads and stores + * are done before, and provides an acquire ordering on success such + * that free() must come after. + * + * Return: True if this was the last reference with no future references + * possible. This signals the caller that it can safely release + * the object which is protected by the reference counter. + * False if there are still active references or the put() raced + * with a concurrent get()/put() pair. Caller is not allowed to + * release the protected object. + */ +static __always_inline __must_check bool file_ref_put(file_ref_t *ref) +{ + long cnt; + + /* + * While files are SLAB_TYPESAFE_BY_RCU and thus file_ref_put() + * calls don't risk UAFs when a file is recyclyed, it is still + * vulnerable to UAFs caused by freeing the whole slab page once + * it becomes unused. Prevent file_ref_put() from being + * preempted protects against this. + */ + guard(preempt)(); + /* + * Unconditionally decrease the reference count. The saturation + * and dead zones provide enough tolerance for this. If this + * fails then we need to handle the last reference drop and + * cases inside the saturation and dead zones. + */ + cnt = atomic_long_dec_return(&ref->refcnt); + if (cnt >= 0) + return false; + return __file_ref_put(ref, cnt); +} + +/** + * file_ref_read - Read the number of file references + * @ref: Pointer to the reference count + * + * Return: The number of held references (0 ... N) + */ +static inline unsigned long file_ref_read(file_ref_t *ref) +{ + unsigned long c = atomic_long_read(&ref->refcnt); + + /* Return 0 if within the DEAD zone. */ + return c >= FILE_REF_RELEASED ? 
0 : c + 1; +} + +#endif diff --git a/include/linux/filelock.h b/include/linux/filelock.h index bb44224c6676..c412ded9171e 100644 --- a/include/linux/filelock.h +++ b/include/linux/filelock.h @@ -180,6 +180,11 @@ static inline void locks_wake_up(struct file_lock *fl) wake_up(&fl->c.flc_wait); } +static inline bool locks_can_async_lock(const struct file_operations *fops) +{ + return !fops->lock || fops->fop_flags & FOP_ASYNC_LOCK; +} + /* fs/locks.c */ void locks_free_lock_context(struct inode *inode); void locks_free_lock(struct file_lock *fl); diff --git a/include/linux/filter.h b/include/linux/filter.h index 7d7578a8eac1..0477254bc2d3 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -1119,9 +1119,10 @@ bool bpf_jit_supports_exceptions(void); bool bpf_jit_supports_ptr_xchg(void); bool bpf_jit_supports_arena(void); bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena); +bool bpf_jit_supports_private_stack(void); u64 bpf_arch_uaddress_limit(void); void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie); -bool bpf_helper_changes_pkt_data(void *func); +bool bpf_helper_changes_pkt_data(enum bpf_func_id func_id); static inline bool bpf_dump_raw_ok(const struct cred *cred) { diff --git a/include/linux/firmware/qcom/qcom_scm.h b/include/linux/firmware/qcom/qcom_scm.h index 9f14976399ab..4621aec0328c 100644 --- a/include/linux/firmware/qcom/qcom_scm.h +++ b/include/linux/firmware/qcom/qcom_scm.h @@ -85,6 +85,8 @@ int qcom_scm_io_writel(phys_addr_t addr, unsigned int val); bool qcom_scm_restore_sec_cfg_available(void); int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare); +int qcom_scm_set_gpu_smmu_aperture(unsigned int context_bank); +bool qcom_scm_set_gpu_smmu_aperture_is_available(void); int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size); int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare); int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size); diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index d7d07afc0532..6d4dbc196b93 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -3,7 +3,7 @@ * Xilinx Zynq MPSoC Firmware layer * * Copyright (C) 2014-2021 Xilinx - * Copyright (C) 2022 - 2023, Advanced Micro Devices, Inc. + * Copyright (C) 2022 - 2024, Advanced Micro Devices, Inc. 
* * Michal Simek <[email protected]> * Davorin Mista <[email protected]> @@ -32,6 +32,19 @@ /* SMC SIP service Call Function Identifier Prefix */ #define PM_SIP_SVC 0xC2000000 +/* SMC function ID to get SiP SVC version */ +#define GET_SIP_SVC_VERSION (0x8200ff03U) + +/* SiP Service Calls version numbers */ +#define SIP_SVC_VERSION_MAJOR (0U) +#define SIP_SVC_VERSION_MINOR (2U) + +#define SIP_SVC_PASSTHROUGH_VERSION ((SIP_SVC_VERSION_MAJOR << 16) | \ + SIP_SVC_VERSION_MINOR) + +/* Fixed ID for FW specific APIs */ +#define PASS_THROUGH_FW_CMD_ID GENMASK(11, 0) + /* PM API versions */ #define PM_API_VERSION_1 1 #define PM_API_VERSION_2 2 @@ -51,6 +64,7 @@ #define API_ID_MASK GENMASK(7, 0) #define MODULE_ID_MASK GENMASK(11, 8) +#define PLM_MODULE_ID_MASK GENMASK(15, 8) /* Firmware feature check version mask */ #define FIRMWARE_VERSION_MASK 0xFFFFU @@ -62,7 +76,13 @@ #define GET_CALLBACK_DATA 0xa01 /* Number of 32bits values in payload */ -#define PAYLOAD_ARG_CNT 4U +#define PAYLOAD_ARG_CNT 7U + +/* Number of 64bits arguments for SMC call */ +#define SMC_ARG_CNT_64 8U + +/* Number of 32bits arguments for SMC call */ +#define SMC_ARG_CNT_32 13U /* Number of arguments for a callback */ #define CB_ARG_CNT 4 @@ -130,6 +150,7 @@ enum pm_module_id { PM_MODULE_ID = 0x0, + XPM_MODULE_ID = 0x2, XSEM_MODULE_ID = 0x3, TF_A_MODULE_ID = 0xa, }; @@ -218,9 +239,13 @@ enum pm_ioctl_id { /* Runtime feature configuration */ IOCTL_SET_FEATURE_CONFIG = 26, IOCTL_GET_FEATURE_CONFIG = 27, + /* IOCTL for Secure Read/Write Interface */ + IOCTL_READ_REG = 28, /* Dynamic SD/GEM configuration */ IOCTL_SET_SD_CONFIG = 30, IOCTL_SET_GEM_CONFIG = 31, + /* IOCTL to get default/current QoS */ + IOCTL_GET_QOS = 34, }; enum pm_query_id { @@ -238,6 +263,7 @@ enum pm_query_id { PM_QID_PINCTRL_GET_PIN_GROUPS = 11, PM_QID_CLOCK_GET_NUM_CLOCKS = 12, PM_QID_CLOCK_GET_MAX_DIVISOR = 13, + PM_QID_PINCTRL_GET_ATTRIBUTES = 15, }; enum rpu_oper_mode { @@ -533,6 +559,7 @@ struct zynqmp_pm_query_data { }; int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...); +int zynqmp_pm_invoke_fw_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...); #if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE) int zynqmp_pm_get_api_version(u32 *version); @@ -553,9 +580,9 @@ int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data); int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value); int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type); int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select); -int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset, +int zynqmp_pm_reset_assert(const u32 reset, const enum zynqmp_pm_reset_action assert_flag); -int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status); +int zynqmp_pm_reset_get_status(const u32 reset, u32 *status); unsigned int zynqmp_pm_bootmode_read(u32 *ps_mode); int zynqmp_pm_bootmode_write(u32 ps_mode); int zynqmp_pm_init_finalize(void); @@ -698,14 +725,13 @@ static inline int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select) return -ENODEV; } -static inline int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset, +static inline int zynqmp_pm_reset_assert(const u32 reset, const enum zynqmp_pm_reset_action assert_flag) { return -ENODEV; } -static inline int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, - u32 *status) +static inline int zynqmp_pm_reset_get_status(const u32 reset, u32 *status) { return -ENODEV; } diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h index 0d99bf11d260..e4ce1cae03bf 100644 --- 
a/include/linux/fortify-string.h +++ b/include/linux/fortify-string.h @@ -616,6 +616,12 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size, return false; } +/* + * To work around what seems to be an optimizer bug, the macro arguments + * need to have const copies or the values end up changed by the time they + * reach fortify_warn_once(). See commit 6f7630b1b5bc ("fortify: Capture + * __bos() results in const temp vars") for more details. + */ #define __fortify_memcpy_chk(p, q, size, p_size, q_size, \ p_size_field, q_size_field, op) ({ \ const size_t __fortify_size = (size_t)(size); \ @@ -623,6 +629,8 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size, const size_t __q_size = (q_size); \ const size_t __p_size_field = (p_size_field); \ const size_t __q_size_field = (q_size_field); \ + /* Keep a mutable version of the size for the final copy. */ \ + size_t __copy_size = __fortify_size; \ fortify_warn_once(fortify_memcpy_chk(__fortify_size, __p_size, \ __q_size, __p_size_field, \ __q_size_field, FORTIFY_FUNC_ ##op), \ @@ -630,7 +638,11 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size, __fortify_size, \ "field \"" #p "\" at " FILE_LINE, \ __p_size_field); \ - __underlying_##op(p, q, __fortify_size); \ + /* Hide only the run-time size from value range tracking to */ \ + /* silence compile-time false positive bounds warnings. */ \ + if (!__builtin_constant_p(__copy_size)) \ + OPTIMIZER_HIDE_VAR(__copy_size); \ + __underlying_##op(p, q, __copy_size); \ }) /* diff --git a/include/linux/fs.h b/include/linux/fs.h index 3559446279c1..7e29433c5ecc 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -45,6 +45,8 @@ #include <linux/slab.h> #include <linux/maple_tree.h> #include <linux/rw_hint.h> +#include <linux/file_ref.h> +#include <linux/unicode.h> #include <asm/byteorder.h> #include <uapi/linux/fs.h> @@ -623,6 +625,7 @@ is_uncached_acl(struct posix_acl *acl) #define IOP_NOFOLLOW 0x0004 #define IOP_XATTR 0x0008 #define IOP_DEFAULT_READLINK 0x0010 +#define IOP_MGTIME 0x0020 /* * Keep mostly read-only and often accessed (especially for @@ -1005,7 +1008,7 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index) /** * struct file - Represents a file - * @f_count: reference count + * @f_ref: reference count * @f_lock: Protects f_ep, f_flags. Must not be taken from IRQ context. * @f_mode: FMODE_* flags often used in hotpaths * @f_op: file operations @@ -1030,7 +1033,7 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index) * @f_freeptr: Pointer used by SLAB_TYPESAFE_BY_RCU file cache (don't touch.) 
*/ struct file { - atomic_long_t f_count; + file_ref_t f_ref; spinlock_t f_lock; fmode_t f_mode; const struct file_operations *f_op; @@ -1078,15 +1081,14 @@ struct file_handle { static inline struct file *get_file(struct file *f) { - long prior = atomic_long_fetch_inc_relaxed(&f->f_count); - WARN_ONCE(!prior, "struct file::f_count incremented from zero; use-after-free condition present!\n"); + file_ref_inc(&f->f_ref); return f; } struct file *get_file_rcu(struct file __rcu **f); struct file *get_file_active(struct file **f); -#define file_count(x) atomic_long_read(&(x)->f_count) +#define file_count(f) file_ref_read(&(f)->f_ref) #define MAX_NON_LFS ((1UL<<31) - 1) @@ -1584,6 +1586,8 @@ static inline bool fsuidgid_has_mapping(struct super_block *sb, struct timespec64 current_time(struct inode *inode); struct timespec64 inode_set_ctime_current(struct inode *inode); +struct timespec64 inode_set_ctime_deleg(struct inode *inode, + struct timespec64 update); static inline time64_t inode_get_atime_sec(const struct inode *inode) { @@ -1653,6 +1657,17 @@ static inline struct timespec64 inode_set_mtime(struct inode *inode, return inode_set_mtime_to_ts(inode, ts); } +/* + * Multigrain timestamps + * + * Conditionally use fine-grained ctime and mtime timestamps when there + * are users actively observing them via getattr. The primary use-case + * for this is NFS clients that use the ctime to distinguish between + * different states of the file, and that are often fooled by multiple + * operations that occur in the same coarse-grained timer tick. + */ +#define I_CTIME_QUERIED ((u32)BIT(31)) + static inline time64_t inode_get_ctime_sec(const struct inode *inode) { return inode->i_ctime_sec; @@ -1660,7 +1675,7 @@ static inline time64_t inode_get_ctime_sec(const struct inode *inode) static inline long inode_get_ctime_nsec(const struct inode *inode) { - return inode->i_ctime_nsec; + return inode->i_ctime_nsec & ~I_CTIME_QUERIED; } static inline struct timespec64 inode_get_ctime(const struct inode *inode) @@ -1671,13 +1686,7 @@ static inline struct timespec64 inode_get_ctime(const struct inode *inode) return ts; } -static inline struct timespec64 inode_set_ctime_to_ts(struct inode *inode, - struct timespec64 ts) -{ - inode->i_ctime_sec = ts.tv_sec; - inode->i_ctime_nsec = ts.tv_nsec; - return ts; -} +struct timespec64 inode_set_ctime_to_ts(struct inode *inode, struct timespec64 ts); /** * inode_set_ctime - set the ctime in the inode @@ -2116,6 +2125,8 @@ struct file_operations { #define FOP_HUGE_PAGES ((__force fop_flags_t)(1 << 4)) /* Treat loff_t as unsigned (e.g., /dev/mem) */ #define FOP_UNSIGNED_OFFSET ((__force fop_flags_t)(1 << 5)) +/* Supports asynchronous lock callbacks */ +#define FOP_ASYNC_LOCK ((__force fop_flags_t)(1 << 6)) /* Wrap a directory iterator that needs exclusive inode access */ int wrap_directory_iterator(struct file *, struct dir_context *, @@ -2542,6 +2553,7 @@ struct file_system_type { #define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */ #define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */ #define FS_ALLOW_IDMAP 32 /* FS has been updated to handle vfs idmappings. */ +#define FS_MGTIME 64 /* FS uses multigrain timestamps */ #define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. 
*/ int (*init_fs_context)(struct fs_context *); const struct fs_parameter_spec *parameters; @@ -2565,6 +2577,17 @@ struct file_system_type { #define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME) +/** + * is_mgtime: is this inode using multigrain timestamps + * @inode: inode to test for multigrain timestamps + * + * Return true if the inode uses multigrain timestamps, false otherwise. + */ +static inline bool is_mgtime(const struct inode *inode) +{ + return inode->i_opflags & IOP_MGTIME; +} + extern struct dentry *mount_bdev(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, int (*fill_super)(struct super_block *, void *, int)); @@ -2766,6 +2789,16 @@ extern struct filename *getname_flags(const char __user *, int); extern struct filename *getname_uflags(const char __user *, int); extern struct filename *getname(const char __user *); extern struct filename *getname_kernel(const char *); +extern struct filename *__getname_maybe_null(const char __user *); +static inline struct filename *getname_maybe_null(const char __user *name, int flags) +{ + if (!(flags & AT_EMPTY_PATH)) + return getname(name); + + if (!name) + return NULL; + return __getname_maybe_null(name); +} extern void putname(struct filename *name); extern int finish_open(struct file *file, struct dentry *dentry, @@ -3326,6 +3359,7 @@ extern void page_put_link(void *); extern int page_symlink(struct inode *inode, const char *symname, int len); extern const struct inode_operations page_symlink_inode_operations; extern void kfree_link(void *); +void fill_mg_cmtime(struct kstat *stat, u32 request_mask, struct inode *inode); void generic_fillattr(struct mnt_idmap *, u32, struct inode *, struct kstat *); void generic_fill_statx_attr(struct inode *inode, struct kstat *stat); void generic_fill_statx_atomic_writes(struct kstat *stat, @@ -3456,6 +3490,54 @@ extern int generic_ci_match(const struct inode *parent, const struct qstr *folded_name, const u8 *de_name, u32 de_name_len); +#if IS_ENABLED(CONFIG_UNICODE) +int generic_ci_d_hash(const struct dentry *dentry, struct qstr *str); +int generic_ci_d_compare(const struct dentry *dentry, unsigned int len, + const char *str, const struct qstr *name); + +/** + * generic_ci_validate_strict_name - Check if a given name is suitable + * for a directory + * + * This functions checks if the proposed filename is valid for the + * parent directory. That means that only valid UTF-8 filenames will be + * accepted for casefold directories from filesystems created with the + * strict encoding flag. That also means that any name will be + * accepted for directories that doesn't have casefold enabled, or + * aren't being strict with the encoding. + * + * @dir: inode of the directory where the new file will be created + * @name: name of the new file + * + * Return: + * * True: if the filename is suitable for this directory. It can be + * true if a given name is not suitable for a strict encoding + * directory, but the directory being used isn't strict + * * False if the filename isn't suitable for this directory. This only + * happens when a directory is casefolded and the filesystem is strict + * about its encoding. 
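In practice a filesystem would gate its create/rename paths on this check before committing to anything; a hedged sketch (the wrapper function is hypothetical, the call matches the helper defined just below):

    static int myfs_check_new_name(struct inode *dir, struct dentry *dentry)
    {
    	/* Reject names that are not valid UTF-8 in a strict, casefolded dir. */
    	if (!generic_ci_validate_strict_name(dir, &dentry->d_name))
    		return -EINVAL;
    	return 0;
    }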
+ */ +static inline bool generic_ci_validate_strict_name(struct inode *dir, struct qstr *name) +{ + if (!IS_CASEFOLDED(dir) || !sb_has_strict_encoding(dir->i_sb)) + return true; + + /* + * A casefold dir must have a encoding set, unless the filesystem + * is corrupted + */ + if (WARN_ON_ONCE(!dir->i_sb->s_encoding)) + return true; + + return !utf8_validate(dir->i_sb->s_encoding, name); +} +#else +static inline bool generic_ci_validate_strict_name(struct inode *dir, struct qstr *name) +{ + return true; +} +#endif + static inline bool sb_has_encoding(const struct super_block *sb) { #if IS_ENABLED(CONFIG_UNICODE) @@ -3726,6 +3808,6 @@ static inline bool vfs_empty_path(int dfd, const char __user *path) return !c; } -bool generic_atomic_write_valid(struct iov_iter *iter, loff_t pos); +int generic_atomic_write_valid(struct kiocb *iocb, struct iov_iter *iter); #endif /* _LINUX_FS_H */ diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h index 6cf713a7e6c6..3cef566088fc 100644 --- a/include/linux/fs_parser.h +++ b/include/linux/fs_parser.h @@ -28,7 +28,8 @@ typedef int fs_param_type(struct p_log *, */ fs_param_type fs_param_is_bool, fs_param_is_u32, fs_param_is_s32, fs_param_is_u64, fs_param_is_enum, fs_param_is_string, fs_param_is_blob, fs_param_is_blockdev, - fs_param_is_path, fs_param_is_fd, fs_param_is_uid, fs_param_is_gid; + fs_param_is_path, fs_param_is_fd, fs_param_is_uid, fs_param_is_gid, + fs_param_is_file_or_string; /* * Specification of the type of value a parameter wants. @@ -133,6 +134,8 @@ static inline bool fs_validate_description(const char *name, #define fsparam_bdev(NAME, OPT) __fsparam(fs_param_is_blockdev, NAME, OPT, 0, NULL) #define fsparam_path(NAME, OPT) __fsparam(fs_param_is_path, NAME, OPT, 0, NULL) #define fsparam_fd(NAME, OPT) __fsparam(fs_param_is_fd, NAME, OPT, 0, NULL) +#define fsparam_file_or_string(NAME, OPT) \ + __fsparam(fs_param_is_file_or_string, NAME, OPT, 0, NULL) #define fsparam_uid(NAME, OPT) __fsparam(fs_param_is_uid, NAME, OPT, 0, NULL) #define fsparam_gid(NAME, OPT) __fsparam(fs_param_is_gid, NAME, OPT, 0, NULL) diff --git a/include/linux/fsl/netc_global.h b/include/linux/fsl/netc_global.h new file mode 100644 index 000000000000..fdecca8c90f0 --- /dev/null +++ b/include/linux/fsl/netc_global.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2024 NXP + */ +#ifndef __NETC_GLOBAL_H +#define __NETC_GLOBAL_H + +#include <linux/io.h> + +static inline u32 netc_read(void __iomem *reg) +{ + return ioread32(reg); +} + +static inline void netc_write(void __iomem *reg, u32 val) +{ + iowrite32(val, reg); +} + +#endif diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index fd5e84d0ec47..aa9ddd1e4bb6 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -113,14 +113,54 @@ static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *val #ifdef CONFIG_FUNCTION_TRACER -extern int ftrace_enabled; +#include <linux/ftrace_regs.h> -#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS +extern int ftrace_enabled; +/** + * ftrace_regs - ftrace partial/optimal register set + * + * ftrace_regs represents a group of registers which is used at the + * function entry and exit. There are three types of registers. + * + * - Registers for passing the parameters to callee, including the stack + * pointer. (e.g. rcx, rdx, rdi, rsi, r8, r9 and rsp on x86_64) + * - Registers for passing the return values to caller. + * (e.g. 
rax and rdx on x86_64) + * - Registers for hooking the function call and return including the + * frame pointer (the frame pointer is architecture/config dependent) + * (e.g. rip, rbp and rsp for x86_64) + * + * Also, architecture dependent fields can be used for internal process. + * (e.g. orig_ax on x86_64) + * + * On the function entry, those registers will be restored except for + * the stack pointer, so that user can change the function parameters + * and instruction pointer (e.g. live patching.) + * On the function exit, only registers which is used for return values + * are restored. + * + * NOTE: user *must not* access regs directly, only do it via APIs, because + * the member can be changed according to the architecture. + * This is why the structure is empty here, so that nothing accesses + * the ftrace_regs directly. + */ struct ftrace_regs { - struct pt_regs regs; + /* Nothing to see here, use the accessor functions! */ }; -#define arch_ftrace_get_regs(fregs) (&(fregs)->regs) + +#define ftrace_regs_size() sizeof(struct __arch_ftrace_regs) + +#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS +/* + * Architectures that define HAVE_DYNAMIC_FTRACE_WITH_ARGS must define their own + * arch_ftrace_get_regs() where it only returns pt_regs *if* it is fully + * populated. It should return NULL otherwise. + */ +static inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs) +{ + return &arch_ftrace_regs(fregs)->regs; +} /* * ftrace_regs_set_instruction_pointer() is to be defined by the architecture @@ -150,23 +190,6 @@ static __always_inline bool ftrace_regs_has_args(struct ftrace_regs *fregs) return ftrace_get_regs(fregs) != NULL; } -#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS -#define ftrace_regs_get_instruction_pointer(fregs) \ - instruction_pointer(ftrace_get_regs(fregs)) -#define ftrace_regs_get_argument(fregs, n) \ - regs_get_kernel_argument(ftrace_get_regs(fregs), n) -#define ftrace_regs_get_stack_pointer(fregs) \ - kernel_stack_pointer(ftrace_get_regs(fregs)) -#define ftrace_regs_return_value(fregs) \ - regs_return_value(ftrace_get_regs(fregs)) -#define ftrace_regs_set_return_value(fregs, ret) \ - regs_set_return_value(ftrace_get_regs(fregs), ret) -#define ftrace_override_function_with_return(fregs) \ - override_function_with_return(ftrace_get_regs(fregs)) -#define ftrace_regs_query_register_offset(name) \ - regs_query_register_offset(name) -#endif - typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op, struct ftrace_regs *fregs); @@ -1015,6 +1038,17 @@ struct ftrace_graph_ent { } __packed; /* + * Structure that defines an entry function trace with retaddr. + * It's already packed but the attribute "packed" is needed + * to remove extra padding at the end. + */ +struct fgraph_retaddr_ent { + unsigned long func; /* Current function */ + int depth; + unsigned long retaddr; /* Return address */ +} __packed; + +/* * Structure that defines a return function trace. * It's already packed but the attribute "packed" is needed * to remove extra padding at the end. 
@@ -1039,7 +1073,8 @@ typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *, typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *, struct fgraph_ops *); /* entry */ -extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace, struct fgraph_ops *gops); +extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace, + struct fgraph_ops *gops); bool ftrace_pids_enabled(struct ftrace_ops *ops); #ifdef CONFIG_FUNCTION_GRAPH_TRACER @@ -1055,6 +1090,7 @@ struct fgraph_ops { void *fgraph_reserve_data(int idx, int size_bytes); void *fgraph_retrieve_data(int idx, int *size_bytes); +void *fgraph_retrieve_parent_data(int idx, int *size_bytes, int depth); /* * Stack of return addresses for functions @@ -1064,10 +1100,6 @@ void *fgraph_retrieve_data(int idx, int *size_bytes); struct ftrace_ret_stack { unsigned long ret; unsigned long func; - unsigned long long calltime; -#ifdef CONFIG_FUNCTION_PROFILER - unsigned long long subtime; -#endif #ifdef HAVE_FUNCTION_GRAPH_FP_TEST unsigned long fp; #endif @@ -1087,6 +1119,7 @@ function_graph_enter(unsigned long ret, unsigned long func, struct ftrace_ret_stack * ftrace_graph_get_ret_stack(struct task_struct *task, int skip); +unsigned long ftrace_graph_top_ret_addr(struct task_struct *task); unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret, unsigned long *retp); diff --git a/include/linux/ftrace_regs.h b/include/linux/ftrace_regs.h new file mode 100644 index 000000000000..be1ed0c891d0 --- /dev/null +++ b/include/linux/ftrace_regs.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_FTRACE_REGS_H +#define _LINUX_FTRACE_REGS_H + +/* + * For archs that just copy pt_regs in ftrace regs, it can use this default. + * If an architecture does not use pt_regs, it must define all the below + * accessor functions. 
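Since generic code is expected to go through these accessors rather than touch fregs directly (see the ftrace_regs comment earlier in this patch), an architecture-neutral callback would look roughly like the sketch below (the callback name and the traced values are illustrative):

    static void my_trace_callback(unsigned long ip, unsigned long parent_ip,
    			      struct ftrace_ops *op, struct ftrace_regs *fregs)
    {
    	unsigned long pc = ftrace_regs_get_instruction_pointer(fregs);
    	unsigned long sp = ftrace_regs_get_stack_pointer(fregs);
    	unsigned long a0 = ftrace_regs_get_argument(fregs, 0);

    	trace_printk("ip=%lx pc=%lx sp=%lx arg0=%lx\n", ip, pc, sp, a0);
    }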
+ */ +#ifndef HAVE_ARCH_FTRACE_REGS +struct __arch_ftrace_regs { + struct pt_regs regs; +}; + +#define arch_ftrace_regs(fregs) ((struct __arch_ftrace_regs *)(fregs)) + +struct ftrace_regs; + +#define ftrace_regs_get_instruction_pointer(fregs) \ + instruction_pointer(&arch_ftrace_regs(fregs)->regs) +#define ftrace_regs_get_argument(fregs, n) \ + regs_get_kernel_argument(&arch_ftrace_regs(fregs)->regs, n) +#define ftrace_regs_get_stack_pointer(fregs) \ + kernel_stack_pointer(&arch_ftrace_regs(fregs)->regs) +#define ftrace_regs_get_return_value(fregs) \ + regs_return_value(&arch_ftrace_regs(fregs)->regs) +#define ftrace_regs_set_return_value(fregs, ret) \ + regs_set_return_value(&arch_ftrace_regs(fregs)->regs, ret) +#define ftrace_override_function_with_return(fregs) \ + override_function_with_return(&arch_ftrace_regs(fregs)->regs) +#define ftrace_regs_query_register_offset(name) \ + regs_query_register_offset(name) + +#endif /* HAVE_ARCH_FTRACE_REGS */ + +#endif /* _LINUX_FTRACE_REGS_H */ diff --git a/include/linux/fw_table.h b/include/linux/fw_table.h index 3ff4c277296f..9bd605b87c4c 100644 --- a/include/linux/fw_table.h +++ b/include/linux/fw_table.h @@ -54,7 +54,7 @@ int cdat_table_parse(enum acpi_cdat_type type, #define EXPORT_SYMBOL_FWTBL_LIB(x) EXPORT_SYMBOL_ACPI_LIB(x) #define __init_or_fwtbl_lib __init_or_acpilib #else -#define EXPORT_SYMBOL_FWTBL_LIB(x) EXPORT_SYMBOL_NS_GPL(x, CXL) +#define EXPORT_SYMBOL_FWTBL_LIB(x) EXPORT_SYMBOL_NS_GPL(x, "CXL") #define __init_or_fwtbl_lib #endif diff --git a/include/linux/gfp.h b/include/linux/gfp.h index a951de920e20..b0fe9f62d15b 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -306,7 +306,7 @@ struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order); struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order, struct mempolicy *mpol, pgoff_t ilx, int nid); struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma, - unsigned long addr, bool hugepage); + unsigned long addr); #else static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigned int order) { @@ -326,7 +326,7 @@ static inline struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int orde { return folio_alloc_noprof(gfp, order); } -#define vma_alloc_folio_noprof(gfp, order, vma, addr, hugepage) \ +#define vma_alloc_folio_noprof(gfp, order, vma, addr) \ folio_alloc_noprof(gfp, order) #endif @@ -341,7 +341,7 @@ static inline struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int orde static inline struct page *alloc_page_vma_noprof(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr) { - struct folio *folio = vma_alloc_folio_noprof(gfp, 0, vma, addr, false); + struct folio *folio = vma_alloc_folio_noprof(gfp, 0, vma, addr); return &folio->page; } @@ -371,28 +371,6 @@ __meminit void *alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mas extern void __free_pages(struct page *page, unsigned int order); extern void free_pages(unsigned long addr, unsigned int order); -struct page_frag_cache; -void page_frag_cache_drain(struct page_frag_cache *nc); -extern void __page_frag_cache_drain(struct page *page, unsigned int count); -void *__page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz, - gfp_t gfp_mask, unsigned int align_mask); - -static inline void *page_frag_alloc_align(struct page_frag_cache *nc, - unsigned int fragsz, gfp_t gfp_mask, - unsigned int align) -{ - WARN_ON_ONCE(!is_power_of_2(align)); - return __page_frag_alloc_align(nc, fragsz, gfp_mask, -align); -} - -static inline 
void *page_frag_alloc(struct page_frag_cache *nc, - unsigned int fragsz, gfp_t gfp_mask) -{ - return __page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u); -} - -extern void page_frag_free(void *addr); - #define __free_page(page) __free_pages((page), 0) #define free_page(addr) free_pages((addr), 0) diff --git a/include/linux/gpio.h b/include/linux/gpio.h index 2d105be7bbc3..6270150f4e29 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h @@ -21,9 +21,6 @@ struct device; #define GPIOF_OUT_INIT_LOW ((0 << 0) | (0 << 1)) #define GPIOF_OUT_INIT_HIGH ((0 << 0) | (1 << 1)) -/* Gpio pin is active-low */ -#define GPIOF_ACTIVE_LOW (1 << 2) - /** * struct gpio - a structure describing a GPIO with configuration * @gpio: the GPIO number diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h index 6730ee900ee1..8a03d9696b1c 100644 --- a/include/linux/hid-sensor-ids.h +++ b/include/linux/hid-sensor-ids.h @@ -30,6 +30,8 @@ #define HID_USAGE_SENSOR_PROX 0x200011 #define HID_USAGE_SENSOR_DATA_PRESENCE 0x2004b0 #define HID_USAGE_SENSOR_HUMAN_PRESENCE 0x2004b1 +#define HID_USAGE_SENSOR_HUMAN_PROXIMITY 0x2004b2 +#define HID_USAGE_SENSOR_HUMAN_ATTENTION 0x2004bd /* Pressure (200031) */ #define HID_USAGE_SENSOR_PRESSURE 0x200031 diff --git a/include/linux/hid.h b/include/linux/hid.h index 121d5b8bc867..d11e9c9a5f15 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -359,6 +359,7 @@ struct hid_item { * | @HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP: * | @HID_QUIRK_HAVE_SPECIAL_DRIVER: * | @HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE: + * | @HID_QUIRK_IGNORE_SPECIAL_DRIVER * | @HID_QUIRK_FULLSPEED_INTERVAL: * | @HID_QUIRK_NO_INIT_REPORTS: * | @HID_QUIRK_NO_IGNORE: @@ -384,6 +385,7 @@ struct hid_item { #define HID_QUIRK_HAVE_SPECIAL_DRIVER BIT(19) #define HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE BIT(20) #define HID_QUIRK_NOINVERT BIT(21) +#define HID_QUIRK_IGNORE_SPECIAL_DRIVER BIT(22) #define HID_QUIRK_FULLSPEED_INTERVAL BIT(28) #define HID_QUIRK_NO_INIT_REPORTS BIT(29) #define HID_QUIRK_NO_IGNORE BIT(30) @@ -599,15 +601,17 @@ enum hid_battery_status { struct hid_driver; struct hid_ll_driver; -struct hid_device { /* device report descriptor */ - const __u8 *dev_rdesc; - unsigned dev_rsize; - const __u8 *rdesc; - unsigned rsize; +struct hid_device { + const __u8 *dev_rdesc; /* device report descriptor */ + const __u8 *bpf_rdesc; /* bpf modified report descriptor, if any */ + const __u8 *rdesc; /* currently used report descriptor */ + unsigned int dev_rsize; + unsigned int bpf_rsize; + unsigned int rsize; + unsigned int collection_size; /* Number of allocated hid_collections */ struct hid_collection *collection; /* List of HID collections */ - unsigned collection_size; /* Number of allocated hid_collections */ - unsigned maxcollection; /* Number of parsed collections */ - unsigned maxapplication; /* Number of applications */ + unsigned int maxcollection; /* Number of parsed collections */ + unsigned int maxapplication; /* Number of applications */ __u16 bus; /* BUS ID */ __u16 group; /* Report group */ __u32 vendor; /* Vendor ID */ @@ -974,7 +978,6 @@ const struct hid_device_id *hid_match_device(struct hid_device *hdev, struct hid_driver *hdrv); bool hid_compare_device_paths(struct hid_device *hdev_a, struct hid_device *hdev_b, char separator); -s32 hid_snto32(__u32 value, unsigned n); __u32 hid_field_extract(const struct hid_device *hid, __u8 *report, unsigned offset, unsigned n); diff --git a/include/linux/hid_bpf.h b/include/linux/hid_bpf.h index 6a47223e6460..a2e47dbcf82c 100644 --- 
a/include/linux/hid_bpf.h +++ b/include/linux/hid_bpf.h @@ -78,7 +78,7 @@ struct hid_ops { const struct bus_type *bus_type; }; -extern struct hid_ops *hid_ops; +extern const struct hid_ops *hid_ops; /** * struct hid_bpf_ops - A BPF struct_ops of callbacks allowing to attach HID-BPF @@ -212,7 +212,7 @@ int hid_bpf_connect_device(struct hid_device *hdev); void hid_bpf_disconnect_device(struct hid_device *hdev); void hid_bpf_destroy_device(struct hid_device *hid); int hid_bpf_device_init(struct hid_device *hid); -u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, const u8 *rdesc, unsigned int *size); +const u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, const u8 *rdesc, unsigned int *size); #else /* CONFIG_HID_BPF */ static inline u8 *dispatch_hid_bpf_device_event(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 *size, int interrupt, @@ -228,13 +228,8 @@ static inline int hid_bpf_connect_device(struct hid_device *hdev) { return 0; } static inline void hid_bpf_disconnect_device(struct hid_device *hdev) {} static inline void hid_bpf_destroy_device(struct hid_device *hid) {} static inline int hid_bpf_device_init(struct hid_device *hid) { return 0; } -/* - * This specialized allocator has to be a macro for its allocations to be - * accounted separately (to have a separate alloc_tag). The typecast is - * intentional to enforce typesafety. - */ -#define call_hid_bpf_rdesc_fixup(_hdev, _rdesc, _size) \ - ((u8 *)kmemdup(_rdesc, *(_size), GFP_KERNEL)) +static inline const u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, const u8 *rdesc, + unsigned int *size) { return rdesc; } #endif /* CONFIG_HID_BPF */ diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 930a591b9b61..5c6bea81a90e 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -226,8 +226,8 @@ struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma, { struct folio *folio; - folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr, false); - if (folio) + folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr); + if (folio && user_alloc_needs_zeroing()) clear_user_highpage(&folio->page, vaddr); return folio; diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index 9d7754ad5e9b..6dbd0d49628f 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -229,6 +229,12 @@ struct hisi_qm_status { struct hisi_qm; +enum acc_err_result { + ACC_ERR_NONE, + ACC_ERR_NEED_RESET, + ACC_ERR_RECOVERED, +}; + struct hisi_qm_err_info { char *acpi_rst; u32 msi_wr_port; @@ -257,9 +263,9 @@ struct hisi_qm_err_ini { void (*close_axi_master_ooo)(struct hisi_qm *qm); void (*open_sva_prefetch)(struct hisi_qm *qm); void (*close_sva_prefetch)(struct hisi_qm *qm); - void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts); void (*show_last_dfx_regs)(struct hisi_qm *qm); void (*err_info_init)(struct hisi_qm *qm); + enum acc_err_result (*get_err_result)(struct hisi_qm *qm); }; struct hisi_qm_cap_info { @@ -274,13 +280,25 @@ struct hisi_qm_cap_info { u32 v3_val; }; +struct hisi_qm_cap_query_info { + u32 type; + const char *name; + u32 offset; + u32 v1_val; + u32 v2_val; + u32 v3_val; +}; + struct hisi_qm_cap_record { u32 type; + const char *name; u32 cap_val; }; struct hisi_qm_cap_tables { + u32 qm_cap_size; struct hisi_qm_cap_record *qm_cap_table; + u32 dev_cap_size; struct hisi_qm_cap_record *dev_cap_table; }; @@ -436,37 +454,6 @@ struct hisi_qp { struct uacce_queue *uacce_q; }; -static inline int q_num_set(const char *val, const struct kernel_param 
*kp, - unsigned int device) -{ - struct pci_dev *pdev; - u32 n, q_num; - int ret; - - if (!val) - return -EINVAL; - - pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, device, NULL); - if (!pdev) { - q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2); - pr_info("No device found currently, suppose queue number is %u\n", - q_num); - } else { - if (pdev->revision == QM_HW_V1) - q_num = QM_QNUM_V1; - else - q_num = QM_QNUM_V2; - - pci_dev_put(pdev); - } - - ret = kstrtou32(val, 10, &n); - if (ret || n < QM_MIN_QNUM || n > q_num) - return -EINVAL; - - return param_set_int(val, kp); -} - static inline int vfs_num_set(const char *val, const struct kernel_param *kp) { u32 n; @@ -526,6 +513,8 @@ static inline void hisi_qm_del_list(struct hisi_qm *qm, struct hisi_qm_list *qm_ mutex_unlock(&qm_list->lock); } +int hisi_qm_q_num_set(const char *val, const struct kernel_param *kp, + unsigned int device); int hisi_qm_init(struct hisi_qm *qm); void hisi_qm_uninit(struct hisi_qm *qm); int hisi_qm_start(struct hisi_qm *qm); @@ -583,6 +572,9 @@ void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset); u32 hisi_qm_get_hw_info(struct hisi_qm *qm, const struct hisi_qm_cap_info *info_table, u32 index, bool is_read); +u32 hisi_qm_get_cap_value(struct hisi_qm *qm, + const struct hisi_qm_cap_query_info *info_table, + u32 index, bool is_read); int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs, u32 dev_algs_size); diff --git a/include/linux/host1x.h b/include/linux/host1x.h index c4dde3aafcac..9fa9c30a34e6 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h @@ -14,12 +14,17 @@ enum host1x_class { HOST1X_CLASS_HOST1X = 0x1, + HOST1X_CLASS_NVJPG1 = 0x7, + HOST1X_CLASS_NVENC = 0x21, + HOST1X_CLASS_NVENC1 = 0x22, HOST1X_CLASS_GR2D = 0x51, HOST1X_CLASS_GR2D_SB = 0x52, HOST1X_CLASS_VIC = 0x5D, HOST1X_CLASS_GR3D = 0x60, + HOST1X_CLASS_NVJPG = 0xC0, HOST1X_CLASS_NVDEC = 0xF0, HOST1X_CLASS_NVDEC1 = 0xF5, + HOST1X_CLASS_OFA = 0xF8, }; struct host1x; diff --git a/include/linux/host1x_context_bus.h b/include/linux/host1x_context_bus.h index 72462737a6db..c928cb432680 100644 --- a/include/linux/host1x_context_bus.h +++ b/include/linux/host1x_context_bus.h @@ -9,7 +9,7 @@ #include <linux/device.h> #ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS -extern struct bus_type host1x_context_device_bus_type; +extern const struct bus_type host1x_context_device_bus_type; #endif #endif diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index aa1e65ccb615..7ef5f7ef31a9 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -228,32 +228,17 @@ static inline void hrtimer_cancel_wait_running(struct hrtimer *timer) /* Initialize timers: */ extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock, enum hrtimer_mode mode); -extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id, - enum hrtimer_mode mode); +extern void hrtimer_setup(struct hrtimer *timer, enum hrtimer_restart (*function)(struct hrtimer *), + clockid_t clock_id, enum hrtimer_mode mode); +extern void hrtimer_setup_on_stack(struct hrtimer *timer, + enum hrtimer_restart (*function)(struct hrtimer *), + clockid_t clock_id, enum hrtimer_mode mode); +extern void hrtimer_setup_sleeper_on_stack(struct hrtimer_sleeper *sl, clockid_t clock_id, + enum hrtimer_mode mode); #ifdef CONFIG_DEBUG_OBJECTS_TIMERS -extern void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t which_clock, - enum hrtimer_mode mode); -extern void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl, - 
clockid_t clock_id, - enum hrtimer_mode mode); - extern void destroy_hrtimer_on_stack(struct hrtimer *timer); #else -static inline void hrtimer_init_on_stack(struct hrtimer *timer, - clockid_t which_clock, - enum hrtimer_mode mode) -{ - hrtimer_init(timer, which_clock, mode); -} - -static inline void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl, - clockid_t clock_id, - enum hrtimer_mode mode) -{ - hrtimer_init_sleeper(sl, clock_id, mode); -} - static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { } #endif @@ -337,6 +322,28 @@ static inline int hrtimer_callback_running(struct hrtimer *timer) return timer->base->running == timer; } +/** + * hrtimer_update_function - Update the timer's callback function + * @timer: Timer to update + * @function: New callback function + * + * Only safe to call if the timer is not enqueued. Can be called in the callback function if the + * timer is not enqueued at the same time (see the comments above HRTIMER_STATE_ENQUEUED). + */ +static inline void hrtimer_update_function(struct hrtimer *timer, + enum hrtimer_restart (*function)(struct hrtimer *)) +{ + guard(raw_spinlock_irqsave)(&timer->base->cpu_base->lock); + + if (WARN_ON_ONCE(hrtimer_is_queued(timer))) + return; + + if (WARN_ON_ONCE(!function)) + return; + + timer->function = function; +} + /* Forward a hrtimer so it expires after now: */ extern u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval); diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index ef5b80e48599..b94c2e8ee918 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -2,7 +2,6 @@ #ifndef _LINUX_HUGE_MM_H #define _LINUX_HUGE_MM_H -#include <linux/sched/coredump.h> #include <linux/mm_types.h> #include <linux/fs.h> /* only for vma_is_dax() */ @@ -120,6 +119,8 @@ enum mthp_stat_item { MTHP_STAT_ANON_FAULT_ALLOC, MTHP_STAT_ANON_FAULT_FALLBACK, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE, + MTHP_STAT_ZSWPOUT, + MTHP_STAT_SWPIN, MTHP_STAT_SWPOUT, MTHP_STAT_SWPOUT_FALLBACK, MTHP_STAT_SHMEM_ALLOC, @@ -253,19 +254,6 @@ static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma, return orders; } -static inline bool file_thp_enabled(struct vm_area_struct *vma) -{ - struct inode *inode; - - if (!vma->vm_file) - return false; - - inode = vma->vm_file->f_inode; - - return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) && - !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode); -} - unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma, unsigned long vm_flags, unsigned long tva_flags, diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index e4697539b665..ae4fe8615bb6 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -546,16 +546,10 @@ static inline struct hstate *hstate_inode(struct inode *i) } #endif /* !CONFIG_HUGETLBFS */ -#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA -unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, - unsigned long len, unsigned long pgoff, - unsigned long flags); -#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */ - unsigned long -generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr, - unsigned long len, unsigned long pgoff, - unsigned long flags); +hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags); /* * huegtlb page specific state flags. 
These flags are located in page.private @@ -1035,9 +1029,19 @@ void hugetlb_unregister_node(struct node *node); */ bool is_raw_hwpoison_page_in_hugepage(struct page *page); +static inline unsigned long huge_page_mask_align(struct file *file) +{ + return PAGE_MASK & ~huge_page_mask(hstate_file(file)); +} + #else /* CONFIG_HUGETLB_PAGE */ struct hstate {}; +static inline unsigned long huge_page_mask_align(struct file *file) +{ + return 0; +} + static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio) { return NULL; diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h index 5c6a421ad580..3a63dff62d03 100644 --- a/include/linux/hwmon.h +++ b/include/linux/hwmon.h @@ -368,7 +368,9 @@ enum hwmon_intrusion_attributes { /** * struct hwmon_ops - hwmon device operations - * @is_visible: Callback to return attribute visibility. Mandatory. + * @visible: Static visibility. If non-zero, 'is_visible' is ignored. + * @is_visible: Callback to return attribute visibility. Mandatory unless + * 'visible' is non-zero. * Parameters are: * @const void *drvdata: * Pointer to driver-private data structure passed @@ -412,6 +414,7 @@ enum hwmon_intrusion_attributes { * The function returns 0 on success or a negative error number. */ struct hwmon_ops { + umode_t visible; umode_t (*is_visible)(const void *drvdata, enum hwmon_sensor_types type, u32 attr, int channel); int (*read)(struct device *dev, enum hwmon_sensor_types type, diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 22c22fb91042..02a226bcf0ed 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -1559,6 +1559,7 @@ struct hv_util_service { void *channel; void (*util_cb)(void *); int (*util_init)(struct hv_util_service *); + int (*util_init_transport)(void); void (*util_deinit)(void); int (*util_pre_suspend)(void); int (*util_pre_resume)(void); diff --git a/include/linux/i2c-of-prober.h b/include/linux/i2c-of-prober.h new file mode 100644 index 000000000000..bb6d47f50ee5 --- /dev/null +++ b/include/linux/i2c-of-prober.h @@ -0,0 +1,140 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Definitions for the Linux I2C OF component prober + * + * Copyright (C) 2024 Google LLC + */ + +#ifndef _LINUX_I2C_OF_PROBER_H +#define _LINUX_I2C_OF_PROBER_H + +#include <linux/kconfig.h> +#include <linux/types.h> + +struct device; +struct device_node; + +/** + * struct i2c_of_probe_ops - I2C OF component prober callbacks + * + * A set of callbacks to be used by i2c_of_probe_component(). + * + * All callbacks are optional. Callbacks are called only once per run, and are + * used in the order they are defined in this structure. + * + * All callbacks that have return values shall return %0 on success, + * or a negative error number on failure. + * + * The @dev parameter passed to the callbacks is the same as @dev passed to + * i2c_of_probe_component(). It should only be used for dev_printk() calls + * and nothing else, especially not managed device resource (devres) APIs. + */ +struct i2c_of_probe_ops { + /** + * @enable: Retrieve and enable resources so that the components respond to probes. + * + * It is OK for this callback to return -EPROBE_DEFER since the intended use includes + * retrieving resources and enables them. Resources should be reverted to their initial + * state and released before returning if this fails. 
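A hedged sketch of such an enable callback follows; the context structure and the my_get_resources()/my_put_resources() helpers are assumptions, while the unwind-on-failure is what the paragraph above requires:

    struct my_probe_ctx {				/* hypothetical prober context */
    	struct regulator *supply;
    };

    static int my_probe_enable(struct device *dev, struct device_node *bus_node, void *data)
    {
    	struct my_probe_ctx *ctx = data;
    	int ret;

    	ret = my_get_resources(ctx, bus_node);	/* hypothetical; may return -EPROBE_DEFER */
    	if (ret)
    		return ret;

    	ret = regulator_enable(ctx->supply);
    	if (ret)
    		my_put_resources(ctx);		/* revert to initial state before returning */
    	return ret;
    }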
+ */ + int (*enable)(struct device *dev, struct device_node *bus_node, void *data); + + /** + * @cleanup_early: Release exclusive resources prior to calling probe() on a + * detected component. + * + * Only called if a matching component is actually found. If none are found, + * resources that would have been released in this callback should be released in + * @free_resourcs_late instead. + */ + void (*cleanup_early)(struct device *dev, void *data); + + /** + * @cleanup: Opposite of @enable to balance refcounts and free resources after probing. + * + * Should check if resources were already freed by @cleanup_early. + */ + void (*cleanup)(struct device *dev, void *data); +}; + +/** + * struct i2c_of_probe_cfg - I2C OF component prober configuration + * @ops: Callbacks for the prober to use. + * @type: A string to match the device node name prefix to probe for. + */ +struct i2c_of_probe_cfg { + const struct i2c_of_probe_ops *ops; + const char *type; +}; + +#if IS_ENABLED(CONFIG_OF_DYNAMIC) + +int i2c_of_probe_component(struct device *dev, const struct i2c_of_probe_cfg *cfg, void *ctx); + +/** + * DOC: I2C OF component prober simple helpers + * + * Components such as trackpads are commonly connected to a device's baseboard + * with a 6-pin ribbon cable. That gives at most one voltage supply and one + * GPIO (commonly an "enable" or "reset" line) besides the I2C bus, interrupt + * pin, and common ground. Touchscreens, while integrated into the display + * panel's connection, typically have the same set of connections. + * + * A simple set of helpers is provided here for use with the I2C OF component + * prober. This implementation targets such components, allowing for at most + * one regulator supply. + * + * The following helpers are provided: + * * i2c_of_probe_simple_enable() + * * i2c_of_probe_simple_cleanup_early() + * * i2c_of_probe_simple_cleanup() + */ + +/** + * struct i2c_of_probe_simple_opts - Options for simple I2C component prober callbacks + * @res_node_compatible: Compatible string of device node to retrieve resources from. + * @supply_name: Name of regulator supply. + * @gpio_name: Name of GPIO. NULL if no GPIO line is used. Empty string ("") if GPIO + * line is unnamed. + * @post_power_on_delay_ms: Delay after regulators are powered on. Passed to msleep(). + * @post_gpio_config_delay_ms: Delay after GPIO is configured. Passed to msleep(). + * @gpio_assert_to_enable: %true if GPIO should be asserted, i.e. set to logical high, + * to enable the component. + * + * This describes power sequences common for the class of components supported by the + * simple component prober: + * * @gpio_name is configured to the non-active setting according to @gpio_assert_to_enable. + * * @supply_name regulator supply is enabled. + * * Wait for @post_power_on_delay_ms to pass. + * * @gpio_name is configured to the active setting according to @gpio_assert_to_enable. + * * Wait for @post_gpio_config_delay_ms to pass. + */ +struct i2c_of_probe_simple_opts { + const char *res_node_compatible; + const char *supply_name; + const char *gpio_name; + unsigned int post_power_on_delay_ms; + unsigned int post_gpio_config_delay_ms; + bool gpio_assert_to_enable; +}; + +struct gpio_desc; +struct regulator; + +struct i2c_of_probe_simple_ctx { + /* public: provided by user before helpers are used. */ + const struct i2c_of_probe_simple_opts *opts; + /* private: internal fields for helpers.
*/ + struct regulator *supply; + struct gpio_desc *gpiod; +}; + +int i2c_of_probe_simple_enable(struct device *dev, struct device_node *bus_node, void *data); +void i2c_of_probe_simple_cleanup_early(struct device *dev, void *data); +void i2c_of_probe_simple_cleanup(struct device *dev, void *data); + +extern struct i2c_of_probe_ops i2c_of_probe_simple_ops; + +#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ + +#endif /* _LINUX_I2C_OF_PROBER_H */ diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h index 2a1ed05d5782..12d532b012c5 100644 --- a/include/linux/i3c/master.h +++ b/include/linux/i3c/master.h @@ -298,7 +298,9 @@ enum i3c_open_drain_speed { * @I3C_ADDR_SLOT_I2C_DEV: address is assigned to an I2C device * @I3C_ADDR_SLOT_I3C_DEV: address is assigned to an I3C device * @I3C_ADDR_SLOT_STATUS_MASK: address slot mask - * + * @I3C_ADDR_SLOT_EXT_STATUS_MASK: address slot mask with extended information + * @I3C_ADDR_SLOT_EXT_DESIRED: the bitmask represents addresses that are preferred by some devices, + * such as the "assigned-address" property in a device tree source. * On an I3C bus, addresses are assigned dynamically, and we need to know which * addresses are free to use and which ones are already assigned. * @@ -311,8 +313,12 @@ enum i3c_addr_slot_status { I3C_ADDR_SLOT_I2C_DEV, I3C_ADDR_SLOT_I3C_DEV, I3C_ADDR_SLOT_STATUS_MASK = 3, + I3C_ADDR_SLOT_EXT_STATUS_MASK = 7, + I3C_ADDR_SLOT_EXT_DESIRED = BIT(2), }; +#define I3C_ADDR_SLOT_STATUS_BITS 4 + /** * struct i3c_bus - I3C bus object * @cur_master: I3C master currently driving the bus. Since I3C is multi-master @@ -354,7 +360,7 @@ enum i3c_addr_slot_status { struct i3c_bus { struct i3c_dev_desc *cur_master; int id; - unsigned long addrslots[((I2C_MAX_ADDR + 1) * 2) / BITS_PER_LONG]; + unsigned long addrslots[((I2C_MAX_ADDR + 1) * I3C_ADDR_SLOT_STATUS_BITS) / BITS_PER_LONG]; enum i3c_bus_mode mode; struct { unsigned long i3c; diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 456bca45ff05..05dedc45505c 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1445,6 +1445,8 @@ struct ieee80211_mgmt { __le16 status; __le16 capab; __le16 timeout; + /* followed by BA Extension */ + u8 variable[]; } __packed addba_resp; struct{ u8 action_code; diff --git a/include/linux/if_ltalk.h b/include/linux/if_ltalk.h deleted file mode 100644 index 4cc1c0b77870..000000000000 --- a/include/linux/if_ltalk.h +++ /dev/null @@ -1,8 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __LINUX_LTALK_H -#define __LINUX_LTALK_H - -#include <uapi/linux/if_ltalk.h> - -extern struct net_device *alloc_ltalkdev(int sizeof_priv); -#endif diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index c1645c86eed9..d65b5d71b93b 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -585,13 +585,16 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) * vlan_get_protocol - get protocol EtherType. * @skb: skbuff to query * @type: first vlan protocol + * @mac_offset: MAC offset * @depth: buffer to store length of eth and vlan tags in bytes * * Returns the EtherType of the packet, regardless of whether it is * vlan encapsulated (normal or hardware accelerated) or not. 
*/ -static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type, - int *depth) +static inline __be16 __vlan_get_protocol_offset(const struct sk_buff *skb, + __be16 type, + int mac_offset, + int *depth) { unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH; @@ -610,7 +613,8 @@ static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type, do { struct vlan_hdr vhdr, *vh; - vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr); + vh = skb_header_pointer(skb, mac_offset + vlan_depth, + sizeof(vhdr), &vhdr); if (unlikely(!vh || !--parse_depth)) return 0; @@ -625,6 +629,12 @@ static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type, return type; } +static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type, + int *depth) +{ + return __vlan_get_protocol_offset(skb, type, 0, depth); +} + /** * vlan_get_protocol - get protocol EtherType. * @skb: skbuff to query diff --git a/include/linux/iio/backend.h b/include/linux/iio/backend.h index 37d56914d485..10be00f3b120 100644 --- a/include/linux/iio/backend.h +++ b/include/linux/iio/backend.h @@ -14,12 +14,14 @@ struct iio_dev; enum iio_backend_data_type { IIO_BACKEND_TWOS_COMPLEMENT, IIO_BACKEND_OFFSET_BINARY, + IIO_BACKEND_DATA_UNSIGNED, IIO_BACKEND_DATA_TYPE_MAX }; enum iio_backend_data_source { IIO_BACKEND_INTERNAL_CONTINUOUS_WAVE, IIO_BACKEND_EXTERNAL, + IIO_BACKEND_INTERNAL_RAMP_16BIT, IIO_BACKEND_DATA_SOURCE_MAX }; @@ -89,6 +91,11 @@ enum iio_backend_sample_trigger { * @read_raw: Read a channel attribute from a backend device * @debugfs_print_chan_status: Print channel status into a buffer. * @debugfs_reg_access: Read or write register value of backend. + * @ddr_enable: Enable interface DDR (Double Data Rate) mode. + * @ddr_disable: Disable interface DDR (Double Data Rate) mode. + * @data_stream_enable: Enable data stream. + * @data_stream_disable: Disable data stream. + * @data_transfer_addr: Set data address. 
**/ struct iio_backend_ops { int (*enable)(struct iio_backend *back); @@ -129,6 +136,11 @@ struct iio_backend_ops { size_t len); int (*debugfs_reg_access)(struct iio_backend *back, unsigned int reg, unsigned int writeval, unsigned int *readval); + int (*ddr_enable)(struct iio_backend *back); + int (*ddr_disable)(struct iio_backend *back); + int (*data_stream_enable)(struct iio_backend *back); + int (*data_stream_disable)(struct iio_backend *back); + int (*data_transfer_addr)(struct iio_backend *back, u32 address); }; /** @@ -164,6 +176,11 @@ int iio_backend_data_sample_trigger(struct iio_backend *back, int devm_iio_backend_request_buffer(struct device *dev, struct iio_backend *back, struct iio_dev *indio_dev); +int iio_backend_ddr_enable(struct iio_backend *back); +int iio_backend_ddr_disable(struct iio_backend *back); +int iio_backend_data_stream_enable(struct iio_backend *back); +int iio_backend_data_stream_disable(struct iio_backend *back); +int iio_backend_data_transfer_addr(struct iio_backend *back, u32 address); ssize_t iio_backend_ext_info_set(struct iio_dev *indio_dev, uintptr_t private, const struct iio_chan_spec *chan, const char *buf, size_t len); diff --git a/include/linux/iio/driver.h b/include/linux/iio/driver.h index 7a157ed218f6..7f8b55551ed0 100644 --- a/include/linux/iio/driver.h +++ b/include/linux/iio/driver.h @@ -18,7 +18,7 @@ struct iio_map; * @map: array of mappings specifying association of channel with client */ int iio_map_array_register(struct iio_dev *indio_dev, - struct iio_map *map); + const struct iio_map *map); /** * iio_map_array_unregister() - tell the core to remove consumer mappings for @@ -38,6 +38,7 @@ int iio_map_array_unregister(struct iio_dev *indio_dev); * handle de-registration of the IIO map object when the device's refcount goes to * zero. */ -int devm_iio_map_array_register(struct device *dev, struct iio_dev *indio_dev, struct iio_map *maps); +int devm_iio_map_array_register(struct device *dev, struct iio_dev *indio_dev, + const struct iio_map *maps); #endif diff --git a/include/linux/iio/events.h b/include/linux/iio/events.h index a4558c45a548..72062a0c7c87 100644 --- a/include/linux/iio/events.h +++ b/include/linux/iio/events.h @@ -10,7 +10,7 @@ #include <uapi/linux/iio/events.h> /** - * IIO_EVENT_CODE() - create event identifier + * _IIO_EVENT_CODE() - create event identifier * @chan_type: Type of the channel. Should be one of enum iio_chan_type. * @diff: Whether the event is for an differential channel or not. * @modifier: Modifier for the channel. Should be one of enum iio_modifier. @@ -19,10 +19,13 @@ * @chan: Channel number for non-differential channels. * @chan1: First channel number for differential channels. * @chan2: Second channel number for differential channels. + * + * Drivers should use the specialized macros below instead of using this one + * directly. */ -#define IIO_EVENT_CODE(chan_type, diff, modifier, direction, \ - type, chan, chan1, chan2) \ +#define _IIO_EVENT_CODE(chan_type, diff, modifier, direction, \ + type, chan, chan1, chan2) \ (((u64)type << 56) | ((u64)diff << 55) | \ ((u64)direction << 48) | ((u64)modifier << 40) | \ ((u64)chan_type << 32) | (((u16)chan2) << 16) | ((u16)chan1) | \ @@ -30,7 +33,8 @@ /** - * IIO_MOD_EVENT_CODE() - create event identifier for modified channels + * IIO_MOD_EVENT_CODE() - create event identifier for modified (non + * differential) channels * @chan_type: Type of the channel. Should be one of enum iio_chan_type. * @number: Channel number. * @modifier: Modifier for the channel. 
Should be one of enum iio_modifier. @@ -40,10 +44,11 @@ #define IIO_MOD_EVENT_CODE(chan_type, number, modifier, \ type, direction) \ - IIO_EVENT_CODE(chan_type, 0, modifier, direction, type, number, 0, 0) + _IIO_EVENT_CODE(chan_type, 0, modifier, direction, type, number, 0, 0) /** - * IIO_UNMOD_EVENT_CODE() - create event identifier for unmodified channels + * IIO_UNMOD_EVENT_CODE() - create event identifier for unmodified (non + * differential) channels * @chan_type: Type of the channel. Should be one of enum iio_chan_type. * @number: Channel number. * @type: Type of the event. Should be one of enum iio_event_type. @@ -51,6 +56,18 @@ */ #define IIO_UNMOD_EVENT_CODE(chan_type, number, type, direction) \ - IIO_EVENT_CODE(chan_type, 0, 0, direction, type, number, 0, 0) + _IIO_EVENT_CODE(chan_type, 0, 0, direction, type, number, 0, 0) + +/** + * IIO_DIFF_EVENT_CODE() - create event identifier for differential channels + * @chan_type: Type of the channel. Should be one of enum iio_chan_type. + * @chan1: First channel number for differential channels. + * @chan2: Second channel number for differential channels. + * @type: Type of the event. Should be one of enum iio_event_type. + * @direction: Direction of the event. One of enum iio_event_direction. + */ + +#define IIO_DIFF_EVENT_CODE(chan_type, chan1, chan2, type, direction) \ + _IIO_EVENT_CODE(chan_type, 1, 0, direction, type, 0, chan1, chan2) #endif diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h index 5aec3945555b..a89e7e43e441 100644 --- a/include/linux/iio/iio-opaque.h +++ b/include/linux/iio/iio-opaque.h @@ -70,7 +70,7 @@ struct iio_dev_opaque { #if defined(CONFIG_DEBUG_FS) struct dentry *debugfs_dentry; - unsigned cached_reg_addr; + unsigned int cached_reg_addr; char read_buf[20]; unsigned int read_buf_len; #endif diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 18779b631e90..ae65890d4567 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -282,11 +282,11 @@ struct iio_chan_spec { const struct iio_chan_spec_ext_info *ext_info; const char *extend_name; const char *datasheet_name; - unsigned modified:1; - unsigned indexed:1; - unsigned output:1; - unsigned differential:1; - unsigned has_ext_scan_type:1; + unsigned int modified:1; + unsigned int indexed:1; + unsigned int output:1; + unsigned int differential:1; + unsigned int has_ext_scan_type:1; }; @@ -514,7 +514,7 @@ struct iio_info { const struct iio_chan_spec *chan, enum iio_event_type type, enum iio_event_direction dir, - int state); + bool state); int (*read_event_value)(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, @@ -541,13 +541,13 @@ struct iio_info { int (*update_scan_mode)(struct iio_dev *indio_dev, const unsigned long *scan_mask); int (*debugfs_reg_access)(struct iio_dev *indio_dev, - unsigned reg, unsigned writeval, - unsigned *readval); + unsigned int reg, unsigned int writeval, + unsigned int *readval); int (*fwnode_xlate)(struct iio_dev *indio_dev, const struct fwnode_reference_args *iiospec); - int (*hwfifo_set_watermark)(struct iio_dev *indio_dev, unsigned val); + int (*hwfifo_set_watermark)(struct iio_dev *indio_dev, unsigned int val); int (*hwfifo_flush_to_buffer)(struct iio_dev *indio_dev, - unsigned count); + unsigned int count); }; /** @@ -609,7 +609,7 @@ struct iio_dev { int scan_bytes; const unsigned long *available_scan_masks; - unsigned __private masklength; + unsigned int __private masklength; const unsigned long *active_scan_mask; bool scan_timestamp; struct iio_trigger *trig; @@ 
-624,7 +624,7 @@ struct iio_dev { const struct iio_info *info; const struct iio_buffer_setup_ops *setup_ops; - void *priv; + void *__private priv; }; int iio_device_id(struct iio_dev *indio_dev); @@ -785,7 +785,7 @@ struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv); /* The information at the returned address is guaranteed to be cacheline aligned */ static inline void *iio_priv(const struct iio_dev *indio_dev) { - return indio_dev->priv; + return ACCESS_PRIVATE(indio_dev, priv); } void iio_device_free(struct iio_dev *indio_dev); @@ -831,6 +831,7 @@ int iio_device_resume_triggering(struct iio_dev *indio_dev); bool iio_read_acpi_mount_matrix(struct device *dev, struct iio_mount_matrix *orientation, char *acpi_method); +const char *iio_get_acpi_device_name_and_data(struct device *dev, const void **data); #else static inline bool iio_read_acpi_mount_matrix(struct device *dev, struct iio_mount_matrix *orientation, @@ -838,7 +839,16 @@ static inline bool iio_read_acpi_mount_matrix(struct device *dev, { return false; } +static inline const char * +iio_get_acpi_device_name_and_data(struct device *dev, const void **data) +{ + return NULL; +} #endif +static inline const char *iio_get_acpi_device_name(struct device *dev) +{ + return iio_get_acpi_device_name_and_data(dev, NULL); +} /** * iio_get_current_scan_type - Get the current scan type for a channel diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index cb5280e6cc21..5730ba6b1cfa 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -141,7 +141,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev) ARP_EVICT_NOCARRIER) struct in_ifaddr { - struct hlist_node hash; + struct hlist_node addr_lst; struct in_ifaddr __rcu *ifa_next; struct in_device *ifa_dev; struct rcu_head rcu_head; @@ -226,6 +226,10 @@ static __inline__ bool bad_mask(__be32 mask, __be32 addr) for (ifa = rtnl_dereference((in_dev)->ifa_list); ifa; \ ifa = rtnl_dereference(ifa->ifa_next)) +#define in_dev_for_each_ifa_rtnl_net(net, ifa, in_dev) \ + for (ifa = rtnl_net_dereference(net, (in_dev)->ifa_list); ifa; \ + ifa = rtnl_net_dereference(net, ifa->ifa_next)) + #define in_dev_for_each_ifa_rcu(ifa, in_dev) \ for (ifa = rcu_dereference((in_dev)->ifa_list); ifa; \ ifa = rcu_dereference(ifa->ifa_next)) @@ -252,6 +256,11 @@ static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev) return rtnl_dereference(dev->ip_ptr); } +static inline struct in_device *__in_dev_get_rtnl_net(const struct net_device *dev) +{ + return rtnl_net_dereference(dev_net(dev), dev->ip_ptr); +} + /* called with rcu_read_lock or rtnl held */ static inline bool ip_ignore_linkdown(const struct net_device *dev) { diff --git a/include/linux/input.h b/include/linux/input.h index cd866b020a01..7d7cb0593a63 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -286,6 +286,10 @@ struct input_handle; * @start: starts handler for given handle. This function is called by * input core right after connect() method and also when a process * that "grabbed" a device releases it + * @passive_observer: set to %true by drivers only interested in observing + * data stream from devices if there are other users present. 
Such + drivers will not result in starting the underlying hardware device + when input_open_device() is called for their handles + * @legacy_minors: set to %true by drivers using legacy minor ranges * @minor: beginning of range of 32 legacy minors for devices this driver * can provide @@ -321,6 +325,7 @@ struct input_handler { void (*disconnect)(struct input_handle *handle); void (*start)(struct input_handle *handle); + bool passive_observer; bool legacy_minors; int minor; const char *name; diff --git a/include/linux/intel_vsec.h b/include/linux/intel_vsec.h index 11ee185566c3..b94beab64610 100644 --- a/include/linux/intel_vsec.h +++ b/include/linux/intel_vsec.h @@ -74,10 +74,11 @@ enum intel_vsec_quirks { * @pdev: PCI device reference for the callback's use * @guid: ID of data to acccss * @data: buffer for the data to be copied + * @off: offset into the requested buffer * @count: size of buffer */ struct pmt_callbacks { - int (*read_telem)(struct pci_dev *pdev, u32 guid, u64 *data, u32 count); + int (*read_telem)(struct pci_dev *pdev, u32 guid, u64 *data, loff_t off, u32 count); }; /** diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 457151f9f263..8cd9327e4e78 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -616,6 +616,53 @@ extern void __raise_softirq_irqoff(unsigned int nr); extern void raise_softirq_irqoff(unsigned int nr); extern void raise_softirq(unsigned int nr); +/* + * With forced-threaded interrupts enabled, a raised softirq is deferred to + * ksoftirqd unless it can be handled within the threaded interrupt. This + * affects timer_list timers and hrtimers which are explicitly marked with + * HRTIMER_MODE_SOFT. + * With PREEMPT_RT enabled more hrtimers are moved to softirq for processing + * which includes all timers which are not explicitly marked HRTIMER_MODE_HARD. + * Userspace controlled timers (like the clock_nanosleep() interface) are divided + * into two categories: tasks with elevated scheduling policy including + * SCHED_{FIFO|RR|DL} and tasks with the remaining scheduling policies. The tasks with the + * elevated scheduling policy are woken up directly from the HARDIRQ while all + * other wake ups are delayed to softirq and so to ksoftirqd. + * + * The ksoftirqd runs at SCHED_OTHER policy at which it should remain since it + * handles the softirq in an overloaded situation (it has not handled everything + * within its last run). + * If the timers are handled at SCHED_OTHER priority then they compete with all + * other SCHED_OTHER tasks for CPU resources and are possibly delayed. + * Moving timer softirqs to a low priority SCHED_FIFO thread instead ensures + * that timers are handled before scheduling any SCHED_OTHER thread.
+ */ +DECLARE_PER_CPU(struct task_struct *, ktimerd); +DECLARE_PER_CPU(unsigned long, pending_timer_softirq); +void raise_ktimers_thread(unsigned int nr); + +static inline unsigned int local_timers_pending_force_th(void) +{ + return __this_cpu_read(pending_timer_softirq); +} + +static inline void raise_timer_softirq(unsigned int nr) +{ + lockdep_assert_in_irq(); + if (force_irqthreads()) + raise_ktimers_thread(nr); + else + __raise_softirq_irqoff(nr); +} + +static inline unsigned int local_timers_pending(void) +{ + if (force_irqthreads()) + return local_timers_pending_force_th(); + else + return local_softirq_pending(); +} + DECLARE_PER_CPU(struct task_struct *, ksoftirqd); static inline struct task_struct *this_cpu_ksoftirqd(void) diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index b1ecfc3cd5bc..ce86b09ae80f 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -87,6 +87,7 @@ struct io_pgtable_cfg { * attributes set in the TCR for a non-coherent page-table walker. * * IO_PGTABLE_QUIRK_ARM_HD: Enables dirty tracking in stage 1 pagetable. + * IO_PGTABLE_QUIRK_ARM_S2FWB: Use the FWB format for the MemAttrs bits */ #define IO_PGTABLE_QUIRK_ARM_NS BIT(0) #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1) @@ -95,6 +96,7 @@ struct io_pgtable_cfg { #define IO_PGTABLE_QUIRK_ARM_TTBR1 BIT(5) #define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA BIT(6) #define IO_PGTABLE_QUIRK_ARM_HD BIT(7) + #define IO_PGTABLE_QUIRK_ARM_S2FWB BIT(8) unsigned long quirks; unsigned long pgsize_bitmap; unsigned int ias; diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h index e123d5e17b52..85fe4e6b275c 100644 --- a/include/linux/io_uring.h +++ b/include/linux/io_uring.h @@ -15,10 +15,8 @@ bool io_is_uring_fops(struct file *file); static inline void io_uring_files_cancel(void) { - if (current->io_uring) { - io_uring_unreg_ringfd(); + if (current->io_uring) __io_uring_cancel(false); - } } static inline void io_uring_task_cancel(void) { diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h index b0aeec834c1d..a3ce553413de 100644 --- a/include/linux/io_uring/cmd.h +++ b/include/linux/io_uring/cmd.h @@ -48,7 +48,7 @@ int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw, * Note: the caller should never hard code @issue_flags and is only allowed * to pass the mask provided by the core io_uring code. 
*/ -void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2, +void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, u64 res2, unsigned issue_flags); void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd, @@ -72,7 +72,7 @@ static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw, return -EOPNOTSUPP; } static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, - ssize_t ret2, unsigned issue_flags) + u64 ret2, unsigned issue_flags) { } static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd, @@ -115,7 +115,7 @@ static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd, static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd) { - return cmd_to_io_kiocb(cmd)->task; + return cmd_to_io_kiocb(cmd)->tctx->task; } static inline struct io_uring_cmd_data *io_uring_cmd_get_async_data(struct io_uring_cmd *cmd) diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index 2ee5dc105b58..fd4cdb0860a2 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -56,19 +56,18 @@ struct io_wq_work { int cancel_seq; }; -struct io_fixed_file { - /* file * with additional FFS_* flags */ - unsigned long file_ptr; +struct io_rsrc_data { + unsigned int nr; + struct io_rsrc_node **nodes; }; struct io_file_table { - struct io_fixed_file *files; + struct io_rsrc_data data; unsigned long *bitmap; unsigned int alloc_hint; }; struct io_hash_bucket { - spinlock_t lock; struct hlist_head list; } ____cacheline_aligned_in_smp; @@ -77,6 +76,12 @@ struct io_hash_table { unsigned hash_bits; }; +struct io_mapped_region { + struct page **pages; + void *vmap_ptr; + size_t nr_pages; +}; + /* * Arbitrary limit, can be raised if need be */ @@ -86,6 +91,7 @@ struct io_uring_task { /* submission side */ int cached_refs; const struct io_ring_ctx *last; + struct task_struct *task; struct io_wq *io_wq; struct file *registered_rings[IO_RINGFD_REG_MAX]; @@ -271,7 +277,6 @@ struct io_ring_ctx { * Fixed resources fast path, should be accessed only under * uring_lock, and updated through io_uring_register(2) */ - struct io_rsrc_node *rsrc_node; atomic_t cancel_seq; /* @@ -284,15 +289,13 @@ struct io_ring_ctx { struct io_wq_work_list iopoll_list; struct io_file_table file_table; - struct io_mapped_ubuf **user_bufs; - unsigned nr_user_files; - unsigned nr_user_bufs; + struct io_rsrc_data buf_table; struct io_submit_state submit_state; struct xarray io_bl_xa; - struct io_hash_table cancel_table_locked; + struct io_hash_table cancel_table; struct io_alloc_cache apoll_cache; struct io_alloc_cache netmsg_cache; struct io_alloc_cache rw_cache; @@ -303,6 +306,11 @@ struct io_ring_ctx { * ->uring_cmd() by io_uring_cmd_insert_cancelable() */ struct hlist_head cancelable_uring_cmd; + /* + * For Hybrid IOPOLL, runtime in hybrid polling, without + * scheduling time + */ + u64 hybrid_poll_time; } ____cacheline_aligned_in_smp; struct { @@ -317,6 +325,9 @@ struct io_ring_ctx { unsigned cq_entries; struct io_ev_fd __rcu *io_ev_fd; unsigned cq_extra; + + void *cq_wait_arg; + size_t cq_wait_size; } ____cacheline_aligned_in_smp; /* @@ -325,6 +336,7 @@ struct io_ring_ctx { */ struct { struct llist_head work_llist; + struct llist_head retry_llist; unsigned long check_cq; atomic_t cq_wait_nr; atomic_t cq_timeouts; @@ -333,7 +345,7 @@ struct io_ring_ctx { /* timeouts */ struct { - spinlock_t timeout_lock; + raw_spinlock_t timeout_lock; struct list_head timeout_list; struct list_head 
ltimeout_list; unsigned cq_last_tm_flush; @@ -343,7 +355,6 @@ struct io_ring_ctx { struct list_head io_buffers_comp; struct list_head cq_overflow_list; - struct io_hash_table cancel_table; struct hlist_head waitid_list; @@ -367,16 +378,6 @@ struct io_ring_ctx { struct wait_queue_head poll_wq; struct io_restriction restrictions; - /* slow path rsrc auxilary data, used by update/register */ - struct io_rsrc_data *file_data; - struct io_rsrc_data *buf_data; - - /* protected by ->uring_lock */ - struct list_head rsrc_ref_list; - struct io_alloc_cache rsrc_node_cache; - struct wait_queue_head rsrc_quiesce_wq; - unsigned rsrc_quiesce; - u32 pers_next; struct xarray personalities; @@ -410,7 +411,7 @@ struct io_ring_ctx { /* napi busy poll default timeout */ ktime_t napi_busy_poll_dt; bool napi_prefer_busy_poll; - bool napi_enabled; + u8 napi_track_mode; DECLARE_HASHTABLE(napi_ht, 4); #endif @@ -419,6 +420,13 @@ struct io_ring_ctx { unsigned evfd_last_cq_tail; /* + * Protection for resize vs mmap races - both the mmap and resize + * side will need to grab this lock, to prevent either side from + * being run concurrently with the other. + */ + struct mutex resize_lock; + + /* * If IORING_SETUP_NO_MMAP is used, then the below holds * the gup'ed pages for the two rings, and the sqes. */ @@ -426,6 +434,9 @@ struct io_ring_ctx { unsigned short n_sqe_pages; struct page **ring_pages; struct page **sqe_pages; + + /* used for optimised request parameter and wait argument passing */ + struct io_mapped_region param_region; }; struct io_tw_state { @@ -448,6 +459,7 @@ enum { REQ_F_LINK_TIMEOUT_BIT, REQ_F_NEED_CLEANUP_BIT, REQ_F_POLLED_BIT, + REQ_F_HYBRID_IOPOLL_STATE_BIT, REQ_F_BUFFER_SELECTED_BIT, REQ_F_BUFFER_RING_BIT, REQ_F_REISSUE_BIT, @@ -460,7 +472,6 @@ enum { REQ_F_DOUBLE_POLL_BIT, REQ_F_APOLL_MULTISHOT_BIT, REQ_F_CLEAR_POLLIN_BIT, - REQ_F_HASH_LOCKED_BIT, /* keep async read/write and isreg together and in order */ REQ_F_SUPPORT_NOWAIT_BIT, REQ_F_ISREG_BIT, @@ -469,6 +480,7 @@ enum { REQ_F_BL_EMPTY_BIT, REQ_F_BL_NO_RECYCLE_BIT, REQ_F_BUFFERS_COMMIT_BIT, + REQ_F_BUF_NODE_BIT, /* not a real bit, just to check we're not overflowing the space */ __REQ_F_LAST_BIT, @@ -507,6 +519,8 @@ enum { REQ_F_NEED_CLEANUP = IO_REQ_FLAG(REQ_F_NEED_CLEANUP_BIT), /* already went through poll handler */ REQ_F_POLLED = IO_REQ_FLAG(REQ_F_POLLED_BIT), + /* every req only blocks once in hybrid poll */ + REQ_F_IOPOLL_STATE = IO_REQ_FLAG(REQ_F_HYBRID_IOPOLL_STATE_BIT), /* buffer already selected */ REQ_F_BUFFER_SELECTED = IO_REQ_FLAG(REQ_F_BUFFER_SELECTED_BIT), /* buffer selected from ring, needs commit */ @@ -535,8 +549,6 @@ enum { REQ_F_APOLL_MULTISHOT = IO_REQ_FLAG(REQ_F_APOLL_MULTISHOT_BIT), /* recvmsg special flag, clear EPOLLIN */ REQ_F_CLEAR_POLLIN = IO_REQ_FLAG(REQ_F_CLEAR_POLLIN_BIT), - /* hashed into ->cancel_hash_locked, protected by ->uring_lock */ - REQ_F_HASH_LOCKED = IO_REQ_FLAG(REQ_F_HASH_LOCKED_BIT), /* don't use lazy poll wake for this request */ REQ_F_POLL_NO_LAZY = IO_REQ_FLAG(REQ_F_POLL_NO_LAZY_BIT), /* file is pollable */ @@ -547,6 +559,8 @@ enum { REQ_F_BL_NO_RECYCLE = IO_REQ_FLAG(REQ_F_BL_NO_RECYCLE_BIT), /* buffer ring head needs incrementing on put */ REQ_F_BUFFERS_COMMIT = IO_REQ_FLAG(REQ_F_BUFFERS_COMMIT_BIT), + /* buf node is valid */ + REQ_F_BUF_NODE = IO_REQ_FLAG(REQ_F_BUF_NODE_BIT), }; typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts); @@ -616,12 +630,9 @@ struct io_kiocb { struct io_cqe cqe; struct io_ring_ctx *ctx; - struct task_struct *task; + struct io_uring_task 
*tctx; union { - /* store used ubuf, so we can prevent reloading */ - struct io_mapped_ubuf *imu; - /* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */ struct io_buffer *kbuf; @@ -630,6 +641,8 @@ struct io_kiocb { * REQ_F_BUFFER_RING is set. */ struct io_buffer_list *buf_list; + + struct io_rsrc_node *buf_node; }; union { @@ -639,13 +652,20 @@ struct io_kiocb { __poll_t apoll_events; }; - struct io_rsrc_node *rsrc_node; + struct io_rsrc_node *file_node; atomic_t refs; bool cancel_seq_set; struct io_task_work io_task_work; - /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */ - struct hlist_node hash_node; + union { + /* + * for polled requests, i.e. IORING_OP_POLL_ADD and async armed + * poll + */ + struct hlist_node hash_node; + /* For IOPOLL setup queues, with hybrid polling */ + u64 iopoll_start; + }; /* internal polling, see IORING_FEAT_FAST_POLL */ struct async_poll *apoll; /* opcode allocated if it needs to store data for async defer */ @@ -668,4 +688,9 @@ struct io_overflow_cqe { struct io_uring_cqe cqe; }; +static inline bool io_ctx_cqe32(struct io_ring_ctx *ctx) +{ + return ctx->flags & IORING_SETUP_CQE32; +} + #endif diff --git a/include/linux/iomap.h b/include/linux/iomap.h index f61407e3b121..75bf54e76f3b 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -53,6 +53,9 @@ struct vm_fault; * * IOMAP_F_XATTR indicates that the iomap is for an extended attribute extent * rather than a file data extent. + * + * IOMAP_F_BOUNDARY indicates that I/O and I/O completions for this iomap must + * never be merged with the mapping before it. */ #define IOMAP_F_NEW (1U << 0) #define IOMAP_F_DIRTY (1U << 1) @@ -64,6 +67,7 @@ struct vm_fault; #define IOMAP_F_BUFFER_HEAD 0 #endif /* CONFIG_BUFFER_HEAD */ #define IOMAP_F_XATTR (1U << 5) +#define IOMAP_F_BOUNDARY (1U << 6) /* * Flags set by the core iomap code during operations: @@ -178,6 +182,7 @@ struct iomap_folio_ops { #else #define IOMAP_DAX 0 #endif /* CONFIG_FS_DAX */ +#define IOMAP_ATOMIC (1 << 9) struct iomap_ops { /* @@ -330,7 +335,7 @@ struct iomap_ioend { u16 io_type; u16 io_flags; /* IOMAP_F_* */ struct inode *io_inode; /* file being written to */ - size_t io_size; /* size of the extent */ + size_t io_size; /* size of data within eof */ loff_t io_offset; /* offset in the file */ sector_t io_sector; /* start sector of ioend */ struct bio io_bio; /* MUST BE LAST! */ diff --git a/include/linux/iommu.h b/include/linux/iommu.h index bd722f473635..318d27841130 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -42,6 +42,8 @@ struct notifier_block; struct iommu_sva; struct iommu_dma_cookie; struct iommu_fault_param; +struct iommufd_ctx; +struct iommufd_viommu; #define IOMMU_FAULT_PERM_READ (1 << 0) /* read */ #define IOMMU_FAULT_PERM_WRITE (1 << 1) /* write */ @@ -491,7 +493,9 @@ static inline int __iommu_copy_struct_from_user_array( * @index: Index to the location in the array to copy user data from * @min_last: The last member of the data structure @kdst points in the * initial version. - * Return 0 for success, otherwise -error. + * + * Copy a single entry from a user array. Return 0 for success, otherwise + * -error. 
*/ #define iommu_copy_struct_from_user_array(kdst, user_array, data_type, index, \ min_last) \ @@ -500,6 +504,50 @@ static inline int __iommu_copy_struct_from_user_array( offsetofend(typeof(*(kdst)), min_last)) /** + * iommu_copy_struct_from_full_user_array - Copy iommu driver specific user + * space data from an iommu_user_data_array + * @kdst: Pointer to an iommu driver specific user data that is defined in + * include/uapi/linux/iommufd.h + * @kdst_entry_size: sizeof(*kdst) + * @user_array: Pointer to a struct iommu_user_data_array for a user space + * array + * @data_type: The data type of the @kdst. Must match with @user_array->type + * + * Copy the entire user array. kdst must have room for kdst_entry_size * + * user_array->entry_num bytes. Return 0 for success, otherwise -error. + */ +static inline int +iommu_copy_struct_from_full_user_array(void *kdst, size_t kdst_entry_size, + struct iommu_user_data_array *user_array, + unsigned int data_type) +{ + unsigned int i; + int ret; + + if (user_array->type != data_type) + return -EINVAL; + if (!user_array->entry_num) + return -EINVAL; + if (likely(user_array->entry_len == kdst_entry_size)) { + if (copy_from_user(kdst, user_array->uptr, + user_array->entry_num * + user_array->entry_len)) + return -EFAULT; + } + + /* Copy item by item */ + for (i = 0; i != user_array->entry_num; i++) { + ret = copy_struct_from_user( + kdst + kdst_entry_size * i, kdst_entry_size, + user_array->uptr + user_array->entry_len * i, + user_array->entry_len); + if (ret) + return ret; + } + return 0; +} + +/** * struct iommu_ops - iommu ops and capabilities * @capable: check capability * @hw_info: report iommu hardware information. The data buffer returned by this @@ -509,19 +557,19 @@ static inline int __iommu_copy_struct_from_user_array( * @domain_alloc: allocate and return an iommu domain if success. Otherwise * NULL is returned. The domain is not fully initialized until * the caller iommu_domain_alloc() returns. - * @domain_alloc_user: Allocate an iommu domain corresponding to the input - * parameters as defined in include/uapi/linux/iommufd.h. - * Unlike @domain_alloc, it is called only by IOMMUFD and - * must fully initialize the new domain before return. - * Upon success, if the @user_data is valid and the @parent - * points to a kernel-managed domain, the new domain must be - * IOMMU_DOMAIN_NESTED type; otherwise, the @parent must be - * NULL while the @user_data can be optionally provided, the - * new domain must support __IOMMU_DOMAIN_PAGING. - * Upon failure, ERR_PTR must be returned. + * @domain_alloc_paging_flags: Allocate an iommu domain corresponding to the + * input parameters as defined in + * include/uapi/linux/iommufd.h. The @user_data can be + * optionally provided, the new domain must support + * __IOMMU_DOMAIN_PAGING. Upon failure, ERR_PTR must be + * returned. * @domain_alloc_paging: Allocate an iommu_domain that can be used for - * UNMANAGED, DMA, and DMA_FQ domain types. + * UNMANAGED, DMA, and DMA_FQ domain types. This is the + * same as invoking domain_alloc_paging_flags() with + * @flags=0, @user_data=NULL. A driver should implement + * only one of the two ops. * @domain_alloc_sva: Allocate an iommu_domain for Shared Virtual Addressing. + * @domain_alloc_nested: Allocate an iommu_domain for nested translation. 
* @probe_device: Add device to iommu driver handling * @release_device: Remove device from iommu driver handling * @probe_finalize: Do final setup work after the device is added to an IOMMU @@ -542,6 +590,14 @@ static inline int __iommu_copy_struct_from_user_array( * @remove_dev_pasid: Remove any translation configurations of a specific * pasid, so that any DMA transactions with this pasid * will be blocked by the hardware. + * @viommu_alloc: Allocate an iommufd_viommu on a physical IOMMU instance behind + * the @dev, as the set of virtualization resources shared/passed + * to user space IOMMU instance. And associate it with a nesting + * @parent_domain. The @viommu_type must be defined in the header + * include/uapi/linux/iommufd.h + * It is required to call iommufd_viommu_alloc() helper for + * a bundled allocation of the core and the driver structures, + * using the given @ictx pointer. * @pgsize_bitmap: bitmap of all possible supported page sizes * @owner: Driver module providing these ops * @identity_domain: An always available, always attachable identity @@ -562,12 +618,15 @@ struct iommu_ops { /* Domain allocation and freeing by the iommu driver */ struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type); - struct iommu_domain *(*domain_alloc_user)( - struct device *dev, u32 flags, struct iommu_domain *parent, + struct iommu_domain *(*domain_alloc_paging_flags)( + struct device *dev, u32 flags, const struct iommu_user_data *user_data); struct iommu_domain *(*domain_alloc_paging)(struct device *dev); struct iommu_domain *(*domain_alloc_sva)(struct device *dev, struct mm_struct *mm); + struct iommu_domain *(*domain_alloc_nested)( + struct device *dev, struct iommu_domain *parent, u32 flags, + const struct iommu_user_data *user_data); struct iommu_device *(*probe_device)(struct device *dev); void (*release_device)(struct device *dev); @@ -591,6 +650,10 @@ struct iommu_ops { void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid, struct iommu_domain *domain); + struct iommufd_viommu *(*viommu_alloc)( + struct device *dev, struct iommu_domain *parent_domain, + struct iommufd_ctx *ictx, unsigned int viommu_type); + const struct iommu_domain_ops *default_domain_ops; unsigned long pgsize_bitmap; struct module *owner; @@ -616,7 +679,8 @@ struct iommu_ops { * * EBUSY - device is attached to a domain and cannot be changed * * ENODEV - device specific errors, not able to be attached * * <others> - treated as ENODEV by the caller. Use is discouraged - * @set_dev_pasid: set an iommu domain to a pasid of device + * @set_dev_pasid: set or replace an iommu domain to a pasid of device. The pasid of + * the device should be left in the old config in error case. * @map_pages: map a physically contiguous set of pages of the same size to * an iommu domain. * @unmap_pages: unmap a number of pages of the same size from an iommu domain @@ -635,14 +699,13 @@ struct iommu_ops { * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE, * including no-snoop TLPs on PCIe or other platform * specific mechanisms. - * @enable_nesting: Enable nesting * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*) * @free: Release the domain after use. 
*/ struct iommu_domain_ops { int (*attach_dev)(struct iommu_domain *domain, struct device *dev); int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev, - ioasid_t pasid); + ioasid_t pasid, struct iommu_domain *old); int (*map_pages)(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t pgsize, size_t pgcount, @@ -663,7 +726,6 @@ struct iommu_domain_ops { dma_addr_t iova); bool (*enforce_cache_coherency)(struct iommu_domain *domain); - int (*enable_nesting)(struct iommu_domain *domain); int (*set_pgtable_quirks)(struct iommu_domain *domain, unsigned long quirks); @@ -784,12 +846,13 @@ static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather) }; } -extern int bus_iommu_probe(const struct bus_type *bus); -extern bool iommu_present(const struct bus_type *bus); extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap); extern bool iommu_group_has_isolated_msi(struct iommu_group *group); -extern struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus); -struct iommu_domain *iommu_paging_domain_alloc(struct device *dev); +struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev, unsigned int flags); +static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev) +{ + return iommu_paging_domain_alloc_flags(dev, 0); +} extern void iommu_domain_free(struct iommu_domain *domain); extern int iommu_attach_device(struct iommu_domain *domain, struct device *dev); @@ -844,7 +907,6 @@ extern void iommu_group_put(struct iommu_group *group); extern int iommu_group_id(struct iommu_group *group); extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *); -int iommu_enable_nesting(struct iommu_domain *domain); int iommu_set_pgtable_quirks(struct iommu_domain *domain, unsigned long quirks); @@ -994,6 +1056,8 @@ struct iommu_fwspec { /* ATS is supported */ #define IOMMU_FWSPEC_PCI_RC_ATS (1 << 0) +/* CANWBS is supported */ +#define IOMMU_FWSPEC_PCI_RC_CANWBS (1 << 1) /* * An iommu attach handle represents a relationship between an iommu domain @@ -1081,19 +1145,15 @@ struct iommu_iotlb_gather {}; struct iommu_dirty_bitmap {}; struct iommu_dirty_ops {}; -static inline bool iommu_present(const struct bus_type *bus) -{ - return false; -} - static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap) { return false; } -static inline struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus) +static inline struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev, + unsigned int flags) { - return NULL; + return ERR_PTR(-ENODEV); } static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev) diff --git a/include/linux/iommufd.h b/include/linux/iommufd.h index 30f832a60ccb..11110c749200 100644 --- a/include/linux/iommufd.h +++ b/include/linux/iommufd.h @@ -8,16 +8,46 @@ #include <linux/err.h> #include <linux/errno.h> +#include <linux/refcount.h> #include <linux/types.h> +#include <linux/xarray.h> struct device; struct file; struct iommu_group; +struct iommu_user_data; +struct iommu_user_data_array; struct iommufd_access; struct iommufd_ctx; struct iommufd_device; +struct iommufd_viommu_ops; struct page; +enum iommufd_object_type { + IOMMUFD_OBJ_NONE, + IOMMUFD_OBJ_ANY = IOMMUFD_OBJ_NONE, + IOMMUFD_OBJ_DEVICE, + IOMMUFD_OBJ_HWPT_PAGING, + IOMMUFD_OBJ_HWPT_NESTED, + IOMMUFD_OBJ_IOAS, + IOMMUFD_OBJ_ACCESS, + IOMMUFD_OBJ_FAULT, + IOMMUFD_OBJ_VIOMMU, + IOMMUFD_OBJ_VDEVICE, +#ifdef CONFIG_IOMMUFD_TEST + IOMMUFD_OBJ_SELFTEST, +#endif 
+ IOMMUFD_OBJ_MAX, +}; + +/* Base struct for all objects with a userspace ID handle. */ +struct iommufd_object { + refcount_t shortterm_users; + refcount_t users; + enum iommufd_object_type type; + unsigned int id; +}; + struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx, struct device *dev, u32 *id); void iommufd_device_unbind(struct iommufd_device *idev); @@ -54,6 +84,45 @@ void iommufd_access_detach(struct iommufd_access *access); void iommufd_ctx_get(struct iommufd_ctx *ictx); +struct iommufd_viommu { + struct iommufd_object obj; + struct iommufd_ctx *ictx; + struct iommu_device *iommu_dev; + struct iommufd_hwpt_paging *hwpt; + + const struct iommufd_viommu_ops *ops; + + struct xarray vdevs; + + unsigned int type; +}; + +/** + * struct iommufd_viommu_ops - vIOMMU specific operations + * @destroy: Clean up all driver-specific parts of an iommufd_viommu. The memory + * of the vIOMMU will be free-ed by iommufd core after calling this op + * @alloc_domain_nested: Allocate a IOMMU_DOMAIN_NESTED on a vIOMMU that holds a + * nesting parent domain (IOMMU_DOMAIN_PAGING). @user_data + * must be defined in include/uapi/linux/iommufd.h. + * It must fully initialize the new iommu_domain before + * returning. Upon failure, ERR_PTR must be returned. + * @cache_invalidate: Flush hardware cache used by a vIOMMU. It can be used for + * any IOMMU hardware specific cache: TLB and device cache. + * The @array passes in the cache invalidation requests, in + * form of a driver data structure. A driver must update the + * array->entry_num to report the number of handled requests. + * The data structure of the array entry must be defined in + * include/uapi/linux/iommufd.h + */ +struct iommufd_viommu_ops { + void (*destroy)(struct iommufd_viommu *viommu); + struct iommu_domain *(*alloc_domain_nested)( + struct iommufd_viommu *viommu, u32 flags, + const struct iommu_user_data *user_data); + int (*cache_invalidate)(struct iommufd_viommu *viommu, + struct iommu_user_data_array *array); +}; + #if IS_ENABLED(CONFIG_IOMMUFD) struct iommufd_ctx *iommufd_ctx_from_file(struct file *file); struct iommufd_ctx *iommufd_ctx_from_fd(int fd); @@ -111,4 +180,43 @@ static inline int iommufd_vfio_compat_set_no_iommu(struct iommufd_ctx *ictx) return -EOPNOTSUPP; } #endif /* CONFIG_IOMMUFD */ + +#if IS_ENABLED(CONFIG_IOMMUFD_DRIVER_CORE) +struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx, + size_t size, + enum iommufd_object_type type); +struct device *iommufd_viommu_find_dev(struct iommufd_viommu *viommu, + unsigned long vdev_id); +#else /* !CONFIG_IOMMUFD_DRIVER_CORE */ +static inline struct iommufd_object * +_iommufd_object_alloc(struct iommufd_ctx *ictx, size_t size, + enum iommufd_object_type type) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline struct device * +iommufd_viommu_find_dev(struct iommufd_viommu *viommu, unsigned long vdev_id) +{ + return NULL; +} +#endif /* CONFIG_IOMMUFD_DRIVER_CORE */ + +/* + * Helpers for IOMMU driver to allocate driver structures that will be freed by + * the iommufd core. The free op will be called prior to freeing the memory. 
+ */ +#define iommufd_viommu_alloc(ictx, drv_struct, member, viommu_ops) \ + ({ \ + drv_struct *ret; \ + \ + static_assert(__same_type(struct iommufd_viommu, \ + ((drv_struct *)NULL)->member)); \ + static_assert(offsetof(drv_struct, member.obj) == 0); \ + ret = (drv_struct *)_iommufd_object_alloc( \ + ictx, sizeof(drv_struct), IOMMUFD_OBJ_VIOMMU); \ + if (!IS_ERR(ret)) \ + ret->member.ops = viommu_ops; \ + ret; \ + }) #endif diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h index 19a7b00baff4..91324c331a4b 100644 --- a/include/linux/iopoll.h +++ b/include/linux/iopoll.h @@ -19,19 +19,19 @@ * @op: accessor function (takes @args as its arguments) * @val: Variable to read the value into * @cond: Break condition (usually involving @val) - * @sleep_us: Maximum time to sleep between reads in us (0 - * tight-loops). Should be less than ~20ms since usleep_range - * is used (see Documentation/timers/timers-howto.rst). + * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops). Please + * read usleep_range() function description for details and + * limitations. * @timeout_us: Timeout in us, 0 means never timeout * @sleep_before_read: if it is true, sleep @sleep_us before read. * @args: arguments for @op poll * - * Returns 0 on success and -ETIMEDOUT upon a timeout. In either - * case, the last read value at @args is stored in @val. Must not - * be called from atomic context if sleep_us or timeout_us are used. - * * When available, you'll probably want to use one of the specialized * macros defined below rather than this macro directly. + * + * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either + * case, the last read value at @args is stored in @val. Must not + * be called from atomic context if sleep_us or timeout_us are used. */ #define read_poll_timeout(op, val, cond, sleep_us, timeout_us, \ sleep_before_read, args...) \ @@ -64,22 +64,22 @@ * @op: accessor function (takes @args as its arguments) * @val: Variable to read the value into * @cond: Break condition (usually involving @val) - * @delay_us: Time to udelay between reads in us (0 tight-loops). Should - * be less than ~10us since udelay is used (see - * Documentation/timers/timers-howto.rst). + * @delay_us: Time to udelay between reads in us (0 tight-loops). Please + * read udelay() function description for details and + * limitations. * @timeout_us: Timeout in us, 0 means never timeout * @delay_before_read: if it is true, delay @delay_us before read. * @args: arguments for @op poll * - * Returns 0 on success and -ETIMEDOUT upon a timeout. In either - * case, the last read value at @args is stored in @val. - * * This macro does not rely on timekeeping. Hence it is safe to call even when * timekeeping is suspended, at the expense of an underestimation of wall clock * time, which is rather minimal with a non-zero delay_us. * * When available, you'll probably want to use one of the specialized * macros defined below rather than this macro directly. + * + * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either + * case, the last read value at @args is stored in @val. */ #define read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, \ delay_before_read, args...) \ @@ -119,17 +119,17 @@ * @addr: Address to poll * @val: Variable to read the value into * @cond: Break condition (usually involving @val) - * @sleep_us: Maximum time to sleep between reads in us (0 - * tight-loops). Should be less than ~20ms since usleep_range - * is used (see Documentation/timers/timers-howto.rst). 
+ * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops). Please + * read usleep_range() function description for details and + * limitations. * @timeout_us: Timeout in us, 0 means never timeout * - * Returns 0 on success and -ETIMEDOUT upon a timeout. In either - * case, the last read value at @addr is stored in @val. Must not - * be called from atomic context if sleep_us or timeout_us are used. - * * When available, you'll probably want to use one of the specialized * macros defined below rather than this macro directly. + * + * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either + * case, the last read value at @addr is stored in @val. Must not + * be called from atomic context if sleep_us or timeout_us are used. */ #define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) \ read_poll_timeout(op, val, cond, sleep_us, timeout_us, false, addr) @@ -140,16 +140,16 @@ * @addr: Address to poll * @val: Variable to read the value into * @cond: Break condition (usually involving @val) - * @delay_us: Time to udelay between reads in us (0 tight-loops). Should - * be less than ~10us since udelay is used (see - * Documentation/timers/timers-howto.rst). + * @delay_us: Time to udelay between reads in us (0 tight-loops). Please + * read udelay() function description for details and + * limitations. * @timeout_us: Timeout in us, 0 means never timeout * - * Returns 0 on success and -ETIMEDOUT upon a timeout. In either - * case, the last read value at @addr is stored in @val. - * * When available, you'll probably want to use one of the specialized * macros defined below rather than this macro directly. + * + * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either + * case, the last read value at @addr is stored in @val. */ #define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \ read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, false, addr) diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 6e9fb667a1c5..5385349f0b8a 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -249,6 +249,38 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start); int adjust_resource(struct resource *res, resource_size_t start, resource_size_t size); resource_size_t resource_alignment(struct resource *res); + +/** + * resource_set_size - Calculate resource end address from size and start + * @res: Resource descriptor + * @size: Size of the resource + * + * Calculate the end address for @res based on @size. + * + * Note: The start address of @res must be set when calling this function. + * Prefer resource_set_range() if setting both the start address and @size. + */ +static inline void resource_set_size(struct resource *res, resource_size_t size) +{ + res->end = res->start + size - 1; +} + +/** + * resource_set_range - Set resource start and end addresses + * @res: Resource descriptor + * @start: Start address for the resource + * @size: Size of the resource + * + * Set @res start address and calculate the end address based on @size. 
+ */ +static inline void resource_set_range(struct resource *res, + resource_size_t start, + resource_size_t size) +{ + res->start = start; + resource_set_size(res, size); +} + static inline resource_size_t resource_size(const struct resource *res) { return res->end - res->start + 1; diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index 3f003d5fde53..57b074e0cfbb 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h @@ -18,6 +18,8 @@ #include <asm/irqflags.h> #include <asm/percpu.h> +struct task_struct; + /* Currently lockdep_softirqs_on/off is used only by lockdep */ #ifdef CONFIG_PROVE_LOCKING extern void lockdep_softirqs_on(unsigned long ip); @@ -25,12 +27,16 @@ extern void lockdep_hardirqs_on_prepare(void); extern void lockdep_hardirqs_on(unsigned long ip); extern void lockdep_hardirqs_off(unsigned long ip); + extern void lockdep_cleanup_dead_cpu(unsigned int cpu, + struct task_struct *idle); #else static inline void lockdep_softirqs_on(unsigned long ip) { } static inline void lockdep_softirqs_off(unsigned long ip) { } static inline void lockdep_hardirqs_on_prepare(void) { } static inline void lockdep_hardirqs_on(unsigned long ip) { } static inline void lockdep_hardirqs_off(unsigned long ip) { } + static inline void lockdep_cleanup_dead_cpu(unsigned int cpu, + struct task_struct *idle) {} #endif #ifdef CONFIG_TRACE_IRQFLAGS diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h index 3496baa0b07f..e97206c721a0 100644 --- a/include/linux/irqnr.h +++ b/include/linux/irqnr.h @@ -5,30 +5,36 @@ #include <uapi/linux/irqnr.h> -extern int nr_irqs; +unsigned int irq_get_nr_irqs(void) __pure; +unsigned int irq_set_nr_irqs(unsigned int nr); extern struct irq_desc *irq_to_desc(unsigned int irq); unsigned int irq_get_next_irq(unsigned int offset); -# define for_each_irq_desc(irq, desc) \ - for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \ - irq++, desc = irq_to_desc(irq)) \ - if (!desc) \ - ; \ - else - +#define for_each_irq_desc(irq, desc) \ + for (unsigned int __nr_irqs__ = irq_get_nr_irqs(); __nr_irqs__; \ + __nr_irqs__ = 0) \ + for (irq = 0, desc = irq_to_desc(irq); irq < __nr_irqs__; \ + irq++, desc = irq_to_desc(irq)) \ + if (!desc) \ + ; \ + else # define for_each_irq_desc_reverse(irq, desc) \ - for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0; \ - irq--, desc = irq_to_desc(irq)) \ + for (irq = irq_get_nr_irqs() - 1, desc = irq_to_desc(irq); \ + irq >= 0; irq--, desc = irq_to_desc(irq)) \ if (!desc) \ ; \ else -# define for_each_active_irq(irq) \ - for (irq = irq_get_next_irq(0); irq < nr_irqs; \ - irq = irq_get_next_irq(irq + 1)) +#define for_each_active_irq(irq) \ + for (unsigned int __nr_irqs__ = irq_get_nr_irqs(); __nr_irqs__; \ + __nr_irqs__ = 0) \ + for (irq = irq_get_next_irq(0); irq < __nr_irqs__; \ + irq = irq_get_next_irq(irq + 1)) -#define for_each_irq_nr(irq) \ - for (irq = 0; irq < nr_irqs; irq++) +#define for_each_irq_nr(irq) \ + for (unsigned int __nr_irqs__ = irq_get_nr_irqs(); __nr_irqs__; \ + __nr_irqs__ = 0) \ + for (irq = 0; irq < __nr_irqs__; irq++) #endif diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 8aef9bb6ad57..50f7ea8714bf 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1796,22 +1796,21 @@ static inline unsigned long jbd2_log_space_left(journal_t *journal) static inline u32 jbd2_chksum(journal_t *journal, u32 crc, const void *address, unsigned int length) { - struct { - struct shash_desc shash; - char ctx[JBD_MAX_CHECKSUM_SIZE]; - } desc; + DEFINE_RAW_FLEX(struct shash_desc, desc, 
__ctx, + DIV_ROUND_UP(JBD_MAX_CHECKSUM_SIZE, + sizeof(*((struct shash_desc *)0)->__ctx))); int err; BUG_ON(crypto_shash_descsize(journal->j_chksum_driver) > JBD_MAX_CHECKSUM_SIZE); - desc.shash.tfm = journal->j_chksum_driver; - *(u32 *)desc.ctx = crc; + desc->tfm = journal->j_chksum_driver; + *(u32 *)desc->__ctx = crc; - err = crypto_shash_update(&desc.shash, address, length); + err = crypto_shash_update(desc, address, length); BUG_ON(err); - return *(u32 *)desc.ctx; + return *(u32 *)desc->__ctx; } /* Return most recent uncommitted transaction */ diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index 1220f0fbe5bf..ed945f42e064 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -502,7 +502,7 @@ static inline unsigned long _msecs_to_jiffies(const unsigned int m) * - all other values are converted to jiffies by either multiplying * the input value by a factor or dividing it with a factor and * handling any 32-bit overflows. - * for the details see __msecs_to_jiffies() + * for the details see _msecs_to_jiffies() * * msecs_to_jiffies() checks for the passed in value being a constant * via __builtin_constant_p() allowing gcc to eliminate most of the @@ -526,6 +526,19 @@ static __always_inline unsigned long msecs_to_jiffies(const unsigned int m) } } +/** + * secs_to_jiffies: - convert seconds to jiffies + * @_secs: time in seconds + * + * Conversion is done by simple multiplication with HZ + * + * secs_to_jiffies() is defined as a macro rather than a static inline + * function so it can be used in static initializers. + * + * Return: jiffies value + */ +#define secs_to_jiffies(_secs) ((_secs) * HZ) + extern unsigned long __usecs_to_jiffies(const unsigned int u); #if !(USEC_PER_SEC % HZ) static inline unsigned long _usecs_to_jiffies(const unsigned int u) diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 00a3bf7c0d8f..6bbfc8aa42e8 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -29,6 +29,9 @@ typedef unsigned int __bitwise kasan_vmalloc_flags_t; #define KASAN_VMALLOC_VM_ALLOC ((__force kasan_vmalloc_flags_t)0x02u) #define KASAN_VMALLOC_PROT_NORMAL ((__force kasan_vmalloc_flags_t)0x04u) +#define KASAN_VMALLOC_PAGE_RANGE 0x1 /* Apply existing page range */ +#define KASAN_VMALLOC_TLB_FLUSH 0x2 /* TLB flush */ + #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) #include <linux/pgtable.h> @@ -564,7 +567,8 @@ void kasan_populate_early_vm_area_shadow(void *start, unsigned long size); int kasan_populate_vmalloc(unsigned long addr, unsigned long size); void kasan_release_vmalloc(unsigned long start, unsigned long end, unsigned long free_region_start, - unsigned long free_region_end); + unsigned long free_region_end, + unsigned long flags); #else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ @@ -579,7 +583,8 @@ static inline int kasan_populate_vmalloc(unsigned long start, static inline void kasan_release_vmalloc(unsigned long start, unsigned long end, unsigned long free_region_start, - unsigned long free_region_end) { } + unsigned long free_region_end, + unsigned long flags) { } #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ @@ -614,7 +619,8 @@ static inline int kasan_populate_vmalloc(unsigned long start, static inline void kasan_release_vmalloc(unsigned long start, unsigned long end, unsigned long free_region_start, - unsigned long free_region_end) { } + unsigned long free_region_end, + unsigned long flags) { } static inline void *kasan_unpoison_vmalloc(const void *start, unsigned long size, diff --git 
a/include/linux/kfifo.h b/include/linux/kfifo.h index 564868bdce89..fd743d4c4b4b 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h @@ -37,7 +37,6 @@ */ #include <linux/array_size.h> -#include <linux/dma-mapping.h> #include <linux/spinlock.h> #include <linux/stddef.h> #include <linux/types.h> diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h index 30baae91b225..1f46046080f5 100644 --- a/include/linux/khugepaged.h +++ b/include/linux/khugepaged.h @@ -2,8 +2,6 @@ #ifndef _LINUX_KHUGEPAGED_H #define _LINUX_KHUGEPAGED_H -#include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */ - extern unsigned int khugepaged_max_ptes_none __read_mostly; #ifdef CONFIG_TRANSPARENT_HUGEPAGE extern struct attribute_group khugepaged_attr_group; diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h index 6a3cd1bf4680..93a73c076d16 100644 --- a/include/linux/kmemleak.h +++ b/include/linux/kmemleak.h @@ -26,6 +26,7 @@ extern void kmemleak_free_part(const void *ptr, size_t size) __ref; extern void kmemleak_free_percpu(const void __percpu *ptr) __ref; extern void kmemleak_update_trace(const void *ptr) __ref; extern void kmemleak_not_leak(const void *ptr) __ref; +extern void kmemleak_transient_leak(const void *ptr) __ref; extern void kmemleak_ignore(const void *ptr) __ref; extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref; extern void kmemleak_no_scan(const void *ptr) __ref; @@ -93,6 +94,9 @@ static inline void kmemleak_update_trace(const void *ptr) static inline void kmemleak_not_leak(const void *ptr) { } +static inline void kmemleak_transient_leak(const void *ptr) +{ +} static inline void kmemleak_ignore(const void *ptr) { } diff --git a/include/linux/ksm.h b/include/linux/ksm.h index ec9c05044d4f..6a53ac4885bb 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -13,7 +13,6 @@ #include <linux/pagemap.h> #include <linux/rmap.h> #include <linux/sched.h> -#include <linux/sched/coredump.h> #ifdef CONFIG_KSM int ksm_madvise(struct vm_area_struct *vma, unsigned long start, @@ -91,7 +90,7 @@ struct folio *ksm_might_need_to_copy(struct folio *folio, void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc); void folio_migrate_ksm(struct folio *newfolio, struct folio *folio); -void collect_procs_ksm(struct folio *folio, struct page *page, +void collect_procs_ksm(const struct folio *folio, const struct page *page, struct list_head *to_kill, int force_early); long ksm_process_profit(struct mm_struct *); @@ -123,8 +122,9 @@ static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte) { } -static inline void collect_procs_ksm(struct folio *folio, struct page *page, - struct list_head *to_kill, int force_early) +static inline void collect_procs_ksm(const struct folio *folio, + const struct page *page, struct list_head *to_kill, + int force_early) { } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 45be36e5285f..401439bb21e3 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -97,6 +97,7 @@ #define KVM_PFN_ERR_HWPOISON (KVM_PFN_ERR_MASK + 1) #define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2) #define KVM_PFN_ERR_SIGPENDING (KVM_PFN_ERR_MASK + 3) +#define KVM_PFN_ERR_NEEDS_IO (KVM_PFN_ERR_MASK + 4) /* * error pfns indicate that the gfn is in slot but faild to @@ -153,13 +154,6 @@ static inline bool kvm_is_error_gpa(gpa_t gpa) return gpa == INVALID_GPA; } -#define KVM_ERR_PTR_BAD_PAGE (ERR_PTR(-ENOENT)) - -static inline bool is_error_page(struct page *page) -{ - return IS_ERR(page); -} 
- #define KVM_REQUEST_MASK GENMASK(7,0) #define KVM_REQUEST_NO_WAKEUP BIT(8) #define KVM_REQUEST_WAIT BIT(9) @@ -219,6 +213,7 @@ enum kvm_bus { KVM_PIO_BUS, KVM_VIRTIO_CCW_NOTIFY_BUS, KVM_FAST_MMIO_BUS, + KVM_IOCSR_BUS, KVM_NR_BUSES }; @@ -279,21 +274,19 @@ enum { READING_SHADOW_PAGE_TABLES, }; -#define KVM_UNMAPPED_PAGE ((void *) 0x500 + POISON_POINTER_DELTA) - struct kvm_host_map { /* * Only valid if the 'pfn' is managed by the host kernel (i.e. There is * a 'struct page' for it. When using mem= kernel parameter some memory * can be used as guest memory but they are not managed by host * kernel). - * If 'pfn' is not managed by the host kernel, this field is - * initialized to KVM_UNMAPPED_PAGE. */ + struct page *pinned_page; struct page *page; void *hva; kvm_pfn_t pfn; kvm_pfn_t gfn; + bool writable; }; /* @@ -342,7 +335,8 @@ struct kvm_vcpu { #ifndef __KVM_HAVE_ARCH_WQP struct rcuwait wait; #endif - struct pid __rcu *pid; + struct pid *pid; + rwlock_t pid_lock; int sigset_active; sigset_t sigset; unsigned int halt_poll_ns; @@ -1176,6 +1170,10 @@ static inline bool kvm_memslot_iter_is_valid(struct kvm_memslot_iter *iter, gfn_ kvm_memslot_iter_is_valid(iter, end); \ kvm_memslot_iter_next(iter)) +struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); +struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu); +struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn); + /* * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations: * - create a new memory slot @@ -1214,33 +1212,70 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm); void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot); -int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn, - struct page **pages, int nr_pages); +int kvm_prefetch_pages(struct kvm_memory_slot *slot, gfn_t gfn, + struct page **pages, int nr_pages); + +struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn, bool write); +static inline struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) +{ + return __gfn_to_page(kvm, gfn, true); +} -struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable); unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn); unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn, bool *writable); + +static inline void kvm_release_page_unused(struct page *page) +{ + if (!page) + return; + + put_page(page); +} + void kvm_release_page_clean(struct page *page); void kvm_release_page_dirty(struct page *page); -kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); -kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, - bool *writable); -kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn); -kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn); -kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn, - bool atomic, bool interruptible, bool *async, - bool write_fault, bool *writable, hva_t *hva); - -void kvm_release_pfn_clean(kvm_pfn_t pfn); -void kvm_release_pfn_dirty(kvm_pfn_t pfn); -void kvm_set_pfn_dirty(kvm_pfn_t pfn); -void kvm_set_pfn_accessed(kvm_pfn_t pfn); - -void kvm_release_pfn(kvm_pfn_t pfn, bool dirty); +static inline void kvm_release_faultin_page(struct kvm *kvm, struct page *page, + bool unused, bool dirty) +{ + lockdep_assert_once(lockdep_is_held(&kvm->mmu_lock) || unused); + + if (!page) + 
return; + + /* + * If the page that KVM got from the *primary MMU* is writable, and KVM + * installed or reused a SPTE, mark the page/folio dirty. Note, this + * may mark a folio dirty even if KVM created a read-only SPTE, e.g. if + * the GFN is write-protected. Folios can't be safely marked dirty + * outside of mmu_lock as doing so could race with writeback on the + * folio. As a result, KVM can't mark folios dirty in the fast page + * fault handler, and so KVM must (somewhat) speculatively mark the + * folio dirty if KVM could locklessly make the SPTE writable. + */ + if (unused) + kvm_release_page_unused(page); + else if (dirty) + kvm_release_page_dirty(page); + else + kvm_release_page_clean(page); +} + +kvm_pfn_t __kvm_faultin_pfn(const struct kvm_memory_slot *slot, gfn_t gfn, + unsigned int foll, bool *writable, + struct page **refcounted_page); + +static inline kvm_pfn_t kvm_faultin_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, + bool write, bool *writable, + struct page **refcounted_page) +{ + return __kvm_faultin_pfn(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, + write ? FOLL_WRITE : 0, writable, refcounted_page); +} + int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, int len); int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); @@ -1304,17 +1339,28 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, }) int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); -struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn); unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn); void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn); void mark_page_dirty(struct kvm *kvm, gfn_t gfn); -struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu); -struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn); -int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map); -void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty); +int __kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map, + bool writable); +void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map); + +static inline int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, + struct kvm_host_map *map) +{ + return __kvm_vcpu_map(vcpu, gpa, map, true); +} + +static inline int kvm_vcpu_map_readonly(struct kvm_vcpu *vcpu, gpa_t gpa, + struct kvm_host_map *map) +{ + return __kvm_vcpu_map(vcpu, gpa, map, false); +} + unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn); unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable); int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset, @@ -1686,9 +1732,6 @@ void kvm_arch_sync_events(struct kvm *kvm); int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); -struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn); -bool kvm_is_zone_device_page(struct page *page); - struct kvm_irq_ack_notifier { struct hlist_node link; unsigned gsi; @@ -2382,12 +2425,6 @@ static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) } #endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */ -typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data); - -int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn, - uintptr_t data, const char *name, - struct task_struct 
**thread_ptr); - #ifdef CONFIG_KVM_XFER_TO_GUEST_WORK static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu) { @@ -2461,11 +2498,13 @@ static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn) #ifdef CONFIG_KVM_PRIVATE_MEM int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot, - gfn_t gfn, kvm_pfn_t *pfn, int *max_order); + gfn_t gfn, kvm_pfn_t *pfn, struct page **page, + int *max_order); #else static inline int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, - kvm_pfn_t *pfn, int *max_order) + kvm_pfn_t *pfn, struct page **page, + int *max_order) { KVM_BUG_ON(1, kvm); return -EIO; diff --git a/include/linux/lcd.h b/include/linux/lcd.h index 68703a51dc53..c3ccdff4519a 100644 --- a/include/linux/lcd.h +++ b/include/linux/lcd.h @@ -12,7 +12,11 @@ #include <linux/device.h> #include <linux/mutex.h> #include <linux/notifier.h> -#include <linux/fb.h> + +#define LCD_POWER_ON (0) +#define LCD_POWER_REDUCED (1) // deprecated; don't use in new code +#define LCD_POWER_REDUCED_VSYNC_SUSPEND (2) // deprecated; don't use in new code +#define LCD_POWER_OFF (4) /* Notes on locking: * @@ -30,7 +34,6 @@ */ struct lcd_device; -struct fb_info; struct lcd_properties { /* The maximum value for contrast (read-only) */ @@ -47,11 +50,23 @@ struct lcd_ops { int (*get_contrast)(struct lcd_device *); /* Set LCD panel contrast */ int (*set_contrast)(struct lcd_device *, int contrast); - /* Set LCD panel mode (resolutions ...) */ - int (*set_mode)(struct lcd_device *, struct fb_videomode *); - /* Check if given framebuffer device is the one LCD is bound to; - return 0 if not, !=0 if it is. If NULL, lcd always matches the fb. */ - int (*check_fb)(struct lcd_device *, struct fb_info *); + + /* + * Set LCD panel mode (resolutions ...) + */ + int (*set_mode)(struct lcd_device *lcd, u32 xres, u32 yres); + + /* + * Check if the LCD controls the given display device. This + * operation is optional and if not implemented it is assumed that + * the display is always the one controlled by the LCD. + * + * RETURNS: + * + * If display_dev is NULL or display_dev matches the device controlled by + * the LCD, return true. Otherwise return false. 
+ */ + bool (*controls_device)(struct lcd_device *lcd, struct device *display_device); }; struct lcd_device { diff --git a/include/linux/leds.h b/include/linux/leds.h index e5968c3ed4ae..98f9719c924c 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -171,6 +171,7 @@ struct led_classdev { int new_blink_brightness; void (*flash_resume)(struct led_classdev *led_cdev); + struct workqueue_struct *wq; /* LED workqueue */ struct work_struct set_brightness_work; int delayed_set_value; unsigned long delayed_delay_on; @@ -238,7 +239,7 @@ struct led_classdev { struct kernfs_node *brightness_hw_changed_kn; #endif - /* Ensures consistent access to the LED Flash Class device */ + /* Ensures consistent access to the LED class device */ struct mutex led_access; }; diff --git a/include/linux/libata.h b/include/linux/libata.h index 9b4a6ff03235..c1a85d46eba6 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -270,9 +270,7 @@ enum { /* bits 24:31 of host->flags are reserved for LLD specific flags */ - /* various lengths of time */ - ATA_TMOUT_BOOT = 30000, /* heuristic */ - ATA_TMOUT_BOOT_QUICK = 7000, /* heuristic */ + /* Various lengths of time */ ATA_TMOUT_INTERNAL_QUICK = 5000, ATA_TMOUT_MAX_PARK = 30000, diff --git a/include/linux/list.h b/include/linux/list.h index 5f4b0a39cf46..29a375889fb8 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -687,14 +687,6 @@ static inline void list_splice_tail_init(struct list_head *list, for (pos = (head)->next; !list_is_head(pos, (head)); pos = pos->next) /** - * list_for_each_reverse - iterate backwards over a list - * @pos: the &struct list_head to use as a loop cursor. - * @head: the head for your list. - */ -#define list_for_each_reverse(pos, head) \ - for (pos = (head)->prev; pos != (head); pos = pos->prev) - -/** * list_for_each_rcu - Iterate over a list in an RCU-safe fashion * @pos: the &struct list_head to use as a loop cursor. * @head: the head for your list. 
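With list_for_each_reverse() removed from list.h above, callers that still need tail-to-head traversal can use list_for_each_prev(), which stays in the header. A minimal sketch under that assumption (the dev_list, struct demo_node and handle_node() names are illustrative, not part of this patch):

	/* Hypothetical caller migrating off the deleted list_for_each_reverse(). */
	struct list_head *pos;

	list_for_each_prev(pos, &dev_list) {
		struct demo_node *n = list_entry(pos, struct demo_node, link);

		handle_node(n);
	}
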
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index 5099a8ccd5f4..05c166811f6b 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -32,6 +32,8 @@ struct list_lru_one { struct list_head list; /* may become negative during memcg reparenting */ long nr_items; + /* protects all fields above */ + spinlock_t lock; }; struct list_lru_memcg { @@ -41,11 +43,9 @@ struct list_lru_memcg { }; struct list_lru_node { - /* protects all lists on the node, including per cgroup */ - spinlock_t lock; /* global list, used for the root cgroup in cgroup aware lrus */ struct list_lru_one lru; - long nr_items; + atomic_long_t nr_items; } ____cacheline_aligned_in_smp; struct list_lru { @@ -56,16 +56,28 @@ struct list_lru { bool memcg_aware; struct xarray xa; #endif +#ifdef CONFIG_LOCKDEP + struct lock_class_key *key; +#endif }; void list_lru_destroy(struct list_lru *lru); int __list_lru_init(struct list_lru *lru, bool memcg_aware, - struct lock_class_key *key, struct shrinker *shrinker); + struct shrinker *shrinker); #define list_lru_init(lru) \ - __list_lru_init((lru), false, NULL, NULL) + __list_lru_init((lru), false, NULL) #define list_lru_init_memcg(lru, shrinker) \ - __list_lru_init((lru), true, NULL, shrinker) + __list_lru_init((lru), true, shrinker) + +static inline int list_lru_init_memcg_key(struct list_lru *lru, struct shrinker *shrinker, + struct lock_class_key *key) +{ +#ifdef CONFIG_LOCKDEP + lru->key = key; +#endif + return list_lru_init_memcg(lru, shrinker); +} int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru, gfp_t gfp); @@ -172,7 +184,7 @@ void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item, struct list_head *head); typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item, - struct list_lru_one *list, spinlock_t *lock, void *cb_arg); + struct list_lru_one *list, void *cb_arg); /** * list_lru_walk_one: walk a @lru, isolating and disposing freeable items. 
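Since each list_lru_one now carries its own spinlock, the list_lru_walk_cb typedef above loses its spinlock_t * argument and isolate callbacks receive only the item, the per-cgroup list and the opaque argument. A minimal sketch against the new signature (struct demo_obj, its lru member and can_free() are illustrative assumptions; the walker holds the list's lock while the callback runs):

	static enum lru_status demo_isolate(struct list_head *item,
					    struct list_lru_one *list, void *cb_arg)
	{
		struct demo_obj *obj = container_of(item, struct demo_obj, lru);

		if (!can_free(obj))
			return LRU_SKIP;	/* leave it on the LRU */

		list_lru_isolate(list, item);	/* unlink from this list_lru_one */
		return LRU_REMOVED;
	}
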
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index 61c4b9c41904..c8f0f9458f2c 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -278,9 +278,9 @@ __be32 nlmsvc_lock(struct svc_rqst *, struct nlm_file *, struct nlm_host *, struct nlm_lock *, int, struct nlm_cookie *, int); __be32 nlmsvc_unlock(struct net *net, struct nlm_file *, struct nlm_lock *); -__be32 nlmsvc_testlock(struct svc_rqst *, struct nlm_file *, - struct nlm_host *, struct nlm_lock *, - struct nlm_lock *, struct nlm_cookie *); +__be32 nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file, + struct nlm_host *host, struct nlm_lock *lock, + struct nlm_lock *conflock); __be32 nlmsvc_cancel_blocked(struct net *net, struct nlm_file *, struct nlm_lock *); void nlmsvc_retry_blocked(struct svc_rqst *rqstp); void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *, diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h index 80cca9426761..17d53165d9f2 100644 --- a/include/linux/lockd/xdr.h +++ b/include/linux/lockd/xdr.h @@ -73,8 +73,6 @@ struct nlm_args { u32 fsm_mode; }; -typedef struct nlm_args nlm_args; - /* * Generic lockd result */ diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 217f7abf2cbf..67964dc4db95 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -173,7 +173,7 @@ static inline void lockdep_init_map(struct lockdep_map *lock, const char *name, (lock)->dep_map.lock_type) #define lockdep_set_subclass(lock, sub) \ - lockdep_init_map_type(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\ + lockdep_init_map_type(&(lock)->dep_map, (lock)->dep_map.name, (lock)->dep_map.key, sub,\ (lock)->dep_map.wait_type_inner, \ (lock)->dep_map.wait_type_outer, \ (lock)->dep_map.lock_type) diff --git a/include/linux/logic_pio.h b/include/linux/logic_pio.h index babf4e3c28ba..8f1a9408302f 100644 --- a/include/linux/logic_pio.h +++ b/include/linux/logic_pio.h @@ -17,7 +17,7 @@ enum { struct logic_pio_hwaddr { struct list_head list; - struct fwnode_handle *fwnode; + const struct fwnode_handle *fwnode; resource_size_t hw_start; resource_size_t io_start; resource_size_t size; /* range size populated */ @@ -110,8 +110,8 @@ void logic_outsl(unsigned long addr, const void *buffer, unsigned int count); #endif /* CONFIG_INDIRECT_PIO */ #define MMIO_UPPER_LIMIT (IO_SPACE_LIMIT - PIO_INDIRECT_SIZE) -struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode); -unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode, +struct logic_pio_hwaddr *find_io_range_by_fwnode(const struct fwnode_handle *fwnode); +unsigned long logic_pio_trans_hwaddr(const struct fwnode_handle *fwnode, resource_size_t hw_addr, resource_size_t size); int logic_pio_register_range(struct logic_pio_hwaddr *newrange); void logic_pio_unregister_range(struct logic_pio_hwaddr *range); diff --git a/include/linux/lsm/apparmor.h b/include/linux/lsm/apparmor.h new file mode 100644 index 000000000000..612cbfacb072 --- /dev/null +++ b/include/linux/lsm/apparmor.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Linux Security Module interface to other subsystems. + * AppArmor presents single pointer to an aa_label structure. + */ +#ifndef __LINUX_LSM_APPARMOR_H +#define __LINUX_LSM_APPARMOR_H + +struct aa_label; + +struct lsm_prop_apparmor { +#ifdef CONFIG_SECURITY_APPARMOR + struct aa_label *label; +#endif +}; + +#endif /* ! 
__LINUX_LSM_APPARMOR_H */ diff --git a/include/linux/lsm/bpf.h b/include/linux/lsm/bpf.h new file mode 100644 index 000000000000..8106e206fcef --- /dev/null +++ b/include/linux/lsm/bpf.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Linux Security Module interface to other subsystems. + * BPF may present a single u32 value. + */ +#ifndef __LINUX_LSM_BPF_H +#define __LINUX_LSM_BPF_H +#include <linux/types.h> + +struct lsm_prop_bpf { +#ifdef CONFIG_BPF_LSM + u32 secid; +#endif +}; + +#endif /* ! __LINUX_LSM_BPF_H */ diff --git a/include/linux/lsm/selinux.h b/include/linux/lsm/selinux.h new file mode 100644 index 000000000000..9455a6b5b910 --- /dev/null +++ b/include/linux/lsm/selinux.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Linux Security Module interface to other subsystems. + * SELinux presents a single u32 value which is known as a secid. + */ +#ifndef __LINUX_LSM_SELINUX_H +#define __LINUX_LSM_SELINUX_H +#include <linux/types.h> + +struct lsm_prop_selinux { +#ifdef CONFIG_SECURITY_SELINUX + u32 secid; +#endif +}; + +#endif /* ! __LINUX_LSM_SELINUX_H */ diff --git a/include/linux/lsm/smack.h b/include/linux/lsm/smack.h new file mode 100644 index 000000000000..ff730dd7a734 --- /dev/null +++ b/include/linux/lsm/smack.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Linux Security Module interface to other subsystems. + * Smack presents a pointer into the global Smack label list. + */ +#ifndef __LINUX_LSM_SMACK_H +#define __LINUX_LSM_SMACK_H + +struct smack_known; + +struct lsm_prop_smack { +#ifdef CONFIG_SECURITY_SMACK + struct smack_known *skp; +#endif +}; + +#endif /* ! __LINUX_LSM_SMACK_H */ diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index 9eca013aa5e1..eb2937599cb0 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -176,7 +176,8 @@ LSM_HOOK(int, -EOPNOTSUPP, inode_setsecurity, struct inode *inode, const char *name, const void *value, size_t size, int flags) LSM_HOOK(int, 0, inode_listsecurity, struct inode *inode, char *buffer, size_t buffer_size) -LSM_HOOK(void, LSM_RET_VOID, inode_getsecid, struct inode *inode, u32 *secid) +LSM_HOOK(void, LSM_RET_VOID, inode_getlsmprop, struct inode *inode, + struct lsm_prop *prop) LSM_HOOK(int, 0, inode_copy_up, struct dentry *src, struct cred **new) LSM_HOOK(int, -EOPNOTSUPP, inode_copy_up_xattr, struct dentry *src, const char *name) @@ -217,6 +218,8 @@ LSM_HOOK(int, 0, cred_prepare, struct cred *new, const struct cred *old, LSM_HOOK(void, LSM_RET_VOID, cred_transfer, struct cred *new, const struct cred *old) LSM_HOOK(void, LSM_RET_VOID, cred_getsecid, const struct cred *c, u32 *secid) +LSM_HOOK(void, LSM_RET_VOID, cred_getlsmprop, const struct cred *c, + struct lsm_prop *prop) LSM_HOOK(int, 0, kernel_act_as, struct cred *new, u32 secid) LSM_HOOK(int, 0, kernel_create_files_as, struct cred *new, struct inode *inode) LSM_HOOK(int, 0, kernel_module_request, char *kmod_name) @@ -235,9 +238,9 @@ LSM_HOOK(int, 0, task_fix_setgroups, struct cred *new, const struct cred * old) LSM_HOOK(int, 0, task_setpgid, struct task_struct *p, pid_t pgid) LSM_HOOK(int, 0, task_getpgid, struct task_struct *p) LSM_HOOK(int, 0, task_getsid, struct task_struct *p) -LSM_HOOK(void, LSM_RET_VOID, current_getsecid_subj, u32 *secid) -LSM_HOOK(void, LSM_RET_VOID, task_getsecid_obj, - struct task_struct *p, u32 *secid) +LSM_HOOK(void, LSM_RET_VOID, current_getlsmprop_subj, struct lsm_prop *prop) +LSM_HOOK(void, LSM_RET_VOID, task_getlsmprop_obj, + struct 
task_struct *p, struct lsm_prop *prop) LSM_HOOK(int, 0, task_setnice, struct task_struct *p, int nice) LSM_HOOK(int, 0, task_setioprio, struct task_struct *p, int ioprio) LSM_HOOK(int, 0, task_getioprio, struct task_struct *p) @@ -256,8 +259,8 @@ LSM_HOOK(void, LSM_RET_VOID, task_to_inode, struct task_struct *p, struct inode *inode) LSM_HOOK(int, 0, userns_create, const struct cred *cred) LSM_HOOK(int, 0, ipc_permission, struct kern_ipc_perm *ipcp, short flag) -LSM_HOOK(void, LSM_RET_VOID, ipc_getsecid, struct kern_ipc_perm *ipcp, - u32 *secid) +LSM_HOOK(void, LSM_RET_VOID, ipc_getlsmprop, struct kern_ipc_perm *ipcp, + struct lsm_prop *prop) LSM_HOOK(int, 0, msg_msg_alloc_security, struct msg_msg *msg) LSM_HOOK(void, LSM_RET_VOID, msg_msg_free_security, struct msg_msg *msg) LSM_HOOK(int, 0, msg_queue_alloc_security, struct kern_ipc_perm *perm) @@ -294,6 +297,8 @@ LSM_HOOK(int, -EINVAL, setprocattr, const char *name, void *value, size_t size) LSM_HOOK(int, 0, ismaclabel, const char *name) LSM_HOOK(int, -EOPNOTSUPP, secid_to_secctx, u32 secid, char **secdata, u32 *seclen) +LSM_HOOK(int, -EOPNOTSUPP, lsmprop_to_secctx, struct lsm_prop *prop, + char **secdata, u32 *seclen) LSM_HOOK(int, 0, secctx_to_secid, const char *secdata, u32 seclen, u32 *secid) LSM_HOOK(void, LSM_RET_VOID, release_secctx, char *secdata, u32 seclen) LSM_HOOK(void, LSM_RET_VOID, inode_invalidate_secctx, struct inode *inode) @@ -416,7 +421,8 @@ LSM_HOOK(void, LSM_RET_VOID, key_post_create_or_update, struct key *keyring, LSM_HOOK(int, 0, audit_rule_init, u32 field, u32 op, char *rulestr, void **lsmrule, gfp_t gfp) LSM_HOOK(int, 0, audit_rule_known, struct audit_krule *krule) -LSM_HOOK(int, 0, audit_rule_match, u32 secid, u32 field, u32 op, void *lsmrule) +LSM_HOOK(int, 0, audit_rule_match, struct lsm_prop *prop, u32 field, u32 op, + void *lsmrule) LSM_HOOK(void, LSM_RET_VOID, audit_rule_free, void *lsmrule) #endif /* CONFIG_AUDIT */ diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h index c2c11004085e..cbbcd18d4186 100644 --- a/include/linux/maple_tree.h +++ b/include/linux/maple_tree.h @@ -224,7 +224,7 @@ typedef struct { /* nothing */ } lockdep_map_p; * (set at tree creation time) and dynamic information set under the spinlock. * * Another use of flags are to indicate global states of the tree. This is the - * case with the MAPLE_USE_RCU flag, which indicates the tree is currently in + * case with the MT_FLAGS_USE_RCU flag, which indicates the tree is currently in * RCU mode. This mode was added to allow the tree to reuse nodes instead of * re-allocating and RCU freeing nodes when there is a single user. */ @@ -592,6 +592,20 @@ static __always_inline void mas_reset(struct ma_state *mas) #define mas_for_each(__mas, __entry, __max) \ while (((__entry) = mas_find((__mas), (__max))) != NULL) +/** + * mas_for_each_rev() - Iterate over a range of the maple tree in reverse order. + * @__mas: Maple Tree operation state (maple_state) + * @__entry: Entry retrieved from the tree + * @__min: minimum index to retrieve from the tree + * + * When returned, mas->index and mas->last will hold the entire range for the + * entry. + * + * Note: may return the zero entry. 
+ */ +#define mas_for_each_rev(__mas, __entry, __min) \ + while (((__entry) = mas_find_rev((__mas), (__min))) != NULL) + #ifdef CONFIG_DEBUG_MAPLE_TREE enum mt_dump_format { mt_dump_dec, diff --git a/include/linux/mdio.h b/include/linux/mdio.h index efeca5bd7600..3c3deac57894 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -165,31 +165,12 @@ extern int mdio_set_flag(const struct mdio_if_info *mdio, bool sense); extern int mdio45_links_ok(const struct mdio_if_info *mdio, u32 mmds); extern int mdio45_nway_restart(const struct mdio_if_info *mdio); -extern void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio, - struct ethtool_cmd *ecmd, - u32 npage_adv, u32 npage_lpa); extern void mdio45_ethtool_ksettings_get_npage(const struct mdio_if_info *mdio, struct ethtool_link_ksettings *cmd, u32 npage_adv, u32 npage_lpa); /** - * mdio45_ethtool_gset - get settings for ETHTOOL_GSET - * @mdio: MDIO interface - * @ecmd: Ethtool request structure - * - * Since the CSRs for auto-negotiation using next pages are not fully - * standardised, this function does not attempt to decode them. Use - * mdio45_ethtool_gset_npage() to specify advertisement bits from next - * pages. - */ -static inline void mdio45_ethtool_gset(const struct mdio_if_info *mdio, - struct ethtool_cmd *ecmd) -{ - mdio45_ethtool_gset_npage(mdio, ecmd, 0, 0); -} - -/** * mdio45_ethtool_ksettings_get - get settings for ETHTOOL_GLINKSETTINGS * @mdio: MDIO interface * @cmd: Ethtool request structure diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 34d2da05f2f1..5502aa8e138e 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -299,25 +299,10 @@ struct mem_cgroup { /* For oom notifier event fd */ struct list_head oom_notify; - /* - * Should we move charges of a task when a task is moved into this - * mem_cgroup ? And what type of charges should we move ? - */ - unsigned long move_charge_at_immigrate; - /* taken only while moving_account > 0 */ - spinlock_t move_lock; - unsigned long move_lock_flags; - /* Legacy tcp memory accounting */ bool tcpmem_active; int tcpmem_pressure; - /* - * set > 0 if pages under this cgroup are moving to other cgroup. - */ - atomic_t moving_account; - struct task_struct *move_lock_task; - /* List of events which userspace want to receive */ struct list_head event_list; spinlock_t event_list_lock; @@ -433,9 +418,7 @@ static inline struct obj_cgroup *__folio_objcg(struct folio *folio) * * - the folio lock * - LRU isolation - * - folio_memcg_lock() * - exclusive reference - * - mem_cgroup_trylock_pages() * * For a kmem folio a caller should hold an rcu read lock to protect memcg * associated with a kmem folio from being released. @@ -460,35 +443,6 @@ static inline bool folio_memcg_charged(struct folio *folio) return __folio_memcg(folio) != NULL; } -/** - * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio. - * @folio: Pointer to the folio. - * - * This function assumes that the folio is known to have a - * proper memory cgroup pointer. It's not safe to call this function - * against some type of folios, e.g. slab folios or ex-slab folios. - * - * Return: A pointer to the memory cgroup associated with the folio, - * or NULL. 
- */ -static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio) -{ - unsigned long memcg_data = READ_ONCE(folio->memcg_data); - - VM_BUG_ON_FOLIO(folio_test_slab(folio), folio); - - if (memcg_data & MEMCG_DATA_KMEM) { - struct obj_cgroup *objcg; - - objcg = (void *)(memcg_data & ~OBJEXTS_FLAGS_MASK); - return obj_cgroup_memcg(objcg); - } - - WARN_ON_ONCE(!rcu_read_lock_held()); - - return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK); -} - /* * folio_memcg_check - Get the memory cgroup associated with a folio. * @folio: Pointer to the folio. @@ -504,9 +458,7 @@ static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio) * * - the folio lock * - LRU isolation - * - lock_folio_memcg() * - exclusive reference - * - mem_cgroup_trylock_pages() * * For a kmem folio a caller should hold an rcu read lock to protect memcg * associated with a kmem folio from being released. @@ -1103,10 +1055,9 @@ static inline struct mem_cgroup *folio_memcg(struct folio *folio) return NULL; } -static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio) +static inline bool folio_memcg_charged(struct folio *folio) { - WARN_ON_ONCE(!rcu_read_lock_held()); - return NULL; + return false; } static inline struct mem_cgroup *folio_memcg_check(struct folio *folio) @@ -1282,6 +1233,10 @@ struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css) return NULL; } +static inline void obj_cgroup_get(struct obj_cgroup *objcg) +{ +} + static inline void obj_cgroup_put(struct obj_cgroup *objcg) { } @@ -1760,8 +1715,9 @@ static inline int memcg_kmem_id(struct mem_cgroup *memcg) struct mem_cgroup *mem_cgroup_from_slab_obj(void *p); -static inline void count_objcg_event(struct obj_cgroup *objcg, - enum vm_event_item idx) +static inline void count_objcg_events(struct obj_cgroup *objcg, + enum vm_event_item idx, + unsigned long count) { struct mem_cgroup *memcg; @@ -1770,7 +1726,7 @@ static inline void count_objcg_event(struct obj_cgroup *objcg, rcu_read_lock(); memcg = obj_cgroup_memcg(objcg); - count_memcg_events(memcg, idx, 1); + count_memcg_events(memcg, idx, count); rcu_read_unlock(); } @@ -1825,8 +1781,9 @@ static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p) return NULL; } -static inline void count_objcg_event(struct obj_cgroup *objcg, - enum vm_event_item idx) +static inline void count_objcg_events(struct obj_cgroup *objcg, + enum vm_event_item idx, + unsigned long count) { } @@ -1872,26 +1829,6 @@ static inline bool task_in_memcg_oom(struct task_struct *p) return p->memcg_in_oom; } -void folio_memcg_lock(struct folio *folio); -void folio_memcg_unlock(struct folio *folio); - -/* try to stablize folio_memcg() for all the pages in a memcg */ -static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg) -{ - rcu_read_lock(); - - if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account)) - return true; - - rcu_read_unlock(); - return false; -} - -static inline void mem_cgroup_unlock_pages(void) -{ - rcu_read_unlock(); -} - static inline void mem_cgroup_enter_user_fault(void) { WARN_ON(current->in_user_fault); @@ -1913,26 +1850,6 @@ unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order, return 0; } -static inline void folio_memcg_lock(struct folio *folio) -{ -} - -static inline void folio_memcg_unlock(struct folio *folio) -{ -} - -static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg) -{ - /* to match folio_memcg_rcu() */ - rcu_read_lock(); - return true; -} - -static inline void mem_cgroup_unlock_pages(void) -{ - 
rcu_read_unlock(); -} - static inline bool task_in_memcg_oom(struct task_struct *p) { return false; diff --git a/include/linux/memfd.h b/include/linux/memfd.h index 3f2cf339ceaf..d437e3070850 100644 --- a/include/linux/memfd.h +++ b/include/linux/memfd.h @@ -7,6 +7,7 @@ #ifdef CONFIG_MEMFD_CREATE extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned int arg); struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx); +unsigned int *memfd_file_seals_ptr(struct file *file); #else static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned int a) { @@ -16,6 +17,19 @@ static inline struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx) { return ERR_PTR(-EINVAL); } + +static inline unsigned int *memfd_file_seals_ptr(struct file *file) +{ + return NULL; +} #endif +/* Retrieve memfd seals associated with the file, if any. */ +static inline unsigned int memfd_file_seals(struct file *file) +{ + unsigned int *sealsp = memfd_file_seals_ptr(file); + + return sealsp ? *sealsp : 0; +} + #endif /* __LINUX_MEMFD_H */ diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 1add16f21612..ce9885e0178a 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -47,7 +47,7 @@ struct mempolicy { atomic_t refcnt; unsigned short mode; /* See MPOL_* above */ unsigned short flags; /* See set_mempolicy() MPOL_F_* above */ - nodemask_t nodes; /* interleave/bind/perfer */ + nodemask_t nodes; /* interleave/bind/preferred/etc */ int home_node; /* Home node to use for MPOL_BIND and MPOL_PREFERRED_MANY */ union { diff --git a/include/linux/memstick.h b/include/linux/memstick.h index ebf73d4ee969..107bdcbedf79 100644 --- a/include/linux/memstick.h +++ b/include/linux/memstick.h @@ -293,7 +293,7 @@ struct memstick_host { }; struct memstick_driver { - struct memstick_device_id *id_table; + const struct memstick_device_id *id_table; int (*probe)(struct memstick_dev *card); void (*remove)(struct memstick_dev *card); int (*suspend)(struct memstick_dev *card, diff --git a/include/linux/mfd/88pm886.h b/include/linux/mfd/88pm886.h index 133aa302e492..85eca44f39ab 100644 --- a/include/linux/mfd/88pm886.h +++ b/include/linux/mfd/88pm886.h @@ -31,6 +31,15 @@ #define PM886_INT_WC BIT(1) #define PM886_INT_MASK_MODE BIT(2) +#define PM886_REG_RTC_CNT1 0xd1 +#define PM886_REG_RTC_CNT2 0xd2 +#define PM886_REG_RTC_CNT3 0xd3 +#define PM886_REG_RTC_CNT4 0xd4 +#define PM886_REG_RTC_SPARE1 0xea +#define PM886_REG_RTC_SPARE2 0xeb +#define PM886_REG_RTC_SPARE3 0xec +#define PM886_REG_RTC_SPARE4 0xed +#define PM886_REG_RTC_SPARE5 0xee #define PM886_REG_RTC_SPARE6 0xef #define PM886_REG_BUCK_EN 0x08 diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h index f4dfc1871a95..c3df0e615fbf 100644 --- a/include/linux/mfd/axp20x.h +++ b/include/linux/mfd/axp20x.h @@ -19,6 +19,7 @@ enum axp20x_variants { AXP223_ID, AXP288_ID, AXP313A_ID, + AXP323_ID, AXP717_ID, AXP803_ID, AXP806_ID, @@ -113,6 +114,7 @@ enum axp20x_variants { #define AXP313A_SHUTDOWN_CTRL 0x1a #define AXP313A_IRQ_EN 0x20 #define AXP313A_IRQ_STATE 0x21 +#define AXP323_DCDC_MODE_CTRL2 0x22 #define AXP717_ON_INDICATE 0x00 #define AXP717_PMU_STATUS_2 0x01 @@ -959,7 +961,7 @@ struct axp20x_dev { unsigned long irq_flags; struct regmap *regmap; struct regmap_irq_chip_data *regmap_irqc; - long variant; + enum axp20x_variants variant; int nr_cells; const struct mfd_cell *cells; const struct regmap_config *regmap_cfg; diff --git a/include/linux/mfd/cgbc.h b/include/linux/mfd/cgbc.h new file mode 100644 index 
000000000000..badbec4c7033 --- /dev/null +++ b/include/linux/mfd/cgbc.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Congatec Board Controller driver definitions + * + * Copyright (C) 2024 Bootlin + * Author: Thomas Richard <[email protected]> + */ + +#ifndef _LINUX_MFD_CGBC_H_ +#define _LINUX_MFD_CGBC_H_ + +/** + * struct cgbc_version - Board Controller device version structure + * @feature: Board Controller feature number + * @major: Board Controller major revision + * @minor: Board Controller minor revision + */ +struct cgbc_version { + unsigned char feature; + unsigned char major; + unsigned char minor; +}; + +/** + * struct cgbc_device_data - Internal representation of the Board Controller device + * @io_session: Pointer to the session IO memory + * @io_cmd: Pointer to the command IO memory + * @session: Session id returned by the Board Controller + * @dev: Pointer to kernel device structure + * @version: Board Controller version structure + * @lock: Board Controller mutex + */ +struct cgbc_device_data { + void __iomem *io_session; + void __iomem *io_cmd; + u8 session; + struct device *dev; + struct cgbc_version version; + struct mutex lock; +}; + +int cgbc_command(struct cgbc_device_data *cgbc, void *cmd, unsigned int cmd_size, + void *data, unsigned int data_size, u8 *status); + +#endif /*_LINUX_MFD_CGBC_H_*/ diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h index 8db52324f416..eae82f421414 100644 --- a/include/linux/mfd/da9063/core.h +++ b/include/linux/mfd/da9063/core.h @@ -78,6 +78,7 @@ struct da9063 { enum da9063_type type; unsigned char variant_code; unsigned int flags; + bool use_sw_pm; /* Control interface */ struct regmap *regmap; diff --git a/include/linux/mfd/max5970.h b/include/linux/mfd/max5970.h index 762a7d40c843..fc50e89edfaa 100644 --- a/include/linux/mfd/max5970.h +++ b/include/linux/mfd/max5970.h @@ -16,18 +16,6 @@ #define MAX5978_NUM_SWITCHES 1 #define MAX5970_NUM_LEDS 4 -struct max5970_data { - int num_switches; - u32 irng[MAX5970_NUM_SWITCHES]; - u32 mon_rng[MAX5970_NUM_SWITCHES]; - u32 shunt_micro_ohms[MAX5970_NUM_SWITCHES]; -}; - -enum max5970_chip_type { - TYPE_MAX5978 = 1, - TYPE_MAX5970, -}; - #define MAX5970_REG_CURRENT_L(ch) (0x01 + (ch) * 4) #define MAX5970_REG_CURRENT_H(ch) (0x00 + (ch) * 4) #define MAX5970_REG_VOLTAGE_L(ch) (0x03 + (ch) * 4) diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h index 20c5e02ed9da..c324d548619e 100644 --- a/include/linux/mfd/max77693-private.h +++ b/include/linux/mfd/max77693-private.h @@ -419,17 +419,6 @@ enum max77693_haptic_reg { #define MAX77693_CONFIG2_MEN 6 #define MAX77693_CONFIG2_HTYP 5 -enum max77693_irq_source { - LED_INT = 0, - TOPSYS_INT, - CHG_INT, - MUIC_INT1, - MUIC_INT2, - MUIC_INT3, - - MAX77693_IRQ_GROUP_NR, -}; - #define SRC_IRQ_CHARGER BIT(0) #define SRC_IRQ_TOP BIT(1) #define SRC_IRQ_FLASH BIT(2) diff --git a/include/linux/mfd/mt6328/core.h b/include/linux/mfd/mt6328/core.h new file mode 100644 index 000000000000..9a08aed72b9f --- /dev/null +++ b/include/linux/mfd/mt6328/core.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015 MediaTek Inc.
+ * Copyright (c) 2022 Yassine Oudjana <[email protected]> + */ + +#ifndef __MFD_MT6328_CORE_H__ +#define __MFD_MT6328_CORE_H__ + +enum mt6328_irq_status_numbers { + MT6328_IRQ_STATUS_PWRKEY = 0, + MT6328_IRQ_STATUS_HOMEKEY, + MT6328_IRQ_STATUS_PWRKEY_R, + MT6328_IRQ_STATUS_HOMEKEY_R, + MT6328_IRQ_STATUS_THR_H, + MT6328_IRQ_STATUS_THR_L, + MT6328_IRQ_STATUS_BAT_H, + MT6328_IRQ_STATUS_BAT_L, + MT6328_IRQ_STATUS_RTC, + MT6328_IRQ_STATUS_AUDIO, + MT6328_IRQ_STATUS_ACCDET, + MT6328_IRQ_STATUS_ACCDET_EINT, + MT6328_IRQ_STATUS_ACCDET_NEGV, + MT6328_IRQ_STATUS_NI_LBAT_INT, + MT6328_IRQ_STATUS_VPROC_OC = 16, + MT6328_IRQ_STATUS_VSYS_OC, + MT6328_IRQ_STATUS_VLTE_OC, + MT6328_IRQ_STATUS_VCORE_OC, + MT6328_IRQ_STATUS_VPA_OC, + MT6328_IRQ_STATUS_LDO_OC, + MT6328_IRQ_STATUS_BAT2_H, + MT6328_IRQ_STATUS_BAT2_L, + MT6328_IRQ_STATUS_VISMPS0_H, + MT6328_IRQ_STATUS_VISMPS0_L, + MT6328_IRQ_STATUS_AUXADC_IMP, + MT6328_IRQ_STATUS_OV = 32, + MT6328_IRQ_STATUS_BVALID_DET, + MT6328_IRQ_STATUS_VBATON_HV, + MT6328_IRQ_STATUS_VBATON_UNDET, + MT6328_IRQ_STATUS_WATCHDOG, + MT6328_IRQ_STATUS_PCHR_CM_VDEC, + MT6328_IRQ_STATUS_CHRDET, + MT6328_IRQ_STATUS_PCHR_CM_VINC, + MT6328_IRQ_STATUS_FG_BAT_H, + MT6328_IRQ_STATUS_FG_BAT_L, + MT6328_IRQ_STATUS_FG_CUR_H, + MT6328_IRQ_STATUS_FG_CUR_L, + MT6328_IRQ_STATUS_FG_ZCV, + MT6328_IRQ_STATUS_SPKL_D, + MT6328_IRQ_STATUS_SPKL_AB, +}; + +#endif /* __MFD_MT6323_CORE_H__ */ diff --git a/include/linux/mfd/mt6328/registers.h b/include/linux/mfd/mt6328/registers.h new file mode 100644 index 000000000000..8199aaea27b9 --- /dev/null +++ b/include/linux/mfd/mt6328/registers.h @@ -0,0 +1,822 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Yassine Oudjana <[email protected]> + */ + +#ifndef __MFD_MT6328_REGISTERS_H__ +#define __MFD_MT6328_REGISTERS_H__ + +/* PMIC Registers */ +#define MT6328_STRUP_CON0 0x0000 +#define MT6328_STRUP_CON2 0x0002 +#define MT6328_STRUP_CON3 0x0004 +#define MT6328_STRUP_CON4 0x0006 +#define MT6328_STRUP_CON5 0x0008 +#define MT6328_STRUP_CON6 0x000a +#define MT6328_STRUP_CON7 0x000c +#define MT6328_STRUP_CON8 0x000e +#define MT6328_STRUP_CON9 0x0010 +#define MT6328_STRUP_CON10 0x0012 +#define MT6328_STRUP_CON11 0x0014 +#define MT6328_STRUP_CON12 0x0016 +#define MT6328_STRUP_CON13 0x0018 +#define MT6328_STRUP_CON14 0x001a +#define MT6328_STRUP_CON15 0x001c +#define MT6328_STRUP_CON16 0x001e +#define MT6328_STRUP_CON17 0x0020 +#define MT6328_STRUP_CON18 0x0022 +#define MT6328_STRUP_CON19 0x0024 +#define MT6328_STRUP_CON20 0x0026 +#define MT6328_STRUP_CON21 0x0028 +#define MT6328_STRUP_CON22 0x002a +#define MT6328_STRUP_CON23 0x002c +#define MT6328_STRUP_CON24 0x002e +#define MT6328_STRUP_CON25 0x0030 +#define MT6328_STRUP_CON26 0x0032 +#define MT6328_STRUP_CON27 0x0034 +#define MT6328_STRUP_CON28 0x0036 +#define MT6328_STRUP_CON29 0x0038 +#define MT6328_STRUP_CON30 0x003a +#define MT6328_STRUP_CON31 0x003c +#define MT6328_STRUP_CON32 0x003e +#define MT6328_STRUP_ANA_CON0 0x0040 +#define MT6328_HWCID 0x0200 +#define MT6328_SWCID 0x0202 +#define MT6328_TOP_CON 0x0204 +#define MT6328_TEST_OUT 0x0206 +#define MT6328_TEST_CON0 0x0208 +#define MT6328_TEST_CON1 0x020a +#define MT6328_TESTMODE_SW 0x020c +#define MT6328_EN_STATUS0 0x020e +#define MT6328_EN_STATUS1 0x0210 +#define MT6328_EN_STATUS2 0x0212 +#define MT6328_OCSTATUS0 0x0214 +#define MT6328_OCSTATUS1 0x0216 +#define MT6328_OCSTATUS2 0x0218 +#define MT6328_PGDEBSTATUS 0x021a +#define MT6328_PGSTATUS 0x021c +#define MT6328_THERMALSTATUS 0x021e +#define MT6328_TOPSTATUS 0x0220 +#define 
MT6328_TDSEL_CON 0x0222 +#define MT6328_RDSEL_CON 0x0224 +#define MT6328_SMT_CON0 0x0226 +#define MT6328_SMT_CON1 0x0228 +#define MT6328_SMT_CON2 0x022a +#define MT6328_DRV_CON0 0x022c +#define MT6328_DRV_CON1 0x022e +#define MT6328_DRV_CON2 0x0230 +#define MT6328_DRV_CON3 0x0232 +#define MT6328_TOP_STATUS 0x0234 +#define MT6328_TOP_STATUS_SET 0x0236 +#define MT6328_TOP_STATUS_CLR 0x0238 +#define MT6328_RGS_ANA_MON 0x023a +#define MT6328_TOP_CKPDN_CON0 0x023c +#define MT6328_TOP_CKPDN_CON0_SET 0x023e +#define MT6328_TOP_CKPDN_CON0_CLR 0x0240 +#define MT6328_TOP_CKPDN_CON1 0x0242 +#define MT6328_TOP_CKPDN_CON1_SET 0x0244 +#define MT6328_TOP_CKPDN_CON1_CLR 0x0246 +#define MT6328_TOP_CKPDN_CON2 0x0248 +#define MT6328_TOP_CKPDN_CON2_SET 0x024a +#define MT6328_TOP_CKPDN_CON2_CLR 0x024c +#define MT6328_TOP_CKPDN_CON3 0x024e +#define MT6328_TOP_CKPDN_CON3_SET 0x0250 +#define MT6328_TOP_CKPDN_CON3_CLR 0x0252 +#define MT6328_TOP_CKPDN_CON4 0x0254 +#define MT6328_TOP_CKPDN_CON4_SET 0x0256 +#define MT6328_TOP_CKPDN_CON4_CLR 0x0258 +#define MT6328_TOP_CKSEL_CON0 0x025a +#define MT6328_TOP_CKSEL_CON0_SET 0x025c +#define MT6328_TOP_CKSEL_CON0_CLR 0x025e +#define MT6328_TOP_CKSEL_CON1 0x0260 +#define MT6328_TOP_CKSEL_CON1_SET 0x0262 +#define MT6328_TOP_CKSEL_CON1_CLR 0x0264 +#define MT6328_TOP_CKSEL_CON2 0x0266 +#define MT6328_TOP_CKSEL_CON2_SET 0x0268 +#define MT6328_TOP_CKSEL_CON2_CLR 0x026a +#define MT6328_TOP_CKDIVSEL_CON0 0x026c +#define MT6328_TOP_CKDIVSEL_CON0_SET 0x026e +#define MT6328_TOP_CKDIVSEL_CON0_CLR 0x0270 +#define MT6328_TOP_CKDIVSEL_CON1 0x0272 +#define MT6328_TOP_CKDIVSEL_CON1_SET 0x0274 +#define MT6328_TOP_CKDIVSEL_CON1_CLR 0x0276 +#define MT6328_TOP_CKHWEN_CON0 0x0278 +#define MT6328_TOP_CKHWEN_CON0_SET 0x027a +#define MT6328_TOP_CKHWEN_CON0_CLR 0x027c +#define MT6328_TOP_CKHWEN_CON1 0x027e +#define MT6328_TOP_CKHWEN_CON1_SET 0x0280 +#define MT6328_TOP_CKHWEN_CON1_CLR 0x0282 +#define MT6328_TOP_CKTST_CON0 0x0284 +#define MT6328_TOP_CKTST_CON1 0x0286 +#define MT6328_TOP_CKTST_CON2 0x0288 +#define MT6328_TOP_CLKSQ 0x028a +#define MT6328_TOP_CLKSQ_SET 0x028c +#define MT6328_TOP_CLKSQ_CLR 0x028e +#define MT6328_TOP_CLKSQ_RTC 0x0290 +#define MT6328_TOP_CLKSQ_RTC_SET 0x0292 +#define MT6328_TOP_CLKSQ_RTC_CLR 0x0294 +#define MT6328_TOP_CLK_TRIM 0x0296 +#define MT6328_TOP_RST_CON0 0x0298 +#define MT6328_TOP_RST_CON0_SET 0x029a +#define MT6328_TOP_RST_CON0_CLR 0x029c +#define MT6328_TOP_RST_CON1 0x029e +#define MT6328_TOP_RST_MISC 0x02a0 +#define MT6328_TOP_RST_MISC_SET 0x02a2 +#define MT6328_TOP_RST_MISC_CLR 0x02a4 +#define MT6328_TOP_RST_STATUS 0x02a6 +#define MT6328_TOP_RST_STATUS_SET 0x02a8 +#define MT6328_TOP_RST_STATUS_CLR 0x02aa +#define MT6328_INT_CON0 0x02ac +#define MT6328_INT_CON0_SET 0x02ae +#define MT6328_INT_CON0_CLR 0x02b0 +#define MT6328_INT_CON1 0x02b2 +#define MT6328_INT_CON1_SET 0x02b4 +#define MT6328_INT_CON1_CLR 0x02b6 +#define MT6328_INT_CON2 0x02b8 +#define MT6328_INT_CON2_SET 0x02ba +#define MT6328_INT_CON2_CLR 0x02bc +#define MT6328_INT_MISC_CON 0x02be +#define MT6328_INT_MISC_CON_SET 0x02c0 +#define MT6328_INT_MISC_CON_CLR 0x02c2 +#define MT6328_INT_STATUS0 0x02c4 +#define MT6328_INT_STATUS1 0x02c6 +#define MT6328_INT_STATUS2 0x02c8 +#define MT6328_OC_GEAR_0 0x02ca +#define MT6328_FQMTR_CON0 0x02cc +#define MT6328_FQMTR_CON1 0x02ce +#define MT6328_FQMTR_CON2 0x02d0 +#define MT6328_RG_SPI_CON 0x02d2 +#define MT6328_DEW_DIO_EN 0x02d4 +#define MT6328_DEW_READ_TEST 0x02d6 +#define MT6328_DEW_WRITE_TEST 0x02d8 +#define MT6328_DEW_CRC_SWRST 0x02da +#define MT6328_DEW_CRC_EN 
0x02dc +#define MT6328_DEW_CRC_VAL 0x02de +#define MT6328_DEW_DBG_MON_SEL 0x02e0 +#define MT6328_DEW_CIPHER_KEY_SEL 0x02e2 +#define MT6328_DEW_CIPHER_IV_SEL 0x02e4 +#define MT6328_DEW_CIPHER_EN 0x02e6 +#define MT6328_DEW_CIPHER_RDY 0x02e8 +#define MT6328_DEW_CIPHER_MODE 0x02ea +#define MT6328_DEW_CIPHER_SWRST 0x02ec +#define MT6328_DEW_RDDMY_NO 0x02ee +#define MT6328_INT_TYPE_CON0 0x02f0 +#define MT6328_INT_TYPE_CON0_SET 0x02f2 +#define MT6328_INT_TYPE_CON0_CLR 0x02f4 +#define MT6328_INT_TYPE_CON1 0x02f6 +#define MT6328_INT_TYPE_CON1_SET 0x02f8 +#define MT6328_INT_TYPE_CON1_CLR 0x02fa +#define MT6328_INT_TYPE_CON2 0x02fc +#define MT6328_INT_TYPE_CON2_SET 0x02fe +#define MT6328_INT_TYPE_CON2_CLR 0x0300 +#define MT6328_INT_STA 0x0302 +#define MT6328_BUCK_ALL_CON0 0x0400 +#define MT6328_BUCK_ALL_CON1 0x0402 +#define MT6328_BUCK_ALL_CON2 0x0404 +#define MT6328_BUCK_ALL_CON3 0x0406 +#define MT6328_BUCK_ALL_CON4 0x0408 +#define MT6328_BUCK_ALL_CON5 0x040a +#define MT6328_BUCK_ALL_CON6 0x040c +#define MT6328_BUCK_ALL_CON9 0x040e +#define MT6328_BUCK_ALL_CON12 0x0410 +#define MT6328_BUCK_ALL_CON13 0x0412 +#define MT6328_BUCK_ALL_CON14 0x0414 +#define MT6328_BUCK_ALL_CON16 0x0416 +#define MT6328_BUCK_ALL_CON18 0x0418 +#define MT6328_BUCK_ALL_CON19 0x041a +#define MT6328_BUCK_ALL_CON20 0x041c +#define MT6328_BUCK_ALL_CON21 0x041e +#define MT6328_BUCK_ALL_CON22 0x0420 +#define MT6328_BUCK_ALL_CON23 0x0422 +#define MT6328_BUCK_ALL_CON24 0x0424 +#define MT6328_BUCK_ALL_CON25 0x0426 +#define MT6328_BUCK_ALL_CON26 0x0428 +#define MT6328_BUCK_ALL_CON27 0x042a +#define MT6328_BUCK_ALL_CON28 0x042c +#define MT6328_SMPS_TOP_ANA_CON0 0x042e +#define MT6328_SMPS_TOP_ANA_CON1 0x0430 +#define MT6328_SMPS_TOP_ANA_CON2 0x0432 +#define MT6328_SMPS_TOP_ANA_CON3 0x0434 +#define MT6328_SMPS_TOP_ANA_CON4 0x0436 +#define MT6328_SMPS_TOP_ANA_CON5 0x0438 +#define MT6328_SMPS_TOP_ANA_CON6 0x043a +#define MT6328_SMPS_TOP_ANA_CON7 0x043c +#define MT6328_SMPS_TOP_ANA_CON8 0x043e +#define MT6328_VCORE_ANA_CON0 0x0440 +#define MT6328_VCORE_ANA_CON1 0x0442 +#define MT6328_VCORE_ANA_CON2 0x0444 +#define MT6328_VCORE_ANA_CON3 0x0446 +#define MT6328_VCORE_ANA_CON4 0x0448 +#define MT6328_VSYS22_ANA_CON0 0x044a +#define MT6328_VSYS22_ANA_CON1 0x044c +#define MT6328_VSYS22_ANA_CON2 0x044e +#define MT6328_VSYS22_ANA_CON3 0x0450 +#define MT6328_VSYS22_ANA_CON4 0x0452 +#define MT6328_VPROC_ANA_CON0 0x0454 +#define MT6328_VPROC_ANA_CON1 0x0456 +#define MT6328_VPROC_ANA_CON2 0x0458 +#define MT6328_VPROC_ANA_CON3 0x045a +#define MT6328_VPROC_ANA_CON4 0x045c +#define MT6328_OSC32_ANA_CON0 0x045e +#define MT6328_OSC32_ANA_CON1 0x0460 +#define MT6328_VPA_ANA_CON0 0x0462 +#define MT6328_VPA_ANA_CON1 0x0464 +#define MT6328_VPA_ANA_CON2 0x0466 +#define MT6328_VPA_ANA_CON3 0x0468 +#define MT6328_VLTE_ANA_CON0 0x046a +#define MT6328_VLTE_ANA_CON1 0x046c +#define MT6328_VLTE_ANA_CON2 0x046e +#define MT6328_VLTE_ANA_CON3 0x0470 +#define MT6328_VLTE_ANA_CON4 0x0472 +#define MT6328_VPROC_CON0 0x0474 +#define MT6328_VPROC_CON1 0x0476 +#define MT6328_VPROC_CON2 0x0478 +#define MT6328_VPROC_CON3 0x047a +#define MT6328_VPROC_CON4 0x047c +#define MT6328_VPROC_CON5 0x047e +#define MT6328_VPROC_CON6 0x0480 +#define MT6328_VPROC_CON7 0x0482 +#define MT6328_VPROC_CON8 0x0484 +#define MT6328_VPROC_CON9 0x0486 +#define MT6328_VPROC_CON10 0x0488 +#define MT6328_VPROC_CON11 0x048a +#define MT6328_VPROC_CON12 0x048c +#define MT6328_VPROC_CON13 0x048e +#define MT6328_VPROC_CON14 0x0490 +#define MT6328_VPROC_CON15 0x0492 +#define MT6328_VPROC_CON16 0x0494 +#define 
MT6328_VPROC_CON17 0x0496 +#define MT6328_VPROC_CON18 0x0498 +#define MT6328_VPROC_CON19 0x049a +#define MT6328_VSRAM_CON0 0x049c +#define MT6328_VSRAM_CON1 0x049e +#define MT6328_VSRAM_CON2 0x04a0 +#define MT6328_VSRAM_CON3 0x04a2 +#define MT6328_VSRAM_CON4 0x04a4 +#define MT6328_VSRAM_CON5 0x04a6 +#define MT6328_VSRAM_CON6 0x04a8 +#define MT6328_VSRAM_CON7 0x04aa +#define MT6328_VSRAM_CON8 0x04ac +#define MT6328_VSRAM_CON9 0x04ae +#define MT6328_VSRAM_CON10 0x04b0 +#define MT6328_VSRAM_CON11 0x04b2 +#define MT6328_VSRAM_CON12 0x04b4 +#define MT6328_VSRAM_CON13 0x04b6 +#define MT6328_VSRAM_CON14 0x04b8 +#define MT6328_VSRAM_CON15 0x04ba +#define MT6328_VSRAM_CON16 0x04bc +#define MT6328_VSRAM_CON17 0x04be +#define MT6328_VSRAM_CON18 0x04c0 +#define MT6328_VSRAM_CON19 0x04c2 +#define MT6328_VLTE_CON0 0x04c4 +#define MT6328_VLTE_CON1 0x04c6 +#define MT6328_VLTE_CON2 0x04c8 +#define MT6328_VLTE_CON3 0x04ca +#define MT6328_VLTE_CON4 0x04cc +#define MT6328_VLTE_CON5 0x04ce +#define MT6328_VLTE_CON6 0x04d0 +#define MT6328_VLTE_CON7 0x04d2 +#define MT6328_VLTE_CON8 0x04d4 +#define MT6328_VLTE_CON9 0x04d6 +#define MT6328_VLTE_CON10 0x04d8 +#define MT6328_VLTE_CON11 0x04da +#define MT6328_VLTE_CON12 0x04dc +#define MT6328_VLTE_CON13 0x04de +#define MT6328_VLTE_CON14 0x04e0 +#define MT6328_VLTE_CON15 0x04e2 +#define MT6328_VLTE_CON16 0x04e4 +#define MT6328_VLTE_CON17 0x04e6 +#define MT6328_VLTE_CON18 0x04e8 +#define MT6328_VLTE_CON19 0x04ea +#define MT6328_VCORE1_CON0 0x0600 +#define MT6328_VCORE1_CON1 0x0602 +#define MT6328_VCORE1_CON2 0x0604 +#define MT6328_VCORE1_CON3 0x0606 +#define MT6328_VCORE1_CON4 0x0608 +#define MT6328_VCORE1_CON5 0x060a +#define MT6328_VCORE1_CON6 0x060c +#define MT6328_VCORE1_CON7 0x060e +#define MT6328_VCORE1_CON8 0x0610 +#define MT6328_VCORE1_CON9 0x0612 +#define MT6328_VCORE1_CON10 0x0614 +#define MT6328_VCORE1_CON11 0x0616 +#define MT6328_VCORE1_CON12 0x0618 +#define MT6328_VCORE1_CON13 0x061a +#define MT6328_VCORE1_CON14 0x061c +#define MT6328_VCORE1_CON15 0x061e +#define MT6328_VCORE1_CON16 0x0620 +#define MT6328_VCORE1_CON17 0x0622 +#define MT6328_VCORE1_CON18 0x0624 +#define MT6328_VCORE1_CON19 0x0626 +#define MT6328_VSYS22_CON0 0x0628 +#define MT6328_VSYS22_CON1 0x062a +#define MT6328_VSYS22_CON2 0x062c +#define MT6328_VSYS22_CON3 0x062e +#define MT6328_VSYS22_CON4 0x0630 +#define MT6328_VSYS22_CON5 0x0632 +#define MT6328_VSYS22_CON6 0x0634 +#define MT6328_VSYS22_CON7 0x0636 +#define MT6328_VSYS22_CON8 0x0638 +#define MT6328_VSYS22_CON9 0x063a +#define MT6328_VSYS22_CON10 0x063c +#define MT6328_VSYS22_CON11 0x063e +#define MT6328_VSYS22_CON12 0x0640 +#define MT6328_VSYS22_CON13 0x0642 +#define MT6328_VSYS22_CON14 0x0644 +#define MT6328_VSYS22_CON15 0x0646 +#define MT6328_VSYS22_CON16 0x0648 +#define MT6328_VSYS22_CON17 0x064a +#define MT6328_VSYS22_CON18 0x064c +#define MT6328_VSYS22_CON19 0x064e +#define MT6328_VPA_CON0 0x0650 +#define MT6328_VPA_CON1 0x0652 +#define MT6328_VPA_CON2 0x0654 +#define MT6328_VPA_CON3 0x0656 +#define MT6328_VPA_CON4 0x0658 +#define MT6328_VPA_CON5 0x065a +#define MT6328_VPA_CON6 0x065c +#define MT6328_VPA_CON7 0x065e +#define MT6328_VPA_CON8 0x0660 +#define MT6328_VPA_CON9 0x0662 +#define MT6328_VPA_CON10 0x0664 +#define MT6328_VPA_CON11 0x0666 +#define MT6328_VPA_CON12 0x0668 +#define MT6328_VPA_CON13 0x066a +#define MT6328_VPA_CON14 0x066c +#define MT6328_VPA_CON15 0x066e +#define MT6328_VPA_CON16 0x0670 +#define MT6328_VPA_CON17 0x0672 +#define MT6328_VPA_CON18 0x0674 +#define MT6328_VPA_CON19 0x0676 +#define MT6328_VPA_CON20 
0x0678 +#define MT6328_VPA_CON21 0x067a +#define MT6328_VPA_CON22 0x067c +#define MT6328_VPA_CON23 0x067e +#define MT6328_VPA_CON24 0x0680 +#define MT6328_BUCK_K_CON0 0x0682 +#define MT6328_BUCK_K_CON1 0x0684 +#define MT6328_BUCK_K_CON2 0x0686 +#define MT6328_BUCK_K_CON3 0x0688 +#define MT6328_ZCD_CON0 0x0800 +#define MT6328_ZCD_CON1 0x0802 +#define MT6328_ZCD_CON2 0x0804 +#define MT6328_ZCD_CON3 0x0806 +#define MT6328_ZCD_CON4 0x0808 +#define MT6328_ZCD_CON5 0x080a +#define MT6328_ISINK0_CON0 0x080c +#define MT6328_ISINK0_CON1 0x080e +#define MT6328_ISINK0_CON2 0x0810 +#define MT6328_ISINK0_CON3 0x0812 +#define MT6328_ISINK1_CON0 0x0814 +#define MT6328_ISINK1_CON1 0x0816 +#define MT6328_ISINK1_CON2 0x0818 +#define MT6328_ISINK1_CON3 0x081a +#define MT6328_ISINK2_CON1 0x081c +#define MT6328_ISINK3_CON1 0x081e +#define MT6328_ISINK_ANA0 0x0820 +#define MT6328_ISINK_ANA1 0x0822 +#define MT6328_ISINK_PHASE_DLY 0x0824 +#define MT6328_ISINK_SFSTR 0x0826 +#define MT6328_ISINK_EN_CTRL 0x0828 +#define MT6328_ISINK_MODE_CTRL 0x082a +#define MT6328_VTCXO_0_CON0 0x0a00 +#define MT6328_VTCXO_1_CON0 0x0a02 +#define MT6328_VAUD28_CON0 0x0a04 +#define MT6328_VAUX18_CON0 0x0a06 +#define MT6328_VRF18_0_CON0 0x0a08 +#define MT6328_VRF18_0_CON1 0x0a0a +#define MT6328_VCAMA_CON0 0x0a0c +#define MT6328_VCN28_CON0 0x0a0e +#define MT6328_VCN33_CON0 0x0a10 +#define MT6328_VCN33_CON1 0x0a12 +#define MT6328_VCN33_CON2 0x0a14 +#define MT6328_VRF18_1_CON0 0x0a16 +#define MT6328_VRF18_1_CON1 0x0a18 +#define MT6328_VUSB33_CON0 0x0a1a +#define MT6328_VMCH_CON0 0x0a1c +#define MT6328_VMCH_CON1 0x0a1e +#define MT6328_VMC_CON0 0x0a20 +#define MT6328_VMC_CON1 0x0a22 +#define MT6328_VEMC_3V3_CON0 0x0a24 +#define MT6328_VEMC_3V3_CON1 0x0a26 +#define MT6328_VIO28_CON0 0x0a28 +#define MT6328_VCAMAF_CON0 0x0a2a +#define MT6328_VGP1_CON0 0x0a2c +#define MT6328_VGP1_CON1 0x0a2e +#define MT6328_VEFUSE_CON0 0x0a30 +#define MT6328_VSIM1_CON0 0x0a32 +#define MT6328_VSIM2_CON0 0x0a34 +#define MT6328_VIO18_CON0 0x0a36 +#define MT6328_VIBR_CON0 0x0a38 +#define MT6328_VCN18_CON0 0x0a3a +#define MT6328_VCAM_CON0 0x0a3c +#define MT6328_VCAMIO_CON0 0x0a3e +#define MT6328_LDO_VSRAM_CON0 0x0a40 +#define MT6328_LDO_VSRAM_CON1 0x0a42 +#define MT6328_VTREF_CON0 0x0a44 +#define MT6328_VM_CON0 0x0a46 +#define MT6328_VM_CON1 0x0a48 +#define MT6328_VRTC_CON0 0x0a4a +#define MT6328_LDO_OCFB0 0x0a4c +#define MT6328_ALDO_ANA_CON0 0x0a4e +#define MT6328_ADLDO_ANA_CON1 0x0a50 +#define MT6328_ADLDO_ANA_CON2 0x0a52 +#define MT6328_ADLDO_ANA_CON3 0x0a54 +#define MT6328_ADLDO_ANA_CON4 0x0a56 +#define MT6328_ADLDO_ANA_CON5 0x0a58 +#define MT6328_ADLDO_ANA_CON6 0x0a5a +#define MT6328_ADLDO_ANA_CON7 0x0a5c +#define MT6328_ADLDO_ANA_CON8 0x0a5e +#define MT6328_ADLDO_ANA_CON9 0x0a60 +#define MT6328_ADLDO_ANA_CON10 0x0a62 +#define MT6328_ADLDO_ANA_CON11 0x0a64 +#define MT6328_ADLDO_ANA_CON12 0x0a66 +#define MT6328_ADLDO_ANA_CON13 0x0a68 +#define MT6328_DLDO_ANA_CON0 0x0a6a +#define MT6328_DLDO_ANA_CON1 0x0a6c +#define MT6328_DLDO_ANA_CON2 0x0a6e +#define MT6328_DLDO_ANA_CON3 0x0a70 +#define MT6328_DLDO_ANA_CON4 0x0a72 +#define MT6328_DLDO_ANA_CON5 0x0a74 +#define MT6328_SLDO_ANA_CON0 0x0a76 +#define MT6328_SLDO_ANA_CON1 0x0a78 +#define MT6328_SLDO_ANA_CON2 0x0a7a +#define MT6328_SLDO_ANA_CON3 0x0a7c +#define MT6328_SLDO_ANA_CON4 0x0a7e +#define MT6328_SLDO_ANA_CON5 0x0a80 +#define MT6328_SLDO_ANA_CON6 0x0a82 +#define MT6328_SLDO_ANA_CON7 0x0a84 +#define MT6328_SLDO_ANA_CON8 0x0a86 +#define MT6328_SLDO_ANA_CON9 0x0a88 +#define MT6328_SLDO_ANA_CON10 0x0a8a +#define 
MT6328_LDO_RSV_CON0 0x0a8c +#define MT6328_LDO_RSV_CON1 0x0a8e +#define MT6328_SPK_CON0 0x0a90 +#define MT6328_SPK_CON1 0x0a92 +#define MT6328_SPK_CON2 0x0a94 +#define MT6328_SPK_CON3 0x0a96 +#define MT6328_SPK_CON4 0x0a98 +#define MT6328_SPK_CON5 0x0a9a +#define MT6328_SPK_CON6 0x0a9c +#define MT6328_SPK_CON7 0x0a9e +#define MT6328_SPK_CON8 0x0aa0 +#define MT6328_SPK_CON9 0x0aa2 +#define MT6328_SPK_CON10 0x0aa4 +#define MT6328_SPK_CON11 0x0aa6 +#define MT6328_SPK_CON12 0x0aa8 +#define MT6328_SPK_CON13 0x0aaa +#define MT6328_SPK_CON14 0x0aac +#define MT6328_SPK_CON15 0x0aae +#define MT6328_SPK_CON16 0x0ab0 +#define MT6328_SPK_ANA_CON0 0x0ab2 +#define MT6328_SPK_ANA_CON1 0x0ab4 +#define MT6328_SPK_ANA_CON3 0x0ab6 +#define MT6328_OTP_CON0 0x0c00 +#define MT6328_OTP_CON1 0x0c02 +#define MT6328_OTP_CON2 0x0c04 +#define MT6328_OTP_CON3 0x0c06 +#define MT6328_OTP_CON4 0x0c08 +#define MT6328_OTP_CON5 0x0c0a +#define MT6328_OTP_CON6 0x0c0c +#define MT6328_OTP_CON7 0x0c0e +#define MT6328_OTP_CON8 0x0c10 +#define MT6328_OTP_CON9 0x0c12 +#define MT6328_OTP_CON10 0x0c14 +#define MT6328_OTP_CON11 0x0c16 +#define MT6328_OTP_CON12 0x0c18 +#define MT6328_OTP_CON13 0x0c1a +#define MT6328_OTP_CON14 0x0c1c +#define MT6328_OTP_DOUT_0_15 0x0c1e +#define MT6328_OTP_DOUT_16_31 0x0c20 +#define MT6328_OTP_DOUT_32_47 0x0c22 +#define MT6328_OTP_DOUT_48_63 0x0c24 +#define MT6328_OTP_DOUT_64_79 0x0c26 +#define MT6328_OTP_DOUT_80_95 0x0c28 +#define MT6328_OTP_DOUT_96_111 0x0c2a +#define MT6328_OTP_DOUT_112_127 0x0c2c +#define MT6328_OTP_DOUT_128_143 0x0c2e +#define MT6328_OTP_DOUT_144_159 0x0c30 +#define MT6328_OTP_DOUT_160_175 0x0c32 +#define MT6328_OTP_DOUT_176_191 0x0c34 +#define MT6328_OTP_DOUT_192_207 0x0c36 +#define MT6328_OTP_DOUT_208_223 0x0c38 +#define MT6328_OTP_DOUT_224_239 0x0c3a +#define MT6328_OTP_DOUT_240_255 0x0c3c +#define MT6328_OTP_DOUT_256_271 0x0c3e +#define MT6328_OTP_DOUT_272_287 0x0c40 +#define MT6328_OTP_DOUT_288_303 0x0c42 +#define MT6328_OTP_DOUT_304_319 0x0c44 +#define MT6328_OTP_DOUT_320_335 0x0c46 +#define MT6328_OTP_DOUT_336_351 0x0c48 +#define MT6328_OTP_DOUT_352_367 0x0c4a +#define MT6328_OTP_DOUT_368_383 0x0c4c +#define MT6328_OTP_DOUT_384_399 0x0c4e +#define MT6328_OTP_DOUT_400_415 0x0c50 +#define MT6328_OTP_DOUT_416_431 0x0c52 +#define MT6328_OTP_DOUT_432_447 0x0c54 +#define MT6328_OTP_DOUT_448_463 0x0c56 +#define MT6328_OTP_DOUT_464_479 0x0c58 +#define MT6328_OTP_DOUT_480_495 0x0c5a +#define MT6328_OTP_DOUT_496_511 0x0c5c +#define MT6328_OTP_VAL_0_15 0x0c5e +#define MT6328_OTP_VAL_16_31 0x0c60 +#define MT6328_OTP_VAL_32_47 0x0c62 +#define MT6328_OTP_VAL_48_63 0x0c64 +#define MT6328_OTP_VAL_64_79 0x0c66 +#define MT6328_OTP_VAL_80_95 0x0c68 +#define MT6328_OTP_VAL_96_111 0x0c6a +#define MT6328_OTP_VAL_112_127 0x0c6c +#define MT6328_OTP_VAL_128_143 0x0c6e +#define MT6328_OTP_VAL_144_159 0x0c70 +#define MT6328_OTP_VAL_160_175 0x0c72 +#define MT6328_OTP_VAL_176_191 0x0c74 +#define MT6328_OTP_VAL_192_207 0x0c76 +#define MT6328_OTP_VAL_208_223 0x0c78 +#define MT6328_OTP_VAL_224_239 0x0c7a +#define MT6328_OTP_VAL_240_255 0x0c7c +#define MT6328_OTP_VAL_256_271 0x0c7e +#define MT6328_OTP_VAL_272_287 0x0c80 +#define MT6328_OTP_VAL_288_303 0x0c82 +#define MT6328_OTP_VAL_304_319 0x0c84 +#define MT6328_OTP_VAL_320_335 0x0c86 +#define MT6328_OTP_VAL_336_351 0x0c88 +#define MT6328_OTP_VAL_352_367 0x0c8a +#define MT6328_OTP_VAL_368_383 0x0c8c +#define MT6328_OTP_VAL_384_399 0x0c8e +#define MT6328_OTP_VAL_400_415 0x0c90 +#define MT6328_OTP_VAL_416_431 0x0c92 +#define MT6328_OTP_VAL_432_447 0x0c94 
+#define MT6328_OTP_VAL_448_463 0x0c96 +#define MT6328_OTP_VAL_464_479 0x0c98 +#define MT6328_OTP_VAL_480_495 0x0c9a +#define MT6328_OTP_VAL_496_511 0x0c9c +#define MT6328_RTC_MIX_CON0 0x0c9e +#define MT6328_RTC_MIX_CON1 0x0ca0 +#define MT6328_RTC_MIX_CON2 0x0ca2 +#define MT6328_FGADC_CON0 0x0ca4 +#define MT6328_FGADC_CON1 0x0ca6 +#define MT6328_FGADC_CON2 0x0ca8 +#define MT6328_FGADC_CON3 0x0caa +#define MT6328_FGADC_CON4 0x0cac +#define MT6328_FGADC_CON5 0x0cae +#define MT6328_FGADC_CON6 0x0cb0 +#define MT6328_FGADC_CON7 0x0cb2 +#define MT6328_FGADC_CON8 0x0cb4 +#define MT6328_FGADC_CON9 0x0cb6 +#define MT6328_FGADC_CON10 0x0cb8 +#define MT6328_FGADC_CON11 0x0cba +#define MT6328_FGADC_CON12 0x0cbc +#define MT6328_FGADC_CON13 0x0cbe +#define MT6328_FGADC_CON14 0x0cc0 +#define MT6328_FGADC_CON15 0x0cc2 +#define MT6328_FGADC_CON16 0x0cc4 +#define MT6328_FGADC_CON17 0x0cc6 +#define MT6328_FGADC_CON18 0x0cc8 +#define MT6328_FGADC_CON19 0x0cca +#define MT6328_FGADC_CON20 0x0ccc +#define MT6328_FGADC_CON21 0x0cce +#define MT6328_FGADC_CON22 0x0cd0 +#define MT6328_FGADC_CON23 0x0cd2 +#define MT6328_FGADC_CON24 0x0cd4 +#define MT6328_FGADC_CON25 0x0cd6 +#define MT6328_FGADC_CON26 0x0cd8 +#define MT6328_FGADC_CON27 0x0cda +#define MT6328_AUDDEC_ANA_CON0 0x0cdc +#define MT6328_AUDDEC_ANA_CON1 0x0cde +#define MT6328_AUDDEC_ANA_CON2 0x0ce0 +#define MT6328_AUDDEC_ANA_CON3 0x0ce2 +#define MT6328_AUDDEC_ANA_CON4 0x0ce4 +#define MT6328_AUDDEC_ANA_CON5 0x0ce6 +#define MT6328_AUDDEC_ANA_CON6 0x0ce8 +#define MT6328_AUDDEC_ANA_CON7 0x0cea +#define MT6328_AUDDEC_ANA_CON8 0x0cec +#define MT6328_AUDENC_ANA_CON0 0x0cee +#define MT6328_AUDENC_ANA_CON1 0x0cf0 +#define MT6328_AUDENC_ANA_CON2 0x0cf2 +#define MT6328_AUDENC_ANA_CON3 0x0cf4 +#define MT6328_AUDENC_ANA_CON4 0x0cf6 +#define MT6328_AUDENC_ANA_CON5 0x0cf8 +#define MT6328_AUDENC_ANA_CON6 0x0cfa +#define MT6328_AUDENC_ANA_CON7 0x0cfc +#define MT6328_AUDENC_ANA_CON8 0x0cfe +#define MT6328_AUDENC_ANA_CON9 0x0d00 +#define MT6328_AUDENC_ANA_CON10 0x0d02 +#define MT6328_AUDNCP_CLKDIV_CON0 0x0d04 +#define MT6328_AUDNCP_CLKDIV_CON1 0x0d06 +#define MT6328_AUDNCP_CLKDIV_CON2 0x0d08 +#define MT6328_AUDNCP_CLKDIV_CON3 0x0d0a +#define MT6328_AUDNCP_CLKDIV_CON4 0x0d0c +#define MT6328_AUXADC_ADC0 0x0e00 +#define MT6328_AUXADC_ADC1 0x0e02 +#define MT6328_AUXADC_ADC2 0x0e04 +#define MT6328_AUXADC_ADC3 0x0e06 +#define MT6328_AUXADC_ADC4 0x0e08 +#define MT6328_AUXADC_ADC5 0x0e0a +#define MT6328_AUXADC_ADC6 0x0e0c +#define MT6328_AUXADC_ADC7 0x0e0e +#define MT6328_AUXADC_ADC8 0x0e10 +#define MT6328_AUXADC_ADC9 0x0e12 +#define MT6328_AUXADC_ADC10 0x0e14 +#define MT6328_AUXADC_ADC11 0x0e16 +#define MT6328_AUXADC_ADC12 0x0e18 +#define MT6328_AUXADC_ADC13 0x0e1a +#define MT6328_AUXADC_ADC14 0x0e1c +#define MT6328_AUXADC_ADC15 0x0e1e +#define MT6328_AUXADC_ADC16 0x0e20 +#define MT6328_AUXADC_ADC17 0x0e22 +#define MT6328_AUXADC_ADC18 0x0e24 +#define MT6328_AUXADC_ADC19 0x0e26 +#define MT6328_AUXADC_ADC20 0x0e28 +#define MT6328_AUXADC_ADC21 0x0e2a +#define MT6328_AUXADC_ADC22 0x0e2c +#define MT6328_AUXADC_ADC23 0x0e2e +#define MT6328_AUXADC_ADC24 0x0e30 +#define MT6328_AUXADC_ADC25 0x0e32 +#define MT6328_AUXADC_ADC26 0x0e34 +#define MT6328_AUXADC_ADC27 0x0e36 +#define MT6328_AUXADC_ADC28 0x0e38 +#define MT6328_AUXADC_ADC29 0x0e3a +#define MT6328_AUXADC_ADC30 0x0e3c +#define MT6328_AUXADC_ADC31 0x0e3e +#define MT6328_AUXADC_ADC32 0x0e40 +#define MT6328_AUXADC_ADC33 0x0e42 +#define MT6328_AUXADC_BUF0 0x0e44 +#define MT6328_AUXADC_BUF1 0x0e46 +#define MT6328_AUXADC_BUF2 0x0e48 +#define 
MT6328_AUXADC_BUF3 0x0e4a +#define MT6328_AUXADC_BUF4 0x0e4c +#define MT6328_AUXADC_BUF5 0x0e4e +#define MT6328_AUXADC_BUF6 0x0e50 +#define MT6328_AUXADC_BUF7 0x0e52 +#define MT6328_AUXADC_BUF8 0x0e54 +#define MT6328_AUXADC_BUF9 0x0e56 +#define MT6328_AUXADC_BUF10 0x0e58 +#define MT6328_AUXADC_BUF11 0x0e5a +#define MT6328_AUXADC_BUF12 0x0e5c +#define MT6328_AUXADC_BUF13 0x0e5e +#define MT6328_AUXADC_BUF14 0x0e60 +#define MT6328_AUXADC_BUF15 0x0e62 +#define MT6328_AUXADC_BUF16 0x0e64 +#define MT6328_AUXADC_BUF17 0x0e66 +#define MT6328_AUXADC_BUF18 0x0e68 +#define MT6328_AUXADC_BUF19 0x0e6a +#define MT6328_AUXADC_BUF20 0x0e6c +#define MT6328_AUXADC_BUF21 0x0e6e +#define MT6328_AUXADC_BUF22 0x0e70 +#define MT6328_AUXADC_BUF23 0x0e72 +#define MT6328_AUXADC_BUF24 0x0e74 +#define MT6328_AUXADC_BUF25 0x0e76 +#define MT6328_AUXADC_BUF26 0x0e78 +#define MT6328_AUXADC_BUF27 0x0e7a +#define MT6328_AUXADC_BUF28 0x0e7c +#define MT6328_AUXADC_BUF29 0x0e7e +#define MT6328_AUXADC_BUF30 0x0e80 +#define MT6328_AUXADC_BUF31 0x0e82 +#define MT6328_AUXADC_STA0 0x0e84 +#define MT6328_AUXADC_STA1 0x0e86 +#define MT6328_AUXADC_RQST0 0x0e88 +#define MT6328_AUXADC_RQST0_SET 0x0e8a +#define MT6328_AUXADC_RQST0_CLR 0x0e8c +#define MT6328_AUXADC_RQST1 0x0e8e +#define MT6328_AUXADC_RQST1_SET 0x0e90 +#define MT6328_AUXADC_RQST1_CLR 0x0e92 +#define MT6328_AUXADC_CON0 0x0e94 +#define MT6328_AUXADC_CON0_SET 0x0e96 +#define MT6328_AUXADC_CON0_CLR 0x0e98 +#define MT6328_AUXADC_CON1 0x0e9a +#define MT6328_AUXADC_CON2 0x0e9c +#define MT6328_AUXADC_CON3 0x0e9e +#define MT6328_AUXADC_CON4 0x0ea0 +#define MT6328_AUXADC_CON5 0x0ea2 +#define MT6328_AUXADC_CON6 0x0ea4 +#define MT6328_AUXADC_CON7 0x0ea6 +#define MT6328_AUXADC_CON8 0x0ea8 +#define MT6328_AUXADC_CON9 0x0eaa +#define MT6328_AUXADC_CON10 0x0eac +#define MT6328_AUXADC_CON11 0x0eae +#define MT6328_AUXADC_CON12 0x0eb0 +#define MT6328_AUXADC_CON13 0x0eb2 +#define MT6328_AUXADC_CON14 0x0eb4 +#define MT6328_AUXADC_CON15 0x0eb6 +#define MT6328_AUXADC_CON16 0x0eb8 +#define MT6328_AUXADC_AUTORPT0 0x0eba +#define MT6328_AUXADC_LBAT0 0x0ebc +#define MT6328_AUXADC_LBAT1 0x0ebe +#define MT6328_AUXADC_LBAT2 0x0ec0 +#define MT6328_AUXADC_LBAT3 0x0ec2 +#define MT6328_AUXADC_LBAT4 0x0ec4 +#define MT6328_AUXADC_LBAT5 0x0ec6 +#define MT6328_AUXADC_LBAT6 0x0ec8 +#define MT6328_AUXADC_ACCDET 0x0eca +#define MT6328_AUXADC_THR0 0x0ecc +#define MT6328_AUXADC_THR1 0x0ece +#define MT6328_AUXADC_THR2 0x0ed0 +#define MT6328_AUXADC_THR3 0x0ed2 +#define MT6328_AUXADC_THR4 0x0ed4 +#define MT6328_AUXADC_THR5 0x0ed6 +#define MT6328_AUXADC_THR6 0x0ed8 +#define MT6328_AUXADC_EFUSE0 0x0eda +#define MT6328_AUXADC_EFUSE1 0x0edc +#define MT6328_AUXADC_EFUSE2 0x0ede +#define MT6328_AUXADC_EFUSE3 0x0ee0 +#define MT6328_AUXADC_EFUSE4 0x0ee2 +#define MT6328_AUXADC_EFUSE5 0x0ee4 +#define MT6328_AUXADC_DBG0 0x0ee6 +#define MT6328_AUXADC_IMP0 0x0ee8 +#define MT6328_AUXADC_IMP1 0x0eea +#define MT6328_AUXADC_VISMPS0_1 0x0eec +#define MT6328_AUXADC_VISMPS0_2 0x0eee +#define MT6328_AUXADC_VISMPS0_3 0x0ef0 +#define MT6328_AUXADC_VISMPS0_4 0x0ef2 +#define MT6328_AUXADC_VISMPS0_5 0x0ef4 +#define MT6328_AUXADC_VISMPS0_6 0x0ef6 +#define MT6328_AUXADC_VISMPS0_7 0x0ef8 +#define MT6328_AUXADC_LBAT2_1 0x0efa +#define MT6328_AUXADC_LBAT2_2 0x0efc +#define MT6328_AUXADC_LBAT2_3 0x0efe +#define MT6328_AUXADC_LBAT2_4 0x0f00 +#define MT6328_AUXADC_LBAT2_5 0x0f02 +#define MT6328_AUXADC_LBAT2_6 0x0f04 +#define MT6328_AUXADC_LBAT2_7 0x0f06 +#define MT6328_AUXADC_MDBG_0 0x0f08 +#define MT6328_AUXADC_MDBG_1 0x0f0a +#define 
MT6328_AUXADC_MDBG_2 0x0f0c +#define MT6328_AUXADC_MDRT_0 0x0f0e +#define MT6328_AUXADC_MDRT_1 0x0f10 +#define MT6328_AUXADC_MDRT_2 0x0f12 +#define MT6328_ACCDET_CON0 0x0f14 +#define MT6328_ACCDET_CON1 0x0f16 +#define MT6328_ACCDET_CON2 0x0f18 +#define MT6328_ACCDET_CON3 0x0f1a +#define MT6328_ACCDET_CON4 0x0f1c +#define MT6328_ACCDET_CON5 0x0f1e +#define MT6328_ACCDET_CON6 0x0f20 +#define MT6328_ACCDET_CON7 0x0f22 +#define MT6328_ACCDET_CON8 0x0f24 +#define MT6328_ACCDET_CON9 0x0f26 +#define MT6328_ACCDET_CON10 0x0f28 +#define MT6328_ACCDET_CON11 0x0f2a +#define MT6328_ACCDET_CON12 0x0f2c +#define MT6328_ACCDET_CON13 0x0f2e +#define MT6328_ACCDET_CON14 0x0f30 +#define MT6328_ACCDET_CON15 0x0f32 +#define MT6328_ACCDET_CON16 0x0f34 +#define MT6328_ACCDET_CON17 0x0f36 +#define MT6328_ACCDET_CON18 0x0f38 +#define MT6328_ACCDET_CON19 0x0f3a +#define MT6328_ACCDET_CON20 0x0f3c +#define MT6328_ACCDET_CON21 0x0f3e +#define MT6328_ACCDET_CON22 0x0f40 +#define MT6328_ACCDET_CON23 0x0f42 +#define MT6328_ACCDET_CON24 0x0f44 +#define MT6328_ACCDET_CON25 0x0f46 +#define MT6328_CHR_CON0 0x0f48 +#define MT6328_CHR_CON1 0x0f4a +#define MT6328_CHR_CON2 0x0f4c +#define MT6328_CHR_CON3 0x0f4e +#define MT6328_CHR_CON4 0x0f50 +#define MT6328_CHR_CON5 0x0f52 +#define MT6328_CHR_CON6 0x0f54 +#define MT6328_CHR_CON7 0x0f56 +#define MT6328_CHR_CON8 0x0f58 +#define MT6328_CHR_CON9 0x0f5a +#define MT6328_CHR_CON10 0x0f5c +#define MT6328_CHR_CON11 0x0f5e +#define MT6328_CHR_CON12 0x0f60 +#define MT6328_CHR_CON13 0x0f62 +#define MT6328_CHR_CON14 0x0f64 +#define MT6328_CHR_CON15 0x0f66 +#define MT6328_CHR_CON16 0x0f68 +#define MT6328_CHR_CON17 0x0f6a +#define MT6328_CHR_CON18 0x0f6c +#define MT6328_CHR_CON19 0x0f6e +#define MT6328_CHR_CON20 0x0f70 +#define MT6328_CHR_CON21 0x0f72 +#define MT6328_CHR_CON22 0x0f74 +#define MT6328_CHR_CON23 0x0f76 +#define MT6328_CHR_CON24 0x0f78 +#define MT6328_CHR_CON25 0x0f7a +#define MT6328_CHR_CON26 0x0f7c +#define MT6328_CHR_CON27 0x0f7e +#define MT6328_CHR_CON28 0x0f80 +#define MT6328_CHR_CON29 0x0f82 +#define MT6328_CHR_CON30 0x0f84 +#define MT6328_CHR_CON31 0x0f86 +#define MT6328_CHR_CON32 0x0f88 +#define MT6328_CHR_CON33 0x0f8a +#define MT6328_CHR_CON34 0x0f8c +#define MT6328_CHR_CON35 0x0f8e +#define MT6328_CHR_CON36 0x0f90 +#define MT6328_CHR_CON37 0x0f92 +#define MT6328_CHR_CON38 0x0f94 +#define MT6328_CHR_CON39 0x0f96 +#define MT6328_CHR_CON40 0x0f98 +#define MT6328_CHR_CON41 0x0f9a +#define MT6328_CHR_CON42 0x0f9c +#define MT6328_BATON_CON0 0x0f9e +#define MT6328_CHR_CON43 0x0fa0 +#define MT6328_EOSC_CALI_CON0 0x0faa +#define MT6328_EOSC_CALI_CON1 0x0fac +#define MT6328_VRTC_PWM_CON0 0x0fae + +#endif /* __MFD_MT6328_REGISTERS_H__ */ diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h index 627487e26287..b774c3a4bb62 100644 --- a/include/linux/mfd/mt6397/core.h +++ b/include/linux/mfd/mt6397/core.h @@ -12,6 +12,7 @@ enum chip_id { MT6323_CHIP_ID = 0x23, + MT6328_CHIP_ID = 0x30, MT6331_CHIP_ID = 0x20, MT6332_CHIP_ID = 0x20, MT6357_CHIP_ID = 0x57, @@ -65,11 +66,11 @@ struct mt6397_chip { int irq; struct irq_domain *irq_domain; struct mutex irqlock; - u16 wake_mask[2]; - u16 irq_masks_cur[2]; - u16 irq_masks_cache[2]; - u16 int_con[2]; - u16 int_status[2]; + u16 wake_mask[3]; + u16 irq_masks_cur[3]; + u16 irq_masks_cache[3]; + u16 int_con[3]; + u16 int_status[3]; u16 chip_id; void *irq_data; }; diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h index eda1ffd99c1a..dabcc0dea802 100644 --- a/include/linux/mfd/palmas.h +++ 
b/include/linux/mfd/palmas.h @@ -98,8 +98,8 @@ struct palmas_sleep_requestor_info { }; struct palmas_regs_info { - char *name; - char *sname; + const char *name; + const char *sname; u8 vsel_addr; u8 ctrl_addr; u8 tstep_addr; diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h index a212b9f72bc9..750274d41fc0 100644 --- a/include/linux/mfd/samsung/core.h +++ b/include/linux/mfd/samsung/core.h @@ -37,6 +37,7 @@ struct gpio_desc; enum sec_device_type { S5M8767X, + S2DOS05, S2MPA01, S2MPS11X, S2MPS13X, diff --git a/include/linux/min_heap.h b/include/linux/min_heap.h index 43a7b9dcf15e..e781727c8916 100644 --- a/include/linux/min_heap.h +++ b/include/linux/min_heap.h @@ -38,9 +38,176 @@ struct min_heap_callbacks { void (*swp)(void *lhs, void *rhs, void *args); }; +/** + * is_aligned - is this pointer & size okay for word-wide copying? + * @base: pointer to data + * @size: size of each element + * @align: required alignment (typically 4 or 8) + * + * Returns true if elements can be copied using word loads and stores. + * The size must be a multiple of the alignment, and the base address must + * be if we do not have CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS. + * + * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)" + * to "if ((a | b) & mask)", so we do that by hand. + */ +__attribute_const__ __always_inline +static bool is_aligned(const void *base, size_t size, unsigned char align) +{ + unsigned char lsbits = (unsigned char)size; + + (void)base; +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + lsbits |= (unsigned char)(uintptr_t)base; +#endif + return (lsbits & (align - 1)) == 0; +} + +/** + * swap_words_32 - swap two elements in 32-bit chunks + * @a: pointer to the first element to swap + * @b: pointer to the second element to swap + * @n: element size (must be a multiple of 4) + * + * Exchange the two objects in memory. This exploits base+index addressing, + * which basically all CPUs have, to minimize loop overhead computations. + * + * For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the + * bottom of the loop, even though the zero flag is still valid from the + * subtract (since the intervening mov instructions don't alter the flags). + * Gcc 8.1.0 doesn't have that problem. + */ +static __always_inline +void swap_words_32(void *a, void *b, size_t n) +{ + do { + u32 t = *(u32 *)(a + (n -= 4)); + *(u32 *)(a + n) = *(u32 *)(b + n); + *(u32 *)(b + n) = t; + } while (n); +} + +/** + * swap_words_64 - swap two elements in 64-bit chunks + * @a: pointer to the first element to swap + * @b: pointer to the second element to swap + * @n: element size (must be a multiple of 8) + * + * Exchange the two objects in memory. This exploits base+index + * addressing, which basically all CPUs have, to minimize loop overhead + * computations. + * + * We'd like to use 64-bit loads if possible. If they're not, emulating + * one requires base+index+4 addressing which x86 has but most other + * processors do not. If CONFIG_64BIT, we definitely have 64-bit loads, + * but it's possible to have 64-bit loads without 64-bit pointers (e.g. + * x32 ABI). Are there any cases the kernel needs to worry about? 
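For reference, a small illustration (not part of the patch) of which element layouts the is_aligned() check above accepts for word-wide copying; the struct names are hypothetical and assume <linux/types.h>. These are the same 8- and 4-byte tests the swap-routine selection introduced further below relies on, assuming CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set, so the base pointer is checked as well as the size.

	/* Illustration only -- hypothetical element types: */
	struct demo_pair  { u64 lo, hi; };	/* size 16, 8-byte aligned base: 64-bit word swaps */
	struct demo_trio  { u32 a, b, c; };	/* size 12, 4-byte aligned base: 32-bit word swaps */
	struct demo_bytes { u8  raw[10]; };	/* size 10: falls back to bytewise swapping        */

	/*
	 * is_aligned(base, sizeof(struct demo_trio), 8) == false  (12 & 7 != 0)
	 * is_aligned(base, sizeof(struct demo_trio), 4) == true   (12 & 3 == 0 and
	 *                                                          base is 4-byte aligned)
	 */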
+ */ +static __always_inline +void swap_words_64(void *a, void *b, size_t n) +{ + do { +#ifdef CONFIG_64BIT + u64 t = *(u64 *)(a + (n -= 8)); + *(u64 *)(a + n) = *(u64 *)(b + n); + *(u64 *)(b + n) = t; +#else + /* Use two 32-bit transfers to avoid base+index+4 addressing */ + u32 t = *(u32 *)(a + (n -= 4)); + *(u32 *)(a + n) = *(u32 *)(b + n); + *(u32 *)(b + n) = t; + + t = *(u32 *)(a + (n -= 4)); + *(u32 *)(a + n) = *(u32 *)(b + n); + *(u32 *)(b + n) = t; +#endif + } while (n); +} + +/** + * swap_bytes - swap two elements a byte at a time + * @a: pointer to the first element to swap + * @b: pointer to the second element to swap + * @n: element size + * + * This is the fallback if alignment doesn't allow using larger chunks. + */ +static __always_inline +void swap_bytes(void *a, void *b, size_t n) +{ + do { + char t = ((char *)a)[--n]; + ((char *)a)[n] = ((char *)b)[n]; + ((char *)b)[n] = t; + } while (n); +} + +/* + * The values are arbitrary as long as they can't be confused with + * a pointer, but small integers make for the smallest compare + * instructions. + */ +#define SWAP_WORDS_64 ((void (*)(void *, void *, void *))0) +#define SWAP_WORDS_32 ((void (*)(void *, void *, void *))1) +#define SWAP_BYTES ((void (*)(void *, void *, void *))2) + +/* + * Selects the appropriate swap function based on the element size. + */ +static __always_inline +void *select_swap_func(const void *base, size_t size) +{ + if (is_aligned(base, size, 8)) + return SWAP_WORDS_64; + else if (is_aligned(base, size, 4)) + return SWAP_WORDS_32; + else + return SWAP_BYTES; +} + +static __always_inline +void do_swap(void *a, void *b, size_t size, void (*swap_func)(void *lhs, void *rhs, void *args), + void *priv) +{ + if (swap_func == SWAP_WORDS_64) + swap_words_64(a, b, size); + else if (swap_func == SWAP_WORDS_32) + swap_words_32(a, b, size); + else if (swap_func == SWAP_BYTES) + swap_bytes(a, b, size); + else + swap_func(a, b, priv); +} + +/** + * parent - given the offset of the child, find the offset of the parent. + * @i: the offset of the heap element whose parent is sought. Non-zero. + * @lsbit: a precomputed 1-bit mask, equal to "size & -size" + * @size: size of each element + * + * In terms of array indexes, the parent of element j = @i/@size is simply + * (j-1)/2. But when working in byte offsets, we can't use implicit + * truncation of integer divides. + * + * Fortunately, we only need one bit of the quotient, not the full divide. + * @size has a least significant bit. That bit will be clear if @i is + * an even multiple of @size, and set if it's an odd multiple. + * + * Logically, we're doing "if (i & lsbit) i -= size;", but since the + * branch is unpredictable, it's done with a bit of clever branch-free + * code instead. + */ +__attribute_const__ __always_inline +static size_t parent(size_t i, unsigned int lsbit, size_t size) +{ + i -= size; + i -= size & -(i & lsbit); + return i / 2; +} + /* Initialize a min-heap. */ static __always_inline -void __min_heap_init(min_heap_char *heap, void *data, int size) +void __min_heap_init_inline(min_heap_char *heap, void *data, int size) { heap->nr = 0; heap->size = size; @@ -50,105 +217,114 @@ void __min_heap_init(min_heap_char *heap, void *data, int size) heap->data = heap->preallocated; } -#define min_heap_init(_heap, _data, _size) \ - __min_heap_init((min_heap_char *)_heap, _data, _size) +#define min_heap_init_inline(_heap, _data, _size) \ + __min_heap_init_inline((min_heap_char *)_heap, _data, _size) /* Get the minimum element from the heap. 
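A worked example (added for illustration, not present in the patch) of the branch-free parent() computation above, using a 12-byte element size so that lsbit = 4:

	/*
	 * elem_size = 12, lsbit = 12 & -12 = 4.
	 * The child at array index 4 sits at byte offset i = 48; its parent is
	 * index (4 - 1) / 2 = 1, i.e. byte offset 12.
	 *
	 *   i = 48 - 12           -> 36   (step back one element)
	 *   36 & lsbit = 4        -> 36 is an odd multiple of 12, so subtract
	 *                            one more element
	 *   i = 36 - (12 & -4)    -> 24
	 *   i / 2                 -> 12   (parent's byte offset)
	 *
	 * For the even-multiple sibling (index 3, offset 36): 36 - 12 = 24,
	 * 24 & 4 == 0, 24 / 2 = 12 -- the same parent, as expected.
	 */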
*/ static __always_inline -void *__min_heap_peek(struct min_heap_char *heap) +void *__min_heap_peek_inline(struct min_heap_char *heap) { return heap->nr ? heap->data : NULL; } -#define min_heap_peek(_heap) \ - (__minheap_cast(_heap) __min_heap_peek((min_heap_char *)_heap)) +#define min_heap_peek_inline(_heap) \ + (__minheap_cast(_heap) __min_heap_peek_inline((min_heap_char *)_heap)) /* Check if the heap is full. */ static __always_inline -bool __min_heap_full(min_heap_char *heap) +bool __min_heap_full_inline(min_heap_char *heap) { return heap->nr == heap->size; } -#define min_heap_full(_heap) \ - __min_heap_full((min_heap_char *)_heap) +#define min_heap_full_inline(_heap) \ + __min_heap_full_inline((min_heap_char *)_heap) /* Sift the element at pos down the heap. */ static __always_inline -void __min_heap_sift_down(min_heap_char *heap, int pos, size_t elem_size, - const struct min_heap_callbacks *func, void *args) +void __min_heap_sift_down_inline(min_heap_char *heap, int pos, size_t elem_size, + const struct min_heap_callbacks *func, void *args) { - void *left, *right; + const unsigned long lsbit = elem_size & -elem_size; void *data = heap->data; - void *root = data + pos * elem_size; - int i = pos, j; + void (*swp)(void *lhs, void *rhs, void *args) = func->swp; + /* pre-scale counters for performance */ + size_t a = pos * elem_size; + size_t b, c, d; + size_t n = heap->nr * elem_size; + + if (!swp) + swp = select_swap_func(data, elem_size); /* Find the sift-down path all the way to the leaves. */ - for (;;) { - if (i * 2 + 2 >= heap->nr) - break; - left = data + (i * 2 + 1) * elem_size; - right = data + (i * 2 + 2) * elem_size; - i = func->less(left, right, args) ? i * 2 + 1 : i * 2 + 2; - } + for (b = a; c = 2 * b + elem_size, (d = c + elem_size) < n;) + b = func->less(data + c, data + d, args) ? c : d; /* Special case for the last leaf with no sibling. */ - if (i * 2 + 2 == heap->nr) - i = i * 2 + 1; + if (d == n) + b = c; /* Backtrack to the correct location. */ - while (i != pos && func->less(root, data + i * elem_size, args)) - i = (i - 1) / 2; + while (b != a && func->less(data + a, data + b, args)) + b = parent(b, lsbit, elem_size); /* Shift the element into its correct place. */ - j = i; - while (i != pos) { - i = (i - 1) / 2; - func->swp(data + i * elem_size, data + j * elem_size, args); + c = b; + while (b != a) { + b = parent(b, lsbit, elem_size); + do_swap(data + b, data + c, elem_size, swp, args); } } -#define min_heap_sift_down(_heap, _pos, _func, _args) \ - __min_heap_sift_down((min_heap_char *)_heap, _pos, __minheap_obj_size(_heap), _func, _args) +#define min_heap_sift_down_inline(_heap, _pos, _func, _args) \ + __min_heap_sift_down_inline((min_heap_char *)_heap, _pos, __minheap_obj_size(_heap), \ + _func, _args) /* Sift up ith element from the heap, O(log2(nr)). 
*/ static __always_inline -void __min_heap_sift_up(min_heap_char *heap, size_t elem_size, size_t idx, - const struct min_heap_callbacks *func, void *args) +void __min_heap_sift_up_inline(min_heap_char *heap, size_t elem_size, size_t idx, + const struct min_heap_callbacks *func, void *args) { + const unsigned long lsbit = elem_size & -elem_size; void *data = heap->data; - size_t parent; + void (*swp)(void *lhs, void *rhs, void *args) = func->swp; + /* pre-scale counters for performance */ + size_t a = idx * elem_size, b; + + if (!swp) + swp = select_swap_func(data, elem_size); - while (idx) { - parent = (idx - 1) / 2; - if (func->less(data + parent * elem_size, data + idx * elem_size, args)) + while (a) { + b = parent(a, lsbit, elem_size); + if (func->less(data + b, data + a, args)) break; - func->swp(data + parent * elem_size, data + idx * elem_size, args); - idx = parent; + do_swap(data + a, data + b, elem_size, swp, args); + a = b; } } -#define min_heap_sift_up(_heap, _idx, _func, _args) \ - __min_heap_sift_up((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, _func, _args) +#define min_heap_sift_up_inline(_heap, _idx, _func, _args) \ + __min_heap_sift_up_inline((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, \ + _func, _args) /* Floyd's approach to heapification that is O(nr). */ static __always_inline -void __min_heapify_all(min_heap_char *heap, size_t elem_size, - const struct min_heap_callbacks *func, void *args) +void __min_heapify_all_inline(min_heap_char *heap, size_t elem_size, + const struct min_heap_callbacks *func, void *args) { int i; for (i = heap->nr / 2 - 1; i >= 0; i--) - __min_heap_sift_down(heap, i, elem_size, func, args); + __min_heap_sift_down_inline(heap, i, elem_size, func, args); } -#define min_heapify_all(_heap, _func, _args) \ - __min_heapify_all((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args) +#define min_heapify_all_inline(_heap, _func, _args) \ + __min_heapify_all_inline((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args) /* Remove minimum element from the heap, O(log2(nr)). */ static __always_inline -bool __min_heap_pop(min_heap_char *heap, size_t elem_size, - const struct min_heap_callbacks *func, void *args) +bool __min_heap_pop_inline(min_heap_char *heap, size_t elem_size, + const struct min_heap_callbacks *func, void *args) { void *data = heap->data; @@ -158,13 +334,13 @@ bool __min_heap_pop(min_heap_char *heap, size_t elem_size, /* Place last element at the root (position 0) and then sift down. */ heap->nr--; memcpy(data, data + (heap->nr * elem_size), elem_size); - __min_heap_sift_down(heap, 0, elem_size, func, args); + __min_heap_sift_down_inline(heap, 0, elem_size, func, args); return true; } -#define min_heap_pop(_heap, _func, _args) \ - __min_heap_pop((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args) +#define min_heap_pop_inline(_heap, _func, _args) \ + __min_heap_pop_inline((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args) /* * Remove the minimum element and then push the given element. The @@ -172,22 +348,21 @@ bool __min_heap_pop(min_heap_char *heap, size_t elem_size, * efficient than a pop followed by a push that does 2. 
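As a usage sketch (not taken from the patch), here is how the new _inline variants fit together for a heap of ints. It assumes the DEFINE_MIN_HEAP() helper from this header, and leaves .swp NULL so that the built-in swap selection shown earlier is used; all names are illustrative.

	DEFINE_MIN_HEAP(int, min_heap_int);

	static bool demo_int_less(const void *lhs, const void *rhs, void *args)
	{
		return *(const int *)lhs < *(const int *)rhs;
	}

	static const struct min_heap_callbacks demo_int_cbs = {
		.less = demo_int_less,
		.swp = NULL,		/* let do_swap() pick a word-wise swap */
	};

	static int demo_min_heap_use(void)
	{
		static int storage[8];
		struct min_heap_int heap;
		int v;

		min_heap_init_inline(&heap, storage, ARRAY_SIZE(storage));
		for (v = 3; v > 0; v--)
			min_heap_push_inline(&heap, &v, &demo_int_cbs, NULL);

		v = *min_heap_peek_inline(&heap);	/* smallest element: 1 */
		min_heap_pop_inline(&heap, &demo_int_cbs, NULL);
		return v;
	}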
*/ static __always_inline -void __min_heap_pop_push(min_heap_char *heap, - const void *element, size_t elem_size, - const struct min_heap_callbacks *func, - void *args) +void __min_heap_pop_push_inline(min_heap_char *heap, const void *element, size_t elem_size, + const struct min_heap_callbacks *func, void *args) { memcpy(heap->data, element, elem_size); - __min_heap_sift_down(heap, 0, elem_size, func, args); + __min_heap_sift_down_inline(heap, 0, elem_size, func, args); } -#define min_heap_pop_push(_heap, _element, _func, _args) \ - __min_heap_pop_push((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), _func, _args) +#define min_heap_pop_push_inline(_heap, _element, _func, _args) \ + __min_heap_pop_push_inline((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), \ + _func, _args) /* Push an element on to the heap, O(log2(nr)). */ static __always_inline -bool __min_heap_push(min_heap_char *heap, const void *element, size_t elem_size, - const struct min_heap_callbacks *func, void *args) +bool __min_heap_push_inline(min_heap_char *heap, const void *element, size_t elem_size, + const struct min_heap_callbacks *func, void *args) { void *data = heap->data; int pos; @@ -201,35 +376,81 @@ bool __min_heap_push(min_heap_char *heap, const void *element, size_t elem_size, heap->nr++; /* Sift child at pos up. */ - __min_heap_sift_up(heap, elem_size, pos, func, args); + __min_heap_sift_up_inline(heap, elem_size, pos, func, args); return true; } -#define min_heap_push(_heap, _element, _func, _args) \ - __min_heap_push((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), _func, _args) +#define min_heap_push_inline(_heap, _element, _func, _args) \ + __min_heap_push_inline((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), \ + _func, _args) /* Remove ith element from the heap, O(log2(nr)). */ static __always_inline -bool __min_heap_del(min_heap_char *heap, size_t elem_size, size_t idx, - const struct min_heap_callbacks *func, void *args) +bool __min_heap_del_inline(min_heap_char *heap, size_t elem_size, size_t idx, + const struct min_heap_callbacks *func, void *args) { void *data = heap->data; + void (*swp)(void *lhs, void *rhs, void *args) = func->swp; if (WARN_ONCE(heap->nr <= 0, "Popping an empty heap")) return false; + if (!swp) + swp = select_swap_func(data, elem_size); + /* Place last element at the root (position 0) and then sift down. 
*/ heap->nr--; if (idx == heap->nr) return true; - func->swp(data + (idx * elem_size), data + (heap->nr * elem_size), args); - __min_heap_sift_up(heap, elem_size, idx, func, args); - __min_heap_sift_down(heap, idx, elem_size, func, args); + do_swap(data + (idx * elem_size), data + (heap->nr * elem_size), elem_size, swp, args); + __min_heap_sift_up_inline(heap, elem_size, idx, func, args); + __min_heap_sift_down_inline(heap, idx, elem_size, func, args); return true; } +#define min_heap_del_inline(_heap, _idx, _func, _args) \ + __min_heap_del_inline((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, \ + _func, _args) + +void __min_heap_init(min_heap_char *heap, void *data, int size); +void *__min_heap_peek(struct min_heap_char *heap); +bool __min_heap_full(min_heap_char *heap); +void __min_heap_sift_down(min_heap_char *heap, int pos, size_t elem_size, + const struct min_heap_callbacks *func, void *args); +void __min_heap_sift_up(min_heap_char *heap, size_t elem_size, size_t idx, + const struct min_heap_callbacks *func, void *args); +void __min_heapify_all(min_heap_char *heap, size_t elem_size, + const struct min_heap_callbacks *func, void *args); +bool __min_heap_pop(min_heap_char *heap, size_t elem_size, + const struct min_heap_callbacks *func, void *args); +void __min_heap_pop_push(min_heap_char *heap, const void *element, size_t elem_size, + const struct min_heap_callbacks *func, void *args); +bool __min_heap_push(min_heap_char *heap, const void *element, size_t elem_size, + const struct min_heap_callbacks *func, void *args); +bool __min_heap_del(min_heap_char *heap, size_t elem_size, size_t idx, + const struct min_heap_callbacks *func, void *args); + +#define min_heap_init(_heap, _data, _size) \ + __min_heap_init((min_heap_char *)_heap, _data, _size) +#define min_heap_peek(_heap) \ + (__minheap_cast(_heap) __min_heap_peek((min_heap_char *)_heap)) +#define min_heap_full(_heap) \ + __min_heap_full((min_heap_char *)_heap) +#define min_heap_sift_down(_heap, _pos, _func, _args) \ + __min_heap_sift_down((min_heap_char *)_heap, _pos, __minheap_obj_size(_heap), _func, _args) +#define min_heap_sift_up(_heap, _idx, _func, _args) \ + __min_heap_sift_up((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, _func, _args) +#define min_heapify_all(_heap, _func, _args) \ + __min_heapify_all((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args) +#define min_heap_pop(_heap, _func, _args) \ + __min_heap_pop((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args) +#define min_heap_pop_push(_heap, _element, _func, _args) \ + __min_heap_pop_push((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), \ + _func, _args) +#define min_heap_push(_heap, _element, _func, _args) \ + __min_heap_push((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), _func, _args) #define min_heap_del(_heap, _idx, _func, _args) \ __min_heap_del((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, _func, _args) diff --git a/include/linux/misc/keba.h b/include/linux/misc/keba.h index 323b31a847c5..a81d6fa70851 100644 --- a/include/linux/misc/keba.h +++ b/include/linux/misc/keba.h @@ -7,6 +7,7 @@ #include <linux/auxiliary_bus.h> struct i2c_board_info; +struct spi_board_info; /** * struct keba_i2c_auxdev - KEBA I2C auxiliary device @@ -22,4 +23,50 @@ struct keba_i2c_auxdev { struct i2c_board_info *info; }; +/** + * struct keba_spi_auxdev - KEBA SPI auxiliary device + * @auxdev: auxiliary device object + * @io: address range of SPI controller IO memory + * @info_size: number of SPI devices to 
be probed + * @info: SPI devices to be probed + */ +struct keba_spi_auxdev { + struct auxiliary_device auxdev; + struct resource io; + int info_size; + struct spi_board_info *info; +}; + +/** + * struct keba_fan_auxdev - KEBA fan auxiliary device + * @auxdev: auxiliary device object + * @io: address range of fan controller IO memory + */ +struct keba_fan_auxdev { + struct auxiliary_device auxdev; + struct resource io; +}; + +/** + * struct keba_batt_auxdev - KEBA battery auxiliary device + * @auxdev: auxiliary device object + * @io: address range of battery controller IO memory + */ +struct keba_batt_auxdev { + struct auxiliary_device auxdev; + struct resource io; +}; + +/** + * struct keba_uart_auxdev - KEBA UART auxiliary device + * @auxdev: auxiliary device object + * @io: address range of UART controller IO memory + * @irq: number of UART controller interrupt + */ +struct keba_uart_auxdev { + struct auxiliary_device auxdev; + struct resource io; + unsigned int irq; +}; + #endif /* _LINUX_MISC_KEBA_H */ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index e23c692a34c7..ea48eb879a0f 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -45,7 +45,6 @@ #include <linux/workqueue.h> #include <linux/mempool.h> #include <linux/interrupt.h> -#include <linux/idr.h> #include <linux/notifier.h> #include <linux/refcount.h> #include <linux/auxiliary_bus.h> @@ -474,36 +473,6 @@ struct mlx5_core_sriov { u16 max_ec_vfs; }; -struct mlx5_fc_pool { - struct mlx5_core_dev *dev; - struct mutex pool_lock; /* protects pool lists */ - struct list_head fully_used; - struct list_head partially_used; - struct list_head unused; - int available_fcs; - int used_fcs; - int threshold; -}; - -struct mlx5_fc_stats { - spinlock_t counters_idr_lock; /* protects counters_idr */ - struct idr counters_idr; - struct list_head counters; - struct llist_head addlist; - struct llist_head dellist; - - struct workqueue_struct *wq; - struct delayed_work work; - unsigned long next_query; - unsigned long sampling_interval; /* jiffies */ - u32 *bulk_query_out; - int bulk_query_len; - size_t num_counters; - bool bulk_query_alloc_failed; - unsigned long next_bulk_query_alloc; - struct mlx5_fc_pool fc_pool; -}; - struct mlx5_events; struct mlx5_mpfs; struct mlx5_eswitch; @@ -555,6 +524,7 @@ enum { * creation/deletion on drivers rescan. Unset during device attach. 
*/ MLX5_PRIV_FLAGS_DETACH = 1 << 2, + MLX5_PRIV_FLAGS_SWITCH_LEGACY = 1 << 3, }; struct mlx5_adev { @@ -630,7 +600,7 @@ struct mlx5_priv { struct mlx5_devcom_comp_dev *hca_devcom_comp; struct mlx5_fw_reset *fw_reset; struct mlx5_core_roce roce; - struct mlx5_fc_stats fc_stats; + struct mlx5_fc_stats *fc_stats; struct mlx5_rl_table rl_table; struct mlx5_ft_pool *ft_pool; @@ -1233,6 +1203,12 @@ static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev) return dev->coredev_type == MLX5_COREDEV_VF; } +static inline bool mlx5_core_same_coredev_type(const struct mlx5_core_dev *dev1, + const struct mlx5_core_dev *dev2) +{ + return dev1->coredev_type == dev2->coredev_type; +} + static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev) { return dev->caps.embedded_cpu; diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index b744e554f014..438db888bde0 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -298,9 +298,6 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler, struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging); -/* As mlx5_fc_create() but doesn't queue stats refresh thread. */ -struct mlx5_fc *mlx5_fc_create_ex(struct mlx5_core_dev *dev, bool aging); - void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter); u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter); void mlx5_fc_query_cached(struct mlx5_fc *counter, diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 96d369112bfa..48d47181c7cd 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1872,7 +1872,11 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_328[0x2]; u8 relaxed_ordering_read[0x1]; u8 log_max_pd[0x5]; - u8 reserved_at_330[0x5]; + u8 dp_ordering_ooo_all_ud[0x1]; + u8 dp_ordering_ooo_all_uc[0x1]; + u8 dp_ordering_ooo_all_xrc[0x1]; + u8 dp_ordering_ooo_all_dc[0x1]; + u8 dp_ordering_ooo_all_rc[0x1]; u8 pcie_reset_using_hotreset_method[0x1]; u8 pci_sync_for_fw_update_with_driver_unload[0x1]; u8 vnic_env_cnt_steering_fail[0x1]; @@ -2094,7 +2098,9 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { u8 reserved_at_0[0x80]; u8 migratable[0x1]; - u8 reserved_at_81[0x11]; + u8 reserved_at_81[0x7]; + u8 dp_ordering_force[0x1]; + u8 reserved_at_89[0x9]; u8 query_vuid[0x1]; u8 reserved_at_93[0x5]; u8 umr_log_entity_size_5[0x1]; @@ -2113,7 +2119,9 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { u8 migration_in_chunks[0x1]; u8 reserved_at_d1[0x1]; u8 sf_eq_usage[0x1]; - u8 reserved_at_d3[0xd]; + u8 reserved_at_d3[0x5]; + u8 multiplane[0x1]; + u8 reserved_at_d9[0x7]; u8 cross_vhca_object_to_object_supported[0x20]; @@ -3524,7 +3532,8 @@ struct mlx5_ifc_qpc_bits { u8 latency_sensitive[0x1]; u8 reserved_at_24[0x1]; u8 drain_sigerr[0x1]; - u8 reserved_at_26[0x2]; + u8 reserved_at_26[0x1]; + u8 dp_ordering_force[0x1]; u8 pd[0x18]; u8 mtu[0x3]; @@ -3597,7 +3606,8 @@ struct mlx5_ifc_qpc_bits { u8 rae[0x1]; u8 reserved_at_493[0x1]; u8 page_offset[0x6]; - u8 reserved_at_49a[0x3]; + u8 reserved_at_49a[0x2]; + u8 dp_ordering_1[0x1]; u8 cd_slave_receive[0x1]; u8 cd_slave_send[0x1]; u8 cd_master[0x1]; @@ -4105,11 +4115,47 @@ enum { ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP = 1 << 4, }; +enum { + TSAR_ELEMENT_TSAR_TYPE_DWRR = 0x0, + TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN = 0x1, + TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2, +}; + +enum { + TSAR_TYPE_CAP_MASK_DWRR = 1 << 0, + TSAR_TYPE_CAP_MASK_ROUND_ROBIN = 1 << 1, + TSAR_TYPE_CAP_MASK_ETS = 1 << 2, +}; + +struct mlx5_ifc_tsar_element_bits { + u8 reserved_at_0[0x8]; + u8 tsar_type[0x8]; + 
u8 reserved_at_10[0x10]; +}; + +struct mlx5_ifc_vport_element_bits { + u8 reserved_at_0[0x10]; + u8 vport_number[0x10]; +}; + +struct mlx5_ifc_vport_tc_element_bits { + u8 traffic_class[0x4]; + u8 reserved_at_4[0xc]; + u8 vport_number[0x10]; +}; + +union mlx5_ifc_element_attributes_bits { + struct mlx5_ifc_tsar_element_bits tsar; + struct mlx5_ifc_vport_element_bits vport; + struct mlx5_ifc_vport_tc_element_bits vport_tc; + u8 reserved_at_0[0x20]; +}; + struct mlx5_ifc_scheduling_context_bits { u8 element_type[0x8]; u8 reserved_at_8[0x18]; - u8 element_attributes[0x20]; + union mlx5_ifc_element_attributes_bits element_attributes; u8 parent_element_id[0x20]; @@ -4507,7 +4553,8 @@ struct mlx5_ifc_dctc_bits { u8 state[0x4]; u8 reserved_at_8[0x18]; - u8 reserved_at_20[0x8]; + u8 reserved_at_20[0x7]; + u8 dp_ordering_force[0x1]; u8 user_index[0x18]; u8 reserved_at_40[0x8]; @@ -4522,7 +4569,9 @@ struct mlx5_ifc_dctc_bits { u8 latency_sensitive[0x1]; u8 rlky[0x1]; u8 free_ar[0x1]; - u8 reserved_at_73[0xd]; + u8 reserved_at_73[0x1]; + u8 dp_ordering_1[0x1]; + u8 reserved_at_75[0xb]; u8 reserved_at_80[0x8]; u8 cs_res[0x8]; @@ -4798,35 +4847,6 @@ struct mlx5_ifc_register_loopback_control_bits { u8 reserved_at_20[0x60]; }; -struct mlx5_ifc_vport_tc_element_bits { - u8 traffic_class[0x4]; - u8 reserved_at_4[0xc]; - u8 vport_number[0x10]; -}; - -struct mlx5_ifc_vport_element_bits { - u8 reserved_at_0[0x10]; - u8 vport_number[0x10]; -}; - -enum { - TSAR_ELEMENT_TSAR_TYPE_DWRR = 0x0, - TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN = 0x1, - TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2, -}; - -enum { - TSAR_TYPE_CAP_MASK_DWRR = 1 << 0, - TSAR_TYPE_CAP_MASK_ROUND_ROBIN = 1 << 1, - TSAR_TYPE_CAP_MASK_ETS = 1 << 2, -}; - -struct mlx5_ifc_tsar_element_bits { - u8 reserved_at_0[0x8]; - u8 tsar_type[0x8]; - u8 reserved_at_10[0x10]; -}; - enum { MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_SUCCESS = 0x0, MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL = 0x1, diff --git a/include/linux/mm.h b/include/linux/mm.h index 61fff5d34ed5..b1c3db9cf355 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -31,6 +31,7 @@ #include <linux/kasan.h> #include <linux/memremap.h> #include <linux/slab.h> +#include <linux/cacheinfo.h> struct mempolicy; struct anon_vma; @@ -97,11 +98,11 @@ extern const int mmap_rnd_compat_bits_max; extern int mmap_rnd_compat_bits __read_mostly; #endif -#ifndef PHYSMEM_END +#ifndef DIRECT_MAP_PHYSMEM_END # ifdef MAX_PHYSMEM_BITS -# define PHYSMEM_END ((1ULL << MAX_PHYSMEM_BITS) - 1) +# define DIRECT_MAP_PHYSMEM_END ((1ULL << MAX_PHYSMEM_BITS) - 1) # else -# define PHYSMEM_END (((phys_addr_t)-1)&~(1ULL<<63)) +# define DIRECT_MAP_PHYSMEM_END (((phys_addr_t)-1)&~(1ULL<<63)) # endif #endif @@ -329,12 +330,14 @@ extern unsigned int kobjsize(const void *objp); #define VM_HIGH_ARCH_BIT_3 35 /* bit only usable on 64-bit architectures */ #define VM_HIGH_ARCH_BIT_4 36 /* bit only usable on 64-bit architectures */ #define VM_HIGH_ARCH_BIT_5 37 /* bit only usable on 64-bit architectures */ +#define VM_HIGH_ARCH_BIT_6 38 /* bit only usable on 64-bit architectures */ #define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0) #define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1) #define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2) #define VM_HIGH_ARCH_3 BIT(VM_HIGH_ARCH_BIT_3) #define VM_HIGH_ARCH_4 BIT(VM_HIGH_ARCH_BIT_4) #define VM_HIGH_ARCH_5 BIT(VM_HIGH_ARCH_BIT_5) +#define VM_HIGH_ARCH_6 BIT(VM_HIGH_ARCH_BIT_6) #endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */ #ifdef CONFIG_ARCH_HAS_PKEYS @@ -365,7 +368,17 @@ extern unsigned int kobjsize(const void *objp); * for more details 
on the guard size. */ # define VM_SHADOW_STACK VM_HIGH_ARCH_5 -#else +#endif + +#if defined(CONFIG_ARM64_GCS) +/* + * arm64's Guarded Control Stack implements similar functionality and + * has similar constraints to shadow stacks. + */ +# define VM_SHADOW_STACK VM_HIGH_ARCH_6 +#endif + +#ifndef VM_SHADOW_STACK # define VM_SHADOW_STACK VM_NONE #endif @@ -1286,8 +1299,6 @@ static inline struct folio *virt_to_folio(const void *x) void __folio_put(struct folio *folio); -void put_pages_list(struct list_head *pages); - void split_page(struct page *page, unsigned int order); void folio_copy(struct folio *dst, struct folio *src); int folio_mc_copy(struct folio *dst, struct folio *src); @@ -1895,7 +1906,7 @@ static inline unsigned long page_to_section(const struct page *page) * * Return: The Page Frame Number of the first page in the folio. */ -static inline unsigned long folio_pfn(struct folio *folio) +static inline unsigned long folio_pfn(const struct folio *folio) { return page_to_pfn(&folio->page); } @@ -2524,6 +2535,7 @@ long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end, struct folio **folios, unsigned int max_folios, pgoff_t *offset); +int folio_add_pins(struct folio *folio, unsigned int pins); int get_user_pages_fast(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages); @@ -2539,6 +2551,7 @@ struct kvec; struct page *get_dump_page(unsigned long addr); bool folio_mark_dirty(struct folio *folio); +bool folio_mark_dirty_lock(struct folio *folio); bool set_page_dirty(struct page *page); int set_page_dirty_lock(struct page *page); @@ -2998,7 +3011,15 @@ static inline void pagetable_pte_dtor(struct ptdesc *ptdesc) lruvec_stat_sub_folio(folio, NR_PAGETABLE); } -pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp); +pte_t *___pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp); +static inline pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, + pmd_t *pmdvalp) +{ + pte_t *pte; + + __cond_lock(RCU, pte = ___pte_offset_map(pmd, addr, pmdvalp)); + return pte; +} static inline pte_t *pte_offset_map(pmd_t *pmd, unsigned long addr) { return __pte_offset_map(pmd, addr, NULL); @@ -3011,12 +3032,16 @@ static inline pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd, { pte_t *pte; - __cond_lock(*ptlp, pte = __pte_offset_map_lock(mm, pmd, addr, ptlp)); + __cond_lock(RCU, __cond_lock(*ptlp, + pte = __pte_offset_map_lock(mm, pmd, addr, ptlp))); return pte; } -pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd, - unsigned long addr, spinlock_t **ptlp); +pte_t *pte_offset_map_ro_nolock(struct mm_struct *mm, pmd_t *pmd, + unsigned long addr, spinlock_t **ptlp); +pte_t *pte_offset_map_rw_nolock(struct mm_struct *mm, pmd_t *pmd, + unsigned long addr, pmd_t *pmdvalp, + spinlock_t **ptlp); #define pte_unmap_unlock(pte, ptl) do { \ spin_unlock(ptl); \ @@ -3100,6 +3125,7 @@ static inline bool pagetable_pmd_ctor(struct ptdesc *ptdesc) if (!pmd_ptlock_init(ptdesc)) return false; __folio_set_pgtable(folio); + ptdesc_pmd_pts_init(ptdesc); lruvec_stat_add_folio(folio, NR_PAGETABLE); return true; } @@ -3818,9 +3844,6 @@ void *sparse_buffer_alloc(unsigned long size); struct page * __populate_section_memmap(unsigned long pfn, unsigned long nr_pages, int nid, struct vmem_altmap *altmap, struct dev_pagemap *pgmap); -void pud_init(void *addr); -void pmd_init(void *addr); -void kernel_pte_init(void *addr); pgd_t *vmemmap_pgd_populate(unsigned long addr, int 
node); p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node); pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node); @@ -4079,6 +4102,37 @@ void mem_dump_obj(void *object); static inline void mem_dump_obj(void *object) {} #endif +static inline bool is_write_sealed(int seals) +{ + return seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE); +} + +/** + * is_readonly_sealed - Checks whether write-sealed but mapped read-only, + * in which case writes should be disallowing moving + * forwards. + * @seals: the seals to check + * @vm_flags: the VMA flags to check + * + * Returns whether readonly sealed, in which case writess should be disallowed + * going forward. + */ +static inline bool is_readonly_sealed(int seals, vm_flags_t vm_flags) +{ + /* + * Since an F_SEAL_[FUTURE_]WRITE sealed memfd can be mapped as + * MAP_SHARED and read-only, take care to not allow mprotect to + * revert protections on such mappings. Do this only for shared + * mappings. For private mappings, don't need to mask + * VM_MAYWRITE as we still want them to be COW-writable. + */ + if (is_write_sealed(seals) && + ((vm_flags & (VM_SHARED | VM_WRITE)) == VM_SHARED)) + return true; + + return false; +} + /** * seal_check_write - Check for F_SEAL_WRITE or F_SEAL_FUTURE_WRITE flags and * handle them. @@ -4090,24 +4144,15 @@ static inline void mem_dump_obj(void *object) {} */ static inline int seal_check_write(int seals, struct vm_area_struct *vma) { - if (seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) { - /* - * New PROT_WRITE and MAP_SHARED mmaps are not allowed when - * write seals are active. - */ - if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE)) - return -EPERM; - - /* - * Since an F_SEAL_[FUTURE_]WRITE sealed memfd can be mapped as - * MAP_SHARED and read-only, take care to not allow mprotect to - * revert protections on such mappings. Do this only for shared - * mappings. For private mappings, don't need to mask - * VM_MAYWRITE as we still want them to be COW-writable. - */ - if (vma->vm_flags & VM_SHARED) - vm_flags_clear(vma, VM_MAYWRITE); - } + if (!is_write_sealed(seals)) + return 0; + + /* + * New PROT_WRITE and MAP_SHARED mmaps are not allowed when + * write seals are active. + */ + if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE)) + return -EPERM; return 0; } @@ -4163,61 +4208,25 @@ static inline int do_mseal(unsigned long start, size_t len_in, unsigned long fla } #endif -#ifdef CONFIG_MEM_ALLOC_PROFILING -static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) -{ - int i; - struct alloc_tag *tag; - unsigned int nr_pages = 1 << new_order; - - if (!mem_alloc_profiling_enabled()) - return; - - tag = pgalloc_tag_get(&folio->page); - if (!tag) - return; - - for (i = nr_pages; i < (1 << old_order); i += nr_pages) { - union codetag_ref *ref = get_page_tag_ref(folio_page(folio, i)); - - if (ref) { - /* Set new reference to point to the original tag */ - alloc_tag_ref_set(ref, tag); - put_page_tag_ref(ref); - } - } -} - -static inline void pgalloc_tag_copy(struct folio *new, struct folio *old) -{ - struct alloc_tag *tag; - union codetag_ref *ref; - - tag = pgalloc_tag_get(&old->page); - if (!tag) - return; - - ref = get_page_tag_ref(&new->page); - if (!ref) - return; - - /* Clear the old ref to the original allocation tag. */ - clear_page_tag_ref(&old->page); - /* Decrement the counters of the tag on get_new_folio. 
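A hedged sketch (not from the patch) of how a memfd-backed mmap path could combine the refactored helpers above: seal_check_write() now only rejects new shared writable mappings, while the VM_MAYWRITE masking for read-only shared mappings moves to the new is_readonly_sealed() check. The function below is hypothetical.

	static int demo_memfd_mmap_check(struct vm_area_struct *vma, int seals)
	{
		int ret = seal_check_write(seals, vma);

		if (ret)
			return ret;	/* shared + writable mapping of a write-sealed memfd */

		/*
		 * Read-only MAP_SHARED mapping of a write-sealed memfd: drop
		 * VM_MAYWRITE so a later mprotect(PROT_WRITE) cannot undo it.
		 */
		if (is_readonly_sealed(seals, vma->vm_flags))
			vm_flags_clear(vma, VM_MAYWRITE);

		return 0;
	}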
*/ - alloc_tag_sub(ref, folio_nr_pages(new)); - - __alloc_tag_ref_set(ref, tag); - - put_page_tag_ref(ref); -} -#else /* !CONFIG_MEM_ALLOC_PROFILING */ -static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) +/* + * user_alloc_needs_zeroing checks if a user folio from page allocator needs to + * be zeroed or not. + */ +static inline bool user_alloc_needs_zeroing(void) { + /* + * for user folios, arch with cache aliasing requires cache flush and + * arc changes folio->flags to make icache coherent with dcache, so + * always return false to make caller use + * clear_user_page()/clear_user_highpage(). + */ + return cpu_dcache_is_aliasing() || cpu_icache_is_aliasing() || + !static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, + &init_on_alloc); } -static inline void pgalloc_tag_copy(struct folio *new, struct folio *old) -{ -} -#endif /* CONFIG_MEM_ALLOC_PROFILING */ +int arch_get_shadow_stack_status(struct task_struct *t, unsigned long __user *status); +int arch_set_shadow_stack_status(struct task_struct *t, unsigned long status); +int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status); #endif /* _LINUX_MM_H */ diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index f4fe593c1400..1b6a917fffa4 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -155,6 +155,11 @@ static inline int folio_lru_refs(struct folio *folio) return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + workingset; } +static inline void folio_clear_lru_refs(struct folio *folio) +{ + set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 0); +} + static inline int folio_lru_gen(struct folio *folio) { unsigned long flags = READ_ONCE(folio->flags); @@ -222,6 +227,7 @@ static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, { unsigned long seq; unsigned long flags; + unsigned long mask; int gen = folio_lru_gen(folio); int type = folio_is_file_lru(folio); int zone = folio_zonenum(folio); @@ -257,7 +263,14 @@ static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, gen = lru_gen_from_seq(seq); flags = (gen + 1UL) << LRU_GEN_PGOFF; /* see the comment on MIN_NR_GENS about PG_active */ - set_mask_bits(&folio->flags, LRU_GEN_MASK | BIT(PG_active), flags); + mask = LRU_GEN_MASK; + /* + * Don't clear PG_workingset here because it can affect PSI accounting + * if the activation is due to workingset refault. + */ + if (folio_test_active(folio)) + mask |= LRU_REFS_MASK | BIT(PG_referenced) | BIT(PG_active); + set_mask_bits(&folio->flags, mask, flags); lru_gen_update_size(lruvec, folio, -1, gen); /* for folio_rotate_reclaimable() */ @@ -291,6 +304,12 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, return true; } +static inline void folio_migrate_refs(struct folio *new, struct folio *old) +{ + unsigned long refs = READ_ONCE(old->flags) & LRU_REFS_MASK; + + set_mask_bits(&new->flags, LRU_REFS_MASK, refs); +} #else /* !CONFIG_LRU_GEN */ static inline bool lru_gen_enabled(void) @@ -313,6 +332,10 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, return false; } +static inline void folio_migrate_refs(struct folio *new, struct folio *old) +{ + +} #endif /* CONFIG_LRU_GEN */ static __always_inline @@ -521,7 +544,7 @@ static inline pte_marker copy_pte_marker( { pte_marker srcm = pte_marker_get(entry); /* Always copy error entries. 
*/ - pte_marker dstm = srcm & PTE_MARKER_POISONED; + pte_marker dstm = srcm & (PTE_MARKER_POISONED | PTE_MARKER_GUARD); /* Only copy PTE markers if UFFD register matches. */ if ((srcm & PTE_MARKER_UFFD_WP) && userfaultfd_wp(dst_vma)) diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 6e3bdf8e38bc..332cee285662 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -445,6 +445,7 @@ FOLIO_MATCH(compound_head, _head_2a); * @pt_index: Used for s390 gmap. * @pt_mm: Used for x86 pgds. * @pt_frag_refcount: For fragmented page table tracking. Powerpc only. + * @pt_share_count: Used for HugeTLB PMD page table share count. * @_pt_pad_2: Padding to ensure proper alignment. * @ptl: Lock for the page table. * @__page_type: Same as page->page_type. Unused for page tables. @@ -471,6 +472,9 @@ struct ptdesc { pgoff_t pt_index; struct mm_struct *pt_mm; atomic_t pt_frag_refcount; +#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING + atomic_t pt_share_count; +#endif }; union { @@ -516,14 +520,37 @@ static_assert(sizeof(struct ptdesc) <= sizeof(struct page)); const struct page *: (const struct ptdesc *)(p), \ struct page *: (struct ptdesc *)(p))) +#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING +static inline void ptdesc_pmd_pts_init(struct ptdesc *ptdesc) +{ + atomic_set(&ptdesc->pt_share_count, 0); +} + +static inline void ptdesc_pmd_pts_inc(struct ptdesc *ptdesc) +{ + atomic_inc(&ptdesc->pt_share_count); +} + +static inline void ptdesc_pmd_pts_dec(struct ptdesc *ptdesc) +{ + atomic_dec(&ptdesc->pt_share_count); +} + +static inline int ptdesc_pmd_pts_count(struct ptdesc *ptdesc) +{ + return atomic_read(&ptdesc->pt_share_count); +} +#else +static inline void ptdesc_pmd_pts_init(struct ptdesc *ptdesc) +{ +} +#endif + /* * Used for sizing the vmemmap region on some architectures */ #define STRUCT_PAGE_MAX_SHIFT (order_base_2(sizeof(struct page))) -#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK) -#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE) - /* * page_private can be used on tail pages. However, PagePrivate is only * checked by the VM on the head page. So page_private on the tail pages @@ -542,21 +569,6 @@ static inline void *folio_get_private(struct folio *folio) return folio->private; } -struct page_frag_cache { - void * va; -#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) - __u16 offset; - __u16 size; -#else - __u32 offset; -#endif - /* we maintain a pagecount bias, so that we dont dirty cache line - * containing page->_refcount every time we allocate a fragment. - */ - unsigned int pagecnt_bias; - bool pfmemalloc; -}; - typedef unsigned long vm_flags_t; /* @@ -782,6 +794,7 @@ struct vm_area_struct { struct mm_cid { u64 time; int cid; + int recent_cid; }; #endif @@ -852,6 +865,27 @@ struct mm_struct { * When the next mm_cid scan is due (in jiffies). */ unsigned long mm_cid_next_scan; + /** + * @nr_cpus_allowed: Number of CPUs allowed for mm. + * + * Number of CPUs allowed in the union of all mm's + * threads allowed CPUs. + */ + unsigned int nr_cpus_allowed; + /** + * @max_nr_cid: Maximum number of concurrency IDs allocated. + * + * Track the highest number of concurrency IDs allocated for the + * mm. + */ + atomic_t max_nr_cid; + /** + * @cpus_allowed_lock: Lock protecting mm cpus_allowed. + * + * Provide mutual exclusion for mm cpus_allowed and + * mm nr_cpus_allowed updates. 
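To make the intent of the new pt_share_count concrete, a small sketch follows; it is illustrative only (the real callers live in the hugetlb PMD-sharing code and are not part of this hunk) and assumes CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING=y, where the inc/dec/count helpers exist.

	static void demo_pmd_table_share(struct ptdesc *ptdesc)
	{
		/* another mapping now shares this PMD page table */
		ptdesc_pmd_pts_inc(ptdesc);
	}

	static bool demo_pmd_table_unshare(struct ptdesc *ptdesc)
	{
		ptdesc_pmd_pts_dec(ptdesc);
		/* the caller may only free the table once no sharers remain */
		return ptdesc_pmd_pts_count(ptdesc) == 0;
	}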
+ */ + raw_spinlock_t cpus_allowed_lock; #endif #ifdef CONFIG_MMU atomic_long_t pgtables_bytes; /* size of all page tables */ @@ -1170,18 +1204,30 @@ static inline int mm_cid_clear_lazy_put(int cid) return cid & ~MM_CID_LAZY_PUT; } +/* + * mm_cpus_allowed: Union of all mm's threads allowed CPUs. + */ +static inline cpumask_t *mm_cpus_allowed(struct mm_struct *mm) +{ + unsigned long bitmap = (unsigned long)mm; + + bitmap += offsetof(struct mm_struct, cpu_bitmap); + /* Skip cpu_bitmap */ + bitmap += cpumask_size(); + return (struct cpumask *)bitmap; +} + /* Accessor for struct mm_struct's cidmask. */ static inline cpumask_t *mm_cidmask(struct mm_struct *mm) { - unsigned long cid_bitmap = (unsigned long)mm; + unsigned long cid_bitmap = (unsigned long)mm_cpus_allowed(mm); - cid_bitmap += offsetof(struct mm_struct, cpu_bitmap); - /* Skip cpu_bitmap */ + /* Skip mm_cpus_allowed */ cid_bitmap += cpumask_size(); return (struct cpumask *)cid_bitmap; } -static inline void mm_init_cid(struct mm_struct *mm) +static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p) { int i; @@ -1189,17 +1235,22 @@ static inline void mm_init_cid(struct mm_struct *mm) struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, i); pcpu_cid->cid = MM_CID_UNSET; + pcpu_cid->recent_cid = MM_CID_UNSET; pcpu_cid->time = 0; } + mm->nr_cpus_allowed = p->nr_cpus_allowed; + atomic_set(&mm->max_nr_cid, 0); + raw_spin_lock_init(&mm->cpus_allowed_lock); + cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask); cpumask_clear(mm_cidmask(mm)); } -static inline int mm_alloc_cid_noprof(struct mm_struct *mm) +static inline int mm_alloc_cid_noprof(struct mm_struct *mm, struct task_struct *p) { mm->pcpu_cid = alloc_percpu_noprof(struct mm_cid); if (!mm->pcpu_cid) return -ENOMEM; - mm_init_cid(mm); + mm_init_cid(mm, p); return 0; } #define mm_alloc_cid(...) alloc_hooks(mm_alloc_cid_noprof(__VA_ARGS__)) @@ -1212,16 +1263,31 @@ static inline void mm_destroy_cid(struct mm_struct *mm) static inline unsigned int mm_cid_size(void) { - return cpumask_size(); + return 2 * cpumask_size(); /* mm_cpus_allowed(), mm_cidmask(). */ +} + +static inline void mm_set_cpus_allowed(struct mm_struct *mm, const struct cpumask *cpumask) +{ + struct cpumask *mm_allowed = mm_cpus_allowed(mm); + + if (!mm) + return; + /* The mm_cpus_allowed is the union of each thread allowed CPUs masks. 
*/ + raw_spin_lock(&mm->cpus_allowed_lock); + cpumask_or(mm_allowed, mm_allowed, cpumask); + WRITE_ONCE(mm->nr_cpus_allowed, cpumask_weight(mm_allowed)); + raw_spin_unlock(&mm->cpus_allowed_lock); } #else /* CONFIG_SCHED_MM_CID */ -static inline void mm_init_cid(struct mm_struct *mm) { } -static inline int mm_alloc_cid(struct mm_struct *mm) { return 0; } +static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p) { } +static inline int mm_alloc_cid(struct mm_struct *mm, struct task_struct *p) { return 0; } static inline void mm_destroy_cid(struct mm_struct *mm) { } + static inline unsigned int mm_cid_size(void) { return 0; } +static inline void mm_set_cpus_allowed(struct mm_struct *mm, const struct cpumask *cpumask) { } #endif /* CONFIG_SCHED_MM_CID */ struct mmu_gather; @@ -1499,4 +1565,88 @@ enum { /* See also internal only FOLL flags in mm/internal.h */ }; +/* mm flags */ + +/* + * The first two bits represent core dump modes for set-user-ID, + * the modes are SUID_DUMP_* defined in linux/sched/coredump.h + */ +#define MMF_DUMPABLE_BITS 2 +#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1) +/* coredump filter bits */ +#define MMF_DUMP_ANON_PRIVATE 2 +#define MMF_DUMP_ANON_SHARED 3 +#define MMF_DUMP_MAPPED_PRIVATE 4 +#define MMF_DUMP_MAPPED_SHARED 5 +#define MMF_DUMP_ELF_HEADERS 6 +#define MMF_DUMP_HUGETLB_PRIVATE 7 +#define MMF_DUMP_HUGETLB_SHARED 8 +#define MMF_DUMP_DAX_PRIVATE 9 +#define MMF_DUMP_DAX_SHARED 10 + +#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS +#define MMF_DUMP_FILTER_BITS 9 +#define MMF_DUMP_FILTER_MASK \ + (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT) +#define MMF_DUMP_FILTER_DEFAULT \ + ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\ + (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF) + +#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS +# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS) +#else +# define MMF_DUMP_MASK_DEFAULT_ELF 0 +#endif + /* leave room for more dump flags */ +#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ +#define MMF_VM_HUGEPAGE 17 /* set when mm is available for khugepaged */ + +/* + * This one-shot flag is dropped due to necessity of changing exe once again + * on NFS restore + */ +//#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */ + +#define MMF_HAS_UPROBES 19 /* has uprobes */ +#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */ +#define MMF_OOM_SKIP 21 /* mm is of no interest for the OOM killer */ +#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */ +#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ +#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */ +#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) +#define MMF_OOM_REAP_QUEUED 25 /* mm was queued for oom_reaper */ +#define MMF_MULTIPROCESS 26 /* mm is shared between processes */ +/* + * MMF_HAS_PINNED: Whether this mm has pinned any pages. This can be either + * replaced in the future by mm.pinned_vm when it becomes stable, or grow into + * a counter on its own. We're aggresive on this bit for now: even if the + * pinned pages were unpinned later on, we'll still keep this bit set for the + * lifecycle of this mm, just for simplicity. 
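The comment above describes MMF_HAS_PINNED (defined just below) as a one-shot bit that is never cleared for the lifetime of the mm. A minimal sketch of how a pinning path might set it, assuming nothing beyond test_bit()/set_bit() on mm->flags; the helper name is illustrative and not part of this patch:

static void example_mark_mm_has_pinned(struct mm_struct *mm)
{
	/* Test first to avoid dirtying the cacheline when already set;
	 * the bit is intentionally sticky for the mm's lifetime.
	 */
	if (!test_bit(MMF_HAS_PINNED, &mm->flags))
		set_bit(MMF_HAS_PINNED, &mm->flags);
}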
+ */ +#define MMF_HAS_PINNED 27 /* FOLL_PIN has run, never cleared */ + +#define MMF_HAS_MDWE 28 +#define MMF_HAS_MDWE_MASK (1 << MMF_HAS_MDWE) + + +#define MMF_HAS_MDWE_NO_INHERIT 29 + +#define MMF_VM_MERGE_ANY 30 +#define MMF_VM_MERGE_ANY_MASK (1 << MMF_VM_MERGE_ANY) + +#define MMF_TOPDOWN 31 /* mm searches top down by default */ +#define MMF_TOPDOWN_MASK (1 << MMF_TOPDOWN) + +#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ + MMF_DISABLE_THP_MASK | MMF_HAS_MDWE_MASK |\ + MMF_VM_MERGE_ANY_MASK | MMF_TOPDOWN_MASK) + +static inline unsigned long mmf_init_flags(unsigned long flags) +{ + if (flags & (1UL << MMF_HAS_MDWE_NO_INHERIT)) + flags &= ~((1UL << MMF_HAS_MDWE) | + (1UL << MMF_HAS_MDWE_NO_INHERIT)); + return flags & MMF_INIT_MASK; +} + #endif /* _LINUX_MM_TYPES_H */ diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h index bff5706b76e1..a82aa80c0ba4 100644 --- a/include/linux/mm_types_task.h +++ b/include/linux/mm_types_task.h @@ -8,6 +8,7 @@ * (These are defined separately to decouple sched.h from mm_types.h as much as possible.) */ +#include <linux/align.h> #include <linux/types.h> #include <asm/page.h> @@ -43,6 +44,26 @@ struct page_frag { #endif }; +#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK) +#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE) +struct page_frag_cache { + /* encoded_page consists of the virtual address, pfmemalloc bit and + * order of a page. + */ + unsigned long encoded_page; + + /* we maintain a pagecount bias, so that we dont dirty cache line + * containing page->_refcount every time we allocate a fragment. + */ +#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) && (BITS_PER_LONG <= 32) + __u16 offset; + __u16 pagecnt_bias; +#else + __u32 offset; + __u32 pagecnt_bias; +#endif +}; + /* Track pages that require TLB flushes */ struct tlbflush_unmap_batch { #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index f34407cc2788..526fce581657 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -35,7 +35,7 @@ struct mmc_csd { unsigned int wp_grp_size; unsigned int read_blkbits; unsigned int write_blkbits; - unsigned int capacity; + sector_t capacity; unsigned int read_partial:1, read_misalign:1, write_partial:1, @@ -188,6 +188,12 @@ struct sd_switch_caps { #define SD_MAX_CURRENT_400 (1 << SD_SET_CURRENT_LIMIT_400) #define SD_MAX_CURRENT_600 (1 << SD_SET_CURRENT_LIMIT_600) #define SD_MAX_CURRENT_800 (1 << SD_SET_CURRENT_LIMIT_800) + +#define SD4_SET_POWER_LIMIT_0_72W 0 +#define SD4_SET_POWER_LIMIT_1_44W 1 +#define SD4_SET_POWER_LIMIT_2_16W 2 +#define SD4_SET_POWER_LIMIT_2_88W 3 +#define SD4_SET_POWER_LIMIT_1_80W 4 }; struct sd_ext_reg { @@ -209,6 +215,34 @@ struct sd_ext_reg { #define SD_EXT_PERF_CMD_QUEUE (1<<4) }; +struct sd_uhs2_config { + u32 node_id; + + u32 n_fcu; + u32 maxblk_len; + u8 n_lanes; + u8 dadr_len; + u8 app_type; + u8 phy_minor_rev; + u8 phy_major_rev; + u8 can_hibernate; + u8 n_lss_sync; + u8 n_lss_dir; + u8 link_minor_rev; + u8 link_major_rev; + u8 dev_type; + u8 n_data_gap; + + u32 n_fcu_set; + u32 maxblk_len_set; + u8 n_lanes_set; + u8 speed_range_set; + u8 n_lss_sync_set; + u8 n_lss_dir_set; + u8 n_data_gap_set; + u8 max_retry_set; +}; + struct sdio_cccr { unsigned int sdio_vsn; unsigned int sd_vsn; @@ -294,6 +328,7 @@ struct mmc_card { #define MMC_QUIRK_BROKEN_SD_DISCARD (1<<14) /* Disable broken SD discard support */ #define MMC_QUIRK_BROKEN_SD_CACHE (1<<15) /* Disable broken SD cache support 
*/ #define MMC_QUIRK_BROKEN_CACHE_FLUSH (1<<16) /* Don't flush cache until the write has occurred */ +#define MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY (1<<17) /* Disable broken SD poweroff notify support */ bool written_flag; /* Indicates eMMC has been written since power on */ bool reenable_cmdq; /* Re-enable Command Queue */ @@ -319,6 +354,8 @@ struct mmc_card { struct sd_ext_reg ext_power; /* SD extension reg for PM */ struct sd_ext_reg ext_perf; /* SD extension reg for PERF */ + struct sd_uhs2_config uhs2_config; /* SD UHS-II config */ + unsigned int sdio_funcs; /* number of SDIO functions */ atomic_t sdio_funcs_probed; /* number of probed SDIO funcs */ struct sdio_cccr cccr; /* common card info */ diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index f0ac2e469b32..56972bd78462 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -11,6 +11,20 @@ struct mmc_data; struct mmc_request; +#define UHS2_MAX_PAYLOAD_LEN 2 +#define UHS2_MAX_RESP_LEN 20 + +struct uhs2_command { + u16 header; + u16 arg; + __be32 payload[UHS2_MAX_PAYLOAD_LEN]; + u8 payload_len; + u8 packet_len; + u8 tmode_half_duplex; + u8 uhs2_resp[UHS2_MAX_RESP_LEN]; /* UHS2 native cmd resp */ + u8 uhs2_resp_len; /* UHS2 native cmd resp len */ +}; + struct mmc_command { u32 opcode; u32 arg; @@ -96,6 +110,12 @@ struct mmc_command { unsigned int busy_timeout; /* busy detect timeout in ms */ struct mmc_data *data; /* data segment associated with cmd */ struct mmc_request *mrq; /* associated request */ + + struct uhs2_command *uhs2_cmd; /* UHS2 command */ + + /* for SDUC */ + bool has_ext_addr; + u8 ext_addr; }; struct mmc_data { @@ -154,6 +174,7 @@ struct mmc_request { const struct bio_crypt_ctx *crypto_ctx; int crypto_key_slot; #endif + struct uhs2_command uhs2_cmd; }; struct mmc_card; diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 8fc2b328ec4d..f166d6611ddb 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -17,6 +17,7 @@ #include <linux/mmc/pm.h> #include <linux/dma-direction.h> #include <linux/blk-crypto-profile.h> +#include <linux/mmc/sd_uhs2.h> struct mmc_ios { unsigned int clock; /* clock rate */ @@ -64,6 +65,10 @@ struct mmc_ios { #define MMC_TIMING_MMC_HS400 10 #define MMC_TIMING_SD_EXP 11 #define MMC_TIMING_SD_EXP_1_2V 12 +#define MMC_TIMING_UHS2_SPEED_A 13 +#define MMC_TIMING_UHS2_SPEED_A_HD 14 +#define MMC_TIMING_UHS2_SPEED_B 15 +#define MMC_TIMING_UHS2_SPEED_B_HD 16 unsigned char signal_voltage; /* signalling voltage (1.8V or 3.3V) */ @@ -71,6 +76,9 @@ struct mmc_ios { #define MMC_SIGNAL_VOLTAGE_180 1 #define MMC_SIGNAL_VOLTAGE_120 2 + unsigned char vqmmc2_voltage; +#define MMC_VQMMC2_VOLTAGE_180 0 + unsigned char drv_type; /* driver type (A, B, C, D) */ #define MMC_SET_DRIVER_TYPE_B 0 @@ -92,6 +100,43 @@ struct mmc_clk_phase_map { struct mmc_clk_phase phase[MMC_NUM_CLK_PHASES]; }; +struct sd_uhs2_caps { + u32 dap; + u32 gap; + u32 group_desc; + u32 maxblk_len; + u32 n_fcu; + u8 n_lanes; + u8 addr64; + u8 card_type; + u8 phy_rev; + u8 speed_range; + u8 n_lss_sync; + u8 n_lss_dir; + u8 link_rev; + u8 host_type; + u8 n_data_gap; + + u32 maxblk_len_set; + u32 n_fcu_set; + u8 n_lanes_set; + u8 n_lss_sync_set; + u8 n_lss_dir_set; + u8 n_data_gap_set; + u8 max_retry_set; +}; + +enum sd_uhs2_operation { + UHS2_PHY_INIT = 0, + UHS2_SET_CONFIG, + UHS2_ENABLE_INT, + UHS2_DISABLE_INT, + UHS2_ENABLE_CLK, + UHS2_DISABLE_CLK, + UHS2_CHECK_DORMANT, + UHS2_SET_IOS, +}; + struct mmc_host; enum mmc_err_stat { @@ -219,6 +264,14 @@ struct mmc_host_ops { /* 
Initialize an SD express card, mandatory for MMC_CAP2_SD_EXP. */ int (*init_sd_express)(struct mmc_host *host, struct mmc_ios *ios); + + /* + * The uhs2_control callback is used to execute SD UHS-II specific + * operations. It's mandatory to implement for hosts that supports the + * SD UHS-II interface (MMC_CAP2_SD_UHS2). Expected return values are a + * negative errno in case of a failure or zero for success. + */ + int (*uhs2_control)(struct mmc_host *host, enum sd_uhs2_operation op); }; struct mmc_cqe_ops { @@ -288,6 +341,7 @@ struct mmc_pwrseq; struct mmc_supply { struct regulator *vmmc; /* Card power supply */ struct regulator *vqmmc; /* Optional Vccq supply */ + struct regulator *vqmmc2; /* Optional supply for phy */ }; struct mmc_ctx { @@ -379,6 +433,7 @@ struct mmc_host { MMC_CAP2_HS200_1_2V_SDR) #define MMC_CAP2_SD_EXP (1 << 7) /* SD express via PCIe */ #define MMC_CAP2_SD_EXP_1_2V (1 << 8) /* SD express 1.2V */ +#define MMC_CAP2_SD_UHS2 (1 << 9) /* SD UHS-II support */ #define MMC_CAP2_CD_ACTIVE_HIGH (1 << 10) /* Card-detect signal active high */ #define MMC_CAP2_RO_ACTIVE_HIGH (1 << 11) /* Write-protect signal active high */ #define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */ @@ -405,6 +460,10 @@ struct mmc_host { #endif #define MMC_CAP2_ALT_GPT_TEGRA (1 << 28) /* Host with eMMC that has GPT entry at a non-standard location */ + bool uhs2_sd_tran; /* UHS-II flag for SD_TRAN state */ + bool uhs2_app_cmd; /* UHS-II flag for APP command */ + struct sd_uhs2_caps uhs2_caps; /* Host UHS-II capabilities */ + int fixed_drv_type; /* fixed driver type for non-removable media */ mmc_pm_flag_t pm_caps; /* supported pm features */ @@ -567,6 +626,7 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc, struct regulator *supply, unsigned short vdd_bit); int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios); +int mmc_regulator_set_vqmmc2(struct mmc_host *mmc, struct mmc_ios *ios); #else static inline int mmc_regulator_set_ocr(struct mmc_host *mmc, struct regulator *supply, @@ -580,6 +640,12 @@ static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc, { return -EINVAL; } + +static inline int mmc_regulator_set_vqmmc2(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + return -EINVAL; +} #endif int mmc_regulator_get_supply(struct mmc_host *mmc); @@ -615,6 +681,14 @@ static inline int mmc_card_uhs(struct mmc_card *card) card->host->ios.timing <= MMC_TIMING_UHS_DDR50; } +static inline bool mmc_card_uhs2(struct mmc_host *host) +{ + return host->ios.timing == MMC_TIMING_UHS2_SPEED_A || + host->ios.timing == MMC_TIMING_UHS2_SPEED_A_HD || + host->ios.timing == MMC_TIMING_UHS2_SPEED_B || + host->ios.timing == MMC_TIMING_UHS2_SPEED_B_HD; +} + void mmc_retune_timer_stop(struct mmc_host *host); static inline void mmc_retune_needed(struct mmc_host *host) @@ -649,6 +723,12 @@ static inline void mmc_debugfs_err_stats_inc(struct mmc_host *host, host->err_stats[stat] += 1; } +static inline int mmc_card_uhs2_hd_mode(struct mmc_host *host) +{ + return host->ios.timing == MMC_TIMING_UHS2_SPEED_A_HD || + host->ios.timing == MMC_TIMING_UHS2_SPEED_B_HD; +} + int mmc_sd_switch(struct mmc_card *card, bool mode, int group, u8 value, u8 *resp); int mmc_send_status(struct mmc_card *card, u32 *status); diff --git a/include/linux/mmc/sd.h b/include/linux/mmc/sd.h index 6727576a8755..af5fc70e09a2 100644 --- a/include/linux/mmc/sd.h +++ b/include/linux/mmc/sd.h @@ -15,6 +15,9 @@ #define SD_SEND_IF_COND 8 /* bcr [11:0] See below R7 */ #define SD_SWITCH_VOLTAGE 11 /* ac R1 */ +/* Class 
2 */ +#define SD_ADDR_EXT 22 /* ac [5:0] R1 */ + /* class 10 */ #define SD_SWITCH 6 /* adtc [31:0] See below R1 */ @@ -36,6 +39,7 @@ /* OCR bit definitions */ #define SD_OCR_S18R (1 << 24) /* 1.8V switching request */ #define SD_ROCR_S18A SD_OCR_S18R /* 1.8V switching accepted by card */ +#define SD_OCR_2T (1 << 27) /* HO2T/CO2T - SDUC support */ #define SD_OCR_XPC (1 << 28) /* SDXC power control */ #define SD_OCR_CCS (1 << 30) /* Card Capacity Status */ diff --git a/include/linux/mmc/sd_uhs2.h b/include/linux/mmc/sd_uhs2.h new file mode 100644 index 000000000000..7abe9bd870c7 --- /dev/null +++ b/include/linux/mmc/sd_uhs2.h @@ -0,0 +1,240 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Header file for UHS-II packets, Host Controller registers and I/O + * accessors. + * + * Copyright (C) 2014 Intel Corp, All Rights Reserved. + */ +#ifndef LINUX_MMC_UHS2_H +#define LINUX_MMC_UHS2_H + +/* LINK Layer definition */ +/* + * UHS2 Header: + * Refer to UHS-II Addendum Version 1.02 Figure 5-2, the format of CCMD Header is described below: + * bit [3:0] : DID(Destination ID = Node ID of UHS2 card) + * bit [6:4] : TYP(Packet Type) + * 000b: CCMD(Control command packet) + * 001b: DCMD(Data command packet) + * 010b: RES(Response packet) + * 011b: DATA(Data payload packet) + * 111b: MSG(Message packet) + * Others: Reserved + * bit [7] : NP(Native Packet) + * bit [10:8] : TID(Transaction ID) + * bit [11] : Reserved + * bit [15:12]: SID(Source ID 0: Node ID of Host) + * + * Broadcast CCMD issued by Host is represented as DID=SID=0. + */ +/* + * UHS2 Argument: + * Refer to UHS-II Addendum Version 1.02 Figure 6-5, the format of CCMD Argument is described below: + * bit [3:0] : MSB of IOADR + * bit [5:4] : PLEN(Payload Length) + * 00b: 0 byte + * 01b: 4 bytes + * 10b: 8 bytes + * 11b: 16 bytes + * bit [6] : Reserved + * bit [7] : R/W(Read/Write) + * 0: Control read command + * 1: Control write command + * bit [15:8] : LSB of IOADR + * + * I/O Address specifies the address of register in UHS-II I/O space accessed by CCMD. + * The unit of I/O Address is 4 Bytes. It is transmitted in MSB first, LSB last. 
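The CCMD header and argument layouts described above map directly onto the UHS2_* constants defined below in this header. A minimal sketch of composing a native CCMD read, assuming the host is SID 0 and an 8-byte payload; the helper name is illustrative:

static void example_build_ccmd(u16 *header, u16 *arg, u16 ioadr, u8 did)
{
	/* DID in bits [3:0], NP set, TYP = CCMD; TID and SID (host = 0) left 0 */
	*header = UHS2_NATIVE_PACKET | UHS2_PACKET_TYPE_CCMD |
		  (did & UHS2_DEST_ID_MASK);
	/* Control read, 8-byte payload; IOADR MSB into bits [3:0], LSB into [15:8] */
	*arg = UHS2_NATIVE_CMD_READ | UHS2_NATIVE_CMD_PLEN_8B |
	       ((ioadr & UHS2_NATIVE_CCMD_GET_MIOADR_MASK) >> 8) |
	       ((ioadr & UHS2_NATIVE_CCMD_GET_LIOADR_MASK) <<
		UHS2_NATIVE_CCMD_LIOADR_POS);
}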
+ */ +#define UHS2_NATIVE_PACKET_POS 7 +#define UHS2_NATIVE_PACKET (1 << UHS2_NATIVE_PACKET_POS) + +#define UHS2_PACKET_TYPE_POS 4 +#define UHS2_PACKET_TYPE_CCMD (0 << UHS2_PACKET_TYPE_POS) +#define UHS2_PACKET_TYPE_DCMD (1 << UHS2_PACKET_TYPE_POS) +#define UHS2_PACKET_TYPE_RES (2 << UHS2_PACKET_TYPE_POS) +#define UHS2_PACKET_TYPE_DATA (3 << UHS2_PACKET_TYPE_POS) +#define UHS2_PACKET_TYPE_MSG (7 << UHS2_PACKET_TYPE_POS) + +#define UHS2_DEST_ID_MASK 0x0F +#define UHS2_DEST_ID 0x1 + +#define UHS2_SRC_ID_POS 12 +#define UHS2_SRC_ID_MASK 0xF000 + +#define UHS2_TRANS_ID_POS 8 +#define UHS2_TRANS_ID_MASK 0x0700 + +/* UHS2 MSG */ +#define UHS2_MSG_CTG_POS 5 +#define UHS2_MSG_CTG_LMSG 0x00 +#define UHS2_MSG_CTG_INT 0x60 +#define UHS2_MSG_CTG_AMSG 0x80 + +#define UHS2_MSG_CTG_FCREQ 0x00 +#define UHS2_MSG_CTG_FCRDY 0x01 +#define UHS2_MSG_CTG_STAT 0x02 + +#define UHS2_MSG_CODE_POS 8 +#define UHS2_MSG_CODE_FC_UNRECOVER_ERR 0x8 +#define UHS2_MSG_CODE_STAT_UNRECOVER_ERR 0x8 +#define UHS2_MSG_CODE_STAT_RECOVER_ERR 0x1 + +/* TRANS Layer definition */ + +/* Native packets*/ +#define UHS2_NATIVE_CMD_RW_POS 7 +#define UHS2_NATIVE_CMD_WRITE (1 << UHS2_NATIVE_CMD_RW_POS) +#define UHS2_NATIVE_CMD_READ (0 << UHS2_NATIVE_CMD_RW_POS) + +#define UHS2_NATIVE_CMD_PLEN_POS 4 +#define UHS2_NATIVE_CMD_PLEN_4B (1 << UHS2_NATIVE_CMD_PLEN_POS) +#define UHS2_NATIVE_CMD_PLEN_8B (2 << UHS2_NATIVE_CMD_PLEN_POS) +#define UHS2_NATIVE_CMD_PLEN_16B (3 << UHS2_NATIVE_CMD_PLEN_POS) + +#define UHS2_NATIVE_CCMD_GET_MIOADR_MASK 0xF00 +#define UHS2_NATIVE_CCMD_MIOADR_MASK 0x0F + +#define UHS2_NATIVE_CCMD_LIOADR_POS 8 +#define UHS2_NATIVE_CCMD_GET_LIOADR_MASK 0x0FF + +#define UHS2_CCMD_DEV_INIT_COMPLETE_FLAG BIT(11) +#define UHS2_DEV_INIT_PAYLOAD_LEN 1 +#define UHS2_DEV_INIT_RESP_LEN 6 +#define UHS2_DEV_ENUM_PAYLOAD_LEN 1 +#define UHS2_DEV_ENUM_RESP_LEN 8 +#define UHS2_CFG_WRITE_PAYLOAD_LEN 2 +#define UHS2_CFG_WRITE_PHY_SET_RESP_LEN 4 +#define UHS2_CFG_WRITE_GENERIC_SET_RESP_LEN 5 +#define UHS2_GO_DORMANT_PAYLOAD_LEN 1 + +/* + * UHS2 Argument: + * Refer to UHS-II Addendum Version 1.02 Figure 6-8, the format of DCMD Argument is described below: + * bit [3:0] : Reserved + * bit [6:3] : TMODE(Transfer Mode) + * bit 3: DAM(Data Access Mode) + * bit 4: TLUM(TLEN Unit Mode) + * bit 5: LM(Length Mode) + * bit 6: DM(Duplex Mode) + * bit [7] : R/W(Read/Write) + * 0: Control read command + * 1: Control write command + * bit [15:8] : Reserved + * + * I/O Address specifies the address of register in UHS-II I/O space accessed by CCMD. + * The unit of I/O Address is 4 Bytes. It is transmitted in MSB first, LSB last. 
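The DCMD TMODE bits documented above correspond to the UHS2_DCMD_* flags defined just below. A minimal, purely illustrative sketch of building a DCMD argument for 2-lane half-duplex with an explicit byte-unit TLEN:

static u16 example_dcmd_arg(bool write)
{
	/* DM = 2L half-duplex, LM = TLEN present, TLUM = TLEN counts bytes */
	u16 arg = UHS2_DCMD_2L_HD_MODE | UHS2_DCMD_LM_TLEN_EXIST |
		  UHS2_DCMD_TLUM_BYTE_MODE;

	if (write)
		arg |= UHS2_NATIVE_CMD_WRITE;	/* bit [7]: R/W */
	return arg;
}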
+ */ +#define UHS2_DCMD_DM_POS 6 +#define UHS2_DCMD_2L_HD_MODE (1 << UHS2_DCMD_DM_POS) +#define UHS2_DCMD_LM_POS 5 +#define UHS2_DCMD_LM_TLEN_EXIST (1 << UHS2_DCMD_LM_POS) +#define UHS2_DCMD_TLUM_POS 4 +#define UHS2_DCMD_TLUM_BYTE_MODE (1 << UHS2_DCMD_TLUM_POS) +#define UHS2_NATIVE_DCMD_DAM_POS 3 +#define UHS2_NATIVE_DCMD_DAM_IO (1 << UHS2_NATIVE_DCMD_DAM_POS) + +#define UHS2_RES_NACK_POS 7 +#define UHS2_RES_NACK_MASK (0x1 << UHS2_RES_NACK_POS) + +#define UHS2_RES_ECODE_POS 4 +#define UHS2_RES_ECODE_MASK 0x7 +#define UHS2_RES_ECODE_COND 1 +#define UHS2_RES_ECODE_ARG 2 +#define UHS2_RES_ECODE_GEN 3 + +/* IOADR of device registers */ +#define UHS2_IOADR_GENERIC_CAPS 0x00 +#define UHS2_IOADR_PHY_CAPS 0x02 +#define UHS2_IOADR_LINK_CAPS 0x04 +#define UHS2_IOADR_RSV_CAPS 0x06 +#define UHS2_IOADR_GENERIC_SETTINGS 0x08 +#define UHS2_IOADR_PHY_SETTINGS 0x0A +#define UHS2_IOADR_LINK_SETTINGS 0x0C +#define UHS2_IOADR_PRESET 0x40 + +/* SD application packets */ +#define UHS2_SD_CMD_INDEX_POS 8 + +#define UHS2_SD_CMD_APP_POS 14 +#define UHS2_SD_CMD_APP (1 << UHS2_SD_CMD_APP_POS) + +/* UHS-II Device Registers */ +#define UHS2_DEV_CONFIG_REG 0x000 + +/* General Caps and Settings registers */ +#define UHS2_DEV_CONFIG_GEN_CAPS (UHS2_DEV_CONFIG_REG + 0x000) +#define UHS2_DEV_CONFIG_N_LANES_POS 8 +#define UHS2_DEV_CONFIG_N_LANES_MASK 0x3F +#define UHS2_DEV_CONFIG_2L_HD_FD 0x1 +#define UHS2_DEV_CONFIG_2D1U_FD 0x2 +#define UHS2_DEV_CONFIG_1D2U_FD 0x4 +#define UHS2_DEV_CONFIG_2D2U_FD 0x8 +#define UHS2_DEV_CONFIG_DADR_POS 14 +#define UHS2_DEV_CONFIG_DADR_MASK 0x1 +#define UHS2_DEV_CONFIG_APP_POS 16 +#define UHS2_DEV_CONFIG_APP_MASK 0xFF +#define UHS2_DEV_CONFIG_APP_SD_MEM 0x1 + +#define UHS2_DEV_CONFIG_GEN_SET (UHS2_DEV_CONFIG_REG + 0x008) +#define UHS2_DEV_CONFIG_GEN_SET_N_LANES_POS 8 +#define UHS2_DEV_CONFIG_GEN_SET_2L_FD_HD 0x0 +#define UHS2_DEV_CONFIG_GEN_SET_2D1U_FD 0x2 +#define UHS2_DEV_CONFIG_GEN_SET_1D2U_FD 0x3 +#define UHS2_DEV_CONFIG_GEN_SET_2D2U_FD 0x4 +#define UHS2_DEV_CONFIG_GEN_SET_CFG_COMPLETE BIT(31) + +/* PHY Caps and Settings registers */ +#define UHS2_DEV_CONFIG_PHY_CAPS (UHS2_DEV_CONFIG_REG + 0x002) +#define UHS2_DEV_CONFIG_PHY_MINOR_MASK 0xF +#define UHS2_DEV_CONFIG_PHY_MAJOR_POS 4 +#define UHS2_DEV_CONFIG_PHY_MAJOR_MASK 0x3 +#define UHS2_DEV_CONFIG_CAN_HIBER_POS 15 +#define UHS2_DEV_CONFIG_CAN_HIBER_MASK 0x1 +#define UHS2_DEV_CONFIG_PHY_CAPS1 (UHS2_DEV_CONFIG_REG + 0x003) +#define UHS2_DEV_CONFIG_N_LSS_SYN_MASK 0xF +#define UHS2_DEV_CONFIG_N_LSS_DIR_POS 4 +#define UHS2_DEV_CONFIG_N_LSS_DIR_MASK 0xF + +#define UHS2_DEV_CONFIG_PHY_SET (UHS2_DEV_CONFIG_REG + 0x00A) +#define UHS2_DEV_CONFIG_PHY_SET_SPEED_POS 6 +#define UHS2_DEV_CONFIG_PHY_SET_SPEED_A 0x0 +#define UHS2_DEV_CONFIG_PHY_SET_SPEED_B 0x1 + +/* LINK-TRAN Caps and Settings registers */ +#define UHS2_DEV_CONFIG_LINK_TRAN_CAPS (UHS2_DEV_CONFIG_REG + 0x004) +#define UHS2_DEV_CONFIG_LT_MINOR_MASK 0xF +#define UHS2_DEV_CONFIG_LT_MAJOR_POS 4 +#define UHS2_DEV_CONFIG_LT_MAJOR_MASK 0x3 +#define UHS2_DEV_CONFIG_N_FCU_POS 8 +#define UHS2_DEV_CONFIG_N_FCU_MASK 0xFF +#define UHS2_DEV_CONFIG_DEV_TYPE_POS 16 +#define UHS2_DEV_CONFIG_DEV_TYPE_MASK 0x7 +#define UHS2_DEV_CONFIG_MAX_BLK_LEN_POS 20 +#define UHS2_DEV_CONFIG_MAX_BLK_LEN_MASK 0xFFF +#define UHS2_DEV_CONFIG_LINK_TRAN_CAPS1 (UHS2_DEV_CONFIG_REG + 0x005) +#define UHS2_DEV_CONFIG_N_DATA_GAP_MASK 0xFF + +#define UHS2_DEV_CONFIG_LINK_TRAN_SET (UHS2_DEV_CONFIG_REG + 0x00C) +#define UHS2_DEV_CONFIG_LT_SET_MAX_BLK_LEN 0x200 +#define UHS2_DEV_CONFIG_LT_SET_MAX_RETRY_POS 16 + +/* Preset register */ 
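The device configuration registers above follow a _POS/_MASK convention (shift first, then mask). A minimal decode sketch for a value read back from UHS2_DEV_CONFIG_GEN_CAPS; the helper name is illustrative:

static void example_decode_gen_caps(u32 caps, u8 *n_lanes, u8 *app_type)
{
	/* caps is the 32-bit word read from UHS2_DEV_CONFIG_GEN_CAPS */
	*n_lanes  = (caps >> UHS2_DEV_CONFIG_N_LANES_POS) &
		    UHS2_DEV_CONFIG_N_LANES_MASK;
	*app_type = (caps >> UHS2_DEV_CONFIG_APP_POS) &
		    UHS2_DEV_CONFIG_APP_MASK;
}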
+#define UHS2_DEV_CONFIG_PRESET (UHS2_DEV_CONFIG_REG + 0x040) + +#define UHS2_DEV_INT_REG 0x100 + +#define UHS2_DEV_STATUS_REG 0x180 + +#define UHS2_DEV_CMD_REG 0x200 +#define UHS2_DEV_CMD_FULL_RESET (UHS2_DEV_CMD_REG + 0x000) +#define UHS2_DEV_CMD_GO_DORMANT_STATE (UHS2_DEV_CMD_REG + 0x001) +#define UHS2_DEV_CMD_DORMANT_HIBER BIT(7) +#define UHS2_DEV_CMD_DEVICE_INIT (UHS2_DEV_CMD_REG + 0x002) +#define UHS2_DEV_INIT_COMPLETE_FLAG BIT(11) +#define UHS2_DEV_CMD_ENUMERATE (UHS2_DEV_CMD_REG + 0x003) +#define UHS2_DEV_CMD_TRANS_ABORT (UHS2_DEV_CMD_REG + 0x004) + +#define UHS2_RCLK_MAX 52000000 +#define UHS2_RCLK_MIN 26000000 + +#endif /* LINUX_MMC_UHS2_H */ diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index 39a7714605a7..d7cb1e5ecbda 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -46,7 +46,7 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi); } \ } while (0) #define VM_WARN_ON_ONCE_PAGE(cond, page) ({ \ - static bool __section(".data.once") __warned; \ + static bool __section(".data..once") __warned; \ int __ret_warn_once = !!(cond); \ \ if (unlikely(__ret_warn_once && !__warned)) { \ @@ -66,7 +66,7 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi); unlikely(__ret_warn); \ }) #define VM_WARN_ON_ONCE_FOLIO(cond, folio) ({ \ - static bool __section(".data.once") __warned; \ + static bool __section(".data..once") __warned; \ int __ret_warn_once = !!(cond); \ \ if (unlikely(__ret_warn_once && !__warned)) { \ @@ -77,7 +77,7 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi); unlikely(__ret_warn_once); \ }) #define VM_WARN_ON_ONCE_MM(cond, mm) ({ \ - static bool __section(".data.once") __warned; \ + static bool __section(".data..once") __warned; \ int __ret_warn_once = !!(cond); \ \ if (unlikely(__ret_warn_once && !__warned)) { \ diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index d39ebb10caeb..e2dd57ca368b 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -606,6 +606,13 @@ static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, return 0; } +static inline int mmu_notifier_clear_young(struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + return 0; +} + static inline int mmu_notifier_test_young(struct mm_struct *mm, unsigned long address) { diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 80bc5640bb60..b36124145a16 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -220,6 +220,9 @@ enum node_stat_item { PGDEMOTE_KSWAPD, PGDEMOTE_DIRECT, PGDEMOTE_KHUGEPAGED, +#ifdef CONFIG_HUGETLB_PAGE + NR_HUGETLB, +#endif NR_VM_NODE_STAT_ITEMS }; @@ -403,6 +406,8 @@ enum { NR_LRU_GEN_CAPS }; +#define LRU_REFS_FLAGS (BIT(PG_referenced) | BIT(PG_workingset)) + #define MIN_LRU_BATCH BITS_PER_LONG #define MAX_LRU_BATCH (MIN_LRU_BATCH * 64) diff --git a/include/linux/module.h b/include/linux/module.h index 88ecc5e9f523..94acbacdcdf1 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -247,7 +247,7 @@ extern void cleanup_module(void); #ifdef MODULE /* Creates an alias so file2alias.c can find device table. 
*/ #define MODULE_DEVICE_TABLE(type, name) \ -extern typeof(name) __mod_##type##__##name##_device_table \ +extern typeof(name) __mod_device_table__##type##__##name \ __attribute__ ((unused, alias(__stringify(name)))) #else /* !MODULE */ #define MODULE_DEVICE_TABLE(type, name) @@ -296,7 +296,7 @@ extern typeof(name) __mod_##type##__##name##_device_table \ * files require multiple MODULE_FIRMWARE() specifiers */ #define MODULE_FIRMWARE(_firmware) MODULE_INFO(firmware, _firmware) -#define MODULE_IMPORT_NS(ns) MODULE_INFO(import_ns, __stringify(ns)) +#define MODULE_IMPORT_NS(ns) MODULE_INFO(import_ns, ns) struct notifier_block; @@ -367,6 +367,8 @@ enum mod_mem_type { struct module_memory { void *base; + void *rw_copy; + bool is_rox; unsigned int size; #ifdef CONFIG_MODULES_TREE_LOOKUP @@ -767,6 +769,15 @@ static inline bool is_livepatch_module(struct module *mod) void set_module_sig_enforced(void); +void *__module_writable_address(struct module *mod, void *loc); + +static inline void *module_writable_address(struct module *mod, void *loc) +{ + if (!IS_ENABLED(CONFIG_ARCH_HAS_EXECMEM_ROX) || !mod) + return loc; + return __module_writable_address(mod, loc); +} + #else /* !CONFIG_MODULES... */ static inline struct module *__module_address(unsigned long addr) @@ -874,6 +885,11 @@ static inline bool module_is_coming(struct module *mod) { return false; } + +static inline void *module_writable_address(struct module *mod, void *loc) +{ + return loc; +} #endif /* CONFIG_MODULES */ #ifdef CONFIG_SYSFS diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h index e395461d59e5..1f5507ba5a12 100644 --- a/include/linux/moduleloader.h +++ b/include/linux/moduleloader.h @@ -108,6 +108,10 @@ int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod); +int module_post_finalize(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, + struct module *mod); + #ifdef CONFIG_MODULES void flush_module_init_free_work(void); #else diff --git a/include/linux/mtd/nand-ecc-mxic.h b/include/linux/mtd/nand-ecc-mxic.h index b125926e458c..0da4b2999576 100644 --- a/include/linux/mtd/nand-ecc-mxic.h +++ b/include/linux/mtd/nand-ecc-mxic.h @@ -16,7 +16,7 @@ struct mxic_ecc_engine; #if IS_ENABLED(CONFIG_MTD_NAND_ECC_MXIC) && IS_REACHABLE(CONFIG_MTD_NAND_CORE) -struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void); +const struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void); struct nand_ecc_engine *mxic_ecc_get_pipelined_engine(struct platform_device *spi_pdev); void mxic_ecc_put_pipelined_engine(struct nand_ecc_engine *eng); int mxic_ecc_process_data_pipelined(struct nand_ecc_engine *eng, @@ -24,7 +24,7 @@ int mxic_ecc_process_data_pipelined(struct nand_ecc_engine *eng, #else /* !CONFIG_MTD_NAND_ECC_MXIC */ -static inline struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void) +static inline const struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void) { return NULL; } diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 1e4208040956..0e2f228e8b4a 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -293,7 +293,7 @@ enum nand_ecc_engine_integration { struct nand_ecc_engine { struct device *dev; struct list_head node; - struct nand_ecc_engine_ops *ops; + const struct nand_ecc_engine_ops *ops; enum nand_ecc_engine_integration integration; void *priv; }; diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 66e7d26b70a4..11be70a7929f 100644 --- a/include/linux/netdev_features.h +++ 
b/include/linux/netdev_features.h @@ -253,4 +253,11 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start) NETIF_F_GSO_UDP_TUNNEL | \ NETIF_F_GSO_UDP_TUNNEL_CSUM) +static inline netdev_features_t netdev_base_features(netdev_features_t features) +{ + features &= ~NETIF_F_ONE_FOR_ALL; + features |= NETIF_F_ALL_FOR_ALL; + return features; +} + #endif /* _LINUX_NETDEV_FEATURES_H */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 8896705ccd63..ecc686409161 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -52,6 +52,7 @@ #include <net/net_trackers.h> #include <net/net_debug.h> #include <net/dropreason-core.h> +#include <net/neighbour_tables.h> struct netpoll_info; struct device; @@ -343,6 +344,16 @@ struct gro_list { #define GRO_HASH_BUCKETS 8 /* + * Structure for per-NAPI config + */ +struct napi_config { + u64 gro_flush_timeout; + u64 irq_suspend_timeout; + u32 defer_hard_irqs; + unsigned int napi_id; +}; + +/* * Structure for NAPI scheduling similar to tasklet but with weighting */ struct napi_struct { @@ -373,10 +384,15 @@ struct napi_struct { unsigned int napi_id; struct hrtimer timer; struct task_struct *thread; + unsigned long gro_flush_timeout; + unsigned long irq_suspend_timeout; + u32 defer_hard_irqs; /* control-path-only fields follow */ struct list_head dev_list; struct hlist_node napi_hash_node; int irq; + int index; + struct napi_config *config; }; enum { @@ -1232,12 +1248,17 @@ struct netdev_net_notifier { * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], * struct net_device *dev, * const unsigned char *addr, u16 vid, u16 flags, - * struct netlink_ext_ack *extack); + * bool *notified, struct netlink_ext_ack *extack); * Adds an FDB entry to dev for addr. + * Callee shall set *notified to true if it sent any appropriate + * notification(s). Otherwise core will send a generic one. * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], * struct net_device *dev, - * const unsigned char *addr, u16 vid) + * const unsigned char *addr, u16 vid + * bool *notified, struct netlink_ext_ack *extack); * Deletes the FDB entry from dev corresponding to addr. + * Callee shall set *notified to true if it sent any appropriate + * notification(s). Otherwise core will send a generic one. 
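The updated ndo_fdb_add()/ndo_fdb_del() kerneldoc above introduces the *notified convention: a driver that emits its own FDB notification sets it so the core skips the generic one. A minimal driver-side sketch against the new prototype; the function and the hardware-programming step are hypothetical:

static int example_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			   struct net_device *dev, const unsigned char *addr,
			   u16 vid, u16 flags, bool *notified,
			   struct netlink_ext_ack *extack)
{
	int err;

	err = 0;	/* ... program the hardware FDB entry here ... */
	if (err)
		return err;

	/* ... send a driver-specific RTM_NEWNEIGH notification here ... */
	*notified = true;	/* tell the core not to send the generic one */
	return 0;
}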
* int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh, struct net_device *dev, * struct netlink_ext_ack *extack); * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, @@ -1412,8 +1433,7 @@ struct net_device_ops { __be16 proto, u16 vid); #ifdef CONFIG_NET_POLL_CONTROLLER void (*ndo_poll_controller)(struct net_device *dev); - int (*ndo_netpoll_setup)(struct net_device *dev, - struct netpoll_info *info); + int (*ndo_netpoll_setup)(struct net_device *dev); void (*ndo_netpoll_cleanup)(struct net_device *dev); #endif int (*ndo_set_vf_mac)(struct net_device *dev, @@ -1510,12 +1530,15 @@ struct net_device_ops { const unsigned char *addr, u16 vid, u16 flags, + bool *notified, struct netlink_ext_ack *extack); int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, - u16 vid, struct netlink_ext_ack *extack); + u16 vid, + bool *notified, + struct netlink_ext_ack *extack); int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh, struct net_device *dev, struct netlink_ext_ack *extack); @@ -1603,6 +1626,14 @@ struct net_device_ops { int (*ndo_hwtstamp_set)(struct net_device *dev, struct kernel_hwtstamp_config *kernel_config, struct netlink_ext_ack *extack); + +#if IS_ENABLED(CONFIG_NET_SHAPER) + /** + * @net_shaper_ops: Device shaping offload operations + * see include/net/net_shapers.h + */ + const struct net_shaper_ops *net_shaper_ops; +#endif }; /** @@ -1773,7 +1804,6 @@ enum netdev_reg_state { * @wireless_handlers: List of functions to handle Wireless Extensions, * instead of ioctl, * see <net/iw_handler.h> for details. - * @wireless_data: Instance data managed by the core of wireless extensions * * @netdev_ops: Includes several pointers to callbacks, * if one wants to override the ndo_*() functions @@ -1858,9 +1888,6 @@ enum netdev_reg_state { * allocated at register_netdev() time * @real_num_rx_queues: Number of RX queues currently active in device * @xdp_prog: XDP sockets filter program pointer - * @gro_flush_timeout: timeout for GRO layer in NAPI - * @napi_defer_hard_irqs: If not zero, provides a counter that would - * allow to avoid NIC hard IRQ, on busy queues. * * @rx_handler: handler for received packets * @rx_handler_data: XXX: need comments on this one @@ -2009,6 +2036,16 @@ enum netdev_reg_state { * @dpll_pin: Pointer to the SyncE source pin of a DPLL subsystem, * where the clock is recovered. * + * @max_pacing_offload_horizon: max EDT offload horizon in nsec. + * @napi_config: An array of napi_config structures containing per-NAPI + * settings. + * @gro_flush_timeout: timeout for GRO layer in NAPI + * @napi_defer_hard_irqs: If not zero, provides a counter that would + * allow to avoid NIC hard IRQ, on busy queues. + * + * @neighbours: List heads pointing to this device's neighbours' + * dev_list, one per address-family. + * * FIXME: cleanup struct net_device such that network protocol info * moves out. 
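@napi_config, documented above, holds per-NAPI settings that persist across queue reconfiguration; drivers opt in with netif_napi_add_config(), added further down in this header. A minimal usage sketch, assuming a driver with one NAPI per RX queue; example_poll() and example_setup_rx_napis() are hypothetical:

static int example_poll(struct napi_struct *napi, int budget)
{
	/* ... process up to @budget packets, then complete the NAPI ... */
	return 0;
}

static void example_setup_rx_napis(struct net_device *dev,
				   struct napi_struct *napis, int nr_rx)
{
	int i;

	/* Slot i of dev->napi_config keeps this queue's gro_flush_timeout,
	 * irq_suspend_timeout and defer_hard_irqs across teardown/setup.
	 */
	for (i = 0; i < nr_rx; i++)
		netif_napi_add_config(dev, &napis[i], example_poll, i);
}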
*/ @@ -2074,8 +2111,6 @@ struct net_device { int ifindex; unsigned int real_num_rx_queues; struct netdev_rx_queue *_rx; - unsigned long gro_flush_timeout; - u32 napi_defer_hard_irqs; unsigned int gro_max_size; unsigned int gro_ipv4_max_size; rx_handler_func_t __rcu *rx_handler; @@ -2150,7 +2185,6 @@ struct net_device { #ifdef CONFIG_WIRELESS_EXT const struct iw_handler_def *wireless_handlers; - struct iw_public_data *wireless_data; #endif const struct ethtool_ops *ethtool_ops; #ifdef CONFIG_NET_L3_MASTER_DEV @@ -2209,6 +2243,9 @@ struct net_device { /* Protocol-specific pointers */ struct in_device __rcu *ip_ptr; + /** @fib_nh_head: nexthops associated with this netdev */ + struct hlist_head fib_nh_head; + #if IS_ENABLED(CONFIG_VLAN_8021Q) struct vlan_info __rcu *vlan_info; #endif @@ -2399,6 +2436,27 @@ struct net_device { /** @irq_moder: dim parameters used if IS_ENABLED(CONFIG_DIMLIB). */ struct dim_irq_moder *irq_moder; + u64 max_pacing_offload_horizon; + struct napi_config *napi_config; + unsigned long gro_flush_timeout; + u32 napi_defer_hard_irqs; + + /** + * @lock: protects @net_shaper_hierarchy, feel free to use for other + * netdev-scope protection. Ordering: take after rtnl_lock. + */ + struct mutex lock; + +#if IS_ENABLED(CONFIG_NET_SHAPER) + /** + * @net_shaper_hierarchy: data tracking the current shaper status + * see include/net/net_shapers.h + */ + struct net_shaper_hierarchy *net_shaper_hierarchy; +#endif + + struct hlist_head neighbours[NEIGH_NR_TABLES]; + u8 priv[] ____cacheline_aligned __counted_by(priv_len); } ____cacheline_aligned; @@ -2649,6 +2707,22 @@ netif_napi_add_tx_weight(struct net_device *dev, } /** + * netif_napi_add_config - initialize a NAPI context with persistent config + * @dev: network device + * @napi: NAPI context + * @poll: polling function + * @index: the NAPI index + */ +static inline void +netif_napi_add_config(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), int index) +{ + napi->index = index; + napi->config = &dev->napi_config[index]; + netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT); +} + +/** * netif_napi_add_tx() - initialize a NAPI context to be used for Tx only * @dev: network device * @napi: NAPI context @@ -3470,7 +3544,7 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, * because in netdev_tx_completed_queue we update the dql_completed * before checking the XOFF flag. 
*/ - smp_mb(); + smp_mb__after_atomic(); /* check again in case another CPU has just made room avail */ if (unlikely(dql_avail(&dev_queue->dql) >= 0)) diff --git a/include/linux/netfs.h b/include/linux/netfs.h index 5eaceef41e6c..ecdd5ced16a8 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -185,6 +185,7 @@ struct netfs_io_subrequest { short error; /* 0 or error that occurred */ unsigned short debug_index; /* Index in list (for debugging output) */ unsigned int nr_segs; /* Number of segs in io_iter */ + u8 retry_count; /* The number of retries (0 on initial pass) */ enum netfs_io_source source; /* Where to read from/write to */ unsigned char stream_nr; /* I/O stream this belongs to */ unsigned char curr_folioq_slot; /* Folio currently being read */ @@ -194,14 +195,13 @@ struct netfs_io_subrequest { #define NETFS_SREQ_COPY_TO_CACHE 0 /* Set if should copy the data to the cache */ #define NETFS_SREQ_CLEAR_TAIL 1 /* Set if the rest of the read should be cleared */ #define NETFS_SREQ_SEEK_DATA_READ 3 /* Set if ->read() should SEEK_DATA first */ -#define NETFS_SREQ_NO_PROGRESS 4 /* Set if we didn't manage to read any data */ +#define NETFS_SREQ_MADE_PROGRESS 4 /* Set if we transferred at least some data */ #define NETFS_SREQ_ONDEMAND 5 /* Set if it's from on-demand read mode */ #define NETFS_SREQ_BOUNDARY 6 /* Set if ends on hard boundary (eg. ceph object) */ #define NETFS_SREQ_HIT_EOF 7 /* Set if short due to EOF */ #define NETFS_SREQ_IN_PROGRESS 8 /* Unlocked when the subrequest completes */ #define NETFS_SREQ_NEED_RETRY 9 /* Set if the filesystem requests a retry */ -#define NETFS_SREQ_RETRYING 10 /* Set if we're retrying */ -#define NETFS_SREQ_FAILED 11 /* Set if the subreq failed unretryably */ +#define NETFS_SREQ_FAILED 10 /* Set if the subreq failed unretryably */ }; enum netfs_io_origin { @@ -269,7 +269,6 @@ struct netfs_io_request { size_t prev_donated; /* Fallback for subreq->prev_donated */ refcount_t ref; unsigned long flags; -#define NETFS_RREQ_COPY_TO_CACHE 1 /* Need to write to the cache */ #define NETFS_RREQ_NO_UNLOCK_FOLIO 2 /* Don't unlock no_unlock_folio on completion */ #define NETFS_RREQ_DONT_UNLOCK_FOLIOS 3 /* Don't unlock the folios on completion */ #define NETFS_RREQ_FAILED 4 /* The request failed */ diff --git a/include/linux/netlink.h b/include/linux/netlink.h index b332c2048c75..c3ae84a77e16 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -34,6 +34,7 @@ struct netlink_skb_parms { #define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb)) #define NETLINK_CREDS(skb) (&NETLINK_CB((skb)).creds) +#define NETLINK_CTX_SIZE 48 void netlink_table_grab(void); @@ -239,7 +240,7 @@ int netlink_register_notifier(struct notifier_block *nb); int netlink_unregister_notifier(struct notifier_block *nb); /* finegrained unicast helpers: */ -struct sock *netlink_getsockbyfilp(struct file *filp); +struct sock *netlink_getsockbyfd(int fd); int netlink_attachskb(struct sock *sk, struct sk_buff *skb, long *timeo, struct sock *ssk); void netlink_detachskb(struct sock *sk, struct sk_buff *skb); @@ -293,7 +294,7 @@ struct netlink_callback { int flags; bool strict_check; union { - u8 ctx[48]; + u8 ctx[NETLINK_CTX_SIZE]; /* args is deprecated. Cast a struct over ctx instead * for proper type safety. 
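The ctx comment above recommends casting a private struct over the dump context; the hunk that follows renames the size assertion to NL_ASSERT_CTX_FITS(). A minimal sketch of a dump .start callback using it, assuming a single cursor field; the struct and function names are hypothetical:

struct example_dump_ctx {
	unsigned long last_idx;	/* resume cursor between dump passes */
};

static int example_dump_start(struct netlink_callback *cb)
{
	NL_ASSERT_CTX_FITS(struct example_dump_ctx);

	memset(cb->ctx, 0, sizeof(struct example_dump_ctx));
	return 0;
}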
@@ -302,7 +303,7 @@ struct netlink_callback { }; }; -#define NL_ASSERT_DUMP_CTX_FITS(type_name) \ +#define NL_ASSERT_CTX_FITS(type_name) \ BUILD_BUG_ON(sizeof(type_name) > \ sizeof_field(struct netlink_callback, ctx)) diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index cd4e28db0cbd..b34301650c47 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -32,6 +32,7 @@ struct netpoll { bool ipv6; u16 local_port, remote_port; u8 remote_mac[ETH_ALEN]; + struct sk_buff_head skb_pool; }; struct netpoll_info { @@ -72,7 +73,7 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi) { struct net_device *dev = napi->dev; - if (dev && dev->npinfo) { + if (dev && rcu_access_pointer(dev->npinfo)) { int owner = smp_processor_id(); while (cmpxchg(&napi->poll_owner, -1, owner) != -1) diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 12d8e47bc5a3..559273a0f16d 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1336,7 +1336,7 @@ struct pnfs_commit_array { struct rcu_head rcu; refcount_t refcount; unsigned int nbuckets; - struct pnfs_commit_bucket buckets[]; + struct pnfs_commit_bucket buckets[] __counted_by(nbuckets); }; struct pnfs_ds_commit_info { diff --git a/include/linux/nfslocalio.h b/include/linux/nfslocalio.h index 3982fea79919..9202f4b24343 100644 --- a/include/linux/nfslocalio.h +++ b/include/linux/nfslocalio.h @@ -55,7 +55,7 @@ struct nfsd_localio_operations { const struct cred *, const struct nfs_fh *, const fmode_t); - void (*nfsd_file_put_local)(struct nfsd_file *); + struct net *(*nfsd_file_put_local)(struct nfsd_file *); struct file *(*nfsd_file_file)(struct nfsd_file *); } ____cacheline_aligned; @@ -66,7 +66,7 @@ struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *, struct rpc_clnt *, const struct cred *, const struct nfs_fh *, const fmode_t); -static inline void nfs_to_nfsd_file_put_local(struct nfsd_file *localio) +static inline void nfs_to_nfsd_net_put(struct net *net) { /* * Once reference to nfsd_serv is dropped, NFSD could be @@ -74,10 +74,22 @@ static inline void nfs_to_nfsd_file_put_local(struct nfsd_file *localio) * by always taking RCU. */ rcu_read_lock(); - nfs_to->nfsd_file_put_local(localio); + nfs_to->nfsd_serv_put(net); rcu_read_unlock(); } +static inline void nfs_to_nfsd_file_put_local(struct nfsd_file *localio) +{ + /* + * Must not hold RCU otherwise nfsd_file_put() can easily trigger: + * "Voluntary context switch within RCU read-side critical section!" + * by scheduling deep in underlying filesystem (e.g. XFS). 
+ */ + struct net *net = nfs_to->nfsd_file_put_local(localio); + + nfs_to_nfsd_net_put(net); +} + #else /* CONFIG_NFS_LOCALIO */ static inline void nfsd_localio_ops_init(void) { diff --git a/include/linux/notifier.h b/include/linux/notifier.h index 45702bdcbceb..b42e64734968 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -237,7 +237,5 @@ static inline int notifier_to_errno(int ret) #define KBD_KEYSYM 0x0004 /* Keyboard keysym */ #define KBD_POST_KEYSYM 0x0005 /* Called after keyboard keysym interpretation */ -extern struct blocking_notifier_head reboot_notifier_list; - #endif /* __KERNEL__ */ #endif /* _LINUX_NOTIFIER_H */ diff --git a/include/linux/nvme.h b/include/linux/nvme.h index b58d9405d65e..13377dde4527 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -327,7 +327,8 @@ struct nvme_id_ctrl { __le32 sanicap; __le32 hmminds; __le16 hmmaxd; - __u8 rsvd338[4]; + __le16 nvmsetidmax; + __le16 endgidmax; __u8 anatt; __u8 anacap; __le32 anagrpmax; @@ -388,6 +389,11 @@ enum { NVME_CTRL_CTRATT_PREDICTABLE_LAT = 1 << 5, NVME_CTRL_CTRATT_NAMESPACE_GRANULARITY = 1 << 7, NVME_CTRL_CTRATT_UUID_LIST = 1 << 9, + NVME_CTRL_SGLS_BYTE_ALIGNED = 1, + NVME_CTRL_SGLS_DWORD_ALIGNED = 2, + NVME_CTRL_SGLS_KSDBDS = 1 << 2, + NVME_CTRL_SGLS_MSDS = 1 << 19, + NVME_CTRL_SGLS_SAOS = 1 << 20, }; struct nvme_lbaf { @@ -522,6 +528,7 @@ enum { NVME_ID_CNS_NS_DESC_LIST = 0x03, NVME_ID_CNS_CS_NS = 0x05, NVME_ID_CNS_CS_CTRL = 0x06, + NVME_ID_CNS_NS_ACTIVE_LIST_CS = 0x07, NVME_ID_CNS_NS_CS_INDEP = 0x08, NVME_ID_CNS_NS_PRESENT_LIST = 0x10, NVME_ID_CNS_NS_PRESENT = 0x11, @@ -530,6 +537,7 @@ enum { NVME_ID_CNS_SCNDRY_CTRL_LIST = 0x15, NVME_ID_CNS_NS_GRANULARITY = 0x16, NVME_ID_CNS_UUID_LIST = 0x17, + NVME_ID_CNS_ENDGRP_LIST = 0x19, }; enum { @@ -560,6 +568,8 @@ enum { NVME_NS_FLBAS_LBA_SHIFT = 1, NVME_NS_FLBAS_META_EXT = 0x10, NVME_NS_NMIC_SHARED = 1 << 0, + NVME_NS_ROTATIONAL = 1 << 4, + NVME_NS_VWC_NOT_PRESENT = 1 << 5, NVME_LBAF_RP_BEST = 0, NVME_LBAF_RP_BETTER = 1, NVME_LBAF_RP_GOOD = 2, @@ -617,6 +627,40 @@ enum { NVME_NIDT_CSI = 0x04, }; +struct nvme_endurance_group_log { + __u8 egcw; + __u8 egfeat; + __u8 rsvd2; + __u8 avsp; + __u8 avspt; + __u8 pused; + __le16 did; + __u8 rsvd8[24]; + __u8 ee[16]; + __u8 dur[16]; + __u8 duw[16]; + __u8 muw[16]; + __u8 hrc[16]; + __u8 hwc[16]; + __u8 mdie[16]; + __u8 neile[16]; + __u8 tegcap[16]; + __u8 uegcap[16]; + __u8 rsvd192[320]; +}; + +struct nvme_rotational_media_log { + __le16 endgid; + __le16 numa; + __le16 nrs; + __u8 rsvd6[2]; + __le32 spinc; + __le32 fspinc; + __le32 ldc; + __le32 fldc; + __u8 rsvd24[488]; +}; + struct nvme_smart_log { __u8 critical_warning; __u8 temperature[2]; @@ -1244,6 +1288,7 @@ enum { NVME_FEAT_WRITE_PROTECT = 0x84, NVME_FEAT_VENDOR_START = 0xC0, NVME_FEAT_VENDOR_END = 0xFF, + NVME_LOG_SUPPORTED = 0x00, NVME_LOG_ERROR = 0x01, NVME_LOG_SMART = 0x02, NVME_LOG_FW_SLOT = 0x03, @@ -1254,6 +1299,8 @@ enum { NVME_LOG_TELEMETRY_CTRL = 0x08, NVME_LOG_ENDURANCE_GROUP = 0x09, NVME_LOG_ANA = 0x0c, + NVME_LOG_FEATURES = 0x12, + NVME_LOG_RMI = 0x16, NVME_LOG_DISC = 0x70, NVME_LOG_RESERVATION = 0x80, NVME_FWACT_REPL = (0 << 3), @@ -1261,6 +1308,24 @@ enum { NVME_FWACT_ACTV = (2 << 3), }; +struct nvme_supported_log { + __le32 lids[256]; +}; + +enum { + NVME_LIDS_LSUPP = 1 << 0, +}; + +struct nvme_supported_features_log { + __le32 fis[256]; +}; + +enum { + NVME_FIS_FSUPP = 1 << 0, + NVME_FIS_NSCPE = 1 << 20, + NVME_FIS_CSCPE = 1 << 21, +}; + /* NVMe Namespace Write Protect State */ enum { NVME_NS_NO_WRITE_PROTECT = 0, @@ 
-1281,7 +1346,8 @@ struct nvme_identify { __u8 cns; __u8 rsvd3; __le16 ctrlid; - __u8 rsvd11[3]; + __le16 cnssid; + __u8 rsvd11; __u8 csi; __u32 rsvd12[4]; }; @@ -1389,7 +1455,7 @@ struct nvme_get_log_page_command { __u8 lsp; /* upper 4 bits reserved */ __le16 numdl; __le16 numdu; - __u16 rsvd11; + __le16 lsi; union { struct { __le32 lpol; @@ -2037,4 +2103,81 @@ struct nvme_completion { #define NVME_MINOR(ver) (((ver) >> 8) & 0xff) #define NVME_TERTIARY(ver) ((ver) & 0xff) +enum { + NVME_AEN_RESV_LOG_PAGE_AVALIABLE = 0x00, +}; + +enum { + NVME_PR_LOG_EMPTY_LOG_PAGE = 0x00, + NVME_PR_LOG_REGISTRATION_PREEMPTED = 0x01, + NVME_PR_LOG_RESERVATION_RELEASED = 0x02, + NVME_PR_LOG_RESERVATOIN_PREEMPTED = 0x03, +}; + +enum { + NVME_PR_NOTIFY_BIT_REG_PREEMPTED = 1, + NVME_PR_NOTIFY_BIT_RESV_RELEASED = 2, + NVME_PR_NOTIFY_BIT_RESV_PREEMPTED = 3, +}; + +struct nvme_pr_log { + __le64 count; + __u8 type; + __u8 nr_pages; + __u8 rsvd1[2]; + __le32 nsid; + __u8 rsvd2[48]; +}; + +struct nvmet_pr_register_data { + __le64 crkey; + __le64 nrkey; +}; + +struct nvmet_pr_acquire_data { + __le64 crkey; + __le64 prkey; +}; + +struct nvmet_pr_release_data { + __le64 crkey; +}; + +enum nvme_pr_capabilities { + NVME_PR_SUPPORT_PTPL = 1, + NVME_PR_SUPPORT_WRITE_EXCLUSIVE = 1 << 1, + NVME_PR_SUPPORT_EXCLUSIVE_ACCESS = 1 << 2, + NVME_PR_SUPPORT_WRITE_EXCLUSIVE_REG_ONLY = 1 << 3, + NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_REG_ONLY = 1 << 4, + NVME_PR_SUPPORT_WRITE_EXCLUSIVE_ALL_REGS = 1 << 5, + NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_ALL_REGS = 1 << 6, + NVME_PR_SUPPORT_IEKEY_VER_1_3_DEF = 1 << 7, +}; + +enum nvme_pr_register_action { + NVME_PR_REGISTER_ACT_REG = 0, + NVME_PR_REGISTER_ACT_UNREG = 1, + NVME_PR_REGISTER_ACT_REPLACE = 1 << 1, +}; + +enum nvme_pr_acquire_action { + NVME_PR_ACQUIRE_ACT_ACQUIRE = 0, + NVME_PR_ACQUIRE_ACT_PREEMPT = 1, + NVME_PR_ACQUIRE_ACT_PREEMPT_AND_ABORT = 1 << 1, +}; + +enum nvme_pr_release_action { + NVME_PR_RELEASE_ACT_RELEASE = 0, + NVME_PR_RELEASE_ACT_CLEAR = 1, +}; + +enum nvme_pr_change_ptpl { + NVME_PR_CPTPL_NO_CHANGE = 0, + NVME_PR_CPTPL_RESV = 1 << 30, + NVME_PR_CPTPL_CLEARED = 2 << 30, + NVME_PR_CPTPL_PERSIST = 3 << 30, +}; + +#define NVME_PR_IGNORE_KEY (1 << 3) + #endif /* _LINUX_NVME_H */ diff --git a/include/linux/of.h b/include/linux/of.h index 85b60ac9eec5..f921786cb8ac 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -289,6 +289,9 @@ extern struct device_node *of_get_parent(const struct device_node *node); extern struct device_node *of_get_next_parent(struct device_node *node); extern struct device_node *of_get_next_child(const struct device_node *node, struct device_node *prev); +extern struct device_node *of_get_next_child_with_prefix(const struct device_node *node, + struct device_node *prev, + const char *prefix); extern struct device_node *of_get_next_available_child( const struct device_node *node, struct device_node *prev); extern struct device_node *of_get_next_reserved_child( @@ -357,7 +360,7 @@ extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread); extern struct device_node *of_cpu_device_node_get(int cpu); extern int of_cpu_node_to_id(struct device_node *np); extern struct device_node *of_get_next_cpu_node(struct device_node *prev); -extern struct device_node *of_get_cpu_state_node(struct device_node *cpu_node, +extern struct device_node *of_get_cpu_state_node(const struct device_node *cpu_node, int index); extern u64 of_get_cpu_hwid(struct device_node *cpun, unsigned int thread); @@ -395,7 +398,7 @@ extern int of_phandle_iterator_args(struct 
of_phandle_iterator *it, int size); extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)); -extern int of_alias_get_id(struct device_node *np, const char *stem); +extern int of_alias_get_id(const struct device_node *np, const char *stem); extern int of_alias_get_highest_id(const char *stem); bool of_machine_compatible_match(const char *const *compats); @@ -435,7 +438,7 @@ extern int of_detach_node(struct device_node *); * of_property_for_each_u32(np, "propname", u) * printk("U32 value: %x\n", u); */ -const __be32 *of_prop_next_u32(struct property *prop, const __be32 *cur, +const __be32 *of_prop_next_u32(const struct property *prop, const __be32 *cur, u32 *pu); /* * struct property *prop; @@ -444,11 +447,11 @@ const __be32 *of_prop_next_u32(struct property *prop, const __be32 *cur, * of_property_for_each_string(np, "propname", prop, s) * printk("String value: %s\n", s); */ -const char *of_prop_next_string(struct property *prop, const char *cur); +const char *of_prop_next_string(const struct property *prop, const char *cur); -bool of_console_check(struct device_node *dn, char *name, int index); +bool of_console_check(const struct device_node *dn, char *name, int index); -int of_map_id(struct device_node *np, u32 id, +int of_map_id(const struct device_node *np, u32 id, const char *map_name, const char *map_mask_name, struct device_node **target, u32 *id_out); @@ -826,13 +829,13 @@ static inline bool of_console_check(const struct device_node *dn, const char *na return false; } -static inline const __be32 *of_prop_next_u32(struct property *prop, +static inline const __be32 *of_prop_next_u32(const struct property *prop, const __be32 *cur, u32 *pu) { return NULL; } -static inline const char *of_prop_next_string(struct property *prop, +static inline const char *of_prop_next_string(const struct property *prop, const char *cur) { return NULL; @@ -871,7 +874,7 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag { } -static inline int of_map_id(struct device_node *np, u32 id, +static inline int of_map_id(const struct device_node *np, u32 id, const char *map_name, const char *map_mask_name, struct device_node **target, u32 *id_out) { @@ -899,7 +902,7 @@ static inline const void *of_device_get_match_data(const struct device *dev) #define of_node_cmp(s1, s2) strcasecmp((s1), (s2)) #endif -static inline int of_prop_val_eq(struct property *p1, struct property *p2) +static inline int of_prop_val_eq(const struct property *p1, const struct property *p2) { return p1->length == p2->length && !memcmp(p1->value, p2->value, (size_t)p1->length); @@ -1252,7 +1255,7 @@ static inline int of_property_read_string_index(const struct device_node *np, static inline bool of_property_read_bool(const struct device_node *np, const char *propname) { - struct property *prop = of_find_property(np, propname, NULL); + const struct property *prop = of_find_property(np, propname, NULL); return prop ? 
true : false; } @@ -1430,7 +1433,7 @@ static inline int of_property_read_s32(const struct device_node *np, err = of_phandle_iterator_next(it)) #define of_property_for_each_u32(np, propname, u) \ - for (struct {struct property *prop; const __be32 *item; } _it = \ + for (struct {const struct property *prop; const __be32 *item; } _it = \ {of_find_property(np, propname, NULL), \ of_prop_next_u32(_it.prop, NULL, &u)}; \ _it.item; \ @@ -1468,6 +1471,12 @@ static inline int of_property_read_s32(const struct device_node *np, child != NULL; \ child = of_get_next_child(parent, child)) +#define for_each_child_of_node_with_prefix(parent, child, prefix) \ + for (struct device_node *child __free(device_node) = \ + of_get_next_child_with_prefix(parent, NULL, prefix); \ + child != NULL; \ + child = of_get_next_child_with_prefix(parent, child, prefix)) + #define for_each_available_child_of_node(parent, child) \ for (child = of_get_next_available_child(parent, NULL); child != NULL; \ child = of_get_next_available_child(parent, child)) @@ -1651,6 +1660,10 @@ static inline int of_changeset_add_prop_u32(struct of_changeset *ocs, return of_changeset_add_prop_u32_array(ocs, np, prop_name, &val, 1); } +int of_changeset_update_prop_string(struct of_changeset *ocs, + struct device_node *np, + const char *prop_name, const char *str); + int of_changeset_add_prop_bool(struct of_changeset *ocs, struct device_node *np, const char *prop_name); @@ -1734,7 +1747,7 @@ struct of_overlay_notify_data { #ifdef CONFIG_OF_OVERLAY int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size, - int *ovcs_id, struct device_node *target_base); + int *ovcs_id, const struct device_node *target_base); int of_overlay_remove(int *ovcs_id); int of_overlay_remove_all(void); @@ -1744,7 +1757,7 @@ int of_overlay_notifier_unregister(struct notifier_block *nb); #else static inline int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size, - int *ovcs_id, struct device_node *target_base) + int *ovcs_id, const struct device_node *target_base) { return -ENOTSUPP; } diff --git a/include/linux/of_address.h b/include/linux/of_address.h index 26a19daf0d09..9e034363788a 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h @@ -10,7 +10,7 @@ struct of_bus; struct of_pci_range_parser { struct device_node *node; - struct of_bus *bus; + const struct of_bus *bus; const __be32 *range; const __be32 *end; int na; @@ -83,8 +83,8 @@ extern struct of_pci_range *of_pci_range_parser_one( struct of_pci_range *range); extern int of_pci_address_to_resource(struct device_node *dev, int bar, struct resource *r); -extern int of_pci_range_to_resource(struct of_pci_range *range, - struct device_node *np, +extern int of_pci_range_to_resource(const struct of_pci_range *range, + const struct device_node *np, struct resource *res); extern int of_range_to_resource(struct device_node *np, int index, struct resource *res); diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index d69ad5bb1eb1..b8d6c0c20876 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -31,6 +31,7 @@ extern void *of_fdt_unflatten_tree(const unsigned long *blob, extern int __initdata dt_root_addr_cells; extern int __initdata dt_root_size_cells; extern void *initial_boot_params; +extern phys_addr_t initial_boot_params_pa; extern char __dtb_start[]; extern char __dtb_end[]; @@ -70,8 +71,8 @@ extern u64 dt_mem_next_cell(int s, const __be32 **cellp); /* Early flat tree scan hooks */ extern int early_init_dt_scan_root(void); -extern bool 
early_init_dt_scan(void *params); -extern bool early_init_dt_verify(void *params); +extern bool early_init_dt_scan(void *dt_virt, phys_addr_t dt_phys); +extern bool early_init_dt_verify(void *dt_virt, phys_addr_t dt_phys); extern void early_init_dt_scan_nodes(void); extern const char *of_flat_dt_get_machine_name(void); diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h index a4bea62bfa29..a692d9d979a6 100644 --- a/include/linux/of_graph.h +++ b/include/linux/of_graph.h @@ -11,6 +11,7 @@ #ifndef __LINUX_OF_GRAPH_H #define __LINUX_OF_GRAPH_H +#include <linux/cleanup.h> #include <linux/types.h> #include <linux/errno.h> @@ -37,14 +38,43 @@ struct of_endpoint { for (child = of_graph_get_next_endpoint(parent, NULL); child != NULL; \ child = of_graph_get_next_endpoint(parent, child)) +/** + * for_each_of_graph_port - iterate over every port in a device or ports node + * @parent: parent device or ports node containing port + * @child: loop variable pointing to the current port node + * + * When breaking out of the loop, and continue to use the @child, you need to + * use return_ptr(@child) or no_free_ptr(@child) not to call __free() for it. + */ +#define for_each_of_graph_port(parent, child) \ + for (struct device_node *child __free(device_node) = of_graph_get_next_port(parent, NULL);\ + child != NULL; child = of_graph_get_next_port(parent, child)) + +/** + * for_each_of_graph_port_endpoint - iterate over every endpoint in a port node + * @parent: parent port node + * @child: loop variable pointing to the current endpoint node + * + * When breaking out of the loop, and continue to use the @child, you need to + * use return_ptr(@child) or no_free_ptr(@child) not to call __free() for it. + */ +#define for_each_of_graph_port_endpoint(parent, child) \ + for (struct device_node *child __free(device_node) = of_graph_get_next_port_endpoint(parent, NULL);\ + child != NULL; child = of_graph_get_next_port_endpoint(parent, child)) + #ifdef CONFIG_OF bool of_graph_is_present(const struct device_node *node); int of_graph_parse_endpoint(const struct device_node *node, struct of_endpoint *endpoint); unsigned int of_graph_get_endpoint_count(const struct device_node *np); +unsigned int of_graph_get_port_count(struct device_node *np); struct device_node *of_graph_get_port_by_id(struct device_node *node, u32 id); struct device_node *of_graph_get_next_endpoint(const struct device_node *parent, struct device_node *previous); +struct device_node *of_graph_get_next_port(const struct device_node *parent, + struct device_node *port); +struct device_node *of_graph_get_next_port_endpoint(const struct device_node *port, + struct device_node *prev); struct device_node *of_graph_get_endpoint_by_regs( const struct device_node *parent, int port_reg, int reg); struct device_node *of_graph_get_remote_endpoint( @@ -73,6 +103,11 @@ static inline unsigned int of_graph_get_endpoint_count(const struct device_node return 0; } +static inline unsigned int of_graph_get_port_count(struct device_node *np) +{ + return 0; +} + static inline struct device_node *of_graph_get_port_by_id( struct device_node *node, u32 id) { @@ -86,6 +121,20 @@ static inline struct device_node *of_graph_get_next_endpoint( return NULL; } +static inline struct device_node *of_graph_get_next_port( + const struct device_node *parent, + struct device_node *previous) +{ + return NULL; +} + +static inline struct device_node *of_graph_get_next_port_endpoint( + const struct device_node *parent, + struct device_node *previous) +{ + return NULL; +} + static 
inline struct device_node *of_graph_get_endpoint_by_regs( const struct device_node *parent, int port_reg, int reg) { diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h index d6d3eae2f145..6337ad4e5fe8 100644 --- a/include/linux/of_irq.h +++ b/include/linux/of_irq.h @@ -48,12 +48,12 @@ extern int of_irq_to_resource_table(struct device_node *dev, struct resource *res, int nr_irqs); extern struct device_node *of_irq_find_parent(struct device_node *child); extern struct irq_domain *of_msi_get_domain(struct device *dev, - struct device_node *np, + const struct device_node *np, enum irq_domain_bus_token token); extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev, u32 id, u32 bus_token); -extern void of_msi_configure(struct device *dev, struct device_node *np); +extern void of_msi_configure(struct device *dev, const struct device_node *np); u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in); #else static inline void of_irq_init(const struct of_device_id *matches) diff --git a/include/linux/once.h b/include/linux/once.h index bc714d414448..30346fcdc799 100644 --- a/include/linux/once.h +++ b/include/linux/once.h @@ -46,7 +46,7 @@ void __do_once_sleepable_done(bool *done, struct static_key_true *once_key, #define DO_ONCE(func, ...) \ ({ \ bool ___ret = false; \ - static bool __section(".data.once") ___done = false; \ + static bool __section(".data..once") ___done = false; \ static DEFINE_STATIC_KEY_TRUE(___once_key); \ if (static_branch_unlikely(&___once_key)) { \ unsigned long ___flags; \ @@ -64,7 +64,7 @@ void __do_once_sleepable_done(bool *done, struct static_key_true *once_key, #define DO_ONCE_SLEEPABLE(func, ...) \ ({ \ bool ___ret = false; \ - static bool __section(".data.once") ___done = false; \ + static bool __section(".data..once") ___done = false; \ static DEFINE_STATIC_KEY_TRUE(___once_key); \ if (static_branch_unlikely(&___once_key)) { \ ___ret = __do_once_sleepable_start(&___done); \ diff --git a/include/linux/once_lite.h b/include/linux/once_lite.h index b7bce4983638..27de7bc32a06 100644 --- a/include/linux/once_lite.h +++ b/include/linux/once_lite.h @@ -12,7 +12,7 @@ #define __ONCE_LITE_IF(condition) \ ({ \ - static bool __section(".data.once") __already_done; \ + static bool __section(".data..once") __already_done; \ bool __ret_cond = !!(condition); \ bool __ret_once = false; \ \ diff --git a/include/linux/oom.h b/include/linux/oom.h index 7d0c9c48a0c5..1e0fc6931ce9 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -7,7 +7,6 @@ #include <linux/types.h> #include <linux/nodemask.h> #include <uapi/linux/oom.h> -#include <linux/sched/coredump.h> /* MMF_* */ #include <linux/mm.h> /* VM_FAULT* */ struct zonelist; diff --git a/include/linux/packing.h b/include/linux/packing.h index 8d6571feb95d..5d36dcd06f60 100644 --- a/include/linux/packing.h +++ b/include/linux/packing.h @@ -17,33 +17,13 @@ enum packing_op { UNPACK, }; -/** - * packing - Convert numbers (currently u64) between a packed and an unpacked - * format. Unpacked means laid out in memory in the CPU's native - * understanding of integers, while packed means anything else that - * requires translation. - * - * @pbuf: Pointer to a buffer holding the packed value. - * @uval: Pointer to an u64 holding the unpacked value. - * @startbit: The index (in logical notation, compensated for quirks) where - * the packed value starts within pbuf. Must be larger than, or - * equal to, endbit. 
- * @endbit: The index (in logical notation, compensated for quirks) where - * the packed value ends within pbuf. Must be smaller than, or equal - * to, startbit. - * @op: If PACK, then uval will be treated as const pointer and copied (packed) - * into pbuf, between startbit and endbit. - * If UNPACK, then pbuf will be treated as const pointer and the logical - * value between startbit and endbit will be copied (unpacked) to uval. - * @quirks: A bit mask of QUIRK_LITTLE_ENDIAN, QUIRK_LSW32_IS_FIRST and - * QUIRK_MSB_ON_THE_RIGHT. - * - * Return: 0 on success, EINVAL or ERANGE if called incorrectly. Assuming - * correct usage, return code may be discarded. - * If op is PACK, pbuf is modified. - * If op is UNPACK, uval is modified. - */ int packing(void *pbuf, u64 *uval, int startbit, int endbit, size_t pbuflen, enum packing_op op, u8 quirks); +int pack(void *pbuf, u64 uval, size_t startbit, size_t endbit, size_t pbuflen, + u8 quirks); + +int unpack(const void *pbuf, u64 *uval, size_t startbit, size_t endbit, + size_t pbuflen, u8 quirks); + #endif diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h index 7d79818dc065..4f5c9e979bb9 100644 --- a/include/linux/page-flags-layout.h +++ b/include/linux/page-flags-layout.h @@ -111,5 +111,12 @@ ZONES_WIDTH - LRU_GEN_WIDTH - SECTIONS_WIDTH - \ NODES_WIDTH - KASAN_TAG_WIDTH - LAST_CPUPID_WIDTH) +#define NR_NON_PAGEFLAG_BITS (SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH + \ + LAST_CPUPID_SHIFT + KASAN_TAG_WIDTH + \ + LRU_GEN_WIDTH + LRU_REFS_WIDTH) + +#define NR_UNUSED_PAGEFLAG_BITS (BITS_PER_LONG - \ + (NR_NON_PAGEFLAG_BITS + NR_PAGEFLAGS)) + #endif #endif /* _LINUX_PAGE_FLAGS_LAYOUT */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index cc839e4365c1..691506bdf2c5 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -306,7 +306,7 @@ static const unsigned long *const_folio_flags(const struct folio *folio, { const struct page *page = &folio->page; - VM_BUG_ON_PGFLAGS(PageTail(page), page); + VM_BUG_ON_PGFLAGS(page->compound_head & 1, page); VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page); return &page[n].flags; } @@ -315,7 +315,7 @@ static unsigned long *folio_flags(struct folio *folio, unsigned n) { struct page *page = &folio->page; - VM_BUG_ON_PGFLAGS(PageTail(page), page); + VM_BUG_ON_PGFLAGS(page->compound_head & 1, page); VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page); return &page[n].flags; } @@ -543,7 +543,7 @@ FOLIO_FLAG(swapbacked, FOLIO_HEAD_PAGE) * - PG_private and PG_private_2 cause release_folio() and co to be invoked */ PAGEFLAG(Private, private, PF_ANY) -PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY) +FOLIO_FLAG(private_2, FOLIO_HEAD_PAGE) /* owner_2 can be set on tail pages for anon memory */ FOLIO_FLAG(owner_2, FOLIO_HEAD_PAGE) @@ -554,7 +554,7 @@ FOLIO_FLAG(owner_2, FOLIO_HEAD_PAGE) */ TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL) TESTSCFLAG(Writeback, writeback, PF_NO_TAIL) -PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL) +FOLIO_FLAG(mappedtodisk, FOLIO_HEAD_PAGE) /* PG_readahead is only used for reads; PG_reclaim is only for writes */ PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL) @@ -689,6 +689,13 @@ static __always_inline bool folio_test_anon(const struct folio *folio) return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0; } +static __always_inline bool PageAnonNotKsm(const struct page *page) +{ + unsigned long flags = (unsigned long)page_folio(page)->mapping; + + return (flags & 
PAGE_MAPPING_FLAGS) == PAGE_MAPPING_ANON; +} + static __always_inline bool PageAnon(const struct page *page) { return folio_test_anon(page_folio(page)); @@ -718,13 +725,8 @@ static __always_inline bool folio_test_ksm(const struct folio *folio) return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_KSM; } - -static __always_inline bool PageKsm(const struct page *page) -{ - return folio_test_ksm(page_folio(page)); -} #else -TESTPAGEFLAG_FALSE(Ksm, ksm) +FOLIO_TEST_FLAG_FALSE(ksm) #endif u64 stable_page_flags(const struct page *page); @@ -860,18 +862,10 @@ static inline void ClearPageCompound(struct page *page) ClearPageHead(page); } FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE) -FOLIO_TEST_FLAG(partially_mapped, FOLIO_SECOND_PAGE) -/* - * PG_partially_mapped is protected by deferred_split split_queue_lock, - * so its safe to use non-atomic set/clear. - */ -__FOLIO_SET_FLAG(partially_mapped, FOLIO_SECOND_PAGE) -__FOLIO_CLEAR_FLAG(partially_mapped, FOLIO_SECOND_PAGE) +FOLIO_FLAG(partially_mapped, FOLIO_SECOND_PAGE) #else FOLIO_FLAG_FALSE(large_rmappable) -FOLIO_TEST_FLAG_FALSE(partially_mapped) -__FOLIO_SET_FLAG_NOOP(partially_mapped) -__FOLIO_CLEAR_FLAG_NOOP(partially_mapped) +FOLIO_FLAG_FALSE(partially_mapped) #endif #define PG_head_mask ((1UL << PG_head)) @@ -1137,14 +1131,14 @@ static __always_inline int PageAnonExclusive(const struct page *page) static __always_inline void SetPageAnonExclusive(struct page *page) { - VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page); + VM_BUG_ON_PGFLAGS(!PageAnonNotKsm(page), page); VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page); set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags); } static __always_inline void ClearPageAnonExclusive(struct page *page) { - VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page); + VM_BUG_ON_PGFLAGS(!PageAnonNotKsm(page), page); VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page); clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags); } diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h index c16db0067090..73dc2c1841ec 100644 --- a/include/linux/page-isolation.h +++ b/include/linux/page-isolation.h @@ -3,10 +3,6 @@ #define __LINUX_PAGEISOLATION_H #ifdef CONFIG_MEMORY_ISOLATION -static inline bool has_isolate_pageblock(struct zone *zone) -{ - return zone->nr_isolate_pageblock; -} static inline bool is_migrate_isolate_page(struct page *page) { return get_pageblock_migratetype(page) == MIGRATE_ISOLATE; @@ -16,10 +12,6 @@ static inline bool is_migrate_isolate(int migratetype) return migratetype == MIGRATE_ISOLATE; } #else -static inline bool has_isolate_pageblock(struct zone *zone) -{ - return false; -} static inline bool is_migrate_isolate_page(struct page *page) { return false; diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h new file mode 100644 index 000000000000..41a91df82631 --- /dev/null +++ b/include/linux/page_frag_cache.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _LINUX_PAGE_FRAG_CACHE_H +#define _LINUX_PAGE_FRAG_CACHE_H + +#include <linux/bits.h> +#include <linux/log2.h> +#include <linux/mm_types_task.h> +#include <linux/types.h> + +#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) +/* Use a full byte here to enable assembler optimization as the shift + * operation is usually expecting a byte. + */ +#define PAGE_FRAG_CACHE_ORDER_MASK GENMASK(7, 0) +#else +/* Compiler should be able to figure out we don't read things as any value + * ANDed with 0 is 0. 
+ */ +#define PAGE_FRAG_CACHE_ORDER_MASK 0 +#endif + +#define PAGE_FRAG_CACHE_PFMEMALLOC_BIT (PAGE_FRAG_CACHE_ORDER_MASK + 1) + +static inline bool encoded_page_decode_pfmemalloc(unsigned long encoded_page) +{ + return !!(encoded_page & PAGE_FRAG_CACHE_PFMEMALLOC_BIT); +} + +static inline void page_frag_cache_init(struct page_frag_cache *nc) +{ + nc->encoded_page = 0; +} + +static inline bool page_frag_cache_is_pfmemalloc(struct page_frag_cache *nc) +{ + return encoded_page_decode_pfmemalloc(nc->encoded_page); +} + +void page_frag_cache_drain(struct page_frag_cache *nc); +void __page_frag_cache_drain(struct page *page, unsigned int count); +void *__page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz, + gfp_t gfp_mask, unsigned int align_mask); + +static inline void *page_frag_alloc_align(struct page_frag_cache *nc, + unsigned int fragsz, gfp_t gfp_mask, + unsigned int align) +{ + WARN_ON_ONCE(!is_power_of_2(align)); + return __page_frag_alloc_align(nc, fragsz, gfp_mask, -align); +} + +static inline void *page_frag_alloc(struct page_frag_cache *nc, + unsigned int fragsz, gfp_t gfp_mask) +{ + return __page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u); +} + +void page_frag_free(void *addr); + +#endif diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 68a5f1ff3301..bcf0865a38ae 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -1011,22 +1011,25 @@ static inline struct folio *read_mapping_folio(struct address_space *mapping, return read_cache_folio(mapping, index, NULL, file); } -/* - * Get the offset in PAGE_SIZE (even for hugetlb pages). +/** + * page_pgoff - Calculate the logical page offset of this page. + * @folio: The folio containing this page. + * @page: The page which we need the offset of. + * + * For file pages, this is the offset from the beginning of the file + * in units of PAGE_SIZE. For anonymous pages, this is the offset from + * the beginning of the anon_vma in units of PAGE_SIZE. This will + * return nonsense for KSM pages. + * + * Context: Caller must have a reference on the folio or otherwise + * prevent it from being split or freed. + * + * Return: The offset in units of PAGE_SIZE. */ -static inline pgoff_t page_to_pgoff(struct page *page) +static inline pgoff_t page_pgoff(const struct folio *folio, + const struct page *page) { - struct page *head; - - if (likely(!PageTransTail(page))) - return page->index; - - head = compound_head(page); - /* - * We don't initialize ->index for tail pages: calculate based on - * head page - */ - return head->index + page - head; + return folio->index + folio_page_idx(folio, page); } /* diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h index f5eb5a32aeed..9700a29f8afb 100644 --- a/include/linux/pagewalk.h +++ b/include/linux/pagewalk.h @@ -25,12 +25,15 @@ enum page_walk_lock { * this handler is required to be able to handle * pmd_trans_huge() pmds. They may simply choose to * split_huge_page() instead of handling it explicitly. - * @pte_entry: if set, called for each PTE (lowest-level) entry, - * including empty ones + * @pte_entry: if set, called for each PTE (lowest-level) entry + * including empty ones, except if @install_pte is set. + * If @install_pte is set, @pte_entry is called only for + * existing PTEs. * @pte_hole: if set, called for each hole at all levels, * depth is -1 if not known, 0:PGD, 1:P4D, 2:PUD, 3:PMD. * Any folded depths (where PTRS_PER_P?D is equal to 1) - * are skipped. + * are skipped. 
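The page_frag_cache.h interface added above lends itself to a short usage sketch. The cache, helper names and fragment size below are illustrative assumptions, not part of the patch; only the page_frag_* calls come from the new header.

#include <linux/cache.h>
#include <linux/gfp.h>
#include <linux/page_frag_cache.h>

static struct page_frag_cache my_rx_cache;	/* hypothetical per-driver cache */

static void my_rx_init(void)
{
        page_frag_cache_init(&my_rx_cache);
}

static void *my_rx_get_buf(void)
{
        /* 2 KiB fragment, SMP_CACHE_BYTES-aligned; may return NULL */
        return page_frag_alloc_align(&my_rx_cache, 2048, GFP_ATOMIC,
                                     SMP_CACHE_BYTES);
}

static void my_rx_put_buf(void *buf)
{
        page_frag_free(buf);
}

static void my_rx_exit(void)
{
        page_frag_cache_drain(&my_rx_cache);
}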
If @install_pte is specified, this will + * not trigger for any populated ranges. * @hugetlb_entry: if set, called for each hugetlb entry. This hook * function is called with the vma lock held, in order to * protect against a concurrent freeing of the pte_t* or @@ -51,6 +54,13 @@ enum page_walk_lock { * @pre_vma: if set, called before starting walk on a non-null vma. * @post_vma: if set, called after a walk on a non-null vma, provided * that @pre_vma and the vma walk succeeded. + * @install_pte: if set, missing page table entries are installed and + * thus all levels are always walked in the specified + * range. This callback is then invoked at the PTE level + * (having split any THP pages prior), providing the PTE to + * install. If allocations fail, the walk is aborted. This + * operation is only available for userland memory. Not + * usable for hugetlb ranges. * * p?d_entry callbacks are called even if those levels are folded on a * particular architecture/configuration. @@ -76,6 +86,8 @@ struct mm_walk_ops { int (*pre_vma)(unsigned long start, unsigned long end, struct mm_walk *walk); void (*post_vma)(struct mm_walk *walk); + int (*install_pte)(unsigned long addr, unsigned long next, + pte_t *ptep, struct mm_walk *walk); enum page_walk_lock walk_lock; }; diff --git a/include/linux/pci-bwctrl.h b/include/linux/pci-bwctrl.h new file mode 100644 index 000000000000..cee07127455b --- /dev/null +++ b/include/linux/pci-bwctrl.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * PCIe bandwidth controller + * + * Copyright (C) 2023-2024 Intel Corporation + */ + +#ifndef LINUX_PCI_BWCTRL_H +#define LINUX_PCI_BWCTRL_H + +#include <linux/pci.h> + +struct thermal_cooling_device; + +#ifdef CONFIG_PCIE_THERMAL +struct thermal_cooling_device *pcie_cooling_device_register(struct pci_dev *port); +void pcie_cooling_device_unregister(struct thermal_cooling_device *cdev); +#else +static inline struct thermal_cooling_device *pcie_cooling_device_register(struct pci_dev *port) +{ + return NULL; +} +static inline void pcie_cooling_device_unregister(struct thermal_cooling_device *cdev) +{ +} +#endif + +#endif diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index 42ef06136bd1..e818e3fdcded 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h @@ -33,10 +33,42 @@ pci_epc_interface_string(enum pci_epc_interface_type type) } /** + * struct pci_epc_map - information about EPC memory for mapping a RC PCI + * address range + * @pci_addr: start address of the RC PCI address range to map + * @pci_size: size of the RC PCI address range mapped from @pci_addr + * @map_pci_addr: RC PCI address used as the first address mapped (may be lower + * than @pci_addr) + * @map_size: size of the controller memory needed for mapping the RC PCI address + * range @map_pci_addr..@pci_addr+@pci_size + * @phys_base: base physical address of the allocated EPC memory for mapping the + * RC PCI address range + * @phys_addr: physical address at which @pci_addr is mapped + * @virt_base: base virtual address of the allocated EPC memory for mapping the + * RC PCI address range + * @virt_addr: virtual address at which @pci_addr is mapped + */ +struct pci_epc_map { + u64 pci_addr; + size_t pci_size; + + u64 map_pci_addr; + size_t map_size; + + phys_addr_t phys_base; + phys_addr_t phys_addr; + void __iomem *virt_base; + void __iomem *virt_addr; +}; + +/** * struct pci_epc_ops - set of function pointers for performing EPC operations * @write_header: ops to populate configuration space header * @set_bar: 
ops to configure the BAR * @clear_bar: ops to reset the BAR + * @align_addr: operation to get the mapping address, mapping size and offset + * into a controller memory window needed to map an RC PCI address + * region * @map_addr: ops to map CPU address to PCI address * @unmap_addr: ops to unmap CPU address and PCI address * @set_msi: ops to set the requested number of MSI interrupts in the MSI @@ -61,6 +93,8 @@ struct pci_epc_ops { struct pci_epf_bar *epf_bar); void (*clear_bar)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct pci_epf_bar *epf_bar); + u64 (*align_addr)(struct pci_epc *epc, u64 pci_addr, size_t *size, + size_t *offset); int (*map_addr)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, phys_addr_t addr, u64 pci_addr, size_t size); void (*unmap_addr)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, @@ -278,6 +312,10 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc, phys_addr_t *phys_addr, size_t size); void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr, void __iomem *virt_addr, size_t size); +int pci_epc_mem_map(struct pci_epc *epc, u8 func_no, u8 vfunc_no, + u64 pci_addr, size_t pci_size, struct pci_epc_map *map); +void pci_epc_mem_unmap(struct pci_epc *epc, u8 func_no, u8 vfunc_no, + struct pci_epc_map *map); #else static inline void pci_epc_init_notify(struct pci_epc *epc) diff --git a/include/linux/pci-pwrctl.h b/include/linux/pci-pwrctrl.h index 0d23dddf59ec..7d439b0675e9 100644 --- a/include/linux/pci-pwrctl.h +++ b/include/linux/pci-pwrctrl.h @@ -3,8 +3,8 @@ * Copyright (C) 2024 Linaro Ltd. */ -#ifndef __PCI_PWRCTL_H__ -#define __PCI_PWRCTL_H__ +#ifndef __PCI_PWRCTRL_H__ +#define __PCI_PWRCTRL_H__ #include <linux/notifier.h> #include <linux/workqueue.h> @@ -29,14 +29,14 @@ struct device_link; */ /** - * struct pci_pwrctl - PCI device power control context. + * struct pci_pwrctrl - PCI device power control context. * @dev: Address of the power controlling device. * * An object of this type must be allocated by the PCI power control device and - * passed to the pwrctl subsystem to trigger a bus rescan and setup a device + * passed to the pwrctrl subsystem to trigger a bus rescan and setup a device * link with the device once it's up. */ -struct pci_pwrctl { +struct pci_pwrctrl { struct device *dev; /* Private: don't use. */ @@ -45,10 +45,10 @@ struct pci_pwrctl { struct work_struct work; }; -void pci_pwrctl_init(struct pci_pwrctl *pwrctl, struct device *dev); -int pci_pwrctl_device_set_ready(struct pci_pwrctl *pwrctl); -void pci_pwrctl_device_unset_ready(struct pci_pwrctl *pwrctl); -int devm_pci_pwrctl_device_set_ready(struct device *dev, - struct pci_pwrctl *pwrctl); +void pci_pwrctrl_init(struct pci_pwrctrl *pwrctrl, struct device *dev); +int pci_pwrctrl_device_set_ready(struct pci_pwrctrl *pwrctrl); +void pci_pwrctrl_device_unset_ready(struct pci_pwrctrl *pwrctrl); +int devm_pci_pwrctrl_device_set_ready(struct device *dev, + struct pci_pwrctrl *pwrctrl); -#endif /* __PCI_PWRCTL_H__ */ +#endif /* __PCI_PWRCTRL_H__ */ diff --git a/include/linux/pci-tph.h b/include/linux/pci-tph.h new file mode 100644 index 000000000000..c3e806c13d64 --- /dev/null +++ b/include/linux/pci-tph.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * TPH (TLP Processing Hints) + * + * Copyright (C) 2024 Advanced Micro Devices, Inc. 
+ * Eric Van Tassell <[email protected]> + * Wei Huang <[email protected]> + */ +#ifndef LINUX_PCI_TPH_H +#define LINUX_PCI_TPH_H + +/* + * According to the ECN for PCI Firmware Spec, Steering Tag can be different + * depending on the memory type: Volatile Memory or Persistent Memory. When a + * caller query about a target's Steering Tag, it must provide the target's + * tph_mem_type. ECN link: https://members.pcisig.com/wg/PCI-SIG/document/15470. + */ +enum tph_mem_type { + TPH_MEM_TYPE_VM, /* volatile memory */ + TPH_MEM_TYPE_PM /* persistent memory */ +}; + +#ifdef CONFIG_PCIE_TPH +int pcie_tph_set_st_entry(struct pci_dev *pdev, + unsigned int index, u16 tag); +int pcie_tph_get_cpu_st(struct pci_dev *dev, + enum tph_mem_type mem_type, + unsigned int cpu_uid, u16 *tag); +void pcie_disable_tph(struct pci_dev *pdev); +int pcie_enable_tph(struct pci_dev *pdev, int mode); +#else +static inline int pcie_tph_set_st_entry(struct pci_dev *pdev, + unsigned int index, u16 tag) +{ return -EINVAL; } +static inline int pcie_tph_get_cpu_st(struct pci_dev *dev, + enum tph_mem_type mem_type, + unsigned int cpu_uid, u16 *tag) +{ return -EINVAL; } +static inline void pcie_disable_tph(struct pci_dev *pdev) { } +static inline int pcie_enable_tph(struct pci_dev *pdev, int mode) +{ return -EINVAL; } +#endif + +#endif /* LINUX_PCI_TPH_H */ diff --git a/include/linux/pci.h b/include/linux/pci.h index 573b4c4c2be6..db9b47ce3eef 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -313,12 +313,20 @@ struct pci_vpd { }; struct irq_affinity; +struct pcie_bwctrl_data; struct pcie_link_state; struct pci_sriov; struct pci_p2pdma; struct rcec_ea; -/* The pci_dev structure describes PCI devices */ +/* struct pci_dev - describes a PCI device + * + * @supported_speeds: PCIe Supported Link Speeds Vector (+ reserved 0 at + * LSB). 0 when the supported speeds cannot be + * determined (e.g., for Root Complex Integrated + * Endpoints without the relevant Capability + * Registers). 
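The TPH helpers declared in the new pci-tph.h above combine roughly as follows. The function name, steering-tag mode value, CPU uid and MSI-X index are assumptions made for the sketch; the patch itself only provides enum tph_mem_type and the pcie_enable_tph()/pcie_tph_get_cpu_st()/pcie_tph_set_st_entry()/pcie_disable_tph() entry points.

#include <linux/pci.h>
#include <linux/pci-tph.h>

static int my_setup_tph(struct pci_dev *pdev, int st_mode,
                        unsigned int cpu_uid, unsigned int msix_idx)
{
        u16 tag;
        int ret;

        ret = pcie_enable_tph(pdev, st_mode);	/* st_mode chosen by the driver */
        if (ret)
                return ret;

        /* look up the steering tag for volatile memory on the target CPU */
        ret = pcie_tph_get_cpu_st(pdev, TPH_MEM_TYPE_VM, cpu_uid, &tag);
        if (ret)
                goto disable;

        /* program the tag into the device's ST table entry for this vector */
        ret = pcie_tph_set_st_entry(pdev, msix_idx, tag);
        if (ret)
                goto disable;

        return 0;

disable:
        pcie_disable_tph(pdev);
        return ret;
}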
+ */ struct pci_dev { struct list_head bus_list; /* Node in per-bus list */ struct pci_bus *bus; /* Bus this device is on */ @@ -434,6 +442,7 @@ struct pci_dev { unsigned int ats_enabled:1; /* Address Translation Svc */ unsigned int pasid_enabled:1; /* Process Address Space ID */ unsigned int pri_enabled:1; /* Page Request Interface */ + unsigned int tph_enabled:1; /* TLP Processing Hints */ unsigned int is_managed:1; /* Managed via devres */ unsigned int is_msi_managed:1; /* MSI release via devres installed */ unsigned int needs_freset:1; /* Requires fundamental reset */ @@ -495,6 +504,7 @@ struct pci_dev { unsigned int dpc_rp_extensions:1; u8 dpc_rp_log_size; #endif + struct pcie_bwctrl_data *link_bwctrl; #ifdef CONFIG_PCI_ATS union { struct pci_sriov *sriov; /* PF: SR-IOV info */ @@ -522,6 +532,7 @@ struct pci_dev { struct npem *npem; /* Native PCIe Enclosure Management */ #endif u16 acs_cap; /* ACS Capability offset */ + u8 supported_speeds; /* Supported Link Speeds Vector */ phys_addr_t rom; /* Physical address if not from BAR */ size_t romlen; /* Length if not from BAR */ /* @@ -534,6 +545,12 @@ struct pci_dev { /* These methods index pci_reset_fn_methods[] */ u8 reset_methods[PCI_NUM_RESET_METHODS]; /* In priority order */ + +#ifdef CONFIG_PCIE_TPH + u16 tph_cap; /* TPH capability offset */ + u8 tph_mode; /* TPH mode */ + u8 tph_req_type; /* TPH requester type */ +#endif }; static inline struct pci_dev *pci_physfn(struct pci_dev *dev) @@ -626,27 +643,6 @@ void pci_set_host_bridge_release(struct pci_host_bridge *bridge, int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge); -/* - * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond - * to P2P or CardBus bridge windows) go in a table. Additional ones (for - * buses below host bridges or subtractive decode bridges) go in the list. - * Use pci_bus_for_each_resource() to iterate through all the resources. - */ - -/* - * PCI_SUBTRACTIVE_DECODE means the bridge forwards the window implicitly - * and there's no way to program the bridge with the details of the window. - * This does not apply to ACPI _CRS windows, even with the _DEC subtractive- - * decode bit set, because they are explicit and can be programmed with _SRS. 
- */ -#define PCI_SUBTRACTIVE_DECODE 0x1 - -struct pci_bus_resource { - struct list_head list; - struct resource *res; - unsigned int flags; -}; - #define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */ struct pci_bus { @@ -1274,6 +1270,7 @@ static inline int pcie_capability_clear_and_set_word(struct pci_dev *dev, { switch (pos) { case PCI_EXP_LNKCTL: + case PCI_EXP_LNKCTL2: case PCI_EXP_RTCTL: return pcie_capability_clear_and_set_word_locked(dev, pos, clear, set); @@ -1498,8 +1495,7 @@ void pci_add_resource(struct list_head *resources, struct resource *res); void pci_add_resource_offset(struct list_head *resources, struct resource *res, resource_size_t offset); void pci_free_resource_list(struct list_head *resources); -void pci_bus_add_resource(struct pci_bus *bus, struct resource *res, - unsigned int flags); +void pci_bus_add_resource(struct pci_bus *bus, struct resource *res); struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n); void pci_bus_remove_resources(struct pci_bus *bus); void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res); @@ -1556,7 +1552,7 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus, void *alignf_data); -int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr, +int pci_register_io_range(const struct fwnode_handle *fwnode, phys_addr_t addr, resource_size_t size); unsigned long pci_address_to_pio(phys_addr_t addr); phys_addr_t pci_pio_to_address(unsigned long pio); @@ -1623,8 +1619,6 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata); -void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), - void *userdata); int pci_cfg_space_size(struct pci_dev *dev); unsigned char pci_bus_max_busnr(struct pci_bus *bus); void pci_setup_bridge(struct pci_bus *bus); @@ -1786,9 +1780,19 @@ static inline int pci_irqd_intx_xlate(struct irq_domain *d, #ifdef CONFIG_PCIEPORTBUS extern bool pcie_ports_disabled; extern bool pcie_ports_native; + +int pcie_set_target_speed(struct pci_dev *port, enum pci_bus_speed speed_req, + bool use_lt); #else #define pcie_ports_disabled true #define pcie_ports_native false + +static inline int pcie_set_target_speed(struct pci_dev *port, + enum pci_bus_speed speed_req, + bool use_lt) +{ + return -EOPNOTSUPP; +} #endif #define PCIE_LINK_STATE_L0S (BIT(0) | BIT(1)) /* Upstr/dwnstr L0s */ @@ -2019,7 +2023,7 @@ static inline int pci_request_regions(struct pci_dev *dev, const char *res_name) { return -EIO; } static inline void pci_release_regions(struct pci_dev *dev) { } -static inline int pci_register_io_range(struct fwnode_handle *fwnode, +static inline int pci_register_io_range(const struct fwnode_handle *fwnode, phys_addr_t addr, resource_size_t size) { return -EINVAL; } @@ -2293,15 +2297,15 @@ static inline void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) { } #endif +int pcim_request_all_regions(struct pci_dev *pdev, const char *name); void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen); void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar, const char *name); +void pcim_iounmap_region(struct pci_dev *pdev, int bar); void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr); void __iomem * const *pcim_iomap_table(struct pci_dev *pdev); int pcim_request_region(struct pci_dev *pdev, int bar, const char *name); int pcim_iomap_regions(struct pci_dev *pdev, int 
mask, const char *name); -int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask, - const char *name); void pcim_iounmap_regions(struct pci_dev *pdev, int mask); void __iomem *pcim_iomap_range(struct pci_dev *pdev, int bar, unsigned long offset, unsigned long len); @@ -2609,6 +2613,12 @@ pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; } static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; } #endif +#if defined(CONFIG_X86) && defined(CONFIG_ACPI) +bool arch_pci_dev_is_removable(struct pci_dev *pdev); +#else +static inline bool arch_pci_dev_is_removable(struct pci_dev *pdev) { return false; } +#endif + #ifdef CONFIG_EEH static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev) { diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 4cf6aaed5f35..d2402bf4aea2 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -121,6 +121,7 @@ #define PCI_CLASS_SERIAL_USB_OHCI 0x0c0310 #define PCI_CLASS_SERIAL_USB_EHCI 0x0c0320 #define PCI_CLASS_SERIAL_USB_XHCI 0x0c0330 +#define PCI_CLASS_SERIAL_USB_CDNS 0x0c0380 #define PCI_CLASS_SERIAL_USB_DEVICE 0x0c03fe #define PCI_CLASS_SERIAL_FIBER 0x0c04 #define PCI_CLASS_SERIAL_SMBUS 0x0c05 @@ -2421,6 +2422,9 @@ #define PCI_VENDOR_ID_QCOM 0x17cb #define PCI_VENDOR_ID_CDNS 0x17cd +#define PCI_DEVICE_ID_CDNS_USBSS 0x0100 +#define PCI_DEVICE_ID_CDNS_USB 0x0120 +#define PCI_DEVICE_ID_CDNS_USBSSP 0x0200 #define PCI_VENDOR_ID_ARECA 0x17d3 #define PCI_DEVICE_ID_ARECA_1110 0x1110 @@ -2709,9 +2713,6 @@ #define PCI_DEVICE_ID_INTEL_82815_MC 0x1130 #define PCI_DEVICE_ID_INTEL_82815_CGC 0x1132 #define PCI_DEVICE_ID_INTEL_SST_TNG 0x119a -#define PCI_DEVICE_ID_INTEL_DSA_GNRD 0x11fb -#define PCI_DEVICE_ID_INTEL_DSA_DMR 0x1212 -#define PCI_DEVICE_ID_INTEL_IAA_DMR 0x1216 #define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221 #define PCI_DEVICE_ID_INTEL_82437 0x122d #define PCI_DEVICE_ID_INTEL_82371FB_0 0x122e diff --git a/include/linux/pcs/pcs-xpcs.h b/include/linux/pcs/pcs-xpcs.h index b4a4eb6c8866..b5b5d17998b8 100644 --- a/include/linux/pcs/pcs-xpcs.h +++ b/include/linux/pcs/pcs-xpcs.h @@ -21,8 +21,6 @@ #define DW_AN_C37_1000BASEX 4 #define DW_10GBASER 5 -struct dw_xpcs_desc; - enum dw_xpcs_pcs_id { DW_XPCS_ID_NATIVE = 0, NXP_SJA1105_XPCS_ID = 0x00000010, @@ -48,33 +46,18 @@ struct dw_xpcs_info { u32 pma; }; -enum dw_xpcs_clock { - DW_XPCS_CORE_CLK, - DW_XPCS_PAD_CLK, - DW_XPCS_NUM_CLKS, -}; - -struct dw_xpcs { - struct dw_xpcs_info info; - const struct dw_xpcs_desc *desc; - struct mdio_device *mdiodev; - struct clk_bulk_data clks[DW_XPCS_NUM_CLKS]; - struct phylink_pcs pcs; - phy_interface_t interface; -}; +struct dw_xpcs; +struct phylink_pcs *xpcs_to_phylink_pcs(struct dw_xpcs *xpcs); int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface); -void xpcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode, - phy_interface_t interface, int speed, int duplex); -int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface, - const unsigned long *advertising, unsigned int neg_mode); void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces); int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable); -struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr, - phy_interface_t interface); -struct dw_xpcs *xpcs_create_fwnode(struct fwnode_handle *fwnode, - phy_interface_t interface); +struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr); +struct dw_xpcs *xpcs_create_fwnode(struct fwnode_handle *fwnode); void xpcs_destroy(struct 
dw_xpcs *xpcs); +struct phylink_pcs *xpcs_create_pcs_mdiodev(struct mii_bus *bus, int addr); +void xpcs_destroy_pcs(struct phylink_pcs *pcs); + #endif /* __LINUX_PCS_XPCS_H */ diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index 8efce7414fad..5b520fe86b60 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h @@ -220,15 +220,17 @@ do { \ (void)__vpp_verify; \ } while (0) +#define PERCPU_PTR(__p) \ + (typeof(*(__p)) __force __kernel *)((__force unsigned long)(__p)) + #ifdef CONFIG_SMP /* - * Add an offset to a pointer but keep the pointer as-is. Use RELOC_HIDE() - * to prevent the compiler from making incorrect assumptions about the - * pointer value. The weird cast keeps both GCC and sparse happy. + * Add an offset to a pointer. Use RELOC_HIDE() to prevent the compiler + * from making incorrect assumptions about the pointer value. */ #define SHIFT_PERCPU_PTR(__p, __offset) \ - RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset)) + RELOC_HIDE(PERCPU_PTR(__p), (__offset)) #define per_cpu_ptr(ptr, cpu) \ ({ \ @@ -254,13 +256,13 @@ do { \ #else /* CONFIG_SMP */ -#define VERIFY_PERCPU_PTR(__p) \ +#define per_cpu_ptr(ptr, cpu) \ ({ \ - __verify_pcpu_ptr(__p); \ - (typeof(*(__p)) __kernel __force *)(__p); \ + (void)(cpu); \ + __verify_pcpu_ptr(ptr); \ + PERCPU_PTR(ptr); \ }) -#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); }) #define raw_cpu_ptr(ptr) per_cpu_ptr(ptr, 0) #define this_cpu_ptr(ptr) raw_cpu_ptr(ptr) diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h index 3372c1b56486..d698efba28a2 100644 --- a/include/linux/perf/arm_pmuv3.h +++ b/include/linux/perf/arm_pmuv3.h @@ -257,6 +257,7 @@ #define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be written at EL0 */ #define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */ #define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */ +#define ARMV8_PMU_USERENR_UEN (1 << 4) /* Fine grained per counter access at EL0 */ /* Mask for writable bits */ #define ARMV8_PMU_USERENR_MASK (ARMV8_PMU_USERENR_EN | ARMV8_PMU_USERENR_SW | \ ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_ER) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index fb908843f209..cb99ec8c9e96 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -170,6 +170,12 @@ struct hw_perf_event { }; struct { /* aux / Intel-PT */ u64 aux_config; + /* + * For AUX area events, aux_paused cannot be a state + * flag because it can be updated asynchronously to + * state. + */ + unsigned int aux_paused; }; struct { /* software */ struct hrtimer hrtimer; @@ -294,6 +300,7 @@ struct perf_event_pmu_context; #define PERF_PMU_CAP_NO_EXCLUDE 0x0040 #define PERF_PMU_CAP_AUX_OUTPUT 0x0080 #define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0100 +#define PERF_PMU_CAP_AUX_PAUSE 0x0200 /** * pmu::scope @@ -384,6 +391,8 @@ struct pmu { #define PERF_EF_START 0x01 /* start the counter when adding */ #define PERF_EF_RELOAD 0x02 /* reload the counter when starting */ #define PERF_EF_UPDATE 0x04 /* update the counter when stopping */ +#define PERF_EF_PAUSE 0x08 /* AUX area event, pause tracing */ +#define PERF_EF_RESUME 0x10 /* AUX area event, resume tracing */ /* * Adds/Removes a counter to/from the PMU, can be done inside a @@ -423,6 +432,18 @@ struct pmu { * * ->start() with PERF_EF_RELOAD will reprogram the counter * value, must be preceded by a ->stop() with PERF_EF_UPDATE. + * + * ->stop() with PERF_EF_PAUSE will stop as simply as possible. 
Will not + * overlap another ->stop() with PERF_EF_PAUSE nor ->start() with + * PERF_EF_RESUME. + * + * ->start() with PERF_EF_RESUME will start as simply as possible but + * only if the counter is not otherwise stopped. Will not overlap + * another ->start() with PERF_EF_RESUME nor ->stop() with + * PERF_EF_PAUSE. + * + * Notably, PERF_EF_PAUSE/PERF_EF_RESUME *can* be concurrent with other + * ->stop()/->start() invocations, just not itself. */ void (*start) (struct perf_event *event, int flags); void (*stop) (struct perf_event *event, int flags); @@ -1655,15 +1676,35 @@ extern void perf_tp_event(u16 event_type, u64 count, void *record, struct task_struct *task); extern void perf_bp_event(struct perf_event *event, void *data); -#ifndef perf_misc_flags -# define perf_misc_flags(regs) \ +extern unsigned long perf_misc_flags(struct perf_event *event, struct pt_regs *regs); +extern unsigned long perf_instruction_pointer(struct perf_event *event, + struct pt_regs *regs); + +#ifndef perf_arch_misc_flags +# define perf_arch_misc_flags(regs) \ (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL) -# define perf_instruction_pointer(regs) instruction_pointer(regs) +# define perf_arch_instruction_pointer(regs) instruction_pointer(regs) #endif #ifndef perf_arch_bpf_user_pt_regs # define perf_arch_bpf_user_pt_regs(regs) regs #endif +#ifndef perf_arch_guest_misc_flags +static inline unsigned long perf_arch_guest_misc_flags(struct pt_regs *regs) +{ + unsigned long guest_state = perf_guest_state(); + + if (!(guest_state & PERF_GUEST_ACTIVE)) + return 0; + + if (guest_state & PERF_GUEST_USER) + return PERF_RECORD_MISC_GUEST_USER; + else + return PERF_RECORD_MISC_GUEST_KERNEL; +} +# define perf_arch_guest_misc_flags(regs) perf_arch_guest_misc_flags(regs) +#endif + static inline bool has_branch_stack(struct perf_event *event) { return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK; @@ -1679,6 +1720,13 @@ static inline bool has_aux(struct perf_event *event) return event->pmu->setup_aux; } +static inline bool has_aux_action(struct perf_event *event) +{ + return event->attr.aux_sample_size || + event->attr.aux_pause || + event->attr.aux_resume; +} + static inline bool is_write_backward(struct perf_event *event) { return !!event->attr.write_backward; diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h index 59a3deb792a8..3469c4b20105 100644 --- a/include/linux/pgalloc_tag.h +++ b/include/linux/pgalloc_tag.h @@ -12,45 +12,166 @@ #include <linux/page_ext.h> extern struct page_ext_operations page_alloc_tagging_ops; +extern unsigned long alloc_tag_ref_mask; +extern int alloc_tag_ref_offs; +extern struct alloc_tag_kernel_section kernel_tags; -static inline union codetag_ref *codetag_ref_from_page_ext(struct page_ext *page_ext) +DECLARE_STATIC_KEY_FALSE(mem_profiling_compressed); + +typedef u16 pgalloc_tag_idx; + +union pgtag_ref_handle { + union codetag_ref *ref; /* reference in page extension */ + struct page *page; /* reference in page flags */ +}; + +/* Reserved indexes */ +#define CODETAG_ID_NULL 0 +#define CODETAG_ID_EMPTY 1 +#define CODETAG_ID_FIRST 2 + +#ifdef CONFIG_MODULES + +extern struct alloc_tag_module_section module_tags; + +static inline struct alloc_tag *module_idx_to_tag(pgalloc_tag_idx idx) { - return (union codetag_ref *)page_ext_data(page_ext, &page_alloc_tagging_ops); + return &module_tags.first_tag[idx - kernel_tags.count]; } -static inline struct page_ext *page_ext_from_codetag_ref(union codetag_ref *ref) +static inline pgalloc_tag_idx 
module_tag_to_idx(struct alloc_tag *tag) { - return (void *)ref - page_alloc_tagging_ops.offset; + return CODETAG_ID_FIRST + kernel_tags.count + (tag - module_tags.first_tag); } -/* Should be called only if mem_alloc_profiling_enabled() */ -static inline union codetag_ref *get_page_tag_ref(struct page *page) +#else /* CONFIG_MODULES */ + +static inline struct alloc_tag *module_idx_to_tag(pgalloc_tag_idx idx) { - if (page) { - struct page_ext *page_ext = page_ext_get(page); + pr_warn("invalid page tag reference %lu\n", (unsigned long)idx); + return NULL; +} - if (page_ext) - return codetag_ref_from_page_ext(page_ext); +static inline pgalloc_tag_idx module_tag_to_idx(struct alloc_tag *tag) +{ + pr_warn("invalid page tag 0x%lx\n", (unsigned long)tag); + return CODETAG_ID_NULL; +} + +#endif /* CONFIG_MODULES */ + +static inline void idx_to_ref(pgalloc_tag_idx idx, union codetag_ref *ref) +{ + switch (idx) { + case (CODETAG_ID_NULL): + ref->ct = NULL; + break; + case (CODETAG_ID_EMPTY): + set_codetag_empty(ref); + break; + default: + idx -= CODETAG_ID_FIRST; + ref->ct = idx < kernel_tags.count ? + &kernel_tags.first_tag[idx].ct : + &module_idx_to_tag(idx)->ct; + break; } - return NULL; } -static inline void put_page_tag_ref(union codetag_ref *ref) +static inline pgalloc_tag_idx ref_to_idx(union codetag_ref *ref) +{ + struct alloc_tag *tag; + + if (!ref->ct) + return CODETAG_ID_NULL; + + if (is_codetag_empty(ref)) + return CODETAG_ID_EMPTY; + + tag = ct_to_alloc_tag(ref->ct); + if (tag >= kernel_tags.first_tag && tag < kernel_tags.first_tag + kernel_tags.count) + return CODETAG_ID_FIRST + (tag - kernel_tags.first_tag); + + return module_tag_to_idx(tag); +} + + + +/* Should be called only if mem_alloc_profiling_enabled() */ +static inline bool get_page_tag_ref(struct page *page, union codetag_ref *ref, + union pgtag_ref_handle *handle) { - if (WARN_ON(!ref)) + if (!page) + return false; + + if (static_key_enabled(&mem_profiling_compressed)) { + pgalloc_tag_idx idx; + + idx = (page->flags >> alloc_tag_ref_offs) & alloc_tag_ref_mask; + idx_to_ref(idx, ref); + handle->page = page; + } else { + struct page_ext *page_ext; + union codetag_ref *tmp; + + page_ext = page_ext_get(page); + if (!page_ext) + return false; + + tmp = (union codetag_ref *)page_ext_data(page_ext, &page_alloc_tagging_ops); + ref->ct = tmp->ct; + handle->ref = tmp; + } + + return true; +} + +static inline void put_page_tag_ref(union pgtag_ref_handle handle) +{ + if (WARN_ON(!handle.ref)) return; - page_ext_put(page_ext_from_codetag_ref(ref)); + if (!static_key_enabled(&mem_profiling_compressed)) + page_ext_put((void *)handle.ref - page_alloc_tagging_ops.offset); +} + +static inline void update_page_tag_ref(union pgtag_ref_handle handle, union codetag_ref *ref) +{ + if (static_key_enabled(&mem_profiling_compressed)) { + struct page *page = handle.page; + unsigned long old_flags; + unsigned long flags; + unsigned long idx; + + if (WARN_ON(!page || !ref)) + return; + + idx = (unsigned long)ref_to_idx(ref); + idx = (idx & alloc_tag_ref_mask) << alloc_tag_ref_offs; + do { + old_flags = READ_ONCE(page->flags); + flags = old_flags; + flags &= ~(alloc_tag_ref_mask << alloc_tag_ref_offs); + flags |= idx; + } while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags))); + } else { + if (WARN_ON(!handle.ref || !ref)) + return; + + handle.ref->ct = ref->ct; + } } static inline void clear_page_tag_ref(struct page *page) { if (mem_alloc_profiling_enabled()) { - union codetag_ref *ref = get_page_tag_ref(page); + union pgtag_ref_handle handle; 
+ union codetag_ref ref; - if (ref) { - set_codetag_empty(ref); - put_page_tag_ref(ref); + if (get_page_tag_ref(page, &ref, &handle)) { + set_codetag_empty(&ref); + update_page_tag_ref(handle, &ref); + put_page_tag_ref(handle); } } } @@ -59,11 +180,13 @@ static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, unsigned int nr) { if (mem_alloc_profiling_enabled()) { - union codetag_ref *ref = get_page_tag_ref(page); + union pgtag_ref_handle handle; + union codetag_ref ref; - if (ref) { - alloc_tag_add(ref, task->alloc_tag, PAGE_SIZE * nr); - put_page_tag_ref(ref); + if (get_page_tag_ref(page, &ref, &handle)) { + alloc_tag_add(&ref, task->alloc_tag, PAGE_SIZE * nr); + update_page_tag_ref(handle, &ref); + put_page_tag_ref(handle); } } } @@ -71,11 +194,13 @@ static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) { if (mem_alloc_profiling_enabled()) { - union codetag_ref *ref = get_page_tag_ref(page); + union pgtag_ref_handle handle; + union codetag_ref ref; - if (ref) { - alloc_tag_sub(ref, PAGE_SIZE * nr); - put_page_tag_ref(ref); + if (get_page_tag_ref(page, &ref, &handle)) { + alloc_tag_sub(&ref, PAGE_SIZE * nr); + update_page_tag_ref(handle, &ref); + put_page_tag_ref(handle); } } } @@ -85,13 +210,14 @@ static inline struct alloc_tag *pgalloc_tag_get(struct page *page) struct alloc_tag *tag = NULL; if (mem_alloc_profiling_enabled()) { - union codetag_ref *ref = get_page_tag_ref(page); - - alloc_tag_sub_check(ref); - if (ref) { - if (ref->ct) - tag = ct_to_alloc_tag(ref->ct); - put_page_tag_ref(ref); + union pgtag_ref_handle handle; + union codetag_ref ref; + + if (get_page_tag_ref(page, &ref, &handle)) { + alloc_tag_sub_check(&ref); + if (ref.ct) + tag = ct_to_alloc_tag(ref.ct); + put_page_tag_ref(handle); } } @@ -104,16 +230,22 @@ static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr); } +void pgalloc_tag_split(struct folio *folio, int old_order, int new_order); +void pgalloc_tag_swap(struct folio *new, struct folio *old); + +void __init alloc_tag_sec_init(void); + #else /* CONFIG_MEM_ALLOC_PROFILING */ -static inline union codetag_ref *get_page_tag_ref(struct page *page) { return NULL; } -static inline void put_page_tag_ref(union codetag_ref *ref) {} static inline void clear_page_tag_ref(struct page *page) {} static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, unsigned int nr) {} static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {} static inline struct alloc_tag *pgalloc_tag_get(struct page *page) { return NULL; } static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {} +static inline void alloc_tag_sec_init(void) {} +static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) {} +static inline void pgalloc_tag_swap(struct folio *new, struct folio *old) {} #endif /* CONFIG_MEM_ALLOC_PROFILING */ diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index e8b2ac6bd2ae..adef9d6e9b1b 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -90,6 +90,27 @@ static inline unsigned long pud_index(unsigned long address) #define pgd_index(a) (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) #endif +#ifndef kernel_pte_init +static inline void kernel_pte_init(void *addr) +{ +} +#define kernel_pte_init kernel_pte_init +#endif + +#ifndef pmd_init +static inline void pmd_init(void *addr) +{ 
+} +#define pmd_init pmd_init +#endif + +#ifndef pud_init +static inline void pud_init(void *addr) +{ +} +#define pud_init pud_init +#endif + #ifndef pte_offset_kernel static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address) { @@ -1056,44 +1077,6 @@ static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b) } #endif -/* - * Use set_p*_safe(), and elide TLB flushing, when confident that *no* - * TLB flush will be required as a result of the "set". For example, use - * in scenarios where it is known ahead of time that the routine is - * setting non-present entries, or re-setting an existing entry to the - * same value. Otherwise, use the typical "set" helpers and flush the - * TLB. - */ -#define set_pte_safe(ptep, pte) \ -({ \ - WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \ - set_pte(ptep, pte); \ -}) - -#define set_pmd_safe(pmdp, pmd) \ -({ \ - WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \ - set_pmd(pmdp, pmd); \ -}) - -#define set_pud_safe(pudp, pud) \ -({ \ - WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \ - set_pud(pudp, pud); \ -}) - -#define set_p4d_safe(p4dp, p4d) \ -({ \ - WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \ - set_p4d(p4dp, p4d); \ -}) - -#define set_pgd_safe(pgdp, pgd) \ -({ \ - WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \ - set_pgd(pgdp, pgd); \ -}) - #ifndef __HAVE_ARCH_DO_SWAP_PAGE static inline void arch_do_swap_page_nr(struct mm_struct *mm, struct vm_area_struct *vma, diff --git a/include/linux/phy.h b/include/linux/phy.h index a98bc91a0cde..563c46205685 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -601,8 +601,8 @@ struct macsec_ops; * @adv_old: Saved advertised while power saving for WoL * @supported_eee: supported PHY EEE linkmodes * @advertising_eee: Currently advertised EEE linkmodes - * @eee_enabled: Flag indicating whether the EEE feature is enabled * @enable_tx_lpi: When True, MAC should transmit LPI to PHY + * @eee_active: phylib private state, indicating that EEE has been negotiated * @eee_cfg: User configuration of EEE * @lp_advertising: Current link partner advertised linkmodes * @host_interfaces: PHY interface modes supported by host @@ -721,16 +721,15 @@ struct phy_device { /* used for eee validation and configuration*/ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported_eee); __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising_eee); - bool eee_enabled; - - /* Host supported PHY interface types. Should be ignored if empty. */ - DECLARE_PHY_INTERFACE_MASK(host_interfaces); - /* Energy efficient ethernet modes which should be prohibited */ - u32 eee_broken_modes; + __ETHTOOL_DECLARE_LINK_MODE_MASK(eee_broken_modes); bool enable_tx_lpi; + bool eee_active; struct eee_config eee_cfg; + /* Host supported PHY interface types. Should be ignored if empty. 
*/ + DECLARE_PHY_INTERFACE_MASK(host_interfaces); + #ifdef CONFIG_LED_TRIGGER_PHY struct phy_led_trigger *phy_led_triggers; unsigned int phy_num_led_triggers; @@ -877,8 +876,9 @@ struct phy_plca_status { /* Modes for PHY LED configuration */ enum phy_led_modes { - PHY_LED_ACTIVE_LOW = 0, - PHY_LED_INACTIVE_HIGH_IMPEDANCE = 1, + PHY_LED_ACTIVE_HIGH = 0, + PHY_LED_ACTIVE_LOW = 1, + PHY_LED_INACTIVE_HIGH_IMPEDANCE = 2, /* keep it last */ __PHY_LED_MODES_NUM, @@ -1260,9 +1260,20 @@ size_t phy_speeds(unsigned int *speeds, size_t size, unsigned long *mask); void of_set_phy_supported(struct phy_device *phydev); void of_set_phy_eee_broken(struct phy_device *phydev); +void of_set_phy_timing_role(struct phy_device *phydev); int phy_speed_down_core(struct phy_device *phydev); /** + * phy_set_eee_broken - Mark an EEE mode as broken so that it isn't advertised. + * @phydev: The phy_device struct + * @link_mode: The broken EEE mode + */ +static inline void phy_set_eee_broken(struct phy_device *phydev, u32 link_mode) +{ + linkmode_set_bit(link_mode, phydev->eee_broken_modes); +} + +/** * phy_is_started - Convenience function to check whether PHY is started * @phydev: The phy_device struct */ @@ -1378,12 +1389,13 @@ int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum); * @regnum: The register on the MMD to read * @val: Variable to read the register into * @cond: Break condition (usually involving @val) - * @sleep_us: Maximum time to sleep between reads in us (0 - * tight-loops). Should be less than ~20ms since usleep_range - * is used (see Documentation/timers/timers-howto.rst). + * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops). Please + * read usleep_range() function description for details and + * limitations. * @timeout_us: Timeout in us, 0 means never timeout * @sleep_before_read: if it is true, sleep @sleep_us before read. - * Returns 0 on success and -ETIMEDOUT upon a timeout. In either + * + * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either * case, the last read value at @args is stored in @val. Must not * be called from atomic context if sleep_us or timeout_us are used. 
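The new phy_set_eee_broken() helper above is intended to be called from PHY driver code before EEE advertisement is configured. A minimal sketch, assuming a hypothetical probe callback and treating the chosen link mode as an example erratum workaround:

#include <linux/ethtool.h>
#include <linux/phy.h>

static int my_phy_probe(struct phy_device *phydev)
{
        /* hypothetical erratum: never advertise EEE for 1000baseT */
        phy_set_eee_broken(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);

        return 0;
}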
*/ @@ -1883,7 +1895,6 @@ int genphy_read_abilities(struct phy_device *phydev); int genphy_setup_forced(struct phy_device *phydev); int genphy_restart_aneg(struct phy_device *phydev); int genphy_check_and_restart_aneg(struct phy_device *phydev, bool restart); -int genphy_config_eee_advert(struct phy_device *phydev); int __genphy_config_aneg(struct phy_device *phydev, bool changed); int genphy_aneg_done(struct phy_device *phydev); int genphy_update_link(struct phy_device *phydev); @@ -1951,7 +1962,6 @@ int genphy_c45_ethtool_get_eee(struct phy_device *phydev, struct ethtool_keee *data); int genphy_c45_ethtool_set_eee(struct phy_device *phydev, struct ethtool_keee *data); -int genphy_c45_write_eee_adv(struct phy_device *phydev, unsigned long *adv); int genphy_c45_an_config_eee_aneg(struct phy_device *phydev); int genphy_c45_read_eee_adv(struct phy_device *phydev, unsigned long *adv); diff --git a/include/linux/phy/phy-sun4i-usb.h b/include/linux/phy/phy-sun4i-usb.h index 91eb755ee73b..f3e7b13608e4 100644 --- a/include/linux/phy/phy-sun4i-usb.h +++ b/include/linux/phy/phy-sun4i-usb.h @@ -11,7 +11,7 @@ /** * sun4i_usb_phy_set_squelch_detect() - Enable/disable squelch detect * @phy: reference to a sun4i usb phy - * @enabled: wether to enable or disable squelch detect + * @enabled: whether to enable or disable squelch detect */ void sun4i_usb_phy_set_squelch_detect(struct phy *phy, bool enabled); diff --git a/include/linux/platform_data/amd_qdma.h b/include/linux/platform_data/amd_qdma.h index 576d952f97ed..967a6ef31cf9 100644 --- a/include/linux/platform_data/amd_qdma.h +++ b/include/linux/platform_data/amd_qdma.h @@ -26,11 +26,13 @@ struct dma_slave_map; * @max_mm_channels: Maximum number of MM DMA channels in each direction * @device_map: DMA slave map * @irq_index: The index of first IRQ + * @dma_dev: The device pointer for dma operations */ struct qdma_platdata { u32 max_mm_channels; u32 irq_index; struct dma_slave_map *device_map; + struct device *dma_dev; }; #endif /* _PLATDATA_AMD_QDMA_H */ diff --git a/include/linux/platform_data/asoc-s3c.h b/include/linux/platform_data/asoc-s3c.h index f9c00f839e9f..085dd8e8af76 100644 --- a/include/linux/platform_data/asoc-s3c.h +++ b/include/linux/platform_data/asoc-s3c.h @@ -13,8 +13,6 @@ #include <linux/dmaengine.h> -extern void s3c64xx_ac97_setup_gpio(int); - struct samsung_i2s_type { /* If the Primary DAI has 5.1 Channels */ #define QUIRK_PRI_6CHAN (1 << 0) diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index e574b790be6f..b3c4993e656e 100644 --- a/include/linux/platform_data/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -1312,6 +1312,38 @@ enum ec_feature_code { * The EC supports the AP composing VDMs for us to send. */ EC_FEATURE_TYPEC_AP_VDM_SEND = 46, + /* + * The EC supports system safe mode panic recovery. + */ + EC_FEATURE_SYSTEM_SAFE_MODE = 47, + /* + * The EC will reboot on runtime assertion failures. + */ + EC_FEATURE_ASSERT_REBOOTS = 48, + /* + * The EC image is built with tokenized logging enabled. + */ + EC_FEATURE_TOKENIZED_LOGGING = 49, + /* + * The EC supports triggering an STB dump. + */ + EC_FEATURE_AMD_STB_DUMP = 50, + /* + * The EC supports memory dump commands. + */ + EC_FEATURE_MEMORY_DUMP = 51, + /* + * The EC supports DP2.1 capability + */ + EC_FEATURE_TYPEC_DP2_1 = 52, + /* + * The MCU is System Companion Processor Core 1 + */ + EC_FEATURE_SCP_C1 = 53, + /* + * The EC supports UCSI PPM. 
+ */ + EC_FEATURE_UCSI_PPM = 54, }; #define EC_FEATURE_MASK_0(event_code) BIT(event_code % 32) diff --git a/include/linux/platform_data/hwmon-s3c.h b/include/linux/platform_data/hwmon-s3c.h index 1707ad4147df..7d21e0c41037 100644 --- a/include/linux/platform_data/hwmon-s3c.h +++ b/include/linux/platform_data/hwmon-s3c.h @@ -33,14 +33,4 @@ struct s3c_hwmon_pdata { struct s3c_hwmon_chcfg *in[8]; }; -/** - * s3c_hwmon_set_platdata - Set platform data for S3C HWMON device - * @pd: Platform data to register to device. - * - * Register the given platform data for use with the S3C HWMON device. - * The call will copy the platform data, so the board definitions can - * make the structure itself __initdata. - */ -extern void __init s3c_hwmon_set_platdata(struct s3c_hwmon_pdata *pd); - #endif /* __HWMON_S3C_H__ */ diff --git a/include/linux/platform_data/max6639.h b/include/linux/platform_data/max6639.h deleted file mode 100644 index 65bfdb4fdc15..000000000000 --- a/include/linux/platform_data/max6639.h +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_MAX6639_H -#define _LINUX_MAX6639_H - -#include <linux/types.h> - -/* platform data for the MAX6639 temperature sensor and fan control */ - -struct max6639_platform_data { - bool pwm_polarity; /* Polarity low (0) or high (1, default) */ - int ppr; /* Pulses per rotation 1..4 (default == 2) */ - int rpm_range; /* 2000, 4000 (default), 8000 or 16000 */ -}; - -#endif /* _LINUX_MAX6639_H */ diff --git a/include/linux/platform_data/media/omap4iss.h b/include/linux/platform_data/media/omap4iss.h deleted file mode 100644 index 2a511a8fcda7..000000000000 --- a/include/linux/platform_data/media/omap4iss.h +++ /dev/null @@ -1,66 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef ARCH_ARM_PLAT_OMAP4_ISS_H -#define ARCH_ARM_PLAT_OMAP4_ISS_H - -#include <linux/i2c.h> - -struct iss_device; - -enum iss_interface_type { - ISS_INTERFACE_CSI2A_PHY1, - ISS_INTERFACE_CSI2B_PHY2, -}; - -/** - * struct iss_csiphy_lane: CSI2 lane position and polarity - * @pos: position of the lane - * @pol: polarity of the lane - */ -struct iss_csiphy_lane { - u8 pos; - u8 pol; -}; - -#define ISS_CSIPHY1_NUM_DATA_LANES 4 -#define ISS_CSIPHY2_NUM_DATA_LANES 1 - -/** - * struct iss_csiphy_lanes_cfg - CSI2 lane configuration - * @data: Configuration of one or two data lanes - * @clk: Clock lane configuration - */ -struct iss_csiphy_lanes_cfg { - struct iss_csiphy_lane data[ISS_CSIPHY1_NUM_DATA_LANES]; - struct iss_csiphy_lane clk; -}; - -/** - * struct iss_csi2_platform_data - CSI2 interface platform data - * @crc: Enable the cyclic redundancy check - * @vpclk_div: Video port output clock control - */ -struct iss_csi2_platform_data { - unsigned crc:1; - unsigned vpclk_div:2; - struct iss_csiphy_lanes_cfg lanecfg; -}; - -struct iss_subdev_i2c_board_info { - struct i2c_board_info *board_info; - int i2c_adapter_id; -}; - -struct iss_v4l2_subdevs_group { - struct iss_subdev_i2c_board_info *subdevs; - enum iss_interface_type interface; - union { - struct iss_csi2_platform_data csi2; - } bus; /* gcc < 4.6.0 chokes on anonymous union initializers */ -}; - -struct iss_platform_data { - struct iss_v4l2_subdevs_group *subdevs; - void (*set_constraints)(struct iss_device *iss, bool enable); -}; - -#endif diff --git a/include/linux/platform_data/microchip-ksz.h b/include/linux/platform_data/microchip-ksz.h index 2ee1a679e592..0e0e8fe6975f 100644 --- a/include/linux/platform_data/microchip-ksz.h +++ b/include/linux/platform_data/microchip-ksz.h @@ -42,6 +42,7 @@ 
enum ksz_chip_id { LAN9372_CHIP_ID = 0x00937200, LAN9373_CHIP_ID = 0x00937300, LAN9374_CHIP_ID = 0x00937400, + LAN9646_CHIP_ID = 0x00964600, }; struct ksz_platform_data { diff --git a/include/linux/platform_data/sa11x0-serial.h b/include/linux/platform_data/sa11x0-serial.h index 8b79ab08af45..a88096bc74e4 100644 --- a/include/linux/platform_data/sa11x0-serial.h +++ b/include/linux/platform_data/sa11x0-serial.h @@ -10,7 +10,6 @@ #define SA11X0_SERIAL_H struct uart_port; -struct uart_info; /* * This is a temporary structure for registering these diff --git a/include/linux/platform_data/x86/intel_scu_ipc.h b/include/linux/platform_data/x86/intel_scu_ipc.h index 0ca9962e97f2..b287627759f7 100644 --- a/include/linux/platform_data/x86/intel_scu_ipc.h +++ b/include/linux/platform_data/x86/intel_scu_ipc.h @@ -2,9 +2,13 @@ #ifndef __PLATFORM_X86_INTEL_SCU_IPC_H_ #define __PLATFORM_X86_INTEL_SCU_IPC_H_ +#include <linux/init.h> #include <linux/ioport.h> +#include <linux/types.h> struct device; +struct module; + struct intel_scu_ipc_dev; /** diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 7132623e4658..074754c23d33 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -235,17 +235,7 @@ extern void platform_device_put(struct platform_device *pdev); struct platform_driver { int (*probe)(struct platform_device *); - - /* - * .remove_new() is a relic from a prototype conversion of .remove(). - * New drivers are supposed to implement .remove(). Once all drivers are - * converted to not use .remove_new any more, it will be dropped. - */ - union { - void (*remove)(struct platform_device *); - void (*remove_new)(struct platform_device *); - }; - + void (*remove)(struct platform_device *); void (*shutdown)(struct platform_device *); int (*suspend)(struct platform_device *, pm_message_t state); int (*resume)(struct platform_device *); diff --git a/include/linux/pm.h b/include/linux/pm.h index 97b0e23363c8..e7f0260f15ad 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -385,7 +385,7 @@ const struct dev_pm_ops name = { \ #ifdef CONFIG_PM #define _EXPORT_DEV_PM_OPS(name, license, ns) _EXPORT_PM_OPS(name, license, ns) #define EXPORT_PM_FN_GPL(name) EXPORT_SYMBOL_GPL(name) -#define EXPORT_PM_FN_NS_GPL(name, ns) EXPORT_SYMBOL_NS_GPL(name, ns) +#define EXPORT_PM_FN_NS_GPL(name, ns) EXPORT_SYMBOL_NS_GPL(name, "ns") #else #define _EXPORT_DEV_PM_OPS(name, license, ns) _DISCARD_PM_OPS(name, license, ns) #define EXPORT_PM_FN_GPL(name) diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index b637ec14025f..45646bfcaf1a 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -30,9 +30,16 @@ * supplier and its PM domain when creating the * device-links. * + * PD_FLAG_REQUIRED_OPP: Assign required_devs for the required OPPs. The + * index of the required OPP must correspond to the + * index in the array of the pd_names. If pd_names + * isn't specified, the index just follows the + * index for the attached PM domain. 
+ * */ #define PD_FLAG_NO_DEV_LINK BIT(0) #define PD_FLAG_DEV_LINK_ON BIT(1) +#define PD_FLAG_REQUIRED_OPP BIT(2) struct dev_pm_domain_attach_data { const char * const *pd_names; @@ -43,6 +50,7 @@ struct dev_pm_domain_attach_data { struct dev_pm_domain_list { struct device **pd_devs; struct device_link **pd_links; + u32 *opp_tokens; u32 num_pds; }; @@ -92,6 +100,10 @@ struct dev_pm_domain_list { * GENPD_FLAG_OPP_TABLE_FW: The genpd provider supports performance states, * but its corresponding OPP tables are not * described in DT, but are given directly by FW. + * + * GENPD_FLAG_DEV_NAME_FW: Instructs genpd to generate a unique device name + * using ida. It is used by genpd providers which + * get their genpd-names directly from FW. */ #define GENPD_FLAG_PM_CLK (1U << 0) #define GENPD_FLAG_IRQ_SAFE (1U << 1) @@ -101,6 +113,7 @@ struct dev_pm_domain_list { #define GENPD_FLAG_RPM_ALWAYS_ON (1U << 5) #define GENPD_FLAG_MIN_RESIDENCY (1U << 6) #define GENPD_FLAG_OPP_TABLE_FW (1U << 7) +#define GENPD_FLAG_DEV_NAME_FW (1U << 8) enum gpd_status { GENPD_STATE_ON = 0, /* PM domain is on */ @@ -163,6 +176,7 @@ struct generic_pm_domain { atomic_t sd_count; /* Number of subdomains with power "on" */ enum gpd_status status; /* Current state of the domain */ unsigned int device_count; /* Number of devices */ + unsigned int device_id; /* unique device id */ unsigned int suspended_count; /* System suspend device counter */ unsigned int prepared_count; /* Suspend counter of prepared devices */ unsigned int performance_state; /* Aggregated max performance state */ @@ -244,6 +258,7 @@ struct generic_pm_domain_data { unsigned int performance_state; unsigned int default_pstate; unsigned int rpm_pstate; + unsigned int opp_token; bool hw_mode; void *data; }; diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 6424692c30b7..568183e3e641 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -62,11 +62,8 @@ typedef int (*config_clks_t)(struct device *dev, struct opp_table *opp_table, * @supported_hw: Array of hierarchy of versions to match. * @supported_hw_count: Number of elements in the array. * @regulator_names: Array of pointers to the names of the regulator, NULL terminated. - * @genpd_names: Null terminated array of pointers containing names of genpd to - * attach. Mutually exclusive with required_devs. - * @virt_devs: Pointer to return the array of genpd virtual devices. Mutually - * exclusive with required_devs. - * @required_devs: Required OPP devices. Mutually exclusive with genpd_names/virt_devs. + * @required_dev: The required OPP device. + * @required_dev_index: The index of the required OPP for the @required_dev. * * This structure contains platform specific OPP configurations for the device.
*/ @@ -79,9 +76,8 @@ struct dev_pm_opp_config { const unsigned int *supported_hw; unsigned int supported_hw_count; const char * const *regulator_names; - const char * const *genpd_names; - struct device ***virt_devs; - struct device **required_devs; + struct device *required_dev; + unsigned int required_dev_index; }; #define OPP_LEVEL_UNSET U32_MAX @@ -675,36 +671,6 @@ static inline void dev_pm_opp_put_config_regulators(int token) dev_pm_opp_clear_config(token); } -/* genpd helpers */ -static inline int dev_pm_opp_attach_genpd(struct device *dev, - const char * const *names, - struct device ***virt_devs) -{ - struct dev_pm_opp_config config = { - .genpd_names = names, - .virt_devs = virt_devs, - }; - - return dev_pm_opp_set_config(dev, &config); -} - -static inline void dev_pm_opp_detach_genpd(int token) -{ - dev_pm_opp_clear_config(token); -} - -static inline int devm_pm_opp_attach_genpd(struct device *dev, - const char * const *names, - struct device ***virt_devs) -{ - struct dev_pm_opp_config config = { - .genpd_names = names, - .virt_devs = virt_devs, - }; - - return devm_pm_opp_set_config(dev, &config); -} - /* prop-name helpers */ static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name) { diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index 76cd1f9f1365..222f7530806c 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h @@ -10,7 +10,7 @@ #define _LINUX_PM_WAKEUP_H #ifndef _DEVICE_H_ -# error "please don't include this file directly" +# error "Please do not include this file directly." #endif #include <linux/types.h> diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index 453691710839..f11f10c97bd9 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -5,12 +5,16 @@ #include <linux/alarmtimer.h> #include <linux/list.h> #include <linux/mutex.h> +#include <linux/pid.h> #include <linux/posix-timers_types.h> +#include <linux/rcuref.h> #include <linux/spinlock.h> #include <linux/timerqueue.h> struct kernel_siginfo; struct task_struct; +struct sigqueue; +struct k_itimer; static inline clockid_t make_process_cpuclock(const unsigned int pid, const clockid_t clock) @@ -35,6 +39,8 @@ static inline int clockid_to_fd(const clockid_t clk) #ifdef CONFIG_POSIX_TIMERS +#include <linux/signal_types.h> + /** * cpu_timer - Posix CPU timer representation for k_itimer * @node: timerqueue node to queue in the task/sig @@ -42,6 +48,7 @@ static inline int clockid_to_fd(const clockid_t clk) * @pid: Pointer to target task PID * @elist: List head for the expiry list * @firing: Timer is currently firing + * @nanosleep: Timer is used for nanosleep and is not a regular posix-timer * @handling: Pointer to the task which handles expiry */ struct cpu_timer { @@ -49,7 +56,8 @@ struct cpu_timer { struct timerqueue_head *head; struct pid *pid; struct list_head elist; - int firing; + bool firing; + bool nanosleep; struct task_struct __rcu *handling; }; @@ -101,6 +109,12 @@ static inline void posix_cputimers_rt_watchdog(struct posix_cputimers *pct, pct->bases[CPUCLOCK_SCHED].nextevt = runtime; } +void posixtimer_rearm_itimer(struct task_struct *p); +bool posixtimer_init_sigqueue(struct sigqueue *q); +void posixtimer_send_sigqueue(struct k_itimer *tmr); +bool posixtimer_deliver_signal(struct kernel_siginfo *info, struct sigqueue *timer_sigq); +void posixtimer_free_timer(struct k_itimer *timer); + /* Init task static initializer */ #define INIT_CPU_TIMERBASE(b) { \ .nextevt = U64_MAX, \ @@ -122,6 +136,10 @@ struct 
cpu_timer { }; static inline void posix_cputimers_init(struct posix_cputimers *pct) { } static inline void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit) { } +static inline void posixtimer_rearm_itimer(struct task_struct *p) { } +static inline bool posixtimer_deliver_signal(struct kernel_siginfo *info, + struct sigqueue *timer_sigq) { return false; } +static inline void posixtimer_free_timer(struct k_itimer *timer) { } #endif #ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK @@ -132,50 +150,56 @@ static inline void clear_posix_cputimers_work(struct task_struct *p) { } static inline void posix_cputimers_init_work(void) { } #endif -#define REQUEUE_PENDING 1 - /** * struct k_itimer - POSIX.1b interval timer structure. - * @list: List head for binding the timer to signals->posix_timers + * @list: List node for binding the timer to tsk::signal::posix_timers + * @ignored_list: List node for tracking ignored timers in tsk::signal::ignored_posix_timers * @t_hash: Entry in the posix timer hash table * @it_lock: Lock protecting the timer * @kclock: Pointer to the k_clock struct handling this timer * @it_clock: The posix timer clock id * @it_id: The posix timer id for identifying the timer - * @it_active: Marker that timer is active + * @it_status: The status of the timer + * @it_sig_periodic: The periodic status at signal delivery * @it_overrun: The overrun counter for pending signals * @it_overrun_last: The overrun at the time of the last delivered signal - * @it_requeue_pending: Indicator that timer waits for being requeued on - * signal delivery + * @it_signal_seq: Sequence count to control signal delivery + * @it_sigqueue_seq: The sequence count at the point where the signal was queued * @it_sigev_notify: The notify word of sigevent struct for signal delivery * @it_interval: The interval for periodic timers * @it_signal: Pointer to the creators signal struct * @it_pid: The pid of the process/task targeted by the signal * @it_process: The task to wakeup on clock_nanosleep (CPU timers) - * @sigq: Pointer to preallocated sigqueue + * @rcuref: Reference count for life time management + * @sigq: Embedded sigqueue * @it: Union representing the various posix timer type * internals. * @rcu: RCU head for freeing the timer. 
*/ struct k_itimer { struct hlist_node list; + struct hlist_node ignored_list; struct hlist_node t_hash; spinlock_t it_lock; const struct k_clock *kclock; clockid_t it_clock; timer_t it_id; - int it_active; + int it_status; + bool it_sig_periodic; s64 it_overrun; s64 it_overrun_last; - int it_requeue_pending; + unsigned int it_signal_seq; + unsigned int it_sigqueue_seq; int it_sigev_notify; + enum pid_type it_pid_type; ktime_t it_interval; struct signal_struct *it_signal; union { struct pid *it_pid; struct task_struct *it_process; }; - struct sigqueue *sigq; + struct sigqueue sigq; + rcuref_t rcuref; union { struct { struct hrtimer timer; @@ -196,5 +220,29 @@ void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx, int update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new); -void posixtimer_rearm(struct kernel_siginfo *info); +#ifdef CONFIG_POSIX_TIMERS +static inline void posixtimer_putref(struct k_itimer *tmr) +{ + if (rcuref_put(&tmr->rcuref)) + posixtimer_free_timer(tmr); +} + +static inline void posixtimer_sigqueue_getref(struct sigqueue *q) +{ + struct k_itimer *tmr = container_of(q, struct k_itimer, sigq); + + WARN_ON_ONCE(!rcuref_get(&tmr->rcuref)); +} + +static inline void posixtimer_sigqueue_putref(struct sigqueue *q) +{ + struct k_itimer *tmr = container_of(q, struct k_itimer, sigq); + + posixtimer_putref(tmr); +} +#else /* CONFIG_POSIX_TIMERS */ +static inline void posixtimer_sigqueue_getref(struct sigqueue *q) { } +static inline void posixtimer_sigqueue_putref(struct sigqueue *q) { } +#endif /* !CONFIG_POSIX_TIMERS */ + #endif diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h index 0e65b3d634d9..e2d47eb1a7f3 100644 --- a/include/linux/posix_acl.h +++ b/include/linux/posix_acl.h @@ -28,9 +28,9 @@ struct posix_acl_entry { struct posix_acl { refcount_t a_refcount; - struct rcu_head a_rcu; unsigned int a_count; - struct posix_acl_entry a_entries[]; + struct rcu_head a_rcu; + struct posix_acl_entry a_entries[] __counted_by(a_count); }; #define FOREACH_ACL_ENTRY(pa, acl, pe) \ @@ -62,7 +62,7 @@ posix_acl_release(struct posix_acl *acl) /* posix_acl.c */ extern void posix_acl_init(struct posix_acl *, int); -extern struct posix_acl *posix_acl_alloc(int, gfp_t); +extern struct posix_acl *posix_acl_alloc(unsigned int count, gfp_t flags); extern struct posix_acl *posix_acl_from_mode(umode_t, gfp_t); extern int posix_acl_equiv_mode(const struct posix_acl *, umode_t *); extern int __posix_acl_create(struct posix_acl **, gfp_t, umode_t *); diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 910d407ebe63..b98106e1a90f 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -236,6 +236,8 @@ struct power_supply_config { char **supplied_to; size_t num_supplicants; + + bool no_wakeup_source; }; /* Description of power supply */ @@ -750,9 +752,9 @@ struct power_supply_battery_info { int temp_alert_max; int temp_min; int temp_max; - struct power_supply_battery_ocv_table *ocv_table[POWER_SUPPLY_OCV_TEMP_MAX]; + const struct power_supply_battery_ocv_table *ocv_table[POWER_SUPPLY_OCV_TEMP_MAX]; int ocv_table_size[POWER_SUPPLY_OCV_TEMP_MAX]; - struct power_supply_resistance_temp_table *resist_table; + const struct power_supply_resistance_temp_table *resist_table; int resist_table_size; const struct power_supply_vbat_ri_table *vbat2ri_discharging; int vbat2ri_discharging_size; @@ -797,15 +799,15 @@ extern bool power_supply_battery_info_has_prop(struct power_supply_battery_info extern int 
power_supply_battery_info_get_prop(struct power_supply_battery_info *info, enum power_supply_property psp, union power_supply_propval *val); -extern int power_supply_ocv2cap_simple(struct power_supply_battery_ocv_table *table, +extern int power_supply_ocv2cap_simple(const struct power_supply_battery_ocv_table *table, int table_len, int ocv); -extern struct power_supply_battery_ocv_table * +extern const struct power_supply_battery_ocv_table * power_supply_find_ocv2cap_table(struct power_supply_battery_info *info, int temp, int *table_len); extern int power_supply_batinfo_ocv2cap(struct power_supply_battery_info *info, int ocv, int temp); extern int -power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *table, +power_supply_temp2resist_simple(const struct power_supply_resistance_temp_table *table, int table_len, int temp); extern int power_supply_vbat2ri(struct power_supply_battery_info *info, int vbat_uv, bool charging); @@ -863,8 +865,6 @@ static inline int power_supply_set_property(struct power_supply *psy, const union power_supply_propval *val) { return 0; } #endif -extern int power_supply_property_is_writeable(struct power_supply *psy, - enum power_supply_property psp); extern void power_supply_external_power_changed(struct power_supply *psy); extern struct power_supply *__must_check @@ -872,17 +872,9 @@ power_supply_register(struct device *parent, const struct power_supply_desc *desc, const struct power_supply_config *cfg); extern struct power_supply *__must_check -power_supply_register_no_ws(struct device *parent, - const struct power_supply_desc *desc, - const struct power_supply_config *cfg); -extern struct power_supply *__must_check devm_power_supply_register(struct device *parent, const struct power_supply_desc *desc, const struct power_supply_config *cfg); -extern struct power_supply *__must_check -devm_power_supply_register_no_ws(struct device *parent, - const struct power_supply_desc *desc, - const struct power_supply_config *cfg); extern void power_supply_unregister(struct power_supply *psy); extern int power_supply_powers(struct power_supply *psy, struct device *dev); @@ -945,19 +937,6 @@ static inline bool power_supply_is_watt_property(enum power_supply_property psp) return false; } -#ifdef CONFIG_POWER_SUPPLY_HWMON -int power_supply_add_hwmon_sysfs(struct power_supply *psy); -void power_supply_remove_hwmon_sysfs(struct power_supply *psy); -#else -static inline int power_supply_add_hwmon_sysfs(struct power_supply *psy) -{ - return 0; -} - -static inline -void power_supply_remove_hwmon_sysfs(struct power_supply *psy) {} -#endif - #ifdef CONFIG_SYSFS ssize_t power_supply_charge_behaviour_show(struct device *dev, unsigned int available_behaviours, diff --git a/include/linux/prandom.h b/include/linux/prandom.h index f7f1e5251c67..f2ed5b72b3d6 100644 --- a/include/linux/prandom.h +++ b/include/linux/prandom.h @@ -10,6 +10,7 @@ #include <linux/types.h> #include <linux/once.h> +#include <linux/percpu.h> #include <linux/random.h> struct rnd_state { diff --git a/include/linux/preempt.h b/include/linux/preempt.h index ce76f1a45722..ca86235ac15c 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -486,6 +486,7 @@ DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable()) extern bool preempt_model_none(void); extern bool preempt_model_voluntary(void); extern bool preempt_model_full(void); +extern bool preempt_model_lazy(void); #else @@ -502,6 +503,11 @@ static inline bool preempt_model_full(void) return IS_ENABLED(CONFIG_PREEMPT); } +static 
inline bool preempt_model_lazy(void) +{ + return IS_ENABLED(CONFIG_PREEMPT_LAZY); +} + #endif static inline bool preempt_model_rt(void) @@ -519,7 +525,7 @@ static inline bool preempt_model_rt(void) */ static inline bool preempt_model_preemptible(void) { - return preempt_model_full() || preempt_model_rt(); + return preempt_model_full() || preempt_model_lazy() || preempt_model_rt(); } #endif /* __LINUX_PREEMPT_H */ diff --git a/include/linux/printk.h b/include/linux/printk.h index eca9bb2ee637..4217a9f412b2 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -166,6 +166,9 @@ __printf(1, 2) __cold int _printk_deferred(const char *fmt, ...); extern void __printk_deferred_enter(void); extern void __printk_deferred_exit(void); +extern void printk_force_console_enter(void); +extern void printk_force_console_exit(void); + /* * The printk_deferred_enter/exit macros are available only as a hack for * some code paths that need to defer all printk console printing. Interrupts @@ -229,6 +232,14 @@ static inline void printk_deferred_exit(void) { } +static inline void printk_force_console_enter(void) +{ +} + +static inline void printk_force_console_exit(void) +{ +} + static inline int printk_ratelimit(void) { return 0; diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 8acd60b53f58..6853e29d9674 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -8,7 +8,7 @@ #include <linux/mutex.h> #include <linux/of.h> -MODULE_IMPORT_NS(PWM); +MODULE_IMPORT_NS("PWM"); struct pwm_chip; @@ -49,6 +49,31 @@ enum { PWMF_EXPORTED = 1, }; +/** + * struct pwm_waveform - description of a PWM waveform + * @period_length_ns: PWM period + * @duty_length_ns: PWM duty cycle + * @duty_offset_ns: offset of the rising edge from the period's start + * + * This is a representation of a PWM waveform alternative to struct pwm_state + * below. It's more expressive than struct pwm_state as it contains a + * duty_offset_ns and so can represent offsets other than zero (with .polarity = + * PWM_POLARITY_NORMAL) and period - duty_cycle (.polarity = + * PWM_POLARITY_INVERSED). + * + * Note there is no explicit bool for enabled. A "disabled" PWM is represented + * by .period_length_ns = 0. Note further that the behaviour of a "disabled" PWM + * is undefined. Depending on the hardware's capabilities it might drive the + * active or inactive level, go high-z or even continue to toggle. + * + * The unit for all three members is nanoseconds. + */ +struct pwm_waveform { + u64 period_length_ns; + u64 duty_length_ns; + u64 duty_offset_ns; +}; + /* * struct pwm_state - state of a PWM channel * @period: PWM period (in nanoseconds) @@ -251,6 +276,11 @@ struct pwm_capture { * @request: optional hook for requesting a PWM * @free: optional hook for freeing a PWM * @capture: capture and report PWM signal + * @sizeof_wfhw: size (in bytes) of driver specific waveform presentation + * @round_waveform_tohw: convert a struct pwm_waveform to driver specific presentation + * @round_waveform_fromhw: convert a driver specific waveform presentation to struct pwm_waveform + * @read_waveform: read driver specific waveform presentation from hardware + * @write_waveform: write driver specific waveform presentation to hardware * @apply: atomically apply a new PWM config * @get_state: get the current PWM state. 
*/ @@ -259,6 +289,17 @@ struct pwm_ops { void (*free)(struct pwm_chip *chip, struct pwm_device *pwm); int (*capture)(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_capture *result, unsigned long timeout); + + size_t sizeof_wfhw; + int (*round_waveform_tohw)(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_waveform *wf, void *wfhw); + int (*round_waveform_fromhw)(struct pwm_chip *chip, struct pwm_device *pwm, + const void *wfhw, struct pwm_waveform *wf); + int (*read_waveform)(struct pwm_chip *chip, struct pwm_device *pwm, + void *wfhw); + int (*write_waveform)(struct pwm_chip *chip, struct pwm_device *pwm, + const void *wfhw); + int (*apply)(struct pwm_chip *chip, struct pwm_device *pwm, const struct pwm_state *state); int (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm, @@ -275,6 +316,9 @@ struct pwm_ops { * @of_xlate: request a PWM device given a device tree PWM specifier * @atomic: can the driver's ->apply() be called in atomic context * @uses_pwmchip_alloc: signals if pwmchip_allow was used to allocate this chip + * @operational: signals if the chip can be used (or is already deregistered) + * @nonatomic_lock: mutex for nonatomic chips + * @atomic_lock: mutex for atomic chips * @pwms: array of PWM devices allocated by the framework */ struct pwm_chip { @@ -290,6 +334,16 @@ struct pwm_chip { /* only used internally by the PWM framework */ bool uses_pwmchip_alloc; + bool operational; + union { + /* + * depending on the chip being atomic or not either the mutex or + * the spinlock is used. It protects .operational and + * synchronizes the callbacks in .ops + */ + struct mutex nonatomic_lock; + spinlock_t atomic_lock; + }; struct pwm_device pwms[] __counted_by(npwm); }; @@ -309,9 +363,14 @@ static inline void pwmchip_set_drvdata(struct pwm_chip *chip, void *data) } #if IS_ENABLED(CONFIG_PWM) -/* PWM user APIs */ + +/* PWM consumer APIs */ +int pwm_round_waveform_might_sleep(struct pwm_device *pwm, struct pwm_waveform *wf); +int pwm_get_waveform_might_sleep(struct pwm_device *pwm, struct pwm_waveform *wf); +int pwm_set_waveform_might_sleep(struct pwm_device *pwm, const struct pwm_waveform *wf, bool exact); int pwm_apply_might_sleep(struct pwm_device *pwm, const struct pwm_state *state); int pwm_apply_atomic(struct pwm_device *pwm, const struct pwm_state *state); +int pwm_get_state_hw(struct pwm_device *pwm, struct pwm_state *state); int pwm_adjust_config(struct pwm_device *pwm); /** @@ -436,6 +495,11 @@ static inline int pwm_apply_atomic(struct pwm_device *pwm, return -EOPNOTSUPP; } +static inline int pwm_get_state_hw(struct pwm_device *pwm, struct pwm_state *state) +{ + return -EOPNOTSUPP; +} + static inline int pwm_adjust_config(struct pwm_device *pwm) { return -EOPNOTSUPP; diff --git a/include/linux/random.h b/include/linux/random.h index b0a940af4fff..333cecfca93f 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -145,13 +145,6 @@ declare_get_random_var_wait(u64, u32) declare_get_random_var_wait(long, unsigned long) #undef declare_get_random_var -/* - * This is designed to be standalone for just prandom - * users, but for now we include it from <linux/random.h> - * for legacy reasons. 
- */ -#include <linux/prandom.h> - #ifdef CONFIG_SMP int random_prepare_cpu(unsigned int cpu); int random_online_cpu(unsigned int cpu); diff --git a/include/linux/range.h b/include/linux/range.h index 6ad0b73cb7ad..d7f98e1285d7 100644 --- a/include/linux/range.h +++ b/include/linux/range.h @@ -13,11 +13,20 @@ static inline u64 range_len(const struct range *range) return range->end - range->start + 1; } -static inline bool range_contains(struct range *r1, struct range *r2) +/* True if r1 completely contains r2 */ +static inline bool range_contains(const struct range *r1, + const struct range *r2) { return r1->start <= r2->start && r1->end >= r2->end; } +/* True if any part of r1 overlaps r2 */ +static inline bool range_overlaps(const struct range *r1, + const struct range *r2) +{ + return r1->start <= r2->end && r1->end >= r2->start; +} + int add_range(struct range *range, int az, int nr_range, u64 start, u64 end); @@ -31,4 +40,10 @@ int clean_sort_range(struct range *range, int az); void sort_range(struct range *range, int nr_range); +#define DEFINE_RANGE(_start, _end) \ +(struct range) { \ + .start = (_start), \ + .end = (_end), \ + } + #endif diff --git a/include/linux/rbtree_latch.h b/include/linux/rbtree_latch.h index 6a0999c26c7c..2f630eb8307e 100644 --- a/include/linux/rbtree_latch.h +++ b/include/linux/rbtree_latch.h @@ -14,7 +14,7 @@ * * If we need to allow unconditional lookups (say as required for NMI context * usage) we need a more complex setup; this data structure provides this by - * employing the latch technique -- see @raw_write_seqcount_latch -- to + * employing the latch technique -- see @write_seqcount_latch_begin -- to * implement a latched RB-tree which does allow for unconditional lookups by * virtue of always having (at least) one stable copy of the tree. * @@ -132,7 +132,7 @@ __lt_find(void *key, struct latch_tree_root *ltr, int idx, * @ops: operators defining the node order * * It inserts @node into @root in an ordered fashion such that we can always - * observe one complete tree. See the comment for raw_write_seqcount_latch(). + * observe one complete tree. See the comment for write_seqcount_latch_begin(). * * The inserts use rcu_assign_pointer() to publish the element such that the * tree structure is stored before we can observe the new @node. @@ -145,10 +145,11 @@ latch_tree_insert(struct latch_tree_node *node, struct latch_tree_root *root, const struct latch_tree_ops *ops) { - raw_write_seqcount_latch(&root->seq); + write_seqcount_latch_begin(&root->seq); __lt_insert(node, root, 0, ops->less); - raw_write_seqcount_latch(&root->seq); + write_seqcount_latch(&root->seq); __lt_insert(node, root, 1, ops->less); + write_seqcount_latch_end(&root->seq); } /** @@ -159,7 +160,7 @@ latch_tree_insert(struct latch_tree_node *node, * * Removes @node from the trees @root in an ordered fashion such that we can * always observe one complete tree. See the comment for - * raw_write_seqcount_latch(). + * write_seqcount_latch_begin(). * * It is assumed that @node will observe one RCU quiescent state before being * reused or freed.
@@ -172,10 +173,11 @@ latch_tree_erase(struct latch_tree_node *node, struct latch_tree_root *root, const struct latch_tree_ops *ops) { - raw_write_seqcount_latch(&root->seq); + write_seqcount_latch_begin(&root->seq); __lt_erase(node, root, 0); - raw_write_seqcount_latch(&root->seq); + write_seqcount_latch(&root->seq); __lt_erase(node, root, 1); + write_seqcount_latch_end(&root->seq); } /** @@ -204,9 +206,9 @@ latch_tree_find(void *key, struct latch_tree_root *root, unsigned int seq; do { - seq = raw_read_seqcount_latch(&root->seq); + seq = read_seqcount_latch(&root->seq); node = __lt_find(key, root, seq & 1, ops->comp); - } while (raw_read_seqcount_latch_retry(&root->seq, seq)); + } while (read_seqcount_latch_retry(&root->seq, seq)); return node; } diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 58d84c59f3dd..48e5c03df1dd 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -401,7 +401,7 @@ static inline int debug_lockdep_rcu_enabled(void) */ #define RCU_LOCKDEP_WARN(c, s) \ do { \ - static bool __section(".data.unlikely") __warned; \ + static bool __section(".data..unlikely") __warned; \ if (debug_lockdep_rcu_enabled() && (c) && \ debug_lockdep_rcu_enabled() && !__warned) { \ __warned = true; \ diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h index eda493200663..e6c44eb428ab 100644 --- a/include/linux/rcupdate_trace.h +++ b/include/linux/rcupdate_trace.h @@ -10,6 +10,7 @@ #include <linux/sched.h> #include <linux/rcupdate.h> +#include <linux/cleanup.h> extern struct lockdep_map rcu_trace_lock_map; @@ -98,4 +99,8 @@ static inline void rcu_read_lock_trace(void) { BUG(); } static inline void rcu_read_unlock_trace(void) { BUG(); } #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ +DEFINE_LOCK_GUARD_0(rcu_tasks_trace, + rcu_read_lock_trace(), + rcu_read_unlock_trace()) + #endif /* __LINUX_RCUPDATE_TRACE_H */ diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 0ee270b3f5ed..fe42315f667f 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -165,7 +165,6 @@ static inline bool rcu_inkernel_boot_has_ended(void) { return true; } static inline bool rcu_is_watching(void) { return true; } static inline void rcu_momentary_eqs(void) { } static inline void kfree_rcu_scheduler_running(void) { } -static inline bool rcu_gp_might_be_stalled(void) { return false; } /* Avoid RCU read-side critical sections leaking across. */ static inline void rcu_all_qs(void) { barrier(); } diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 90a684f94776..27d86d912781 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -40,7 +40,6 @@ void kvfree_rcu_barrier(void); void rcu_barrier(void); void rcu_momentary_eqs(void); void kfree_rcu_scheduler_running(void); -bool rcu_gp_might_be_stalled(void); struct rcu_gp_oldstate { unsigned long rgos_norm; diff --git a/include/linux/regmap.h b/include/linux/regmap.h index f9ccad32fc5c..fd41baccbf3e 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -54,7 +54,14 @@ struct sdw_slave; #define REGMAP_UPSHIFT(s) (-(s)) #define REGMAP_DOWNSHIFT(s) (s) -/* An enum of all the supported cache types */ +/* + * The supported cache types, the default is no cache. Any new caches + * should usually use the maple tree cache unless they specifically + * require that there are never any allocations at runtime and can't + * provide defaults in which case they should use the flat cache. 
The + * rbtree cache *may* have some performance advantage for very low end + * systems that make heavy use of cache syncs but is mainly legacy. + */ enum regcache_type { REGCACHE_NONE, REGCACHE_RBTREE, @@ -106,17 +113,17 @@ struct reg_sequence { * @addr: Address to poll * @val: Unsigned integer variable to read the value into * @cond: Break condition (usually involving @val) - * @sleep_us: Maximum time to sleep between reads in us (0 - * tight-loops). Should be less than ~20ms since usleep_range - * is used (see Documentation/timers/timers-howto.rst). + * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops). Please + * read usleep_range() function description for details and + * limitations. * @timeout_us: Timeout in us, 0 means never timeout * - * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_read + * This is modelled after the readx_poll_timeout macros in linux/iopoll.h. + * + * Returns: 0 on success and -ETIMEDOUT upon a timeout or the regmap_read * error return value in case of a error read. In the two former cases, * the last read value at @addr is stored in @val. Must not be called * from atomic context if sleep_us or timeout_us are used. - * - * This is modelled after the readx_poll_timeout macros in linux/iopoll.h. */ #define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \ ({ \ @@ -133,20 +140,20 @@ struct reg_sequence { * @addr: Address to poll * @val: Unsigned integer variable to read the value into * @cond: Break condition (usually involving @val) - * @delay_us: Time to udelay between reads in us (0 tight-loops). - * Should be less than ~10us since udelay is used - * (see Documentation/timers/timers-howto.rst). + * @delay_us: Time to udelay between reads in us (0 tight-loops). Please + * read udelay() function description for details and + * limitations. * @timeout_us: Timeout in us, 0 means never timeout * - * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_read - * error return value in case of a error read. In the two former cases, - * the last read value at @addr is stored in @val. - * * This is modelled after the readx_poll_timeout_atomic macros in linux/iopoll.h. * * Note: In general regmap cannot be used in atomic context. If you want to use * this macro then first setup your regmap for atomic use (flat or no cache * and MMIO regmap). + * + * Returns: 0 on success and -ETIMEDOUT upon a timeout or the regmap_read + * error return value in case of a error read. In the two former cases, + * the last read value at @addr is stored in @val. */ #define regmap_read_poll_timeout_atomic(map, addr, val, cond, delay_us, timeout_us) \ ({ \ @@ -177,17 +184,17 @@ struct reg_sequence { * @field: Regmap field to read from * @val: Unsigned integer variable to read the value into * @cond: Break condition (usually involving @val) - * @sleep_us: Maximum time to sleep between reads in us (0 - * tight-loops). Should be less than ~20ms since usleep_range - * is used (see Documentation/timers/timers-howto.rst). + * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops). Please + * read usleep_range() function description for details and + * limitations. * @timeout_us: Timeout in us, 0 means never timeout * - * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_field_read + * This is modelled after the readx_poll_timeout macros in linux/iopoll.h. + * + * Returns: 0 on success and -ETIMEDOUT upon a timeout or the regmap_field_read * error return value in case of a error read. 
In the two former cases, * the last read value at @addr is stored in @val. Must not be called * from atomic context if sleep_us or timeout_us are used. - * - * This is modelled after the readx_poll_timeout macros in linux/iopoll.h. */ #define regmap_field_read_poll_timeout(field, val, cond, sleep_us, timeout_us) \ ({ \ @@ -1328,6 +1335,15 @@ static inline int regmap_clear_bits(struct regmap *map, return regmap_update_bits_base(map, reg, bits, 0, NULL, false, false); } +static inline int regmap_assign_bits(struct regmap *map, unsigned int reg, + unsigned int bits, bool value) +{ + if (value) + return regmap_set_bits(map, reg, bits); + else + return regmap_clear_bits(map, reg, bits); +} + int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits); /** @@ -1796,6 +1812,13 @@ static inline int regmap_clear_bits(struct regmap *map, return -EINVAL; } +static inline int regmap_assign_bits(struct regmap *map, unsigned int reg, + unsigned int bits, bool value) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + static inline int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits) { diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index b9ce521910a0..8c3c372ad735 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -168,6 +168,29 @@ int devm_regulator_get_enable_read_voltage(struct device *dev, const char *id); void regulator_put(struct regulator *regulator); void devm_regulator_put(struct regulator *regulator); +#if IS_ENABLED(CONFIG_OF) +struct regulator *__must_check of_regulator_get_optional(struct device *dev, + struct device_node *node, + const char *id); +struct regulator *__must_check devm_of_regulator_get_optional(struct device *dev, + struct device_node *node, + const char *id); +#else +static inline struct regulator *__must_check of_regulator_get_optional(struct device *dev, + struct device_node *node, + const char *id) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct regulator *__must_check devm_of_regulator_get_optional(struct device *dev, + struct device_node *node, + const char *id) +{ + return ERR_PTR(-ENODEV); +} +#endif + int regulator_register_supply_alias(struct device *dev, const char *id, struct device *alias_dev, const char *alias_id); @@ -350,6 +373,20 @@ devm_regulator_get_optional(struct device *dev, const char *id) return ERR_PTR(-ENODEV); } +static inline struct regulator *__must_check of_regulator_get_optional(struct device *dev, + struct device_node *node, + const char *id) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct regulator *__must_check devm_of_regulator_get_optional(struct device *dev, + struct device_node *node, + const char *id) +{ + return ERR_PTR(-ENODEV); +} + static inline void regulator_put(struct regulator *regulator) { } diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index f230a472ccd3..5b66caf1695d 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -269,6 +269,11 @@ enum regulator_type { * config but it cannot store it for later usage. * Callback should return 0 on success or negative ERRNO * indicating failure. + * @init_cb: Optional callback called after the parsing of init_data. + * Allows the regulator to perform runtime init if necessary, + * such as synching the regulator and the parsed constraints. + * Callback should return 0 on success or negative ERRNO + * indicating failure. * @id: Numerical identifier for the regulator. 
* @ops: Regulator operations table. * @irq: Interrupt number for the regulator. @@ -365,6 +370,8 @@ struct regulator_desc { int (*of_parse_cb)(struct device_node *, const struct regulator_desc *, struct regulator_config *); + int (*init_cb)(struct regulator_dev *, + struct regulator_config *); int id; unsigned int continuous_voltage_range:1; unsigned n_voltages; diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index 0cd76d264727..b3db09a7429b 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -273,8 +273,6 @@ struct regulator_consumer_supply { * be usable. * @num_consumer_supplies: Number of consumer device supplies. * @consumer_supplies: Consumer device supply configuration. - * - * @regulator_init: Callback invoked when the regulator has been registered. * @driver_data: Data passed to regulator_init. */ struct regulator_init_data { @@ -285,8 +283,7 @@ struct regulator_init_data { int num_consumer_supplies; struct regulator_consumer_supply *consumer_supplies; - /* optional regulator machine specific init */ - int (*regulator_init)(void *driver_data); + /* optional regulator machine specific data */ void *driver_data; /* core does not touch this */ }; diff --git a/include/linux/reset.h b/include/linux/reset.h index 514ddf003efc..2986ced69a02 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -25,6 +25,48 @@ struct reset_control_bulk_data { struct reset_control *rstc; }; +#define RESET_CONTROL_FLAGS_BIT_SHARED BIT(0) /* not exclusive */ +#define RESET_CONTROL_FLAGS_BIT_OPTIONAL BIT(1) +#define RESET_CONTROL_FLAGS_BIT_ACQUIRED BIT(2) /* iff exclusive, not released */ +#define RESET_CONTROL_FLAGS_BIT_DEASSERTED BIT(3) + +/** + * enum reset_control_flags - Flags that can be passed to the reset_control_get functions + * to determine the type of reset control. + * These values cannot be OR'd. 
+ * + * @RESET_CONTROL_EXCLUSIVE: exclusive, acquired, + * @RESET_CONTROL_EXCLUSIVE_DEASSERTED: exclusive, acquired, deasserted + * @RESET_CONTROL_EXCLUSIVE_RELEASED: exclusive, released, + * @RESET_CONTROL_SHARED: shared + * @RESET_CONTROL_SHARED_DEASSERTED: shared, deasserted + * @RESET_CONTROL_OPTIONAL_EXCLUSIVE: optional, exclusive, acquired + * @RESET_CONTROL_OPTIONAL_EXCLUSIVE_DEASSERTED: optional, exclusive, acquired, deasserted + * @RESET_CONTROL_OPTIONAL_EXCLUSIVE_RELEASED: optional, exclusive, released + * @RESET_CONTROL_OPTIONAL_SHARED: optional, shared + * @RESET_CONTROL_OPTIONAL_SHARED_DEASSERTED: optional, shared, deasserted + */ +enum reset_control_flags { + RESET_CONTROL_EXCLUSIVE = RESET_CONTROL_FLAGS_BIT_ACQUIRED, + RESET_CONTROL_EXCLUSIVE_DEASSERTED = RESET_CONTROL_FLAGS_BIT_ACQUIRED | + RESET_CONTROL_FLAGS_BIT_DEASSERTED, + RESET_CONTROL_EXCLUSIVE_RELEASED = 0, + RESET_CONTROL_SHARED = RESET_CONTROL_FLAGS_BIT_SHARED, + RESET_CONTROL_SHARED_DEASSERTED = RESET_CONTROL_FLAGS_BIT_SHARED | + RESET_CONTROL_FLAGS_BIT_DEASSERTED, + RESET_CONTROL_OPTIONAL_EXCLUSIVE = RESET_CONTROL_FLAGS_BIT_OPTIONAL | + RESET_CONTROL_FLAGS_BIT_ACQUIRED, + RESET_CONTROL_OPTIONAL_EXCLUSIVE_DEASSERTED = RESET_CONTROL_FLAGS_BIT_OPTIONAL | + RESET_CONTROL_FLAGS_BIT_ACQUIRED | + RESET_CONTROL_FLAGS_BIT_DEASSERTED, + RESET_CONTROL_OPTIONAL_EXCLUSIVE_RELEASED = RESET_CONTROL_FLAGS_BIT_OPTIONAL, + RESET_CONTROL_OPTIONAL_SHARED = RESET_CONTROL_FLAGS_BIT_OPTIONAL | + RESET_CONTROL_FLAGS_BIT_SHARED, + RESET_CONTROL_OPTIONAL_SHARED_DEASSERTED = RESET_CONTROL_FLAGS_BIT_OPTIONAL | + RESET_CONTROL_FLAGS_BIT_SHARED | + RESET_CONTROL_FLAGS_BIT_DEASSERTED, +}; + #ifdef CONFIG_RESET_CONTROLLER int reset_control_reset(struct reset_control *rstc); @@ -42,30 +84,25 @@ int reset_control_bulk_acquire(int num_rstcs, struct reset_control_bulk_data *rs void reset_control_bulk_release(int num_rstcs, struct reset_control_bulk_data *rstcs); struct reset_control *__of_reset_control_get(struct device_node *node, - const char *id, int index, bool shared, - bool optional, bool acquired); + const char *id, int index, enum reset_control_flags flags); struct reset_control *__reset_control_get(struct device *dev, const char *id, - int index, bool shared, - bool optional, bool acquired); + int index, enum reset_control_flags flags); void reset_control_put(struct reset_control *rstc); int __reset_control_bulk_get(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs, - bool shared, bool optional, bool acquired); + enum reset_control_flags flags); void reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs); int __device_reset(struct device *dev, bool optional); struct reset_control *__devm_reset_control_get(struct device *dev, - const char *id, int index, bool shared, - bool optional, bool acquired); + const char *id, int index, enum reset_control_flags flags); int __devm_reset_control_bulk_get(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs, - bool shared, bool optional, bool acquired); + enum reset_control_flags flags); struct reset_control *devm_reset_control_array_get(struct device *dev, - bool shared, bool optional); -struct reset_control *of_reset_control_array_get(struct device_node *np, - bool shared, bool optional, - bool acquired); + enum reset_control_flags flags); +struct reset_control *of_reset_control_array_get(struct device_node *np, enum reset_control_flags); int reset_control_get_count(struct device *dev); @@ -116,17 +153,19 @@ static inline int 
__device_reset(struct device *dev, bool optional) static inline struct reset_control *__of_reset_control_get( struct device_node *node, - const char *id, int index, bool shared, - bool optional, bool acquired) + const char *id, int index, enum reset_control_flags flags) { + bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL; + return optional ? NULL : ERR_PTR(-ENOTSUPP); } static inline struct reset_control *__reset_control_get( struct device *dev, const char *id, - int index, bool shared, bool optional, - bool acquired) + int index, enum reset_control_flags flags) { + bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL; + return optional ? NULL : ERR_PTR(-ENOTSUPP); } @@ -162,8 +201,10 @@ reset_control_bulk_release(int num_rstcs, struct reset_control_bulk_data *rstcs) static inline int __reset_control_bulk_get(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs, - bool shared, bool optional, bool acquired) + enum reset_control_flags flags) { + bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL; + return optional ? 0 : -EOPNOTSUPP; } @@ -174,30 +215,36 @@ reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs) static inline struct reset_control *__devm_reset_control_get( struct device *dev, const char *id, - int index, bool shared, bool optional, - bool acquired) + int index, enum reset_control_flags flags) { + bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL; + return optional ? NULL : ERR_PTR(-ENOTSUPP); } static inline int __devm_reset_control_bulk_get(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs, - bool shared, bool optional, bool acquired) + enum reset_control_flags flags) { + bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL; + return optional ? 0 : -EOPNOTSUPP; } static inline struct reset_control * -devm_reset_control_array_get(struct device *dev, bool shared, bool optional) +devm_reset_control_array_get(struct device *dev, enum reset_control_flags flags) { + bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL; + return optional ? NULL : ERR_PTR(-ENOTSUPP); } static inline struct reset_control * -of_reset_control_array_get(struct device_node *np, bool shared, bool optional, - bool acquired) +of_reset_control_array_get(struct device_node *np, enum reset_control_flags flags) { + bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL; + return optional ? 
NULL : ERR_PTR(-ENOTSUPP); } @@ -236,7 +283,7 @@ static inline int device_reset_optional(struct device *dev) static inline struct reset_control * __must_check reset_control_get_exclusive(struct device *dev, const char *id) { - return __reset_control_get(dev, id, 0, false, false, true); + return __reset_control_get(dev, id, 0, RESET_CONTROL_EXCLUSIVE); } /** @@ -253,7 +300,7 @@ static inline int __must_check reset_control_bulk_get_exclusive(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs) { - return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, true); + return __reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_EXCLUSIVE); } /** @@ -274,7 +321,7 @@ static inline struct reset_control * __must_check reset_control_get_exclusive_released(struct device *dev, const char *id) { - return __reset_control_get(dev, id, 0, false, false, false); + return __reset_control_get(dev, id, 0, RESET_CONTROL_EXCLUSIVE_RELEASED); } /** @@ -295,7 +342,7 @@ static inline int __must_check reset_control_bulk_get_exclusive_released(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs) { - return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, false); + return __reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_EXCLUSIVE_RELEASED); } /** @@ -316,7 +363,8 @@ static inline int __must_check reset_control_bulk_get_optional_exclusive_released(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs) { - return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, false); + return __reset_control_bulk_get(dev, num_rstcs, rstcs, + RESET_CONTROL_OPTIONAL_EXCLUSIVE_RELEASED); } /** @@ -344,7 +392,7 @@ reset_control_bulk_get_optional_exclusive_released(struct device *dev, int num_r static inline struct reset_control *reset_control_get_shared( struct device *dev, const char *id) { - return __reset_control_get(dev, id, 0, true, false, false); + return __reset_control_get(dev, id, 0, RESET_CONTROL_SHARED); } /** @@ -361,7 +409,7 @@ static inline int __must_check reset_control_bulk_get_shared(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs) { - return __reset_control_bulk_get(dev, num_rstcs, rstcs, true, false, false); + return __reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_SHARED); } /** @@ -378,7 +426,7 @@ reset_control_bulk_get_shared(struct device *dev, int num_rstcs, static inline struct reset_control *reset_control_get_optional_exclusive( struct device *dev, const char *id) { - return __reset_control_get(dev, id, 0, false, true, true); + return __reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_EXCLUSIVE); } /** @@ -398,7 +446,7 @@ static inline int __must_check reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs) { - return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, true); + return __reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_OPTIONAL_EXCLUSIVE); } /** @@ -415,7 +463,7 @@ reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs, static inline struct reset_control *reset_control_get_optional_shared( struct device *dev, const char *id) { - return __reset_control_get(dev, id, 0, true, true, false); + return __reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_SHARED); } /** @@ -435,7 +483,7 @@ static inline int __must_check reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs) { - return 
__reset_control_bulk_get(dev, num_rstcs, rstcs, true, true, false); + return __reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_OPTIONAL_SHARED); } /** @@ -451,7 +499,7 @@ reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs, static inline struct reset_control *of_reset_control_get_exclusive( struct device_node *node, const char *id) { - return __of_reset_control_get(node, id, 0, false, false, true); + return __of_reset_control_get(node, id, 0, RESET_CONTROL_EXCLUSIVE); } /** @@ -471,7 +519,7 @@ static inline struct reset_control *of_reset_control_get_exclusive( static inline struct reset_control *of_reset_control_get_optional_exclusive( struct device_node *node, const char *id) { - return __of_reset_control_get(node, id, 0, false, true, true); + return __of_reset_control_get(node, id, 0, RESET_CONTROL_OPTIONAL_EXCLUSIVE); } /** @@ -496,7 +544,7 @@ static inline struct reset_control *of_reset_control_get_optional_exclusive( static inline struct reset_control *of_reset_control_get_shared( struct device_node *node, const char *id) { - return __of_reset_control_get(node, id, 0, true, false, false); + return __of_reset_control_get(node, id, 0, RESET_CONTROL_SHARED); } /** @@ -513,7 +561,7 @@ static inline struct reset_control *of_reset_control_get_shared( static inline struct reset_control *of_reset_control_get_exclusive_by_index( struct device_node *node, int index) { - return __of_reset_control_get(node, NULL, index, false, false, true); + return __of_reset_control_get(node, NULL, index, RESET_CONTROL_EXCLUSIVE); } /** @@ -541,7 +589,7 @@ static inline struct reset_control *of_reset_control_get_exclusive_by_index( static inline struct reset_control *of_reset_control_get_shared_by_index( struct device_node *node, int index) { - return __of_reset_control_get(node, NULL, index, true, false, false); + return __of_reset_control_get(node, NULL, index, RESET_CONTROL_SHARED); } /** @@ -560,7 +608,26 @@ static inline struct reset_control * __must_check devm_reset_control_get_exclusive(struct device *dev, const char *id) { - return __devm_reset_control_get(dev, id, 0, false, false, true); + return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_EXCLUSIVE); +} + +/** + * devm_reset_control_get_exclusive_deasserted - resource managed + * reset_control_get_exclusive() + + * reset_control_deassert() + * @dev: device to be reset by the controller + * @id: reset line name + * + * Managed reset_control_get_exclusive() + reset_control_deassert(). For reset + * controllers returned from this function, reset_control_assert() + + * reset_control_put() is called automatically on driver detach. + * + * See reset_control_get_exclusive() for more information. 
+ */ +static inline struct reset_control * __must_check +devm_reset_control_get_exclusive_deasserted(struct device *dev, const char *id) +{ + return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_EXCLUSIVE_DEASSERTED); } /** @@ -580,7 +647,8 @@ static inline int __must_check devm_reset_control_bulk_get_exclusive(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs) { - return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, true); + return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, + RESET_CONTROL_EXCLUSIVE); } /** @@ -599,7 +667,7 @@ static inline struct reset_control * __must_check devm_reset_control_get_exclusive_released(struct device *dev, const char *id) { - return __devm_reset_control_get(dev, id, 0, false, false, false); + return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_EXCLUSIVE_RELEASED); } /** @@ -619,7 +687,8 @@ static inline int __must_check devm_reset_control_bulk_get_exclusive_released(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs) { - return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, false); + return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, + RESET_CONTROL_EXCLUSIVE_RELEASED); } /** @@ -638,7 +707,7 @@ static inline struct reset_control * __must_check devm_reset_control_get_optional_exclusive_released(struct device *dev, const char *id) { - return __devm_reset_control_get(dev, id, 0, false, true, false); + return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_EXCLUSIVE_RELEASED); } /** @@ -658,7 +727,8 @@ static inline int __must_check devm_reset_control_bulk_get_optional_exclusive_released(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs) { - return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, false); + return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, + RESET_CONTROL_OPTIONAL_EXCLUSIVE_RELEASED); } /** @@ -673,7 +743,26 @@ devm_reset_control_bulk_get_optional_exclusive_released(struct device *dev, int static inline struct reset_control *devm_reset_control_get_shared( struct device *dev, const char *id) { - return __devm_reset_control_get(dev, id, 0, true, false, false); + return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_SHARED); +} + +/** + * devm_reset_control_get_shared_deasserted - resource managed + * reset_control_get_shared() + + * reset_control_deassert() + * @dev: device to be reset by the controller + * @id: reset line name + * + * Managed reset_control_get_shared() + reset_control_deassert(). For reset + * controllers returned from this function, reset_control_assert() + + * reset_control_put() is called automatically on driver detach. + * + * See devm_reset_control_get_shared() for more information. 
+ */ +static inline struct reset_control * __must_check +devm_reset_control_get_shared_deasserted(struct device *dev, const char *id) +{ + return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_SHARED_DEASSERTED); } /** @@ -693,7 +782,29 @@ static inline int __must_check devm_reset_control_bulk_get_shared(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs) { - return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, true, false, false); + return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_SHARED); +} + +/** + * devm_reset_control_bulk_get_shared_deasserted - resource managed + * reset_control_bulk_get_shared() + + * reset_control_bulk_deassert() + * @dev: device to be reset by the controller + * @num_rstcs: number of entries in rstcs array + * @rstcs: array of struct reset_control_bulk_data with reset line names set + * + * Managed reset_control_bulk_get_shared() + reset_control_bulk_deassert(). For + * reset controllers returned from this function, reset_control_bulk_assert() + + * reset_control_bulk_put() are called automatically on driver detach. + * + * See devm_reset_control_bulk_get_shared() for more information. + */ +static inline int __must_check +devm_reset_control_bulk_get_shared_deasserted(struct device *dev, int num_rstcs, + struct reset_control_bulk_data *rstcs) +{ + return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, + RESET_CONTROL_SHARED_DEASSERTED); } /** @@ -711,7 +822,26 @@ devm_reset_control_bulk_get_shared(struct device *dev, int num_rstcs, static inline struct reset_control *devm_reset_control_get_optional_exclusive( struct device *dev, const char *id) { - return __devm_reset_control_get(dev, id, 0, false, true, true); + return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_EXCLUSIVE); +} + +/** + * devm_reset_control_get_optional_exclusive_deasserted - resource managed + * reset_control_get_optional_exclusive() + + * reset_control_deassert() + * @dev: device to be reset by the controller + * @id: reset line name + * + * Managed reset_control_get_optional_exclusive() + reset_control_deassert(). + * For reset controllers returned from this function, reset_control_assert() + + * reset_control_put() is called automatically on driver detach. + * + * See devm_reset_control_get_optional_exclusive() for more information. 
+ */ +static inline struct reset_control * +devm_reset_control_get_optional_exclusive_deasserted(struct device *dev, const char *id) +{ + return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_EXCLUSIVE_DEASSERTED); } /** @@ -731,7 +861,8 @@ static inline int __must_check devm_reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs) { - return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, true); + return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, + RESET_CONTROL_OPTIONAL_EXCLUSIVE); } /** @@ -749,7 +880,26 @@ devm_reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs static inline struct reset_control *devm_reset_control_get_optional_shared( struct device *dev, const char *id) { - return __devm_reset_control_get(dev, id, 0, true, true, false); + return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_SHARED); +} + +/** + * devm_reset_control_get_optional_shared_deasserted - resource managed + * reset_control_get_optional_shared() + + * reset_control_deassert() + * @dev: device to be reset by the controller + * @id: reset line name + * + * Managed reset_control_get_optional_shared() + reset_control_deassert(). For + * reset controllers returned from this function, reset_control_assert() + + * reset_control_put() is called automatically on driver detach. + * + * See devm_reset_control_get_optional_shared() for more information. + */ +static inline struct reset_control * +devm_reset_control_get_optional_shared_deasserted(struct device *dev, const char *id) +{ + return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_SHARED_DEASSERTED); } /** @@ -769,7 +919,7 @@ static inline int __must_check devm_reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs) { - return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, true, true, false); + return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_OPTIONAL_SHARED); } /** @@ -787,7 +937,7 @@ devm_reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs, static inline struct reset_control * devm_reset_control_get_exclusive_by_index(struct device *dev, int index) { - return __devm_reset_control_get(dev, NULL, index, false, false, true); + return __devm_reset_control_get(dev, NULL, index, RESET_CONTROL_EXCLUSIVE); } /** @@ -803,7 +953,7 @@ devm_reset_control_get_exclusive_by_index(struct device *dev, int index) static inline struct reset_control * devm_reset_control_get_shared_by_index(struct device *dev, int index) { - return __devm_reset_control_get(dev, NULL, index, true, false, false); + return __devm_reset_control_get(dev, NULL, index, RESET_CONTROL_SHARED); } /* @@ -851,54 +1001,54 @@ static inline struct reset_control *devm_reset_control_get_by_index( static inline struct reset_control * devm_reset_control_array_get_exclusive(struct device *dev) { - return devm_reset_control_array_get(dev, false, false); + return devm_reset_control_array_get(dev, RESET_CONTROL_EXCLUSIVE); } static inline struct reset_control * devm_reset_control_array_get_shared(struct device *dev) { - return devm_reset_control_array_get(dev, true, false); + return devm_reset_control_array_get(dev, RESET_CONTROL_SHARED); } static inline struct reset_control * devm_reset_control_array_get_optional_exclusive(struct device *dev) { - return devm_reset_control_array_get(dev, false, true); + return devm_reset_control_array_get(dev, 
RESET_CONTROL_OPTIONAL_EXCLUSIVE); } static inline struct reset_control * devm_reset_control_array_get_optional_shared(struct device *dev) { - return devm_reset_control_array_get(dev, true, true); + return devm_reset_control_array_get(dev, RESET_CONTROL_OPTIONAL_SHARED); } static inline struct reset_control * of_reset_control_array_get_exclusive(struct device_node *node) { - return of_reset_control_array_get(node, false, false, true); + return of_reset_control_array_get(node, RESET_CONTROL_EXCLUSIVE); } static inline struct reset_control * of_reset_control_array_get_exclusive_released(struct device_node *node) { - return of_reset_control_array_get(node, false, false, false); + return of_reset_control_array_get(node, RESET_CONTROL_EXCLUSIVE_RELEASED); } static inline struct reset_control * of_reset_control_array_get_shared(struct device_node *node) { - return of_reset_control_array_get(node, true, false, true); + return of_reset_control_array_get(node, RESET_CONTROL_SHARED); } static inline struct reset_control * of_reset_control_array_get_optional_exclusive(struct device_node *node) { - return of_reset_control_array_get(node, false, true, true); + return of_reset_control_array_get(node, RESET_CONTROL_OPTIONAL_EXCLUSIVE); } static inline struct reset_control * of_reset_control_array_get_optional_shared(struct device_node *node) { - return of_reset_control_array_get(node, true, true, true); + return of_reset_control_array_get(node, RESET_CONTROL_OPTIONAL_SHARED); } #endif diff --git a/include/linux/rmap.h b/include/linux/rmap.h index d5e93e44322e..683a04088f3f 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -171,7 +171,7 @@ static inline void anon_vma_merge(struct vm_area_struct *vma, unlink_anon_vmas(next); } -struct anon_vma *folio_get_anon_vma(struct folio *folio); +struct anon_vma *folio_get_anon_vma(const struct folio *folio); /* RMAP flags, currently only relevant for some anon rmap operations. */ typedef int __bitwise rmap_t; @@ -194,8 +194,8 @@ enum rmap_level { RMAP_LEVEL_PMD, }; -static inline void __folio_rmap_sanity_checks(struct folio *folio, - struct page *page, int nr_pages, enum rmap_level level) +static inline void __folio_rmap_sanity_checks(const struct folio *folio, + const struct page *page, int nr_pages, enum rmap_level level) { /* hugetlb folios are handled separately. */ VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); @@ -728,11 +728,8 @@ page_vma_mapped_walk_restart(struct page_vma_mapped_walk *pvmw) } bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw); - -/* - * Used by swapoff to help locate where page is expected in vma. - */ -unsigned long page_address_in_vma(struct page *, struct vm_area_struct *); +unsigned long page_address_in_vma(const struct folio *folio, + const struct page *, const struct vm_area_struct *); /* * Cleans the PTEs of shared mappings. 
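As a hedged aside on the page_address_in_vma() prototype change above: callers now pass the folio they already hold instead of having it re-derived from the page. A sketch with a made-up wrapper name, assuming the long-standing convention that -EFAULT (as an unsigned long) means the page is not mapped in the given VMA:

/* Hypothetical wrapper around the new three-argument prototype. */
static unsigned long foo_addr_in_vma(const struct folio *folio,
				     const struct page *page,
				     const struct vm_area_struct *vma)
{
	unsigned long addr = page_address_in_vma(folio, page, vma);

	if (addr == -EFAULT)
		return 0;	/* assumed: not mapped in this VMA */
	return addr;
}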
@@ -774,14 +771,14 @@ struct rmap_walk_control { bool (*rmap_one)(struct folio *folio, struct vm_area_struct *vma, unsigned long addr, void *arg); int (*done)(struct folio *folio); - struct anon_vma *(*anon_lock)(struct folio *folio, + struct anon_vma *(*anon_lock)(const struct folio *folio, struct rmap_walk_control *rwc); bool (*invalid_vma)(struct vm_area_struct *vma, void *arg); }; void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc); void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc); -struct anon_vma *folio_lock_anon_vma_read(struct folio *folio, +struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio, struct rmap_walk_control *rwc); #else /* !CONFIG_MMU */ diff --git a/include/linux/rtc/m48t59.h b/include/linux/rtc/m48t59.h index 9465d5405fe2..373ba77071c6 100644 --- a/include/linux/rtc/m48t59.h +++ b/include/linux/rtc/m48t59.h @@ -56,6 +56,9 @@ struct m48t59_plat_data { void __iomem *ioaddr; /* offset to RTC registers, automatically set according to the type */ unsigned int offset; + + /* YY digits (in RTC) are offset, i.e. year is 1900 + yy_offset + YY */ + int yy_offset; }; #endif /* _LINUX_RTC_M48T59_H_ */ diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index a7da7dfc06a2..14b88f551920 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -7,7 +7,6 @@ #include <linux/netdevice.h> #include <linux/wait.h> #include <linux/refcount.h> -#include <linux/cleanup.h> #include <uapi/linux/rtnetlink.h> extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo); @@ -47,13 +46,15 @@ extern int rtnl_is_locked(void); extern int rtnl_lock_killable(void); extern bool refcount_dec_and_rtnl_lock(refcount_t *r); -DEFINE_LOCK_GUARD_0(rtnl, rtnl_lock(), rtnl_unlock()) - extern wait_queue_head_t netdev_unregistering_wq; extern atomic_t dev_unreg_count; extern struct rw_semaphore pernet_ops_rwsem; extern struct rw_semaphore net_rwsem; +#define ASSERT_RTNL() \ + WARN_ONCE(!rtnl_is_locked(), \ + "RTNL: assertion failed at %s (%d)\n", __FILE__, __LINE__) + #ifdef CONFIG_PROVE_LOCKING extern bool lockdep_rtnl_is_held(void); #else @@ -95,6 +96,61 @@ static inline bool lockdep_rtnl_is_held(void) #define rcu_replace_pointer_rtnl(rp, p) \ rcu_replace_pointer(rp, p, lockdep_rtnl_is_held()) +#ifdef CONFIG_DEBUG_NET_SMALL_RTNL +void __rtnl_net_lock(struct net *net); +void __rtnl_net_unlock(struct net *net); +void rtnl_net_lock(struct net *net); +void rtnl_net_unlock(struct net *net); +int rtnl_net_trylock(struct net *net); +int rtnl_net_lock_cmp_fn(const struct lockdep_map *a, const struct lockdep_map *b); + +bool rtnl_net_is_locked(struct net *net); + +#define ASSERT_RTNL_NET(net) \ + WARN_ONCE(!rtnl_net_is_locked(net), \ + "RTNL_NET: assertion failed at %s (%d)\n", \ + __FILE__, __LINE__) + +bool lockdep_rtnl_net_is_held(struct net *net); + +#define rcu_dereference_rtnl_net(net, p) \ + rcu_dereference_check(p, lockdep_rtnl_net_is_held(net)) +#define rtnl_net_dereference(net, p) \ + rcu_dereference_protected(p, lockdep_rtnl_net_is_held(net)) +#define rcu_replace_pointer_rtnl_net(net, rp, p) \ + rcu_replace_pointer(rp, p, lockdep_rtnl_net_is_held(net)) +#else +static inline void __rtnl_net_lock(struct net *net) {} +static inline void __rtnl_net_unlock(struct net *net) {} + +static inline void rtnl_net_lock(struct net *net) +{ + rtnl_lock(); +} + +static inline void rtnl_net_unlock(struct net *net) +{ + rtnl_unlock(); +} + +static inline int rtnl_net_trylock(struct net *net) +{ + return 
rtnl_trylock(); +} + +static inline void ASSERT_RTNL_NET(struct net *net) +{ + ASSERT_RTNL(); +} + +#define rcu_dereference_rtnl_net(net, p) \ + rcu_dereference_rtnl(p) +#define rtnl_net_dereference(net, p) \ + rtnl_dereference(p) +#define rcu_replace_pointer_rtnl_net(net, rp, p) \ + rcu_replace_pointer_rtnl(rp, p) +#endif + static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev) { return rtnl_dereference(dev->ingress_queue); @@ -122,10 +178,6 @@ void rtnetlink_init(void); void __rtnl_unlock(void); void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail); -#define ASSERT_RTNL() \ - WARN_ONCE(!rtnl_is_locked(), \ - "RTNL: assertion failed at %s (%d)\n", __FILE__, __LINE__) - extern int ndo_dflt_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, struct net_device *dev, diff --git a/include/linux/rtsx_common.h b/include/linux/rtsx_common.h index bf290ad14c57..da9c8c6b5d50 100644 --- a/include/linux/rtsx_common.h +++ b/include/linux/rtsx_common.h @@ -12,7 +12,6 @@ #define DRV_NAME_RTSX_PCI "rtsx_pci" #define DRV_NAME_RTSX_PCI_SDMMC "rtsx_pci_sdmmc" -#define DRV_NAME_RTSX_PCI_MS "rtsx_pci_ms" #define RTSX_REG_PAIR(addr, val) (((u32)(addr) << 16) | (u8)(val)) diff --git a/include/linux/rtsx_usb.h b/include/linux/rtsx_usb.h index 3247ed8e9ff0..f267a06c6b1e 100644 --- a/include/linux/rtsx_usb.h +++ b/include/linux/rtsx_usb.h @@ -12,6 +12,10 @@ #include <linux/usb.h> +#define DRV_NAME_RTSX_USB "rtsx_usb" +#define DRV_NAME_RTSX_USB_SDMMC "rtsx_usb_sdmmc" +#define DRV_NAME_RTSX_USB_MS "rtsx_usb_ms" + /* related module names */ #define RTSX_USB_SD_CARD 0 #define RTSX_USB_MS_CARD 1 diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h index c0ef596f340b..5b87c6f4a243 100644 --- a/include/linux/rwlock.h +++ b/include/linux/rwlock.h @@ -2,7 +2,7 @@ #define __LINUX_RWLOCK_H #ifndef __LINUX_INSIDE_SPINLOCK_H -# error "please don't include this file directly" +# error "Please do not include this file directly." #endif /* diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h index dceb0a59b692..31d3d1116323 100644 --- a/include/linux/rwlock_api_smp.h +++ b/include/linux/rwlock_api_smp.h @@ -2,7 +2,7 @@ #define __LINUX_RWLOCK_API_SMP_H #ifndef __LINUX_SPINLOCK_API_SMP_H -# error "please don't include this file directly" +# error "Please do not include this file directly." 
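Stepping back to the per-netns RTNL helpers added in rtnetlink.h above, a hedged sketch of a converted caller (function name hypothetical). When CONFIG_DEBUG_NET_SMALL_RTNL is disabled, rtnl_net_lock()/rtnl_net_unlock() fall back to the global rtnl_lock()/rtnl_unlock(), so call sites can be converted before the per-netns lock is actually enabled.

/* Hypothetical caller scoping its work to a single netns. */
static int foo_set_option(struct net *net)
{
	rtnl_net_lock(net);
	ASSERT_RTNL_NET(net);
	/* ... touch configuration reachable only from this netns ... */
	rtnl_net_unlock(net);

	return 0;
}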
#endif /* diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h index 8544ff05e594..7d81fc6918ee 100644 --- a/include/linux/rwlock_rt.h +++ b/include/linux/rwlock_rt.h @@ -24,13 +24,13 @@ do { \ __rt_rwlock_init(rwl, #rwl, &__key); \ } while (0) -extern void rt_read_lock(rwlock_t *rwlock); +extern void rt_read_lock(rwlock_t *rwlock) __acquires(rwlock); extern int rt_read_trylock(rwlock_t *rwlock); -extern void rt_read_unlock(rwlock_t *rwlock); -extern void rt_write_lock(rwlock_t *rwlock); -extern void rt_write_lock_nested(rwlock_t *rwlock, int subclass); +extern void rt_read_unlock(rwlock_t *rwlock) __releases(rwlock); +extern void rt_write_lock(rwlock_t *rwlock) __acquires(rwlock); +extern void rt_write_lock_nested(rwlock_t *rwlock, int subclass) __acquires(rwlock); extern int rt_write_trylock(rwlock_t *rwlock); -extern void rt_write_unlock(rwlock_t *rwlock); +extern void rt_write_unlock(rwlock_t *rwlock) __releases(rwlock); static __always_inline void read_lock(rwlock_t *rwlock) { diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index e61d164622db..d836e7440ee8 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -273,7 +273,7 @@ static inline void sg_unmark_end(struct scatterlist *sg) } /* - * One 64-bit architectures there is a 4-byte padding in struct scatterlist + * On 64-bit architectures there is a 4-byte padding in struct scatterlist * (assuming also CONFIG_NEED_SG_DMA_LENGTH is set). Use this padding for DMA * flags bits to indicate when a specific dma address is a bus address or the * buffer may have been bounced via SWIOTLB. @@ -313,7 +313,7 @@ static inline void sg_dma_mark_bus_address(struct scatterlist *sg) } /** - * sg_unmark_bus_address - Unmark the scatterlist entry as a bus address + * sg_dma_unmark_bus_address - Unmark the scatterlist entry as a bus address * @sg: SG entry * * Description: diff --git a/include/linux/sched.h b/include/linux/sched.h index bb343136ddd0..64934e0830af 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -656,6 +656,12 @@ struct sched_dl_entity { * @dl_defer_armed tells if the deferrable server is waiting * for the replenishment timer to activate it. * + * @dl_server_active tells if the dlserver is active(started). + * dlserver is started on first cfs enqueue on an idle runqueue + * and is stopped when a dequeue results in 0 cfs tasks on the + * runqueue. In other words, dlserver is active only when cpu's + * runqueue has atleast one cfs task. + * * @dl_defer_running tells if the deferrable server is actually * running, skipping the defer phase. */ @@ -664,6 +670,7 @@ struct sched_dl_entity { unsigned int dl_non_contending : 1; unsigned int dl_overrun : 1; unsigned int dl_server : 1; + unsigned int dl_server_active : 1; unsigned int dl_defer : 1; unsigned int dl_defer_armed : 1; unsigned int dl_defer_running : 1; @@ -1001,7 +1008,7 @@ struct task_struct { #ifdef CONFIG_ARCH_HAS_CPU_PASID unsigned pasid_activated:1; #endif -#ifdef CONFIG_CPU_SUP_INTEL +#ifdef CONFIG_X86_BUS_LOCK_DETECT unsigned reported_split_lock:1; #endif #ifdef CONFIG_TASK_DELAY_ACCT @@ -1121,9 +1128,12 @@ struct task_struct { /* * executable name, excluding path. 
* - * - normally initialized setup_new_exec() - * - access it with [gs]et_task_comm() - * - lock it with task_lock() + * - normally initialized begin_new_exec() + * - set it with set_task_comm() + * - strscpy_pad() to ensure it is always NUL-terminated and + * zero-padded + * - task_lock() to ensure the operation is atomic and the name is + * fully updated. */ char comm[TASK_COMM_LEN]; @@ -1441,6 +1451,7 @@ struct task_struct { /* Timestamp for last schedule: */ unsigned long long ftrace_timestamp; + unsigned long long ftrace_sleeptime; /* * Number of functions that haven't been traced @@ -1626,8 +1637,9 @@ static inline unsigned int __task_state_index(unsigned int tsk_state, * We're lying here, but rather than expose a completely new task state * to userspace, we can make this appear as if the task has gone through * a regular rt_mutex_lock() call. + * Report frozen tasks as uninterruptible. */ - if (tsk_state & TASK_RTLOCK_WAIT) + if ((tsk_state & TASK_RTLOCK_WAIT) || (tsk_state & TASK_FROZEN)) state = TASK_UNINTERRUPTIBLE; return fls(state); @@ -1898,7 +1910,7 @@ extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)]; #ifdef CONFIG_THREAD_INFO_IN_TASK # define task_thread_info(task) (&(task)->thread_info) -#elif !defined(__HAVE_THREAD_FUNCTIONS) +#else # define task_thread_info(task) ((struct thread_info *)(task)->stack) #endif @@ -1938,10 +1950,23 @@ static inline void set_task_comm(struct task_struct *tsk, const char *from) __set_task_comm(tsk, from, false); } -extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk); +/* + * - Why not use task_lock()? + * User space can randomly change their names anyway, so locking for readers + * doesn't make sense. For writers, locking is probably necessary, as a race + * condition could lead to long-term mixed results. + * The strscpy_pad() in __set_task_comm() can ensure that the task comm is + * always NUL-terminated and zero-padded. Therefore the race condition between + * reader and writer is not an issue. + * + * - BUILD_BUG_ON() can help prevent the buf from being truncated. + * Since the callers don't perform any return value checks, this safeguard is + * necessary. + */ #define get_task_comm(buf, tsk) ({ \ - BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \ - __get_task_comm(buf, sizeof(buf), tsk); \ + BUILD_BUG_ON(sizeof(buf) < TASK_COMM_LEN); \ + strscpy_pad(buf, (tsk)->comm); \ + buf; \ }) #ifdef CONFIG_SMP @@ -2002,7 +2027,8 @@ static inline void set_tsk_need_resched(struct task_struct *tsk) static inline void clear_tsk_need_resched(struct task_struct *tsk) { - clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED); + atomic_long_andnot(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY, + (atomic_long_t *)&task_thread_info(tsk)->flags); } static inline int test_tsk_need_resched(struct task_struct *tsk) diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h index e62ff805cfc9..6eb65ceed213 100644 --- a/include/linux/sched/coredump.h +++ b/include/linux/sched/coredump.h @@ -8,12 +8,6 @@ #define SUID_DUMP_USER 1 /* Dump as user of process */ #define SUID_DUMP_ROOT 2 /* Dump as root */ -/* mm flags */ - -/* for SUID_DUMP_* above */ -#define MMF_DUMPABLE_BITS 2 -#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1) - extern void set_dumpable(struct mm_struct *mm, int value); /* * This returns the actual value of the suid_dumpable flag. 
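For the get_task_comm() rework above, a minimal sketch with a hypothetical caller: the destination buffer must be at least TASK_COMM_LEN bytes (enforced by the BUILD_BUG_ON() in the macro), and the copy is NUL-terminated and zero-padded via strscpy_pad() without taking task_lock().

/* Hypothetical caller of the lock-free get_task_comm(). */
static void foo_log_comm(struct task_struct *tsk)
{
	char comm[TASK_COMM_LEN];

	pr_info("task %d is running as %s\n",
		task_pid_nr(tsk), get_task_comm(comm, tsk));
}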
For things @@ -31,80 +25,4 @@ static inline int get_dumpable(struct mm_struct *mm) return __get_dumpable(mm->flags); } -/* coredump filter bits */ -#define MMF_DUMP_ANON_PRIVATE 2 -#define MMF_DUMP_ANON_SHARED 3 -#define MMF_DUMP_MAPPED_PRIVATE 4 -#define MMF_DUMP_MAPPED_SHARED 5 -#define MMF_DUMP_ELF_HEADERS 6 -#define MMF_DUMP_HUGETLB_PRIVATE 7 -#define MMF_DUMP_HUGETLB_SHARED 8 -#define MMF_DUMP_DAX_PRIVATE 9 -#define MMF_DUMP_DAX_SHARED 10 - -#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS -#define MMF_DUMP_FILTER_BITS 9 -#define MMF_DUMP_FILTER_MASK \ - (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT) -#define MMF_DUMP_FILTER_DEFAULT \ - ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\ - (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF) - -#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS -# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS) -#else -# define MMF_DUMP_MASK_DEFAULT_ELF 0 -#endif - /* leave room for more dump flags */ -#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ -#define MMF_VM_HUGEPAGE 17 /* set when mm is available for - khugepaged */ -/* - * This one-shot flag is dropped due to necessity of changing exe once again - * on NFS restore - */ -//#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */ - -#define MMF_HAS_UPROBES 19 /* has uprobes */ -#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */ -#define MMF_OOM_SKIP 21 /* mm is of no interest for the OOM killer */ -#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */ -#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ -#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */ -#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) -#define MMF_OOM_REAP_QUEUED 25 /* mm was queued for oom_reaper */ -#define MMF_MULTIPROCESS 26 /* mm is shared between processes */ -/* - * MMF_HAS_PINNED: Whether this mm has pinned any pages. This can be either - * replaced in the future by mm.pinned_vm when it becomes stable, or grow into - * a counter on its own. We're aggresive on this bit for now: even if the - * pinned pages were unpinned later on, we'll still keep this bit set for the - * lifecycle of this mm, just for simplicity. 
- */ -#define MMF_HAS_PINNED 27 /* FOLL_PIN has run, never cleared */ - -#define MMF_HAS_MDWE 28 -#define MMF_HAS_MDWE_MASK (1 << MMF_HAS_MDWE) - - -#define MMF_HAS_MDWE_NO_INHERIT 29 - -#define MMF_VM_MERGE_ANY 30 -#define MMF_VM_MERGE_ANY_MASK (1 << MMF_VM_MERGE_ANY) - -#define MMF_TOPDOWN 31 /* mm searches top down by default */ -#define MMF_TOPDOWN_MASK (1 << MMF_TOPDOWN) - -#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ - MMF_DISABLE_THP_MASK | MMF_HAS_MDWE_MASK |\ - MMF_VM_MERGE_ANY_MASK | MMF_TOPDOWN_MASK) - -static inline unsigned long mmf_init_flags(unsigned long flags) -{ - if (flags & (1UL << MMF_HAS_MDWE_NO_INHERIT)) - flags &= ~((1UL << MMF_HAS_MDWE) | - (1UL << MMF_HAS_MDWE_NO_INHERIT)); - return flags & MMF_INIT_MASK; -} - #endif /* _LINUX_SCHED_COREDUMP_H */ diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h index 1ddbde64a31b..1d70a9867fb1 100644 --- a/include/linux/sched/ext.h +++ b/include/linux/sched/ext.h @@ -199,17 +199,18 @@ struct sched_ext_entity { #ifdef CONFIG_EXT_GROUP_SCHED struct cgroup *cgrp_moving_from; #endif - /* must be the last field, see init_scx_entity() */ struct list_head tasks_node; }; void sched_ext_free(struct task_struct *p); void print_scx_info(const char *log_lvl, struct task_struct *p); +void scx_softlockup(u32 dur_s); #else /* !CONFIG_SCHED_CLASS_EXT */ static inline void sched_ext_free(struct task_struct *p) {} static inline void print_scx_info(const char *log_lvl, struct task_struct *p) {} +static inline void scx_softlockup(u32 dur_s) {} #endif /* CONFIG_SCHED_CLASS_EXT */ #endif /* _LINUX_SCHED_EXT_H */ diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index c8ed09ac29ac..d5d03d919df8 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -138,6 +138,7 @@ struct signal_struct { /* POSIX.1b Interval Timers */ unsigned int next_posix_timer_id; struct hlist_head posix_timers; + struct hlist_head ignored_posix_timers; /* ITIMER_REAL timer for the process */ struct hrtimer real_timer; @@ -338,9 +339,6 @@ extern void force_fatal_sig(int); extern void force_exit_sig(int); extern int send_sig(int, struct task_struct *, int); extern int zap_other_threads(struct task_struct *p); -extern struct sigqueue *sigqueue_alloc(void); -extern void sigqueue_free(struct sigqueue *); -extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type); extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); static inline void clear_notify_signal(void) diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h index bf10bdb487dd..cffad65bdc6a 100644 --- a/include/linux/sched/task_stack.h +++ b/include/linux/sched/task_stack.h @@ -9,6 +9,7 @@ #include <linux/sched.h> #include <linux/magic.h> #include <linux/refcount.h> +#include <linux/kasan.h> #ifdef CONFIG_THREAD_INFO_IN_TASK @@ -33,7 +34,7 @@ static __always_inline unsigned long *end_of_stack(const struct task_struct *tas #endif } -#elif !defined(__HAVE_THREAD_FUNCTIONS) +#else #define task_stack_page(task) ((void *)(task)->stack) @@ -89,6 +90,7 @@ static inline int object_is_on_stack(const void *obj) { void *stack = task_stack_page(current); + obj = kasan_reset_tag(obj); return (obj >= stack) && (obj < (stack + THREAD_SIZE)); } diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index 709ad84809e1..341980599c71 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h @@ -32,6 +32,11 @@ static inline int secure_computing(void) } #else extern void 
secure_computing_strict(int this_syscall); +static inline int __secure_computing(const struct seccomp_data *sd) +{ + secure_computing_strict(sd->nr); + return 0; +} #endif extern long prctl_get_seccomp(void); diff --git a/include/linux/security.h b/include/linux/security.h index 2ec8f3014757..cbdba435b798 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -34,6 +34,10 @@ #include <linux/sockptr.h> #include <linux/bpf.h> #include <uapi/linux/lsm.h> +#include <linux/lsm/selinux.h> +#include <linux/lsm/smack.h> +#include <linux/lsm/apparmor.h> +#include <linux/lsm/bpf.h> struct linux_binprm; struct cred; @@ -152,6 +156,16 @@ enum lockdown_reason { LOCKDOWN_CONFIDENTIALITY_MAX, }; +/* + * Data exported by the security modules + */ +struct lsm_prop { + struct lsm_prop_selinux selinux; + struct lsm_prop_smack smack; + struct lsm_prop_apparmor apparmor; + struct lsm_prop_bpf bpf; +}; + extern const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1]; extern u32 lsm_active_cnt; extern const struct lsm_id *lsm_idlist[]; @@ -269,8 +283,32 @@ static inline const char *kernel_load_data_id_str(enum kernel_load_data_id id) return kernel_load_data_str[id]; } +/** + * lsmprop_init - initialize a lsm_prop structure + * @prop: Pointer to the data to initialize + * + * Set all secid for all modules to the specified value. + */ +static inline void lsmprop_init(struct lsm_prop *prop) +{ + memset(prop, 0, sizeof(*prop)); +} + #ifdef CONFIG_SECURITY +/** + * lsmprop_is_set - report if there is a value in the lsm_prop + * @prop: Pointer to the exported LSM data + * + * Returns true if there is a value set, false otherwise + */ +static inline bool lsmprop_is_set(struct lsm_prop *prop) +{ + const struct lsm_prop empty = {}; + + return !!memcmp(prop, &empty, sizeof(*prop)); +} + int call_blocking_lsm_notifier(enum lsm_event event, void *data); int register_blocking_lsm_notifier(struct notifier_block *nb); int unregister_blocking_lsm_notifier(struct notifier_block *nb); @@ -408,7 +446,7 @@ int security_inode_getsecurity(struct mnt_idmap *idmap, void **buffer, bool alloc); int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags); int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size); -void security_inode_getsecid(struct inode *inode, u32 *secid); +void security_inode_getlsmprop(struct inode *inode, struct lsm_prop *prop); int security_inode_copy_up(struct dentry *src, struct cred **new); int security_inode_copy_up_xattr(struct dentry *src, const char *name); int security_inode_setintegrity(const struct inode *inode, @@ -444,6 +482,7 @@ void security_cred_free(struct cred *cred); int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp); void security_transfer_creds(struct cred *new, const struct cred *old); void security_cred_getsecid(const struct cred *c, u32 *secid); +void security_cred_getlsmprop(const struct cred *c, struct lsm_prop *prop); int security_kernel_act_as(struct cred *new, u32 secid); int security_kernel_create_files_as(struct cred *new, struct inode *inode); int security_kernel_module_request(char *kmod_name); @@ -463,8 +502,8 @@ int security_task_fix_setgroups(struct cred *new, const struct cred *old); int security_task_setpgid(struct task_struct *p, pid_t pgid); int security_task_getpgid(struct task_struct *p); int security_task_getsid(struct task_struct *p); -void security_current_getsecid_subj(u32 *secid); -void security_task_getsecid_obj(struct task_struct 
*p, u32 *secid); +void security_current_getlsmprop_subj(struct lsm_prop *prop); +void security_task_getlsmprop_obj(struct task_struct *p, struct lsm_prop *prop); int security_task_setnice(struct task_struct *p, int nice); int security_task_setioprio(struct task_struct *p, int ioprio); int security_task_getioprio(struct task_struct *p); @@ -482,7 +521,7 @@ int security_task_prctl(int option, unsigned long arg2, unsigned long arg3, void security_task_to_inode(struct task_struct *p, struct inode *inode); int security_create_user_ns(const struct cred *cred); int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag); -void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid); +void security_ipc_getlsmprop(struct kern_ipc_perm *ipcp, struct lsm_prop *prop); int security_msg_msg_alloc(struct msg_msg *msg); void security_msg_msg_free(struct msg_msg *msg); int security_msg_queue_alloc(struct kern_ipc_perm *msq); @@ -515,6 +554,7 @@ int security_setprocattr(int lsmid, const char *name, void *value, size_t size); int security_netlink_send(struct sock *sk, struct sk_buff *skb); int security_ismaclabel(const char *name); int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen); +int security_lsmprop_to_secctx(struct lsm_prop *prop, char **secdata, u32 *seclen); int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid); void security_release_secctx(char *secdata, u32 seclen); void security_inode_invalidate_secctx(struct inode *inode); @@ -531,6 +571,17 @@ int security_bdev_setintegrity(struct block_device *bdev, size_t size); #else /* CONFIG_SECURITY */ +/** + * lsmprop_is_set - report if there is a value in the lsm_prop + * @prop: Pointer to the exported LSM data + * + * Returns true if there is a value set, false otherwise + */ +static inline bool lsmprop_is_set(struct lsm_prop *prop) +{ + return false; +} + static inline int call_blocking_lsm_notifier(enum lsm_event event, void *data) { return 0; @@ -1020,9 +1071,10 @@ static inline int security_inode_listsecurity(struct inode *inode, char *buffer, return 0; } -static inline void security_inode_getsecid(struct inode *inode, u32 *secid) +static inline void security_inode_getlsmprop(struct inode *inode, + struct lsm_prop *prop) { - *secid = 0; + lsmprop_init(prop); } static inline int security_inode_copy_up(struct dentry *src, struct cred **new) @@ -1172,6 +1224,10 @@ static inline void security_cred_getsecid(const struct cred *c, u32 *secid) *secid = 0; } +static inline void security_cred_getlsmprop(const struct cred *c, + struct lsm_prop *prop) +{ } + static inline int security_kernel_act_as(struct cred *cred, u32 secid) { return 0; @@ -1249,14 +1305,15 @@ static inline int security_task_getsid(struct task_struct *p) return 0; } -static inline void security_current_getsecid_subj(u32 *secid) +static inline void security_current_getlsmprop_subj(struct lsm_prop *prop) { - *secid = 0; + lsmprop_init(prop); } -static inline void security_task_getsecid_obj(struct task_struct *p, u32 *secid) +static inline void security_task_getlsmprop_obj(struct task_struct *p, + struct lsm_prop *prop) { - *secid = 0; + lsmprop_init(prop); } static inline int security_task_setnice(struct task_struct *p, int nice) @@ -1332,9 +1389,10 @@ static inline int security_ipc_permission(struct kern_ipc_perm *ipcp, return 0; } -static inline void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid) +static inline void security_ipc_getlsmprop(struct kern_ipc_perm *ipcp, + struct lsm_prop *prop) { - *secid = 0; + 
lsmprop_init(prop); } static inline int security_msg_msg_alloc(struct msg_msg *msg) @@ -1468,7 +1526,14 @@ static inline int security_ismaclabel(const char *name) return 0; } -static inline int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) +static inline int security_secid_to_secctx(u32 secid, char **secdata, + u32 *seclen) +{ + return -EOPNOTSUPP; +} + +static inline int security_lsmprop_to_secctx(struct lsm_prop *prop, + char **secdata, u32 *seclen) { return -EOPNOTSUPP; } @@ -2095,7 +2160,8 @@ static inline void security_key_post_create_or_update(struct key *keyring, int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule, gfp_t gfp); int security_audit_rule_known(struct audit_krule *krule); -int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule); +int security_audit_rule_match(struct lsm_prop *prop, u32 field, u32 op, + void *lsmrule); void security_audit_rule_free(void *lsmrule); #else @@ -2111,8 +2177,8 @@ static inline int security_audit_rule_known(struct audit_krule *krule) return 0; } -static inline int security_audit_rule_match(u32 secid, u32 field, u32 op, - void *lsmrule) +static inline int security_audit_rule_match(struct lsm_prop *prop, u32 field, + u32 op, void *lsmrule) { return 0; } diff --git a/include/linux/sed-opal.h b/include/linux/sed-opal.h index 2ac50822554e..80f33a93f944 100644 --- a/include/linux/sed-opal.h +++ b/include/linux/sed-opal.h @@ -52,6 +52,7 @@ static inline bool is_sed_ioctl(unsigned int cmd) case IOC_OPAL_GET_GEOMETRY: case IOC_OPAL_DISCOVERY: case IOC_OPAL_REVERT_LSP: + case IOC_OPAL_SET_SID_PW: return true; } return false; diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index fffeb754880f..5298765d6ca4 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -622,6 +622,23 @@ static __always_inline unsigned raw_read_seqcount_latch(const seqcount_latch_t * } /** + * read_seqcount_latch() - pick even/odd latch data copy + * @s: Pointer to seqcount_latch_t + * + * See write_seqcount_latch() for details and a full reader/writer usage + * example. + * + * Return: sequence counter raw value. Use the lowest bit as an index for + * picking which data copy to read. The full counter must then be checked + * with read_seqcount_latch_retry(). 
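Returning to the security.h move above from bare u32 secids to struct lsm_prop, a hedged sketch of an audit-style caller (name hypothetical) that records the current task's subject label only when some LSM actually filled one in; security_lsmprop_to_secctx() is assumed to return 0 on success, as the stubs in this patch suggest.

/* Hypothetical caller of the new lsm_prop helpers. */
static int foo_record_subject(void)
{
	struct lsm_prop prop;
	char *ctx;
	u32 len;
	int err;

	security_current_getlsmprop_subj(&prop);
	if (!lsmprop_is_set(&prop))
		return 0;		/* no LSM provided a label */

	err = security_lsmprop_to_secctx(&prop, &ctx, &len);
	if (err)
		return err;

	/* ... emit ctx/len to the record ... */
	security_release_secctx(ctx, len);
	return 0;
}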
+ */ +static __always_inline unsigned read_seqcount_latch(const seqcount_latch_t *s) +{ + kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); + return raw_read_seqcount_latch(s); +} + +/** * raw_read_seqcount_latch_retry() - end a seqcount_latch_t read section * @s: Pointer to seqcount_latch_t * @start: count, from raw_read_seqcount_latch() @@ -636,8 +653,33 @@ raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start) } /** + * read_seqcount_latch_retry() - end a seqcount_latch_t read section + * @s: Pointer to seqcount_latch_t + * @start: count, from read_seqcount_latch() + * + * Return: true if a read section retry is required, else false + */ +static __always_inline int +read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start) +{ + kcsan_atomic_next(0); + return raw_read_seqcount_latch_retry(s, start); +} + +/** * raw_write_seqcount_latch() - redirect latch readers to even/odd copy * @s: Pointer to seqcount_latch_t + */ +static __always_inline void raw_write_seqcount_latch(seqcount_latch_t *s) +{ + smp_wmb(); /* prior stores before incrementing "sequence" */ + s->seqcount.sequence++; + smp_wmb(); /* increment "sequence" before following stores */ +} + +/** + * write_seqcount_latch_begin() - redirect latch readers to odd copy + * @s: Pointer to seqcount_latch_t * * The latch technique is a multiversion concurrency control method that allows * queries during non-atomic modifications. If you can guarantee queries never @@ -665,17 +707,11 @@ raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start) * * void latch_modify(struct latch_struct *latch, ...) * { - * smp_wmb(); // Ensure that the last data[1] update is visible - * latch->seq.sequence++; - * smp_wmb(); // Ensure that the seqcount update is visible - * + * write_seqcount_latch_begin(&latch->seq); * modify(latch->data[0], ...); - * - * smp_wmb(); // Ensure that the data[0] update is visible - * latch->seq.sequence++; - * smp_wmb(); // Ensure that the seqcount update is visible - * + * write_seqcount_latch(&latch->seq); * modify(latch->data[1], ...); + * write_seqcount_latch_end(&latch->seq); * } * * The query will have a form like:: @@ -686,13 +722,13 @@ raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start) * unsigned seq, idx; * * do { - * seq = raw_read_seqcount_latch(&latch->seq); + * seq = read_seqcount_latch(&latch->seq); * * idx = seq & 0x01; * entry = data_query(latch->data[idx], ...); * * // This includes needed smp_rmb() - * } while (raw_read_seqcount_latch_retry(&latch->seq, seq)); + * } while (read_seqcount_latch_retry(&latch->seq, seq)); * * return entry; * } @@ -716,11 +752,31 @@ raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start) * When data is a dynamic data structure; one should use regular RCU * patterns to manage the lifetimes of the objects within. 
*/ -static inline void raw_write_seqcount_latch(seqcount_latch_t *s) +static __always_inline void write_seqcount_latch_begin(seqcount_latch_t *s) { - smp_wmb(); /* prior stores before incrementing "sequence" */ - s->seqcount.sequence++; - smp_wmb(); /* increment "sequence" before following stores */ + kcsan_nestable_atomic_begin(); + raw_write_seqcount_latch(s); +} + +/** + * write_seqcount_latch() - redirect latch readers to even copy + * @s: Pointer to seqcount_latch_t + */ +static __always_inline void write_seqcount_latch(seqcount_latch_t *s) +{ + raw_write_seqcount_latch(s); +} + +/** + * write_seqcount_latch_end() - end a seqcount_latch_t write section + * @s: Pointer to seqcount_latch_t + * + * Marks the end of a seqcount_latch_t writer section, after all copies of the + * latch-protected data have been updated. + */ +static __always_inline void write_seqcount_latch_end(seqcount_latch_t *s) +{ + kcsan_nestable_atomic_end(); } #define __SEQLOCK_UNLOCKED(lockname) \ @@ -754,11 +810,7 @@ static inline void raw_write_seqcount_latch(seqcount_latch_t *s) */ static inline unsigned read_seqbegin(const seqlock_t *sl) { - unsigned ret = read_seqcount_begin(&sl->seqcount); - - kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry() */ - kcsan_flat_atomic_begin(); - return ret; + return read_seqcount_begin(&sl->seqcount); } /** @@ -774,12 +826,6 @@ static inline unsigned read_seqbegin(const seqlock_t *sl) */ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) { - /* - * Assume not nested: read_seqretry() may be called multiple times when - * completing read critical section. - */ - kcsan_flat_atomic_end(); - return read_seqcount_retry(&sl->seqcount, start); } diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 4ab65874a850..743b4afaad4c 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -505,7 +505,11 @@ struct uart_port { * The remaining bits are serial-core specific and not modifiable by * userspace. 
*/ +#ifdef CONFIG_HAS_IOPORT #define UPF_FOURPORT ((__force upf_t) ASYNC_FOURPORT /* 1 */ ) +#else +#define UPF_FOURPORT 0 +#endif #define UPF_SAK ((__force upf_t) ASYNC_SAK /* 2 */ ) #define UPF_SPD_HI ((__force upf_t) ASYNC_SPD_HI /* 4 */ ) #define UPF_SPD_VHI ((__force upf_t) ASYNC_SPD_VHI /* 5 */ ) diff --git a/include/linux/serio.h b/include/linux/serio.h index bf2191f25350..69a47674af65 100644 --- a/include/linux/serio.h +++ b/include/linux/serio.h @@ -6,6 +6,7 @@ #define _SERIO_H +#include <linux/cleanup.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/list.h> @@ -161,4 +162,6 @@ static inline void serio_continue_rx(struct serio *serio) spin_unlock_irq(&serio->lock); } +DEFINE_GUARD(serio_pause_rx, struct serio *, serio_pause_rx(_T), serio_continue_rx(_T)) + #endif diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h index e7aec20fb44f..3030d9245f5a 100644 --- a/include/linux/set_memory.h +++ b/include/linux/set_memory.h @@ -34,6 +34,12 @@ static inline int set_direct_map_default_noflush(struct page *page) return 0; } +static inline int set_direct_map_valid_noflush(struct page *page, + unsigned nr, bool valid) +{ + return 0; +} + static inline bool kernel_page_present(struct page *page) { return true; diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 515a9a6a3c6f..0b273a7b9f01 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -42,10 +42,10 @@ struct shmem_inode_info { struct inode vfs_inode; }; -#define SHMEM_FL_USER_VISIBLE FS_FL_USER_VISIBLE +#define SHMEM_FL_USER_VISIBLE (FS_FL_USER_VISIBLE | FS_CASEFOLD_FL) #define SHMEM_FL_USER_MODIFIABLE \ - (FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL | FS_NOATIME_FL) -#define SHMEM_FL_INHERITED (FS_NODUMP_FL | FS_NOATIME_FL) + (FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL | FS_NOATIME_FL | FS_CASEFOLD_FL) +#define SHMEM_FL_INHERITED (FS_NODUMP_FL | FS_NOATIME_FL | FS_CASEFOLD_FL) struct shmem_quota_limits { qsize_t usrquota_bhardlimit; /* Default user quota block hard limit */ @@ -114,6 +114,7 @@ int shmem_unuse(unsigned int type); unsigned long shmem_allowable_huge_orders(struct inode *inode, struct vm_area_struct *vma, pgoff_t index, loff_t write_end, bool shmem_huge_force); +bool shmem_hpage_pmd_enabled(void); #else static inline unsigned long shmem_allowable_huge_orders(struct inode *inode, struct vm_area_struct *vma, pgoff_t index, @@ -121,6 +122,11 @@ static inline unsigned long shmem_allowable_huge_orders(struct inode *inode, { return 0; } + +static inline bool shmem_hpage_pmd_enabled(void) +{ + return false; +} #endif #ifdef CONFIG_SHMEM diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 39f1d16f3628..58009fa66102 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -31,6 +31,7 @@ #include <linux/in6.h> #include <linux/if_packet.h> #include <linux/llist.h> +#include <linux/page_frag_cache.h> #include <net/flow.h> #if IS_ENABLED(CONFIG_NF_CONNTRACK) #include <linux/netfilter/nf_conntrack_common.h> @@ -2681,6 +2682,12 @@ static inline void skb_assert_len(struct sk_buff *skb) #endif /* CONFIG_DEBUG_NET */ } +#if defined(CONFIG_FAIL_SKB_REALLOC) +void skb_might_realloc(struct sk_buff *skb); +#else +static inline void skb_might_realloc(struct sk_buff *skb) {} +#endif + /* * Add data to an sk_buff */ @@ -2781,6 +2788,7 @@ static inline enum skb_drop_reason pskb_may_pull_reason(struct sk_buff *skb, unsigned int len) { DEBUG_NET_WARN_ON_ONCE(len > INT_MAX); + skb_might_realloc(skb); if (likely(len <= skb_headlen(skb))) return 
SKB_NOT_DROPPED_YET; @@ -2909,9 +2917,19 @@ static inline void skb_reset_inner_headers(struct sk_buff *skb) skb->inner_transport_header = skb->transport_header; } +static inline int skb_mac_header_was_set(const struct sk_buff *skb) +{ + return skb->mac_header != (typeof(skb->mac_header))~0U; +} + static inline void skb_reset_mac_len(struct sk_buff *skb) { - skb->mac_len = skb->network_header - skb->mac_header; + if (!skb_mac_header_was_set(skb)) { + DEBUG_NET_WARN_ON_ONCE(1); + skb->mac_len = 0; + } else { + skb->mac_len = skb->network_header - skb->mac_header; + } } static inline unsigned char *skb_inner_transport_header(const struct sk_buff @@ -2927,7 +2945,10 @@ static inline int skb_inner_transport_offset(const struct sk_buff *skb) static inline void skb_reset_inner_transport_header(struct sk_buff *skb) { - skb->inner_transport_header = skb->data - skb->head; + long offset = skb->data - skb->head; + + DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->inner_transport_header))offset); + skb->inner_transport_header = offset; } static inline void skb_set_inner_transport_header(struct sk_buff *skb, @@ -2944,7 +2965,10 @@ static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb) static inline void skb_reset_inner_network_header(struct sk_buff *skb) { - skb->inner_network_header = skb->data - skb->head; + long offset = skb->data - skb->head; + + DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->inner_network_header))offset); + skb->inner_network_header = offset; } static inline void skb_set_inner_network_header(struct sk_buff *skb, @@ -2966,7 +2990,10 @@ static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb) static inline void skb_reset_inner_mac_header(struct sk_buff *skb) { - skb->inner_mac_header = skb->data - skb->head; + long offset = skb->data - skb->head; + + DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->inner_mac_header))offset); + skb->inner_mac_header = offset; } static inline void skb_set_inner_mac_header(struct sk_buff *skb, @@ -2988,7 +3015,10 @@ static inline unsigned char *skb_transport_header(const struct sk_buff *skb) static inline void skb_reset_transport_header(struct sk_buff *skb) { - skb->transport_header = skb->data - skb->head; + long offset = skb->data - skb->head; + + DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->transport_header))offset); + skb->transport_header = offset; } static inline void skb_set_transport_header(struct sk_buff *skb, @@ -3005,7 +3035,10 @@ static inline unsigned char *skb_network_header(const struct sk_buff *skb) static inline void skb_reset_network_header(struct sk_buff *skb) { - skb->network_header = skb->data - skb->head; + long offset = skb->data - skb->head; + + DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->network_header))offset); + skb->network_header = offset; } static inline void skb_set_network_header(struct sk_buff *skb, const int offset) @@ -3014,11 +3047,6 @@ static inline void skb_set_network_header(struct sk_buff *skb, const int offset) skb->network_header += offset; } -static inline int skb_mac_header_was_set(const struct sk_buff *skb) -{ - return skb->mac_header != (typeof(skb->mac_header))~0U; -} - static inline unsigned char *skb_mac_header(const struct sk_buff *skb) { DEBUG_NET_WARN_ON_ONCE(!skb_mac_header_was_set(skb)); @@ -3043,7 +3071,10 @@ static inline void skb_unset_mac_header(struct sk_buff *skb) static inline void skb_reset_mac_header(struct sk_buff *skb) { - skb->mac_header = skb->data - skb->head; + long offset = skb->data - skb->head; + + DEBUG_NET_WARN_ON_ONCE(offset != 
(typeof(skb->mac_header))offset); + skb->mac_header = offset; } static inline void skb_set_mac_header(struct sk_buff *skb, const int offset) @@ -3130,9 +3161,15 @@ static inline int skb_inner_network_offset(const struct sk_buff *skb) return skb_inner_network_header(skb) - skb->data; } +static inline enum skb_drop_reason +pskb_network_may_pull_reason(struct sk_buff *skb, unsigned int len) +{ + return pskb_may_pull_reason(skb, skb_network_offset(skb) + len); +} + static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) { - return pskb_may_pull(skb, skb_network_offset(skb) + len); + return pskb_network_may_pull_reason(skb, len) == SKB_NOT_DROPPED_YET; } /* @@ -3210,6 +3247,7 @@ static inline int __pskb_trim(struct sk_buff *skb, unsigned int len) static inline int pskb_trim(struct sk_buff *skb, unsigned int len) { + skb_might_realloc(skb); return (len < skb->len) ? __pskb_trim(skb, len) : 0; } @@ -3964,6 +4002,7 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len); static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) { + skb_might_realloc(skb); if (likely(len >= skb->len)) return 0; return pskb_trim_rcsum_slow(skb, len); diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index d9b03e0746e7..2cbe0c22a32f 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -317,17 +317,22 @@ static inline void sock_drop(struct sock *sk, struct sk_buff *skb) kfree_skb(skb); } -static inline void sk_psock_queue_msg(struct sk_psock *psock, +static inline bool sk_psock_queue_msg(struct sk_psock *psock, struct sk_msg *msg) { + bool ret; + spin_lock_bh(&psock->ingress_lock); - if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) + if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) { list_add_tail(&msg->list, &psock->ingress_msg); - else { + ret = true; + } else { sk_msg_free(psock->sk, msg); kfree(msg); + ret = false; } spin_unlock_bh(&psock->ingress_lock); + return ret; } static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock) diff --git a/include/linux/slab.h b/include/linux/slab.h index b35e2db7eb0e..10a971c2bde3 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -77,7 +77,17 @@ enum _slab_flag_bits { #define SLAB_POISON __SLAB_FLAG_BIT(_SLAB_POISON) /* Indicate a kmalloc slab */ #define SLAB_KMALLOC __SLAB_FLAG_BIT(_SLAB_KMALLOC) -/* Align objs on cache lines */ +/** + * define SLAB_HWCACHE_ALIGN - Align objects on cache line boundaries. + * + * Sufficiently large objects are aligned on cache line boundary. For object + * size smaller than a half of cache line size, the alignment is on the half of + * cache line size. In general, if object size is smaller than 1/2^n of cache + * line size, the alignment is adjusted to 1/2^n. + * + * If explicit alignment is also requested by the respective + * &struct kmem_cache_args field, the greater of both is alignments is applied. + */ #define SLAB_HWCACHE_ALIGN __SLAB_FLAG_BIT(_SLAB_HWCACHE_ALIGN) /* Use GFP_DMA memory */ #define SLAB_CACHE_DMA __SLAB_FLAG_BIT(_SLAB_CACHE_DMA) @@ -87,8 +97,8 @@ enum _slab_flag_bits { #define SLAB_STORE_USER __SLAB_FLAG_BIT(_SLAB_STORE_USER) /* Panic if kmem_cache_create() fails */ #define SLAB_PANIC __SLAB_FLAG_BIT(_SLAB_PANIC) -/* - * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS! +/** + * define SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS! * * This delays freeing the SLAB page by a grace period, it does _NOT_ * delay object freeing. 
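Looking back at the skmsg.h change above, sk_psock_queue_msg() now reports whether the message was queued or freed, so a caller can account and wake the receiver only on success. A hedged sketch with a hypothetical ingress helper:

/* Hypothetical ingress path using the new boolean return. */
static int foo_ingress_enqueue(struct sk_psock *psock, struct sk_msg *msg)
{
	if (!sk_psock_queue_msg(psock, msg))
		return -EPIPE;	/* psock shut down; msg was already freed */

	/* ... wake the receiving socket, update accounting, etc. ... */
	return 0;
}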
This means that if you do kmem_cache_free() @@ -99,20 +109,22 @@ enum _slab_flag_bits { * stays valid, the trick to using this is relying on an independent * object validation pass. Something like: * - * begin: - * rcu_read_lock(); - * obj = lockless_lookup(key); - * if (obj) { - * if (!try_get_ref(obj)) // might fail for free objects - * rcu_read_unlock(); - * goto begin; + * :: + * + * begin: + * rcu_read_lock(); + * obj = lockless_lookup(key); + * if (obj) { + * if (!try_get_ref(obj)) // might fail for free objects + * rcu_read_unlock(); + * goto begin; * - * if (obj->key != key) { // not the object we expected - * put_ref(obj); - * rcu_read_unlock(); - * goto begin; - * } - * } + * if (obj->key != key) { // not the object we expected + * put_ref(obj); + * rcu_read_unlock(); + * goto begin; + * } + * } * rcu_read_unlock(); * * This is useful if we need to approach a kernel structure obliquely, @@ -137,7 +149,6 @@ enum _slab_flag_bits { * * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU. */ -/* Defer freeing slabs to RCU */ #define SLAB_TYPESAFE_BY_RCU __SLAB_FLAG_BIT(_SLAB_TYPESAFE_BY_RCU) /* Trace allocations and frees */ #define SLAB_TRACE __SLAB_FLAG_BIT(_SLAB_TRACE) @@ -170,7 +181,12 @@ enum _slab_flag_bits { #else # define SLAB_FAILSLAB __SLAB_FLAG_UNUSED #endif -/* Account to memcg */ +/** + * define SLAB_ACCOUNT - Account allocations to memcg. + * + * All object allocations from this cache will be memcg accounted, regardless of + * __GFP_ACCOUNT being or not being passed to individual allocations. + */ #ifdef CONFIG_MEMCG # define SLAB_ACCOUNT __SLAB_FLAG_BIT(_SLAB_ACCOUNT) #else @@ -197,7 +213,13 @@ enum _slab_flag_bits { #endif /* The following flags affect the page allocator grouping pages by mobility */ -/* Objects are reclaimable */ +/** + * define SLAB_RECLAIM_ACCOUNT - Objects are reclaimable. + * + * Use this flag for caches that have an associated shrinker. As a result, slab + * pages are allocated with __GFP_RECLAIMABLE, which affects grouping pages by + * mobility, and are accounted in SReclaimable counter in /proc/meminfo + */ #ifndef CONFIG_SLUB_TINY #define SLAB_RECLAIM_ACCOUNT __SLAB_FLAG_BIT(_SLAB_RECLAIM_ACCOUNT) #else @@ -448,6 +470,7 @@ void kfree_sensitive(const void *objp); size_t __ksize(const void *objp); DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T)) +DEFINE_FREE(kfree_sensitive, void *, if (_T) kfree_sensitive(_T)) /** * ksize - Report actual allocation size of associated object diff --git a/include/linux/soc/mediatek/dvfsrc.h b/include/linux/soc/mediatek/dvfsrc.h new file mode 100644 index 000000000000..1498b3ed396b --- /dev/null +++ b/include/linux/soc/mediatek/dvfsrc.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (c) 2021 MediaTek Inc. + * Copyright (c) 2024 Collabora Ltd. 
+ * AngeloGioacchino Del Regno <[email protected]> + */ + +#ifndef __MEDIATEK_DVFSRC_H +#define __MEDIATEK_DVFSRC_H + +enum mtk_dvfsrc_cmd { + MTK_DVFSRC_CMD_BW, + MTK_DVFSRC_CMD_HRT_BW, + MTK_DVFSRC_CMD_PEAK_BW, + MTK_DVFSRC_CMD_OPP, + MTK_DVFSRC_CMD_VCORE_LEVEL, + MTK_DVFSRC_CMD_VSCP_LEVEL, + MTK_DVFSRC_CMD_MAX, +}; + +#if IS_ENABLED(CONFIG_MTK_DVFSRC) + +int mtk_dvfsrc_send_request(const struct device *dev, u32 cmd, u64 data); +int mtk_dvfsrc_query_info(const struct device *dev, u32 cmd, int *data); + +#else + +static inline int mtk_dvfsrc_send_request(const struct device *dev, u32 cmd, u64 data) +{ return -ENODEV; } + +static inline int mtk_dvfsrc_query_info(const struct device *dev, u32 cmd, int *data) +{ return -ENODEV; } + +#endif /* CONFIG_MTK_DVFSRC */ + +#endif diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h index 6c6cccc848f4..9956e18c5ffa 100644 --- a/include/linux/soc/mediatek/infracfg.h +++ b/include/linux/soc/mediatek/infracfg.h @@ -434,6 +434,11 @@ #define MT7622_TOP_AXI_PROT_EN_WB (BIT(2) | BIT(6) | \ BIT(7) | BIT(8)) +#define MT6735_TOP_AXI_PROT_EN_CONN (BIT(2) | BIT(8)) +#define MT6735_TOP_AXI_PROT_EN_MD1 (BIT(24) | BIT(25) | \ + BIT(26) | BIT(27) | \ + BIT(28)) + #define INFRA_TOPAXI_PROTECTEN 0x0220 #define INFRA_TOPAXI_PROTECTSTA1 0x0228 #define INFRA_TOPAXI_PROTECTEN_SET 0x0260 diff --git a/include/linux/soc/mediatek/mtk_sip_svc.h b/include/linux/soc/mediatek/mtk_sip_svc.h index 0761128b4354..abe24a73ee19 100644 --- a/include/linux/soc/mediatek/mtk_sip_svc.h +++ b/include/linux/soc/mediatek/mtk_sip_svc.h @@ -22,6 +22,9 @@ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, MTK_SIP_SMC_CONVENTION, \ ARM_SMCCC_OWNER_SIP, fn_id) +/* DVFSRC SMC calls */ +#define MTK_SIP_DVFSRC_VCOREFS_CONTROL MTK_SIP_SMC_CMD(0x506) + /* IOMMU related SMC call */ #define MTK_SIP_KERNEL_IOMMU_CONTROL MTK_SIP_SMC_CMD(0x514) diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h index 2f20281d4ad4..8e5d78fb4847 100644 --- a/include/linux/soc/qcom/llcc-qcom.h +++ b/include/linux/soc/qcom/llcc-qcom.h @@ -54,7 +54,19 @@ #define LLCC_CAMEXP4 52 #define LLCC_DISP_WB 53 #define LLCC_DISP_1 54 +#define LLCC_VIEYE 57 +#define LLCC_VIDPTH 58 +#define LLCC_GPUMV 59 +#define LLCC_EVA_LEFT 60 +#define LLCC_EVA_RIGHT 61 +#define LLCC_EVAGAIN 62 +#define LLCC_VIPTH 63 #define LLCC_VIDVSP 64 +#define LLCC_DISP_LEFT 65 +#define LLCC_DISP_RIGHT 66 +#define LLCC_EVCS_LEFT 67 +#define LLCC_EVCS_RIGHT 68 +#define LLCC_SPAD 69 /** * struct llcc_slice_desc - Cache slice descriptor diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h index bd0d11af76c5..fd104b666836 100644 --- a/include/linux/soc/ti/ti_sci_protocol.h +++ b/include/linux/soc/ti/ti_sci_protocol.h @@ -195,6 +195,35 @@ struct ti_sci_clk_ops { u64 *current_freq); }; +/* TISCI LPM IO isolation control values */ +#define TISCI_MSG_VALUE_IO_ENABLE 1 +#define TISCI_MSG_VALUE_IO_DISABLE 0 + +/* TISCI LPM constraint state values */ +#define TISCI_MSG_CONSTRAINT_SET 1 +#define TISCI_MSG_CONSTRAINT_CLR 0 + +/** + * struct ti_sci_pm_ops - Low Power Mode (LPM) control operations + * @lpm_wake_reason: Get the wake up source that woke the SoC from LPM + * - source: The wake up source that woke soc from LPM. + * - timestamp: Timestamp at which soc woke. + * @set_device_constraint: Set LPM constraint on behalf of a device + * - id: Device Identifier + * - state: The desired state of device constraint: set or clear. 
+ * @set_latency_constraint: Set LPM resume latency constraint + * - latency: maximum acceptable latency to wake up from low power mode + * - state: The desired state of latency constraint: set or clear. + */ +struct ti_sci_pm_ops { + int (*lpm_wake_reason)(const struct ti_sci_handle *handle, + u32 *source, u64 *timestamp, u8 *pin, u8 *mode); + int (*set_device_constraint)(const struct ti_sci_handle *handle, + u32 id, u8 state); + int (*set_latency_constraint)(const struct ti_sci_handle *handle, + u16 latency, u8 state); +}; + /** * struct ti_sci_resource_desc - Description of TI SCI resource instance range. * @start: Start index of the first resource range. @@ -539,6 +568,7 @@ struct ti_sci_ops { struct ti_sci_core_ops core_ops; struct ti_sci_dev_ops dev_ops; struct ti_sci_clk_ops clk_ops; + struct ti_sci_pm_ops pm_ops; struct ti_sci_rm_core_ops rm_core_ops; struct ti_sci_rm_irq_ops rm_irq_ops; struct ti_sci_rm_ringacc_ops rm_ring_ops; diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h index fc5a206c4043..3e6c8e9d67ae 100644 --- a/include/linux/sockptr.h +++ b/include/linux/sockptr.h @@ -53,6 +53,8 @@ static inline int copy_from_sockptr_offset(void *dst, sockptr_t src, /* Deprecated. * This is unsafe, unless caller checked user provided optlen. * Prefer copy_safe_from_sockptr() instead. + * + * Returns 0 for success, or number of bytes not copied on error. */ static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size) { @@ -77,7 +79,9 @@ static inline int copy_safe_from_sockptr(void *dst, size_t ksize, { if (optlen < ksize) return -EINVAL; - return copy_from_sockptr(dst, optval, ksize); + if (copy_from_sockptr(dst, optval, ksize)) + return -EFAULT; + return 0; } static inline int copy_struct_from_sockptr(void *dst, size_t ksize, diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index 5e0dd47a0412..bd9836690da6 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -4,12 +4,20 @@ #ifndef __SOUNDWIRE_H #define __SOUNDWIRE_H +#include <linux/bitfield.h> #include <linux/bug.h> -#include <linux/lockdep_types.h> +#include <linux/completion.h> +#include <linux/device.h> #include <linux/irq.h> #include <linux/irqdomain.h> +#include <linux/lockdep_types.h> #include <linux/mod_devicetable.h> -#include <linux/bitfield.h> +#include <linux/mutex.h> +#include <linux/types.h> +#include <sound/sdca.h> + +struct dentry; +struct fwnode_handle; struct sdw_bus; struct sdw_slave; @@ -226,64 +234,36 @@ enum sdw_clk_stop_mode { /** * struct sdw_dp0_prop - DP0 properties + * @words: wordlengths supported * @max_word: Maximum number of bits in a Payload Channel Sample, 1 to 64 * (inclusive) * @min_word: Minimum number of bits in a Payload Channel Sample, 1 to 64 * (inclusive) * @num_words: number of wordlengths supported - * @words: wordlengths supported + * @ch_prep_timeout: Port-specific timeout value, in milliseconds * @BRA_flow_controlled: Slave implementation results in an OK_NotReady * response * @simple_ch_prep_sm: If channel prepare sequence is required - * @ch_prep_timeout: Port-specific timeout value, in milliseconds * @imp_def_interrupts: If set, each bit corresponds to support for * implementation-defined interrupts + * @num_lanes: array size of @lane_list + * @lane_list: indicates which Lanes can be used by DP0 * * The wordlengths are specified by Spec as max, min AND number of * discrete values, implementation can define based on the wordlengths they * support */ struct sdw_dp0_prop { + u32 *words; u32 max_word; u32 
min_word; u32 num_words; - u32 *words; + u32 ch_prep_timeout; bool BRA_flow_controlled; bool simple_ch_prep_sm; - u32 ch_prep_timeout; bool imp_def_interrupts; -}; - -/** - * struct sdw_dpn_audio_mode - Audio mode properties for DPn - * @bus_min_freq: Minimum bus frequency, in Hz - * @bus_max_freq: Maximum bus frequency, in Hz - * @bus_num_freq: Number of discrete frequencies supported - * @bus_freq: Discrete bus frequencies, in Hz - * @min_freq: Minimum sampling frequency, in Hz - * @max_freq: Maximum sampling bus frequency, in Hz - * @num_freq: Number of discrete sampling frequency supported - * @freq: Discrete sampling frequencies, in Hz - * @prep_ch_behave: Specifies the dependencies between Channel Prepare - * sequence and bus clock configuration - * If 0, Channel Prepare can happen at any Bus clock rate - * If 1, Channel Prepare sequence shall happen only after Bus clock is - * changed to a frequency supported by this mode or compatible modes - * described by the next field - * @glitchless: Bitmap describing possible glitchless transitions from this - * Audio Mode to other Audio Modes - */ -struct sdw_dpn_audio_mode { - u32 bus_min_freq; - u32 bus_max_freq; - u32 bus_num_freq; - u32 *bus_freq; - u32 max_freq; - u32 min_freq; - u32 num_freq; - u32 *freq; - u32 prep_ch_behave; - u32 glitchless; + int num_lanes; + u32 *lane_list; }; /** @@ -298,24 +278,25 @@ struct sdw_dpn_audio_mode { * @type: Data port type. Full, Simplified or Reduced * @max_grouping: Maximum number of samples that can be grouped together for * a full data port - * @simple_ch_prep_sm: If the port supports simplified channel prepare state - * machine * @ch_prep_timeout: Port-specific timeout value, in milliseconds * @imp_def_interrupts: If set, each bit corresponds to support for * implementation-defined interrupts * @max_ch: Maximum channels supported * @min_ch: Minimum channels supported * @num_channels: Number of discrete channels supported - * @channels: Discrete channels supported * @num_ch_combinations: Number of channel combinations supported + * @channels: Discrete channels supported * @ch_combinations: Channel combinations supported + * @lane_list: indicates which Lanes can be used by DPn + * @num_lanes: array size of @lane_list * @modes: SDW mode supported * @max_async_buffer: Number of samples that this port can buffer in * asynchronous modes + * @port_encoding: Payload Channel Sample encoding schemes supported * @block_pack_mode: Type of block port mode supported * @read_only_wordlength: Read Only wordlength field in DPN_BlockCtrl1 register - * @port_encoding: Payload Channel Sample encoding schemes supported - * @audio_modes: Audio modes supported + * @simple_ch_prep_sm: If the port supports simplified channel prepare state + * machine */ struct sdw_dpn_prop { u32 num; @@ -325,25 +306,29 @@ struct sdw_dpn_prop { u32 *words; enum sdw_dpn_type type; u32 max_grouping; - bool simple_ch_prep_sm; u32 ch_prep_timeout; u32 imp_def_interrupts; u32 max_ch; u32 min_ch; u32 num_channels; - u32 *channels; u32 num_ch_combinations; + u32 *channels; u32 *ch_combinations; + u32 *lane_list; + int num_lanes; u32 modes; u32 max_async_buffer; + u32 port_encoding; bool block_pack_mode; bool read_only_wordlength; - u32 port_encoding; - struct sdw_dpn_audio_mode *audio_modes; + bool simple_ch_prep_sm; }; /** * struct sdw_slave_prop - SoundWire Slave properties + * @dp0_prop: Data Port 0 properties + * @src_dpn_prop: Source Data Port N properties + * @sink_dpn_prop: Sink Data Port N properties * @mipi_revision: Spec version of 
the implementation * @wake_capable: Wake-up events are supported * @test_mode_capable: If test mode is supported @@ -360,23 +345,26 @@ struct sdw_dpn_prop { * SCP_AddrPage2 * @bank_delay_support: Slave implements bank delay/bridge support registers * SCP_BankDelay and SCP_NextFrame + * @lane_control_support: Slave supports lane control * @p15_behave: Slave behavior when the Master attempts a read to the Port15 * alias - * @lane_control_support: Slave supports lane control * @master_count: Number of Masters present on this Slave * @source_ports: Bitmap identifying source ports * @sink_ports: Bitmap identifying sink ports - * @dp0_prop: Data Port 0 properties - * @src_dpn_prop: Source Data Port N properties - * @sink_dpn_prop: Sink Data Port N properties - * @scp_int1_mask: SCP_INT1_MASK desired settings * @quirks: bitmask identifying deltas from the MIPI specification + * @sdca_interrupt_register_list: indicates which sets of SDCA interrupt status + * and masks are supported + * @commit_register_supported: is PCP_Commit register supported + * @scp_int1_mask: SCP_INT1_MASK desired settings * @clock_reg_supported: the Peripheral implements the clock base and scale * registers introduced with the SoundWire 1.2 specification. SDCA devices * do not need to set this boolean property as the registers are required. * @use_domain_irq: call actual IRQ handler on slave, as well as callback */ struct sdw_slave_prop { + struct sdw_dp0_prop *dp0_prop; + struct sdw_dpn_prop *src_dpn_prop; + struct sdw_dpn_prop *sink_dpn_prop; u32 mipi_revision; bool wake_capable; bool test_mode_capable; @@ -388,16 +376,15 @@ struct sdw_slave_prop { bool high_PHY_capable; bool paging_support; bool bank_delay_support; - enum sdw_p15_behave p15_behave; bool lane_control_support; + enum sdw_p15_behave p15_behave; u32 master_count; u32 source_ports; u32 sink_ports; - struct sdw_dp0_prop *dp0_prop; - struct sdw_dpn_prop *src_dpn_prop; - struct sdw_dpn_prop *sink_dpn_prop; - u8 scp_int1_mask; u32 quirks; + u32 sdca_interrupt_register_list; + u8 commit_register_supported; + u8 scp_int1_mask; bool clock_reg_supported; bool use_domain_irq; }; @@ -406,13 +393,14 @@ struct sdw_slave_prop { /** * struct sdw_master_prop - Master properties + * @clk_gears: Clock gears supported + * @clk_freq: Clock frequencies supported, in Hz + * @quirks: bitmask identifying optional behavior beyond the scope of the MIPI specification * @revision: MIPI spec version of the implementation * @clk_stop_modes: Bitmap, bit N set when clock-stop-modeN supported * @max_clk_freq: Maximum Bus clock frequency, in Hz * @num_clk_gears: Number of clock gears supported - * @clk_gears: Clock gears supported * @num_clk_freq: Number of clock frequencies supported, in Hz - * @clk_freq: Clock frequencies supported, in Hz * @default_frame_rate: Controller default Frame rate, in Hz * @default_row: Number of rows * @default_col: Number of columns @@ -421,24 +409,23 @@ struct sdw_slave_prop { * command * @mclk_freq: clock reference passed to SoundWire Master, in Hz. 
* @hw_disabled: if true, the Master is not functional, typically due to pin-mux - * @quirks: bitmask identifying optional behavior beyond the scope of the MIPI specification */ struct sdw_master_prop { + u32 *clk_gears; + u32 *clk_freq; + u64 quirks; u32 revision; u32 clk_stop_modes; u32 max_clk_freq; u32 num_clk_gears; - u32 *clk_gears; u32 num_clk_freq; - u32 *clk_freq; u32 default_frame_rate; u32 default_row; u32 default_col; - bool dynamic_frame; u32 err_threshold; u32 mclk_freq; + bool dynamic_frame; bool hw_disabled; - u64 quirks; }; /* Definitions for Master quirks */ @@ -488,9 +475,9 @@ struct sdw_slave_id { __u8 sdw_version:4; }; -struct sdw_extended_slave_id { - int link_id; - struct sdw_slave_id id; +struct sdw_peripherals { + int num_peripherals; + struct sdw_slave *array[]; }; /* @@ -630,7 +617,6 @@ struct sdw_slave_ops { int (*clk_stop)(struct sdw_slave *slave, enum sdw_clk_stop_mode mode, enum sdw_clk_stop_type type); - }; /** @@ -663,6 +649,7 @@ struct sdw_slave_ops { * @is_mockup_device: status flag used to squelch errors in the command/control * protocol for SoundWire mockup devices * @sdw_dev_lock: mutex used to protect callbacks/remove races + * @sdca_data: structure containing all device data for SDCA helpers */ struct sdw_slave { struct sdw_slave_id id; @@ -686,6 +673,7 @@ struct sdw_slave { bool first_interrupt_done; bool is_mockup_device; struct mutex sdw_dev_lock; /* protect callbacks/remove races */ + struct sdca_device_data sdca_data; }; #define dev_to_sdw_dev(_dev) container_of(_dev, struct sdw_slave, dev) @@ -704,8 +692,7 @@ struct sdw_master_device { container_of(d, struct sdw_master_device, dev) struct sdw_driver { - int (*probe)(struct sdw_slave *sdw, - const struct sdw_device_id *id); + int (*probe)(struct sdw_slave *sdw, const struct sdw_device_id *id); int (*remove)(struct sdw_slave *sdw); void (*shutdown)(struct sdw_slave *sdw); @@ -724,7 +711,7 @@ struct sdw_driver { SDW_SLAVE_ENTRY_EXT((_mfg_id), (_part_id), 0, 0, (_drv_data)) int sdw_handle_slave_status(struct sdw_bus *bus, - enum sdw_slave_status status[]); + enum sdw_slave_status status[]); /* * SDW master structures and APIs @@ -806,29 +793,28 @@ struct sdw_enable_ch { */ struct sdw_master_port_ops { int (*dpn_set_port_params)(struct sdw_bus *bus, - struct sdw_port_params *port_params, - unsigned int bank); + struct sdw_port_params *port_params, + unsigned int bank); int (*dpn_set_port_transport_params)(struct sdw_bus *bus, - struct sdw_transport_params *transport_params, - enum sdw_reg_bank bank); - int (*dpn_port_prep)(struct sdw_bus *bus, - struct sdw_prepare_ch *prepare_ch); + struct sdw_transport_params *transport_params, + enum sdw_reg_bank bank); + int (*dpn_port_prep)(struct sdw_bus *bus, struct sdw_prepare_ch *prepare_ch); int (*dpn_port_enable_ch)(struct sdw_bus *bus, - struct sdw_enable_ch *enable_ch, unsigned int bank); + struct sdw_enable_ch *enable_ch, unsigned int bank); }; struct sdw_msg; /** - * struct sdw_defer - SDW deffered message - * @length: message length + * struct sdw_defer - SDW deferred message * @complete: message completion * @msg: SDW message + * @length: message length */ struct sdw_defer { + struct sdw_msg *msg; int length; struct completion complete; - struct sdw_msg *msg; }; /** @@ -849,14 +835,11 @@ struct sdw_defer { */ struct sdw_master_ops { int (*read_prop)(struct sdw_bus *bus); - u64 (*override_adr) - (struct sdw_bus *bus, u64 addr); - enum sdw_command_response (*xfer_msg) - (struct sdw_bus *bus, struct sdw_msg *msg); - enum sdw_command_response 
(*xfer_msg_defer) - (struct sdw_bus *bus); + u64 (*override_adr)(struct sdw_bus *bus, u64 addr); + enum sdw_command_response (*xfer_msg)(struct sdw_bus *bus, struct sdw_msg *msg); + enum sdw_command_response (*xfer_msg_defer)(struct sdw_bus *bus); int (*set_bus_conf)(struct sdw_bus *bus, - struct sdw_bus_params *params); + struct sdw_bus_params *params); int (*pre_bank_switch)(struct sdw_bus *bus); int (*post_bank_switch)(struct sdw_bus *bus); u32 (*read_ping_status)(struct sdw_bus *bus); @@ -871,68 +854,71 @@ struct sdw_master_ops { * struct sdw_bus - SoundWire bus * @dev: Shortcut to &bus->md->dev to avoid changing the entire code. * @md: Master device - * @controller_id: system-unique controller ID. If set to -1, the bus @id will be used. - * @link_id: Link id number, can be 0 to N, unique for each Controller - * @id: bus system-wide unique id - * @slaves: list of Slaves on this bus - * @assigned: Bitmap for Slave device numbers. - * Bit set implies used number, bit clear implies unused number. + * @bus_lock_key: bus lock key associated to @bus_lock * @bus_lock: bus lock + * @slaves: list of Slaves on this bus + * @msg_lock_key: message lock key associated to @msg_lock * @msg_lock: message lock - * @compute_params: points to Bus resource management implementation - * @ops: Master callback ops - * @port_ops: Master port callback ops - * @params: Current bus parameters - * @prop: Master properties - * @vendor_specific_prop: pointer to non-standard properties * @m_rt_list: List of Master instance of all stream(s) running on Bus. This * is used to compute and program bus bandwidth, clock, frame shape, * transport and port parameters - * @debugfs: Bus debugfs - * @domain: IRQ domain * @defer_msg: Defer message - * @clk_stop_timeout: Clock stop timeout computed - * @bank_switch_timeout: Bank switch timeout computed - * @multi_link: Store bus property that indicates if multi links - * are supported. This flag is populated by drivers after reading - * appropriate firmware (ACPI/DT). + * @params: Current bus parameters + * @stream_refcount: number of streams currently using this bus + * @ops: Master callback ops + * @port_ops: Master port callback ops + * @prop: Master properties + * @vendor_specific_prop: pointer to non-standard properties * @hw_sync_min_links: Number of links used by a stream above which * hardware-based synchronization is required. This value is only * meaningful if multi_link is set. If set to 1, hardware-based * synchronization will be used even if a stream only uses a single * SoundWire segment. - * @stream_refcount: number of streams currently using this bus + * @controller_id: system-unique controller ID. If set to -1, the bus @id will be used. + * @link_id: Link id number, can be 0 to N, unique for each Controller + * @id: bus system-wide unique id + * @compute_params: points to Bus resource management implementation + * @assigned: Bitmap for Slave device numbers. + * Bit set implies used number, bit clear implies unused number. + * @clk_stop_timeout: Clock stop timeout computed + * @bank_switch_timeout: Bank switch timeout computed + * @domain: IRQ domain + * @irq_chip: IRQ chip + * @debugfs: Bus debugfs (optional) + * @multi_link: Store bus property that indicates if multi links + * are supported. This flag is populated by drivers after reading + * appropriate firmware (ACPI/DT). 
*/ struct sdw_bus { struct device *dev; struct sdw_master_device *md; - int controller_id; - unsigned int link_id; - int id; - struct list_head slaves; - DECLARE_BITMAP(assigned, SDW_MAX_DEVICES); - struct mutex bus_lock; struct lock_class_key bus_lock_key; - struct mutex msg_lock; + struct mutex bus_lock; + struct list_head slaves; struct lock_class_key msg_lock_key; - int (*compute_params)(struct sdw_bus *bus); + struct mutex msg_lock; + struct list_head m_rt_list; + struct sdw_defer defer_msg; + struct sdw_bus_params params; + int stream_refcount; const struct sdw_master_ops *ops; const struct sdw_master_port_ops *port_ops; - struct sdw_bus_params params; struct sdw_master_prop prop; void *vendor_specific_prop; - struct list_head m_rt_list; + int hw_sync_min_links; + int controller_id; + unsigned int link_id; + int id; + int (*compute_params)(struct sdw_bus *bus); + DECLARE_BITMAP(assigned, SDW_MAX_DEVICES); + unsigned int clk_stop_timeout; + u32 bank_switch_timeout; + struct irq_chip irq_chip; + struct irq_domain *domain; #ifdef CONFIG_DEBUG_FS struct dentry *debugfs; #endif - struct irq_chip irq_chip; - struct irq_domain *domain; - struct sdw_defer defer_msg; - unsigned int clk_stop_timeout; - u32 bank_switch_timeout; bool multi_link; - int hw_sync_min_links; - int stream_refcount; }; int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent, @@ -1010,18 +996,18 @@ struct sdw_stream_params { * @params: Stream parameters * @state: Current state of the stream * @type: Stream type PCM or PDM + * @m_rt_count: Count of Master runtime(s) in this stream * @master_list: List of Master runtime(s) in this stream. * master_list can contain only one m_rt per Master instance * for a stream - * @m_rt_count: Count of Master runtime(s) in this stream */ struct sdw_stream_runtime { const char *name; struct sdw_stream_params params; enum sdw_stream_state state; enum sdw_stream_type type; - struct list_head master_list; int m_rt_count; + struct list_head master_list; }; struct sdw_stream_runtime *sdw_alloc_stream(const char *stream_name); @@ -1030,12 +1016,12 @@ void sdw_release_stream(struct sdw_stream_runtime *stream); int sdw_compute_params(struct sdw_bus *bus); int sdw_stream_add_master(struct sdw_bus *bus, - struct sdw_stream_config *stream_config, - const struct sdw_port_config *port_config, - unsigned int num_ports, - struct sdw_stream_runtime *stream); + struct sdw_stream_config *stream_config, + const struct sdw_port_config *port_config, + unsigned int num_ports, + struct sdw_stream_runtime *stream); int sdw_stream_remove_master(struct sdw_bus *bus, - struct sdw_stream_runtime *stream); + struct sdw_stream_runtime *stream); int sdw_startup_stream(void *sdw_substream); int sdw_prepare_stream(struct sdw_stream_runtime *stream); int sdw_enable_stream(struct sdw_stream_runtime *stream); diff --git a/include/linux/soundwire/sdw_amd.h b/include/linux/soundwire/sdw_amd.h index 28a4eb77717f..799f8578137b 100644 --- a/include/linux/soundwire/sdw_amd.h +++ b/include/linux/soundwire/sdw_amd.h @@ -27,9 +27,11 @@ #define ACP_SDW0 0 #define ACP_SDW1 1 #define AMD_SDW_MAX_MANAGER_COUNT 2 +#define ACP63_PCI_REV_ID 0x63 struct acp_sdw_pdata { u16 instance; + u32 acp_rev; /* mutex to protect acp common register access */ struct mutex *acp_sdw_lock; }; @@ -66,6 +68,7 @@ struct sdw_amd_dai_runtime { * @instance: SoundWire manager instance * @quirks: SoundWire manager quirks * @wake_en_mask: wake enable mask per SoundWire manager + * @acp_rev: acp pci device revision id * @clk_stopped: flag set to true when 
clock is stopped * @power_mode_mask: flag interprets amd SoundWire manager power mode * @dai_runtime_array: dai runtime array @@ -94,6 +97,7 @@ struct amd_sdw_manager { u32 quirks; u32 wake_en_mask; u32 power_mode_mask; + u32 acp_rev; bool clk_stopped; struct sdw_amd_dai_runtime **dai_runtime_array; @@ -115,25 +119,23 @@ struct sdw_amd_acpi_info { * struct sdw_amd_ctx - context allocated by the controller driver probe * * @count: link count - * @num_slaves: total number of devices exposed across all enabled links * @link_mask: bit-wise mask listing SoundWire links reported by the * Controller - * @ids: array of slave_id, representing Slaves exposed across all enabled - * links * @pdev: platform device structure + * @peripherals: array representing Peripherals exposed across all enabled links */ struct sdw_amd_ctx { int count; - int num_slaves; u32 link_mask; - struct sdw_extended_slave_id *ids; struct platform_device *pdev[AMD_SDW_MAX_MANAGER_COUNT]; + struct sdw_peripherals *peripherals; }; /** * struct sdw_amd_res - Soundwire AMD global resource structure, * typically populated by the DSP driver/Legacy driver * + * @acp_rev: acp pci device revision id * @addr: acp pci device resource start address * @reg_range: ACP register range * @link_mask: bit-wise mask listing links selected by the DSP driver/ @@ -146,6 +148,7 @@ struct sdw_amd_ctx { * @acp_lock: mutex protecting acp common registers access */ struct sdw_amd_res { + u32 acp_rev; u32 addr; u32 reg_range; u32 link_mask; diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h index 734dc1fa3b5b..580086417e4b 100644 --- a/include/linux/soundwire/sdw_intel.h +++ b/include/linux/soundwire/sdw_intel.h @@ -4,6 +4,7 @@ #ifndef __SDW_INTEL_H #define __SDW_INTEL_H +#include <linux/acpi.h> #include <linux/irqreturn.h> #include <linux/soundwire/sdw.h> @@ -286,31 +287,28 @@ struct hdac_bus; * hardware capabilities after all power dependencies are settled. * @link_mask: bit-wise mask listing SoundWire links reported by the * Controller - * @num_slaves: total number of devices exposed across all enabled links * @handle: ACPI parent handle * @ldev: information for each link (controller-specific and kept * opaque here) - * @ids: array of slave_id, representing Slaves exposed across all enabled - * links * @link_list: list to handle interrupts across all links * @shim_lock: mutex to handle concurrent rmw access to shared SHIM registers. * @shim_mask: flags to track initialization of SHIM shared registers * @shim_base: sdw shim base. * @alh_base: sdw alh base. 
+ * @peripherals: array representing Peripherals exposed across all enabled links */ struct sdw_intel_ctx { int count; void __iomem *mmio_base; u32 link_mask; - int num_slaves; acpi_handle handle; struct sdw_intel_link_dev **ldev; - struct sdw_extended_slave_id *ids; struct list_head link_list; struct mutex shim_lock; /* lock for access to shared SHIM registers */ u32 shim_mask; u32 shim_base; u32 alh_base; + struct sdw_peripherals *peripherals; }; /** diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h index f866d5c8ed32..c46d2b8029be 100644 --- a/include/linux/spi/spi-mem.h +++ b/include/linux/spi/spi-mem.h @@ -90,6 +90,8 @@ enum spi_mem_data_dir { * @data.buswidth: number of IO lanes used to send/receive the data * @data.dtr: whether the data should be sent in DTR mode or not * @data.ecc: whether error correction is required or not + * @data.swap16: whether the byte order of 16-bit words is swapped when read + * or written in Octal DTR mode compared to STR mode. * @data.dir: direction of the transfer * @data.nbytes: number of data bytes to send/receive. Can be zero if the * operation does not involve transferring data @@ -124,7 +126,8 @@ struct spi_mem_op { u8 buswidth; u8 dtr : 1; u8 ecc : 1; - u8 __pad : 6; + u8 swap16 : 1; + u8 __pad : 5; enum spi_mem_data_dir dir; unsigned int nbytes; union { @@ -297,10 +300,13 @@ struct spi_controller_mem_ops { * struct spi_controller_mem_caps - SPI memory controller capabilities * @dtr: Supports DTR operations * @ecc: Supports operations with error correction + * @swap16: Supports swapping bytes on a 16 bit boundary when configured in + * Octal DTR */ struct spi_controller_mem_caps { bool dtr; bool ecc; + bool swap16; }; #define spi_mem_controller_is_capable(ctlr, cap) \ diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 4b95663163e0..8497f4747e24 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -824,21 +824,6 @@ void spi_take_timestamp_post(struct spi_controller *ctlr, extern struct spi_controller *__spi_alloc_controller(struct device *host, unsigned int size, bool slave); -static inline struct spi_controller *spi_alloc_master(struct device *host, - unsigned int size) -{ - return __spi_alloc_controller(host, size, false); -} - -static inline struct spi_controller *spi_alloc_slave(struct device *host, - unsigned int size) -{ - if (!IS_ENABLED(CONFIG_SPI_SLAVE)) - return NULL; - - return __spi_alloc_controller(host, size, true); -} - static inline struct spi_controller *spi_alloc_host(struct device *dev, unsigned int size) { @@ -858,21 +843,6 @@ struct spi_controller *__devm_spi_alloc_controller(struct device *dev, unsigned int size, bool slave); -static inline struct spi_controller *devm_spi_alloc_master(struct device *dev, - unsigned int size) -{ - return __devm_spi_alloc_controller(dev, size, false); -} - -static inline struct spi_controller *devm_spi_alloc_slave(struct device *dev, - unsigned int size) -{ - if (!IS_ENABLED(CONFIG_SPI_SLAVE)) - return NULL; - - return __devm_spi_alloc_controller(dev, size, true); -} - static inline struct spi_controller *devm_spi_alloc_host(struct device *dev, unsigned int size) { diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 89eb6f4c659c..9ecb0ab504e3 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -2,7 +2,7 @@ #define __LINUX_SPINLOCK_API_SMP_H #ifndef __LINUX_INSIDE_SPINLOCK_H -# error "please don't include this file directly" +# error "Please do not include this file 
directly." #endif /* diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h index 61c49b16f69a..f6499c37157d 100644 --- a/include/linux/spinlock_rt.h +++ b/include/linux/spinlock_rt.h @@ -16,26 +16,25 @@ static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name, } #endif -#define spin_lock_init(slock) \ +#define __spin_lock_init(slock, name, key, percpu) \ do { \ - static struct lock_class_key __key; \ - \ rt_mutex_base_init(&(slock)->lock); \ - __rt_spin_lock_init(slock, #slock, &__key, false); \ + __rt_spin_lock_init(slock, name, key, percpu); \ } while (0) -#define local_spin_lock_init(slock) \ +#define _spin_lock_init(slock, percpu) \ do { \ static struct lock_class_key __key; \ - \ - rt_mutex_base_init(&(slock)->lock); \ - __rt_spin_lock_init(slock, #slock, &__key, true); \ + __spin_lock_init(slock, #slock, &__key, percpu); \ } while (0) -extern void rt_spin_lock(spinlock_t *lock); -extern void rt_spin_lock_nested(spinlock_t *lock, int subclass); -extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock); -extern void rt_spin_unlock(spinlock_t *lock); +#define spin_lock_init(slock) _spin_lock_init(slock, false) +#define local_spin_lock_init(slock) _spin_lock_init(slock, true) + +extern void rt_spin_lock(spinlock_t *lock) __acquires(lock); +extern void rt_spin_lock_nested(spinlock_t *lock, int subclass) __acquires(lock); +extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock) __acquires(lock); +extern void rt_spin_unlock(spinlock_t *lock) __releases(lock); extern void rt_spin_lock_unlock(spinlock_t *lock); extern int rt_spin_trylock_bh(spinlock_t *lock); extern int rt_spin_trylock(spinlock_t *lock); @@ -132,7 +131,7 @@ static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, #define spin_trylock_irq(lock) \ __cond_lock(lock, rt_spin_trylock(lock)) -#define __spin_trylock_irqsave(lock, flags) \ +#define spin_trylock_irqsave(lock, flags) \ ({ \ int __locked; \ \ @@ -142,9 +141,6 @@ static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, __locked; \ }) -#define spin_trylock_irqsave(lock, flags) \ - __cond_lock(lock, __spin_trylock_irqsave(lock, flags)) - #define spin_is_contended(lock) (((void)(lock), 0)) static inline int spin_is_locked(spinlock_t *lock) diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h index 7f86a2016ac5..fc4e2d017c20 100644 --- a/include/linux/spinlock_types_up.h +++ b/include/linux/spinlock_types_up.h @@ -2,7 +2,7 @@ #define __LINUX_SPINLOCK_TYPES_UP_H #ifndef __LINUX_SPINLOCK_TYPES_RAW_H -# error "please don't include this file directly" +# error "Please do not include this file directly." #endif /* diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index c87204247592..1e84e71ca495 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h @@ -2,7 +2,7 @@ #define __LINUX_SPINLOCK_UP_H #ifndef __LINUX_INSIDE_SPINLOCK_H -# error "please don't include this file directly" +# error "Please do not include this file directly." 
#endif #include <asm/processor.h> /* for cpu_relax() */ diff --git a/include/linux/srcu.h b/include/linux/srcu.h index 835bbb2d1f88..08339eb8a01c 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -56,6 +56,13 @@ void call_srcu(struct srcu_struct *ssp, struct rcu_head *head, void cleanup_srcu_struct(struct srcu_struct *ssp); int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp); void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp); +#ifdef CONFIG_TINY_SRCU +#define __srcu_read_lock_lite __srcu_read_lock +#define __srcu_read_unlock_lite __srcu_read_unlock +#else // #ifdef CONFIG_TINY_SRCU +int __srcu_read_lock_lite(struct srcu_struct *ssp) __acquires(ssp); +void __srcu_read_unlock_lite(struct srcu_struct *ssp, int idx) __releases(ssp); +#endif // #else // #ifdef CONFIG_TINY_SRCU void synchronize_srcu(struct srcu_struct *ssp); #define SRCU_GET_STATE_COMPLETED 0x1 @@ -176,17 +183,6 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp) #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ -#define SRCU_NMI_UNKNOWN 0x0 -#define SRCU_NMI_UNSAFE 0x1 -#define SRCU_NMI_SAFE 0x2 - -#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_TREE_SRCU) -void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe); -#else -static inline void srcu_check_nmi_safety(struct srcu_struct *ssp, - bool nmi_safe) { } -#endif - /** * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing @@ -236,33 +232,67 @@ static inline void srcu_check_nmi_safety(struct srcu_struct *ssp, * a mutex that is held elsewhere while calling synchronize_srcu() or * synchronize_srcu_expedited(). * - * Note that srcu_read_lock() and the matching srcu_read_unlock() must - * occur in the same context, for example, it is illegal to invoke - * srcu_read_unlock() in an irq handler if the matching srcu_read_lock() - * was invoked in process context. + * The return value from srcu_read_lock() must be passed unaltered + * to the matching srcu_read_unlock(). Note that srcu_read_lock() and + * the matching srcu_read_unlock() must occur in the same context, for + * example, it is illegal to invoke srcu_read_unlock() in an irq handler + * if the matching srcu_read_lock() was invoked in process context. Or, + * for that matter to invoke srcu_read_unlock() from one task and the + * matching srcu_read_lock() from another. */ static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp) { int retval; - srcu_check_nmi_safety(ssp, false); + srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL); retval = __srcu_read_lock(ssp); srcu_lock_acquire(&ssp->dep_map); return retval; } /** + * srcu_read_lock_lite - register a new reader for an SRCU-protected structure. + * @ssp: srcu_struct in which to register the new reader. + * + * Enter an SRCU read-side critical section, but for a light-weight + * smp_mb()-free reader. See srcu_read_lock() for more information. + * + * If srcu_read_lock_lite() is ever used on an srcu_struct structure, + * then none of the other flavors may be used, whether before, during, + * or after. Note that grace-period auto-expediting is disabled for _lite + * srcu_struct structures because auto-expedited grace periods invoke + * synchronize_rcu_expedited(), IPIs and all. + * + * Note that srcu_read_lock_lite() can be invoked only from those contexts + * where RCU is watching, that is, from contexts where it would be legal + * to invoke rcu_read_lock(). Otherwise, lockdep will complain. 
+ */ +static inline int srcu_read_lock_lite(struct srcu_struct *ssp) __acquires(ssp) +{ + int retval; + + srcu_check_read_flavor_lite(ssp); + retval = __srcu_read_lock_lite(ssp); + rcu_try_lock_acquire(&ssp->dep_map); + return retval; +} + +/** * srcu_read_lock_nmisafe - register a new reader for an SRCU-protected structure. * @ssp: srcu_struct in which to register the new reader. * * Enter an SRCU read-side critical section, but in an NMI-safe manner. * See srcu_read_lock() for more information. + * + * If srcu_read_lock_nmisafe() is ever used on an srcu_struct structure, + * then none of the other flavors may be used, whether before, during, + * or after. */ static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp) { int retval; - srcu_check_nmi_safety(ssp, true); + srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NMI); retval = __srcu_read_lock_nmisafe(ssp); rcu_try_lock_acquire(&ssp->dep_map); return retval; @@ -274,7 +304,7 @@ srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp) { int retval; - srcu_check_nmi_safety(ssp, false); + srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL); retval = __srcu_read_lock(ssp); return retval; } @@ -303,7 +333,7 @@ srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp) static inline int srcu_down_read(struct srcu_struct *ssp) __acquires(ssp) { WARN_ON_ONCE(in_nmi()); - srcu_check_nmi_safety(ssp, false); + srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL); return __srcu_read_lock(ssp); } @@ -318,12 +348,28 @@ static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp) { WARN_ON_ONCE(idx & ~0x1); - srcu_check_nmi_safety(ssp, false); + srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL); srcu_lock_release(&ssp->dep_map); __srcu_read_unlock(ssp, idx); } /** + * srcu_read_unlock_lite - unregister a old reader from an SRCU-protected structure. + * @ssp: srcu_struct in which to unregister the old reader. + * @idx: return value from corresponding srcu_read_lock(). + * + * Exit a light-weight SRCU read-side critical section. + */ +static inline void srcu_read_unlock_lite(struct srcu_struct *ssp, int idx) + __releases(ssp) +{ + WARN_ON_ONCE(idx & ~0x1); + srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_LITE); + srcu_lock_release(&ssp->dep_map); + __srcu_read_unlock_lite(ssp, idx); +} + +/** * srcu_read_unlock_nmisafe - unregister a old reader from an SRCU-protected structure. * @ssp: srcu_struct in which to unregister the old reader. * @idx: return value from corresponding srcu_read_lock(). 
@@ -334,7 +380,7 @@ static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases(ssp) { WARN_ON_ONCE(idx & ~0x1); - srcu_check_nmi_safety(ssp, true); + srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NMI); rcu_lock_release(&ssp->dep_map); __srcu_read_unlock_nmisafe(ssp, idx); } @@ -343,7 +389,7 @@ static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) static inline notrace void srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp) { - srcu_check_nmi_safety(ssp, false); + srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL); __srcu_read_unlock(ssp, idx); } @@ -360,7 +406,7 @@ static inline void srcu_up_read(struct srcu_struct *ssp, int idx) { WARN_ON_ONCE(idx & ~0x1); WARN_ON_ONCE(in_nmi()); - srcu_check_nmi_safety(ssp, false); + srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL); __srcu_read_unlock(ssp, idx); } diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h index 4d96bbdb45f0..1321da803274 100644 --- a/include/linux/srcutiny.h +++ b/include/linux/srcutiny.h @@ -81,6 +81,9 @@ static inline void srcu_barrier(struct srcu_struct *ssp) synchronize_srcu(ssp); } +#define srcu_check_read_flavor(ssp, read_flavor) do { } while (0) +#define srcu_check_read_flavor_lite(ssp) do { } while (0) + /* Defined here to avoid size increase for non-torture kernels. */ static inline void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf) diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h index ed57598394de..490aeecc6bb4 100644 --- a/include/linux/srcutree.h +++ b/include/linux/srcutree.h @@ -25,7 +25,7 @@ struct srcu_data { /* Read-side state. */ atomic_long_t srcu_lock_count[2]; /* Locks per CPU. */ atomic_long_t srcu_unlock_count[2]; /* Unlocks per CPU. */ - int srcu_nmi_safety; /* NMI-safe srcu_struct structure? */ + int srcu_reader_flavor; /* Reader flavor for srcu_struct structure? */ /* Update-side state. */ spinlock_t __private lock ____cacheline_internodealigned_in_smp; @@ -43,6 +43,11 @@ struct srcu_data { struct srcu_struct *ssp; }; +/* Values for ->srcu_reader_flavor. */ +#define SRCU_READ_FLAVOR_NORMAL 0x1 // srcu_read_lock(). +#define SRCU_READ_FLAVOR_NMI 0x2 // srcu_read_lock_nmisafe(). +#define SRCU_READ_FLAVOR_LITE 0x4 // srcu_read_lock_lite(). + /* * Node in SRCU combining tree, similar in function to rcu_data. */ @@ -204,4 +209,64 @@ void synchronize_srcu_expedited(struct srcu_struct *ssp); void srcu_barrier(struct srcu_struct *ssp); void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf); +/* + * Counts the new reader in the appropriate per-CPU element of the + * srcu_struct. Returns an index that must be passed to the matching + * srcu_read_unlock_lite(). + * + * Note that this_cpu_inc() is an RCU read-side critical section either + * because it disables interrupts, because it is a single instruction, + * or because it is a read-modify-write atomic operation, depending on + * the whims of the architecture. + */ +static inline int __srcu_read_lock_lite(struct srcu_struct *ssp) +{ + int idx; + + RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_lock_lite()."); + idx = READ_ONCE(ssp->srcu_idx) & 0x1; + this_cpu_inc(ssp->sda->srcu_lock_count[idx].counter); /* Y */ + barrier(); /* Avoid leaking the critical section. */ + return idx; +} + +/* + * Removes the count for the old reader from the appropriate + * per-CPU element of the srcu_struct. 
Note that this may well be a + * different CPU than that which was incremented by the corresponding + * srcu_read_lock_lite(), but it must be within the same task. + * + * Note that this_cpu_inc() is an RCU read-side critical section either + * because it disables interrupts, because it is a single instruction, + * or because it is a read-modify-write atomic operation, depending on + * the whims of the architecture. + */ +static inline void __srcu_read_unlock_lite(struct srcu_struct *ssp, int idx) +{ + barrier(); /* Avoid leaking the critical section. */ + this_cpu_inc(ssp->sda->srcu_unlock_count[idx].counter); /* Z */ + RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_unlock_lite()."); +} + +void __srcu_check_read_flavor(struct srcu_struct *ssp, int read_flavor); + +// Record _lite() usage even for CONFIG_PROVE_RCU=n kernels. +static inline void srcu_check_read_flavor_lite(struct srcu_struct *ssp) +{ + struct srcu_data *sdp = raw_cpu_ptr(ssp->sda); + + if (likely(READ_ONCE(sdp->srcu_reader_flavor) & SRCU_READ_FLAVOR_LITE)) + return; + + // Note that the cmpxchg() in srcu_check_read_flavor() is fully ordered. + __srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_LITE); +} + +// Record non-_lite() usage only for CONFIG_PROVE_RCU=y kernels. +static inline void srcu_check_read_flavor(struct srcu_struct *ssp, int read_flavor) +{ + if (IS_ENABLED(CONFIG_PROVE_RCU)) + __srcu_check_read_flavor(ssp, read_flavor); +} + #endif diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h index e9ec32fb97d4..2cc21ffcdaf9 100644 --- a/include/linux/stackdepot.h +++ b/include/linux/stackdepot.h @@ -147,7 +147,7 @@ static inline int stack_depot_early_init(void) { return 0; } * If the provided stack trace comes from the interrupt context, only the part * up to the interrupt entry is saved. * - * Context: Any context, but setting STACK_DEPOT_FLAG_CAN_ALLOC is required if + * Context: Any context, but unsetting STACK_DEPOT_FLAG_CAN_ALLOC is required if * alloc_pages() cannot be used from the current context. Currently * this is the case for contexts where neither %GFP_ATOMIC nor * %GFP_NOWAIT can be used (NMI, raw_spin_lock). 
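Returning to the SRCU hunks above for a moment: the sketch below is not part of the patch, it only illustrates how the new _lite reader flavor appears intended to be used. The names my_srcu, my_ptr, my_data, my_read_value and my_replace are invented for illustration; DEFINE_STATIC_SRCU(), srcu_dereference(), rcu_replace_pointer(), synchronize_srcu() and kfree() are existing kernel APIs used only to make the sketch self-contained. The key points taken from the kernel-doc above are that the index from srcu_read_lock_lite() is passed back unaltered to srcu_read_unlock_lite() in the same task, and that an srcu_struct must stick to a single reader flavor.

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/srcu.h>

struct my_data {
        int value;
};

DEFINE_STATIC_SRCU(my_srcu);
static struct my_data __rcu *my_ptr;

/* Light-weight reader: no smp_mb() on the lock/unlock fast path, and only
 * legal from contexts where RCU is watching (i.e. where rcu_read_lock()
 * would be legal), per the kernel-doc above.
 */
static int my_read_value(void)
{
        struct my_data *p;
        int idx, val = -ENOENT;

        idx = srcu_read_lock_lite(&my_srcu);
        p = srcu_dereference(my_ptr, &my_srcu);
        if (p)
                val = p->value;
        /* Pass the index back unaltered, from the same task. */
        srcu_read_unlock_lite(&my_srcu, idx);
        return val;
}

/* The update side is unchanged: synchronize_srcu() still waits for _lite
 * readers, it just no longer auto-expedites grace periods for them.
 */
static void my_replace(struct my_data *new)
{
        struct my_data *old;

        /* Lockdep condition elided (true) to keep the sketch short. */
        old = rcu_replace_pointer(my_ptr, new, true);
        synchronize_srcu(&my_srcu);
        kfree(old);
}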
@@ -156,7 +156,7 @@ static inline int stack_depot_early_init(void) { return 0; } */ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries, unsigned int nr_entries, - gfp_t gfp_flags, + gfp_t alloc_flags, depot_flags_t depot_flags); /** @@ -175,7 +175,7 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries, * Return: Handle of the stack trace stored in depot, 0 on failure */ depot_stack_handle_t stack_depot_save(unsigned long *entries, - unsigned int nr_entries, gfp_t gfp_flags); + unsigned int nr_entries, gfp_t alloc_flags); /** * __stack_depot_get_stack_record - Get a pointer to a stack_record struct diff --git a/include/linux/static_call.h b/include/linux/static_call.h index 141e6b176a1b..78a77a4ae0ea 100644 --- a/include/linux/static_call.h +++ b/include/linux/static_call.h @@ -160,6 +160,8 @@ extern void arch_static_call_transform(void *site, void *tramp, void *func, bool #ifdef CONFIG_HAVE_STATIC_CALL_INLINE +extern int static_call_initialized; + extern int __init static_call_init(void); extern void static_call_force_reinit(void); @@ -225,6 +227,8 @@ extern long __static_call_return0(void); #elif defined(CONFIG_HAVE_STATIC_CALL) +#define static_call_initialized 0 + static inline int static_call_init(void) { return 0; } #define DEFINE_STATIC_CALL(name, _func) \ @@ -281,6 +285,8 @@ extern long __static_call_return0(void); #else /* Generic implementation */ +#define static_call_initialized 0 + static inline int static_call_init(void) { return 0; } static inline long __static_call_return0(void) diff --git a/include/linux/string.h b/include/linux/string.h index 0dd27afcfaf7..493ac4862c77 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -335,7 +335,6 @@ int __sysfs_match_string(const char * const *array, size_t n, const char *s); #ifdef CONFIG_BINARY_PRINTF int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args); int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf); -int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) 
__printf(3, 4); #endif extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 5f775e104f9a..a2ab813a9800 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -681,6 +681,27 @@ xdr_stream_decode_u32(struct xdr_stream *xdr, __u32 *ptr) } /** + * xdr_stream_decode_be32 - Decode a big-endian 32-bit integer + * @xdr: pointer to xdr_stream + * @ptr: location to store integer + * + * Return values: + * %0 on success + * %-EBADMSG on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_decode_be32(struct xdr_stream *xdr, __be32 *ptr) +{ + const size_t count = sizeof(*ptr); + __be32 *p = xdr_inline_decode(xdr, count); + + if (unlikely(!p)) + return -EBADMSG; + *ptr = *p; + return 0; +} + +/** * xdr_stream_decode_u64 - Decode a 64-bit integer * @xdr: pointer to xdr_stream * @ptr: location to store 64-bit integer diff --git a/include/linux/sunrpc/xdrgen/_defs.h b/include/linux/sunrpc/xdrgen/_defs.h index be9e62371758..20c7270aa64d 100644 --- a/include/linux/sunrpc/xdrgen/_defs.h +++ b/include/linux/sunrpc/xdrgen/_defs.h @@ -23,4 +23,13 @@ typedef struct { u8 *data; } opaque; +#define XDR_void (0) +#define XDR_bool (1) +#define XDR_int (1) +#define XDR_unsigned_int (1) +#define XDR_long (1) +#define XDR_unsigned_long (1) +#define XDR_hyper (2) +#define XDR_unsigned_hyper (2) + #endif /* _SUNRPC_XDRGEN__DEFS_H_ */ diff --git a/include/linux/swapops.h b/include/linux/swapops.h index cb468e418ea1..96f26e29fefe 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -426,9 +426,19 @@ typedef unsigned long pte_marker; * "Poisoned" here is meant in the very general sense of "future accesses are * invalid", instead of referring very specifically to hardware memory errors. * This marker is meant to represent any of various different causes of this. + * + * Note that, when encountered by the faulting logic, PTEs with this marker will + * result in VM_FAULT_HWPOISON and thus regardless trigger hardware memory error + * logic. */ #define PTE_MARKER_POISONED BIT(1) -#define PTE_MARKER_MASK (BIT(2) - 1) +/* + * Indicates that, on fault, this PTE will case a SIGSEGV signal to be + * sent. This means guard markers behave in effect as if the region were mapped + * PROT_NONE, rather than if they were a memory hole or equivalent. 
+ */ +#define PTE_MARKER_GUARD BIT(2) +#define PTE_MARKER_MASK (BIT(3) - 1) static inline swp_entry_t make_pte_marker_entry(pte_marker marker) { @@ -464,6 +474,18 @@ static inline int is_poisoned_swp_entry(swp_entry_t entry) { return is_pte_marker_entry(entry) && (pte_marker_get(entry) & PTE_MARKER_POISONED); + +} + +static inline swp_entry_t make_guard_swp_entry(void) +{ + return make_pte_marker_entry(PTE_MARKER_GUARD); +} + +static inline int is_guard_swp_entry(swp_entry_t entry) +{ + return is_pte_marker_entry(entry) && + (pte_marker_get(entry) & PTE_MARKER_GUARD); } /* diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 5758104921e6..c6333204d451 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -77,6 +77,7 @@ struct cachestat_range; struct cachestat; struct statmount; struct mnt_id_req; +struct xattr_args; #include <linux/types.h> #include <linux/aio_abi.h> @@ -338,23 +339,35 @@ asmlinkage long sys_io_uring_register(unsigned int fd, unsigned int op, void __user *arg, unsigned int nr_args); asmlinkage long sys_setxattr(const char __user *path, const char __user *name, const void __user *value, size_t size, int flags); +asmlinkage long sys_setxattrat(int dfd, const char __user *path, unsigned int at_flags, + const char __user *name, + const struct xattr_args __user *args, size_t size); asmlinkage long sys_lsetxattr(const char __user *path, const char __user *name, const void __user *value, size_t size, int flags); asmlinkage long sys_fsetxattr(int fd, const char __user *name, const void __user *value, size_t size, int flags); asmlinkage long sys_getxattr(const char __user *path, const char __user *name, void __user *value, size_t size); +asmlinkage long sys_getxattrat(int dfd, const char __user *path, unsigned int at_flags, + const char __user *name, + struct xattr_args __user *args, size_t size); asmlinkage long sys_lgetxattr(const char __user *path, const char __user *name, void __user *value, size_t size); asmlinkage long sys_fgetxattr(int fd, const char __user *name, void __user *value, size_t size); asmlinkage long sys_listxattr(const char __user *path, char __user *list, size_t size); +asmlinkage long sys_listxattrat(int dfd, const char __user *path, + unsigned int at_flags, + char __user *list, size_t size); asmlinkage long sys_llistxattr(const char __user *path, char __user *list, size_t size); asmlinkage long sys_flistxattr(int fd, char __user *list, size_t size); asmlinkage long sys_removexattr(const char __user *path, const char __user *name); +asmlinkage long sys_removexattrat(int dfd, const char __user *path, + unsigned int at_flags, + const char __user *name); asmlinkage long sys_lremovexattr(const char __user *path, const char __user *name); asmlinkage long sys_fremovexattr(int fd, const char __user *name); diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index aa4c6d44aaa0..40a6ac6c9713 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -90,9 +90,7 @@ int proc_do_static_key(const struct ctl_table *table, int write, void *buffer, /* * Register a set of sysctl names by calling register_sysctl - * with an initialised array of struct ctl_table's. An entry with - * NULL procname terminates the table. table->de will be - * set up by the registration and need not be initialised in advance. + * with an initialised array of struct ctl_table's. * * sysctl names can be mirrored automatically under /proc/sys. The * procname supplied controls /proc naming. 
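As a hedged illustration of the registration comment above, combined with the constification of struct ctl_table in the sysctl.h hunks that follow: a table can now be declared const, carries no sentinel entry, and the register_sysctl_init() macro takes its size from ARRAY_SIZE(). The example_* names below are invented for the sketch and are not part of the patch; proc_dointvec() and late_initcall() are existing kernel facilities.

#include <linux/init.h>
#include <linux/sysctl.h>

static int example_value;

static const struct ctl_table example_table[] = {
        {
                .procname     = "example_value",
                .data         = &example_value,
                .maxlen       = sizeof(int),
                .mode         = 0644,
                .proc_handler = proc_dointvec,
        },
};

static int __init example_sysctl_init(void)
{
        /* Exposes /proc/sys/kernel/example_value; the table size is taken
         * from ARRAY_SIZE() by the register_sysctl_init() macro.
         */
        register_sysctl_init("kernel", example_table);
        return 0;
}
late_initcall(example_sysctl_init);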
@@ -133,7 +131,7 @@ static inline void *proc_sys_poll_event(struct ctl_table_poll *poll) /* A sysctl table is an array of struct ctl_table: */ struct ctl_table { - const char *procname; /* Text ID for /proc/sys, or zero */ + const char *procname; /* Text ID for /proc/sys */ void *data; int maxlen; umode_t mode; @@ -162,7 +160,7 @@ struct ctl_node { struct ctl_table_header { union { struct { - struct ctl_table *ctl_table; + const struct ctl_table *ctl_table; int ctl_table_size; int used; int count; @@ -223,13 +221,13 @@ extern void retire_sysctl_set(struct ctl_table_set *set); struct ctl_table_header *__register_sysctl_table( struct ctl_table_set *set, - const char *path, struct ctl_table *table, size_t table_size); -struct ctl_table_header *register_sysctl_sz(const char *path, struct ctl_table *table, + const char *path, const struct ctl_table *table, size_t table_size); +struct ctl_table_header *register_sysctl_sz(const char *path, const struct ctl_table *table, size_t table_size); void unregister_sysctl_table(struct ctl_table_header * table); extern int sysctl_init_bases(void); -extern void __register_sysctl_init(const char *path, struct ctl_table *table, +extern void __register_sysctl_init(const char *path, const struct ctl_table *table, const char *table_name, size_t table_size); #define register_sysctl_init(path, table) \ __register_sysctl_init(path, table, #table, ARRAY_SIZE(table)) @@ -251,7 +249,7 @@ extern int no_unaligned_warning; #else /* CONFIG_SYSCTL */ -static inline void register_sysctl_init(const char *path, struct ctl_table *table) +static inline void register_sysctl_init(const char *path, const struct ctl_table *table) { } @@ -261,7 +259,7 @@ static inline struct ctl_table_header *register_sysctl_mount_point(const char *p } static inline struct ctl_table_header *register_sysctl_sz(const char *path, - struct ctl_table *table, + const struct ctl_table *table, size_t table_size) { return NULL; diff --git a/include/linux/sysfb.h b/include/linux/sysfb.h index bef5f06a91de..07cbab516942 100644 --- a/include/linux/sysfb.h +++ b/include/linux/sysfb.h @@ -60,12 +60,19 @@ struct efifb_dmi_info { void sysfb_disable(struct device *dev); +bool sysfb_handles_screen_info(void); + #else /* CONFIG_SYSFB */ static inline void sysfb_disable(struct device *dev) { } +static inline bool sysfb_handles_screen_info(void) +{ + return false; +} + #endif /* CONFIG_SYSFB */ #ifdef CONFIG_EFI diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index c4e64dc11206..0f2fcd244523 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -87,6 +87,11 @@ do { \ * SYSFS_GROUP_VISIBLE() when assigning this callback to * specify separate _group_visible() and _attr_visible() * handlers. + * @bin_size: + * Optional: Function to return the size of a binary attribute + * of the group. Will be called repeatedly for each binary + * attribute in the group. Overwrites the size field embedded + * inside the attribute itself. * @attrs: Pointer to NULL terminated list of attributes. * @bin_attrs: Pointer to NULL terminated list of binary attributes. * Either attrs or bin_attrs or both must be provided. 
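To make the new optional @bin_size group callback concrete, here is a small sketch under invented example_* names; it is not taken from the patch. The read handler uses the const-qualified signature, so the _Generic() selection added to __BIN_ATTR() further down in this sysfs.h diff routes it to ->read_new rather than the legacy ->read, and the group reports the attribute's size at runtime through ->bin_size() instead of the size recorded in the attribute itself. memory_read_from_buffer() and BIN_ATTR_RO() are existing kernel helpers used only to keep the sketch compact.

#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/sysfs.h>

static char example_blob[64];

/* const-qualified signature: the _Generic() selection in __BIN_ATTR()
 * assigns this handler to ->read_new rather than the legacy ->read.
 */
static ssize_t example_blob_read(struct file *file, struct kobject *kobj,
                                 const struct bin_attribute *attr, char *buf,
                                 loff_t off, size_t count)
{
        return memory_read_from_buffer(buf, count, &off, example_blob,
                                       sizeof(example_blob));
}
static BIN_ATTR_RO(example_blob, 0);    /* size 0 here; reported by ->bin_size() */

static const struct bin_attribute *const example_bin_attrs[] = {
        &bin_attr_example_blob,
        NULL
};

static size_t example_bin_size(struct kobject *kobj,
                               const struct bin_attribute *attr, int n)
{
        return sizeof(example_blob);    /* overrides the size embedded in attr */
}

static const struct attribute_group example_group = {
        .bin_attrs_new = example_bin_attrs,
        .bin_size      = example_bin_size,
};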
@@ -96,9 +101,15 @@ struct attribute_group { umode_t (*is_visible)(struct kobject *, struct attribute *, int); umode_t (*is_bin_visible)(struct kobject *, - struct bin_attribute *, int); + const struct bin_attribute *, int); + size_t (*bin_size)(struct kobject *, + const struct bin_attribute *, + int); struct attribute **attrs; - struct bin_attribute **bin_attrs; + union { + struct bin_attribute **bin_attrs; + const struct bin_attribute *const *bin_attrs_new; + }; }; #define SYSFS_PREALLOC 010000 @@ -191,22 +202,22 @@ struct attribute_group { * attributes, the group visibility is determined by the function * specified to is_visible() not is_bin_visible() */ -#define DEFINE_SYSFS_BIN_GROUP_VISIBLE(name) \ - static inline umode_t sysfs_group_visible_##name( \ - struct kobject *kobj, struct bin_attribute *attr, int n) \ - { \ - if (n == 0 && !name##_group_visible(kobj)) \ - return SYSFS_GROUP_INVISIBLE; \ - return name##_attr_visible(kobj, attr, n); \ +#define DEFINE_SYSFS_BIN_GROUP_VISIBLE(name) \ + static inline umode_t sysfs_group_visible_##name( \ + struct kobject *kobj, const struct bin_attribute *attr, int n) \ + { \ + if (n == 0 && !name##_group_visible(kobj)) \ + return SYSFS_GROUP_INVISIBLE; \ + return name##_attr_visible(kobj, attr, n); \ } -#define DEFINE_SIMPLE_SYSFS_BIN_GROUP_VISIBLE(name) \ - static inline umode_t sysfs_group_visible_##name( \ - struct kobject *kobj, struct bin_attribute *a, int n) \ - { \ - if (n == 0 && !name##_group_visible(kobj)) \ - return SYSFS_GROUP_INVISIBLE; \ - return a->mode; \ +#define DEFINE_SIMPLE_SYSFS_BIN_GROUP_VISIBLE(name) \ + static inline umode_t sysfs_group_visible_##name( \ + struct kobject *kobj, const struct bin_attribute *a, int n) \ + { \ + if (n == 0 && !name##_group_visible(kobj)) \ + return SYSFS_GROUP_INVISIBLE; \ + return a->mode; \ } #define SYSFS_GROUP_VISIBLE(fn) sysfs_group_visible_##fn @@ -297,11 +308,15 @@ struct bin_attribute { struct address_space *(*f_mapping)(void); ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); + ssize_t (*read_new)(struct file *, struct kobject *, const struct bin_attribute *, + char *, loff_t, size_t); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); - loff_t (*llseek)(struct file *, struct kobject *, struct bin_attribute *, + ssize_t (*write_new)(struct file *, struct kobject *, + const struct bin_attribute *, char *, loff_t, size_t); + loff_t (*llseek)(struct file *, struct kobject *, const struct bin_attribute *, loff_t, int); - int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr, + int (*mmap)(struct file *, struct kobject *, const struct bin_attribute *attr, struct vm_area_struct *vma); }; @@ -317,25 +332,36 @@ struct bin_attribute { */ #define sysfs_bin_attr_init(bin_attr) sysfs_attr_init(&(bin_attr)->attr) +typedef ssize_t __sysfs_bin_rw_handler_new(struct file *, struct kobject *, + const struct bin_attribute *, char *, loff_t, size_t); + /* macros to create static binary attributes easier */ #define __BIN_ATTR(_name, _mode, _read, _write, _size) { \ .attr = { .name = __stringify(_name), .mode = _mode }, \ - .read = _read, \ - .write = _write, \ + .read = _Generic(_read, \ + __sysfs_bin_rw_handler_new * : NULL, \ + default : _read \ + ), \ + .read_new = _Generic(_read, \ + __sysfs_bin_rw_handler_new * : _read, \ + default : NULL \ + ), \ + .write = _Generic(_write, \ + __sysfs_bin_rw_handler_new * : NULL, \ + default : _write \ + ), \ + .write_new = _Generic(_write, \ + 
__sysfs_bin_rw_handler_new * : _write, \ + default : NULL \ + ), \ .size = _size, \ } -#define __BIN_ATTR_RO(_name, _size) { \ - .attr = { .name = __stringify(_name), .mode = 0444 }, \ - .read = _name##_read, \ - .size = _size, \ -} +#define __BIN_ATTR_RO(_name, _size) \ + __BIN_ATTR(_name, 0444, _name##_read, NULL, _size) -#define __BIN_ATTR_WO(_name, _size) { \ - .attr = { .name = __stringify(_name), .mode = 0200 }, \ - .write = _name##_write, \ - .size = _size, \ -} +#define __BIN_ATTR_WO(_name, _size) \ + __BIN_ATTR(_name, 0200, NULL, _name##_write, _size) #define __BIN_ATTR_RW(_name, _size) \ __BIN_ATTR(_name, 0644, _name##_read, _name##_write, _size) @@ -356,11 +382,8 @@ struct bin_attribute bin_attr_##_name = __BIN_ATTR_WO(_name, _size) struct bin_attribute bin_attr_##_name = __BIN_ATTR_RW(_name, _size) -#define __BIN_ATTR_ADMIN_RO(_name, _size) { \ - .attr = { .name = __stringify(_name), .mode = 0400 }, \ - .read = _name##_read, \ - .size = _size, \ -} +#define __BIN_ATTR_ADMIN_RO(_name, _size) \ + __BIN_ATTR(_name, 0400, _name##_read, NULL, _size) #define __BIN_ATTR_ADMIN_RW(_name, _size) \ __BIN_ATTR(_name, 0600, _name##_read, _name##_write, _size) @@ -371,10 +394,8 @@ struct bin_attribute bin_attr_##_name = __BIN_ATTR_ADMIN_RO(_name, _size) #define BIN_ATTR_ADMIN_RW(_name, _size) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR_ADMIN_RW(_name, _size) -#define __BIN_ATTR_SIMPLE_RO(_name, _mode) { \ - .attr = { .name = __stringify(_name), .mode = _mode }, \ - .read = sysfs_bin_attr_simple_read, \ -} +#define __BIN_ATTR_SIMPLE_RO(_name, _mode) \ + __BIN_ATTR(_name, _mode, sysfs_bin_attr_simple_read, NULL, 0) #define BIN_ATTR_SIMPLE_RO(_name) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR_SIMPLE_RO(_name, 0444) diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 6a5e08b937b3..f88daaa76d83 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -200,7 +200,6 @@ struct tcp_sock { /* TX read-mostly hotpath cache lines */ __cacheline_group_begin(tcp_sock_read_tx); - /* timestamp of last sent data packet (for restart window) */ u32 max_window; /* Maximal window ever seen from peer */ u32 rcv_ssthresh; /* Current window clamp */ u32 reordering; /* Packet reordering metric. 
*/ @@ -263,7 +262,7 @@ struct tcp_sock { u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */ u32 write_seq; /* Tail(+1) of data held in tcp send buffer */ u32 pushed_seq; /* Last pushed seq, required to talk to windows */ - u32 lsndtime; + u32 lsndtime; /* timestamp of last sent data packet (for restart window) */ u32 mdev_us; /* medium deviation */ u32 rtt_seq; /* sequence number to update rttvar */ u64 tcp_wstamp_ns; /* departure time for next sent data packet */ diff --git a/include/linux/text-patching.h b/include/linux/text-patching.h new file mode 100644 index 000000000000..ad5877ab0855 --- /dev/null +++ b/include/linux/text-patching.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_TEXT_PATCHING_H +#define _LINUX_TEXT_PATCHING_H + +#include <asm/text-patching.h> + +#ifndef text_poke_copy +static inline void *text_poke_copy(void *dst, const void *src, size_t len) +{ + return memcpy(dst, src, len); +} +#define text_poke_copy text_poke_copy +#endif + +#endif /* _LINUX_TEXT_PATCHING_H */ diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 25ea8fe2313e..754802478b96 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -56,6 +56,9 @@ enum thermal_notify_event { THERMAL_TZ_UNBIND_CDEV, /* Cooling dev is unbind from the thermal zone */ THERMAL_INSTANCE_WEIGHT_CHANGED, /* Thermal instance weight changed */ THERMAL_TZ_RESUME, /* Thermal zone is resuming after system sleep */ + THERMAL_TZ_ADD_THRESHOLD, /* Threshold added */ + THERMAL_TZ_DEL_THRESHOLD, /* Threshold deleted */ + THERMAL_TZ_FLUSH_THRESHOLDS, /* All thresholds deleted */ }; /** @@ -137,6 +140,9 @@ struct thermal_cooling_device { #endif }; +DEFINE_GUARD(cooling_dev, struct thermal_cooling_device *, mutex_lock(&_T->lock), + mutex_unlock(&_T->lock)) + /* Structure to define Thermal Zone parameters */ struct thermal_zone_params { const char *governor_name; diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 9ea0b28068f4..cf2446c9c30d 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -59,6 +59,14 @@ enum syscall_work_bit { #include <asm/thread_info.h> +#ifndef TIF_NEED_RESCHED_LAZY +#ifdef CONFIG_ARCH_HAS_PREEMPT_LAZY +#error Inconsistent PREEMPT_LAZY +#endif +#define TIF_NEED_RESCHED_LAZY TIF_NEED_RESCHED +#define _TIF_NEED_RESCHED_LAZY _TIF_NEED_RESCHED +#endif + #ifdef __KERNEL__ #ifndef arch_set_restart_data @@ -179,22 +187,27 @@ static __always_inline unsigned long read_ti_thread_flags(struct thread_info *ti #ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H -static __always_inline bool tif_need_resched(void) +static __always_inline bool tif_test_bit(int bit) { - return arch_test_bit(TIF_NEED_RESCHED, + return arch_test_bit(bit, (unsigned long *)(&current_thread_info()->flags)); } #else -static __always_inline bool tif_need_resched(void) +static __always_inline bool tif_test_bit(int bit) { - return test_bit(TIF_NEED_RESCHED, + return test_bit(bit, (unsigned long *)(&current_thread_info()->flags)); } #endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */ +static __always_inline bool tif_need_resched(void) +{ + return tif_test_bit(TIF_NEED_RESCHED); +} + #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES static inline int arch_within_stack_frames(const void * const stack, const void * const stackend, diff --git a/include/linux/tick.h b/include/linux/tick.h index 99c9c5a7252a..b8ddc8e631a3 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -20,12 +20,10 @@ extern void __init tick_init(void); extern void
tick_suspend_local(void); /* Should be core only, but XEN resume magic and ARM BL switcher require it */ extern void tick_resume_local(void); -extern void tick_cleanup_dead_cpu(int cpu); #else /* CONFIG_GENERIC_CLOCKEVENTS */ static inline void tick_init(void) { } static inline void tick_suspend_local(void) { } static inline void tick_resume_local(void) { } -static inline void tick_cleanup_dead_cpu(int cpu) { } #endif /* !CONFIG_GENERIC_CLOCKEVENTS */ #if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_HOTPLUG_CPU) diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index 902c20ef495a..e39d4d563b19 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h @@ -26,7 +26,7 @@ * occupies a single 64byte cache line. * * The struct is separate from struct timekeeper as it is also used - * for a fast NMI safe accessors. + * for the fast NMI safe accessors. * * @base_real is for the fast NMI safe accessor to allow reading clock * realtime from any context. @@ -44,33 +44,38 @@ struct tk_read_base { /** * struct timekeeper - Structure holding internal timekeeping values. - * @tkr_mono: The readout base structure for CLOCK_MONOTONIC - * @tkr_raw: The readout base structure for CLOCK_MONOTONIC_RAW - * @xtime_sec: Current CLOCK_REALTIME time in seconds - * @ktime_sec: Current CLOCK_MONOTONIC time in seconds - * @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset - * @offs_real: Offset clock monotonic -> clock realtime - * @offs_boot: Offset clock monotonic -> clock boottime - * @offs_tai: Offset clock monotonic -> clock tai - * @tai_offset: The current UTC to TAI offset in seconds - * @clock_was_set_seq: The sequence number of clock was set events - * @cs_was_changed_seq: The sequence number of clocksource change events - * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second - * @raw_sec: CLOCK_MONOTONIC_RAW time in seconds - * @monotonic_to_boot: CLOCK_MONOTONIC to CLOCK_BOOTTIME offset - * @cycle_interval: Number of clock cycles in one NTP interval - * @xtime_interval: Number of clock shifted nano seconds in one NTP - * interval. - * @xtime_remainder: Shifted nano seconds left over when rounding - * @cycle_interval - * @raw_interval: Shifted raw nano seconds accumulated per NTP interval. - * @ntp_error: Difference between accumulated time and NTP time in ntp - * shifted nano seconds. - * @ntp_error_shift: Shift conversion between clock shifted nano seconds and - * ntp shifted nano seconds. 
- * @last_warning: Warning ratelimiter (DEBUG_TIMEKEEPING) - * @underflow_seen: Underflow warning flag (DEBUG_TIMEKEEPING) - * @overflow_seen: Overflow warning flag (DEBUG_TIMEKEEPING) + * @tkr_mono: The readout base structure for CLOCK_MONOTONIC + * @xtime_sec: Current CLOCK_REALTIME time in seconds + * @ktime_sec: Current CLOCK_MONOTONIC time in seconds + * @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset + * @offs_real: Offset clock monotonic -> clock realtime + * @offs_boot: Offset clock monotonic -> clock boottime + * @offs_tai: Offset clock monotonic -> clock tai + * @tai_offset: The current UTC to TAI offset in seconds + * @tkr_raw: The readout base structure for CLOCK_MONOTONIC_RAW + * @raw_sec: CLOCK_MONOTONIC_RAW time in seconds + * @clock_was_set_seq: The sequence number of clock was set events + * @cs_was_changed_seq: The sequence number of clocksource change events + * @monotonic_to_boot: CLOCK_MONOTONIC to CLOCK_BOOTTIME offset + * @cycle_interval: Number of clock cycles in one NTP interval + * @xtime_interval: Number of clock shifted nano seconds in one NTP + * interval. + * @xtime_remainder: Shifted nano seconds left over when rounding + * @cycle_interval + * @raw_interval: Shifted raw nano seconds accumulated per NTP interval. + * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second + * @ntp_tick: The ntp_tick_length() value currently being + * used. This cached copy ensures we consistently + * apply the tick length for an entire tick, as + * ntp_tick_length may change mid-tick, and we don't + * want to apply that new value to the tick in + * progress. + * @ntp_error: Difference between accumulated time and NTP time in ntp + * shifted nano seconds. + * @ntp_error_shift: Shift conversion between clock shifted nano seconds and + * ntp shifted nano seconds. + * @ntp_err_mult: Multiplication factor for scaled math conversion + * @skip_second_overflow: Flag used to avoid updating NTP twice with same second * * Note: For timespec(64) based interfaces wall_to_monotonic is what * we need to add to xtime (or xtime corrected for sub jiffy times) @@ -88,10 +93,28 @@ struct tk_read_base { * * @monotonic_to_boottime is a timespec64 representation of @offs_boot to * accelerate the VDSO update for CLOCK_BOOTTIME. + * + * The cacheline ordering of the structure is optimized for in kernel usage of + * the ktime_get() and ktime_get_ts64() family of time accessors. Struct + * timekeeper is prepended in the core timekeeping code with a sequence count, + * which results in the following cacheline layout: + * + * 0: seqcount, tkr_mono + * 1: xtime_sec ... tai_offset + * 2: tkr_raw, raw_sec + * 3,4: Internal variables + * + * Cacheline 0,1 contain the data which is used for accessing + * CLOCK_MONOTONIC/REALTIME/BOOTTIME/TAI, while cacheline 2 contains the + * data for accessing CLOCK_MONOTONIC_RAW. Cacheline 3,4 are internal + * variables which are only accessed during timekeeper updates once per + * tick. 
*/ struct timekeeper { + /* Cacheline 0 (together with prepended seqcount of timekeeper core): */ struct tk_read_base tkr_mono; - struct tk_read_base tkr_raw; + + /* Cacheline 1: */ u64 xtime_sec; unsigned long ktime_sec; struct timespec64 wall_to_monotonic; @@ -99,43 +122,28 @@ struct timekeeper { ktime_t offs_boot; ktime_t offs_tai; s32 tai_offset; + + /* Cacheline 2: */ + struct tk_read_base tkr_raw; + u64 raw_sec; + + /* Cachline 3 and 4 (timekeeping internal variables): */ unsigned int clock_was_set_seq; u8 cs_was_changed_seq; - ktime_t next_leap_ktime; - u64 raw_sec; + struct timespec64 monotonic_to_boot; - /* The following members are for timekeeping internal use */ u64 cycle_interval; u64 xtime_interval; s64 xtime_remainder; u64 raw_interval; - /* The ntp_tick_length() value currently being used. - * This cached copy ensures we consistently apply the tick - * length for an entire tick, as ntp_tick_length may change - * mid-tick, and we don't want to apply that new value to - * the tick in progress. - */ + + ktime_t next_leap_ktime; u64 ntp_tick; - /* Difference between accumulated time and NTP time in ntp - * shifted nano seconds. */ s64 ntp_error; u32 ntp_error_shift; u32 ntp_err_mult; - /* Flag used to avoid updating NTP twice with same second */ u32 skip_second_overflow; -#ifdef CONFIG_DEBUG_TIMEKEEPING - long last_warning; - /* - * These simple flag variables are managed - * without locks, which is racy, but they are - * ok since we don't really care about being - * super precise about how many events were - * seen, just that a problem was observed. - */ - int underflow_seen; - int overflow_seen; -#endif }; #ifdef CONFIG_GENERIC_TIME_VSYSCALL diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index fc12a9ba2c88..0e035f675efe 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -45,6 +45,11 @@ extern void ktime_get_real_ts64(struct timespec64 *tv); extern void ktime_get_coarse_ts64(struct timespec64 *ts); extern void ktime_get_coarse_real_ts64(struct timespec64 *ts); +/* Multigrain timestamp interfaces */ +extern void ktime_get_coarse_real_ts64_mg(struct timespec64 *ts); +extern void ktime_get_real_ts64_mg(struct timespec64 *ts); +extern unsigned long timekeeping_get_mg_floor_swaps(void); + void getboottime64(struct timespec64 *ts); /* @@ -275,6 +280,7 @@ struct ktime_timestamps { * counter value * @cycles: Clocksource counter value to produce the system times * @real: Realtime system time + * @boot: Boot time * @raw: Monotonic raw system time * @cs_id: Clocksource ID * @clock_was_set_seq: The sequence number of clock-was-set events @@ -283,6 +289,7 @@ struct ktime_timestamps { struct system_time_snapshot { u64 cycles; ktime_t real; + ktime_t boot; ktime_t raw; enum clocksource_ids cs_id; unsigned int clock_was_set_seq; diff --git a/include/linux/timex.h b/include/linux/timex.h index 3871b06bd302..4ee32eff3f22 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h @@ -139,14 +139,6 @@ unsigned long random_get_entropy_fallback(void); #define MAXSEC 2048 /* max interval between updates (s) */ #define NTP_PHASE_LIMIT ((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */ -/* - * kernel variables - * Note: maximum error = NTP sync distance = dispersion + delay / 2; - * estimated error = NTP dispersion. 
- */ -extern unsigned long tick_usec; /* USER_HZ period (usec) */ -extern unsigned long tick_nsec; /* SHIFTED_HZ period (nsec) */ - /* Required to safely shift negative values */ #define shift_right(x, s) ({ \ __typeof__(x) __x = (x); \ diff --git a/include/linux/tpm.h b/include/linux/tpm.h index 587b96b4418e..20a40ade8030 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -421,6 +421,7 @@ void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value); u8 tpm_buf_read_u8(struct tpm_buf *buf, off_t *offset); u16 tpm_buf_read_u16(struct tpm_buf *buf, off_t *offset); u32 tpm_buf_read_u32(struct tpm_buf *buf, off_t *offset); +void tpm_buf_append_handle(struct tpm_chip *chip, struct tpm_buf *buf, u32 handle); /* * Check if TPM device is in the firmware upgrade mode. @@ -505,6 +506,8 @@ void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf, void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf, u8 attributes, u8 *passphrase, int passphraselen); +void tpm_buf_append_auth(struct tpm_chip *chip, struct tpm_buf *buf, + u8 attributes, u8 *passphrase, int passphraselen); static inline void tpm_buf_append_hmac_session_opt(struct tpm_chip *chip, struct tpm_buf *buf, u8 attributes, diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h index 7d68a5cc5881..891368e82558 100644 --- a/include/linux/tpm_eventlog.h +++ b/include/linux/tpm_eventlog.h @@ -157,7 +157,7 @@ struct tcg_algorithm_info { * Return: size of the event on success, 0 on failure */ -static __always_inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event, +static __always_inline u32 __calc_tpm2_event_size(struct tcg_pcr_event2_head *event, struct tcg_pcr_event *event_header, bool do_mapping) { diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 42bedcddd511..58ad4ead33fc 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -184,7 +184,7 @@ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status); enum trace_flag_type { TRACE_FLAG_IRQS_OFF = 0x01, - TRACE_FLAG_IRQS_NOSUPPORT = 0x02, + TRACE_FLAG_NEED_RESCHED_LAZY = 0x02, TRACE_FLAG_NEED_RESCHED = 0x04, TRACE_FLAG_HARDIRQ = 0x08, TRACE_FLAG_SOFTIRQ = 0x10, @@ -193,7 +193,6 @@ enum trace_flag_type { TRACE_FLAG_BH_OFF = 0x80, }; -#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags) { unsigned int irq_status = irqs_disabled_flags(irqflags) ? 
@@ -207,17 +206,6 @@ static inline unsigned int tracing_gen_ctx(void) local_save_flags(irqflags); return tracing_gen_ctx_flags(irqflags); } -#else - -static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags) -{ - return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT); -} -static inline unsigned int tracing_gen_ctx(void) -{ - return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT); -} -#endif static inline unsigned int tracing_gen_ctx_dec(void) { @@ -285,7 +273,8 @@ struct trace_event_fields { const char *name; const int size; const int align; - const int is_signed; + const unsigned int is_signed:1; + unsigned int needs_test:1; const int filter_type; const int len; }; @@ -326,7 +315,6 @@ void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer, void trace_event_buffer_commit(struct trace_event_buffer *fbuffer); enum { - TRACE_EVENT_FL_FILTERED_BIT, TRACE_EVENT_FL_CAP_ANY_BIT, TRACE_EVENT_FL_NO_SET_FILTER_BIT, TRACE_EVENT_FL_IGNORE_ENABLE_BIT, @@ -337,11 +325,11 @@ enum { TRACE_EVENT_FL_EPROBE_BIT, TRACE_EVENT_FL_FPROBE_BIT, TRACE_EVENT_FL_CUSTOM_BIT, + TRACE_EVENT_FL_TEST_STR_BIT, }; /* * Event flags: - * FILTERED - The event has a filter attached * CAP_ANY - Any user can enable for perf * NO_SET_FILTER - Set when filter has error and is to be ignored * IGNORE_ENABLE - For trace internal events, do not enable with debugfs file @@ -354,9 +342,9 @@ enum { * CUSTOM - Event is a custom event (to be attached to an exsiting tracepoint) * This is set when the custom event has not been attached * to a tracepoint yet, then it is cleared when it is. + * TEST_STR - The event has a "%s" that points to a string outside the event */ enum { - TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT), TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT), TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT), @@ -367,6 +355,7 @@ enum { TRACE_EVENT_FL_EPROBE = (1 << TRACE_EVENT_FL_EPROBE_BIT), TRACE_EVENT_FL_FPROBE = (1 << TRACE_EVENT_FL_FPROBE_BIT), TRACE_EVENT_FL_CUSTOM = (1 << TRACE_EVENT_FL_CUSTOM_BIT), + TRACE_EVENT_FL_TEST_STR = (1 << TRACE_EVENT_FL_TEST_STR_BIT), }; #define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE) @@ -375,13 +364,12 @@ struct trace_event_call { struct list_head list; struct trace_event_class *class; union { - char *name; + const char *name; /* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */ struct tracepoint *tp; }; struct trace_event event; char *print_fmt; - struct event_filter *filter; /* * Static events can disappear with modules, * where as dynamic ones need their own ref count. diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h index 4dc4955f0fbf..aebf0571c736 100644 --- a/include/linux/tracepoint-defs.h +++ b/include/linux/tracepoint-defs.h @@ -29,16 +29,22 @@ struct tracepoint_func { int prio; }; +struct tracepoint_ext { + int (*regfunc)(void); + void (*unregfunc)(void); + /* Flags. 
*/ + unsigned int faultable:1; +}; + struct tracepoint { const char *name; /* Tracepoint name */ - struct static_key key; + struct static_key_false key; struct static_call_key *static_call_key; void *static_call_tramp; void *iterator; void *probestub; - int (*regfunc)(void); - void (*unregfunc)(void); struct tracepoint_func __rcu *funcs; + struct tracepoint_ext *ext; }; #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS @@ -83,7 +89,7 @@ struct bpf_raw_event_map { #ifdef CONFIG_TRACEPOINTS # define tracepoint_enabled(tp) \ - static_key_false(&(__tracepoint_##tp).key) + static_branch_unlikely(&(__tracepoint_##tp).key) #else # define tracepoint_enabled(tracepoint) false #endif diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 93a9f3070b48..76d9055b2cff 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -17,6 +17,7 @@ #include <linux/errno.h> #include <linux/types.h> #include <linux/rcupdate.h> +#include <linux/rcupdate_trace.h> #include <linux/tracepoint-defs.h> #include <linux/static_call.h> @@ -32,8 +33,6 @@ struct trace_eval_map { #define TRACEPOINT_DEFAULT_PRIO 10 -extern struct srcu_struct tracepoint_srcu; - extern int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data); extern int @@ -105,16 +104,30 @@ void for_each_tracepoint_in_module(struct module *mod, * tracepoint_synchronize_unregister must be called between the last tracepoint * probe unregistration and the end of module exit to make sure there is no * caller executing a probe when it is freed. + * + * An alternative is to use the following for batch reclaim associated + * with a given tracepoint: + * + * - tracepoint_is_faultable() == false: call_rcu() + * - tracepoint_is_faultable() == true: call_rcu_tasks_trace() */ #ifdef CONFIG_TRACEPOINTS static inline void tracepoint_synchronize_unregister(void) { - synchronize_srcu(&tracepoint_srcu); + synchronize_rcu_tasks_trace(); synchronize_rcu(); } +static inline bool tracepoint_is_faultable(struct tracepoint *tp) +{ + return tp->ext && tp->ext->faultable; +} #else static inline void tracepoint_synchronize_unregister(void) { } +static inline bool tracepoint_is_faultable(struct tracepoint *tp) +{ + return false; +} #endif #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS @@ -197,65 +210,16 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) #endif /* CONFIG_HAVE_STATIC_CALL */ /* - * ARCH_WANTS_NO_INSTR archs are expected to have sanitized entry and idle - * code that disallow any/all tracing/instrumentation when RCU isn't watching. - */ -#ifdef CONFIG_ARCH_WANTS_NO_INSTR -#define RCUIDLE_COND(rcuidle) (rcuidle) -#else -/* srcu can't be used from NMI */ -#define RCUIDLE_COND(rcuidle) (rcuidle && in_nmi()) -#endif - -/* - * it_func[0] is never NULL because there is at least one element in the array - * when the array itself is non NULL. + * Declare an exported function that Rust code can call to trigger this + * tracepoint. This function does not include the static branch; that is done + * in Rust to avoid a function call when the tracepoint is disabled. */ -#define __DO_TRACE(name, args, cond, rcuidle) \ - do { \ - int __maybe_unused __idx = 0; \ - \ - if (!(cond)) \ - return; \ - \ - if (WARN_ONCE(RCUIDLE_COND(rcuidle), \ - "Bad RCU usage for tracepoint")) \ - return; \ - \ - /* keep srcu and sched-rcu usage consistent */ \ - preempt_disable_notrace(); \ - \ - /* \ - * For rcuidle callers, use srcu since sched-rcu \ - * doesn't work from the idle path. 
\ - */ \ - if (rcuidle) { \ - __idx = srcu_read_lock_notrace(&tracepoint_srcu);\ - ct_irq_enter_irqson(); \ - } \ - \ - __DO_TRACE_CALL(name, TP_ARGS(args)); \ - \ - if (rcuidle) { \ - ct_irq_exit_irqson(); \ - srcu_read_unlock_notrace(&tracepoint_srcu, __idx);\ - } \ - \ - preempt_enable_notrace(); \ - } while (0) - -#ifndef MODULE -#define __DECLARE_TRACE_RCU(name, proto, args, cond) \ - static inline void trace_##name##_rcuidle(proto) \ +#define DEFINE_RUST_DO_TRACE(name, proto, args) +#define __DEFINE_RUST_DO_TRACE(name, proto, args) \ + notrace void rust_do_trace_##name(proto) \ { \ - if (static_key_false(&__tracepoint_##name.key)) \ - __DO_TRACE(name, \ - TP_ARGS(args), \ - TP_CONDITION(cond), 1); \ + __rust_do_trace_##name(args); \ } -#else -#define __DECLARE_TRACE_RCU(name, proto, args, cond) -#endif /* * Make sure the alignment of the structure in the __tracepoints section will @@ -268,23 +232,11 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) * site if it is not watching, as it will need to be active when the * tracepoint is enabled. */ -#define __DECLARE_TRACE(name, proto, args, cond, data_proto) \ +#define __DECLARE_TRACE_COMMON(name, proto, args, data_proto) \ extern int __traceiter_##name(data_proto); \ DECLARE_STATIC_CALL(tp_func_##name, __traceiter_##name); \ extern struct tracepoint __tracepoint_##name; \ - static inline void trace_##name(proto) \ - { \ - if (static_key_false(&__tracepoint_##name.key)) \ - __DO_TRACE(name, \ - TP_ARGS(args), \ - TP_CONDITION(cond), 0); \ - if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \ - WARN_ONCE(!rcu_is_watching(), \ - "RCU not watching for tracepoint"); \ - } \ - } \ - __DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args), \ - PARAMS(cond)) \ + extern void rust_do_trace_##name(proto); \ static inline int \ register_trace_##name(void (*probe)(data_proto), void *data) \ { \ @@ -311,15 +263,61 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) static inline bool \ trace_##name##_enabled(void) \ { \ - return static_key_false(&__tracepoint_##name.key); \ + return static_branch_unlikely(&__tracepoint_##name.key);\ + } + +#define __DECLARE_TRACE(name, proto, args, cond, data_proto) \ + __DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto)) \ + static inline void __rust_do_trace_##name(proto) \ + { \ + if (cond) { \ + guard(preempt_notrace)(); \ + __DO_TRACE_CALL(name, TP_ARGS(args)); \ + } \ + } \ + static inline void trace_##name(proto) \ + { \ + if (static_branch_unlikely(&__tracepoint_##name.key)) { \ + if (cond) { \ + guard(preempt_notrace)(); \ + __DO_TRACE_CALL(name, TP_ARGS(args)); \ + } \ + } \ + if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \ + WARN_ONCE(!rcu_is_watching(), \ + "RCU not watching for tracepoint"); \ + } \ + } + +#define __DECLARE_TRACE_SYSCALL(name, proto, args, data_proto) \ + __DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto)) \ + static inline void __rust_do_trace_##name(proto) \ + { \ + guard(rcu_tasks_trace)(); \ + __DO_TRACE_CALL(name, TP_ARGS(args)); \ + } \ + static inline void trace_##name(proto) \ + { \ + might_fault(); \ + if (static_branch_unlikely(&__tracepoint_##name.key)) { \ + guard(rcu_tasks_trace)(); \ + __DO_TRACE_CALL(name, TP_ARGS(args)); \ + } \ + if (IS_ENABLED(CONFIG_LOCKDEP)) { \ + WARN_ONCE(!rcu_is_watching(), \ + "RCU not watching for tracepoint"); \ + } \ } /* * We have no guarantee that gcc and the linker won't up-align the tracepoint * structures, so we create an array of pointers that 
will be used for iteration * on the tracepoints. + * + * it_func[0] is never NULL because there is at least one element in the array + * when the array itself is non NULL. */ -#define DEFINE_TRACE_FN(_name, _reg, _unreg, proto, args) \ +#define __DEFINE_TRACE_EXT(_name, _ext, proto, args) \ static const char __tpstrtab_##_name[] \ __section("__tracepoints_strings") = #_name; \ extern struct static_call_key STATIC_CALL_KEY(tp_func_##_name); \ @@ -328,14 +326,14 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) struct tracepoint __tracepoint_##_name __used \ __section("__tracepoints") = { \ .name = __tpstrtab_##_name, \ - .key = STATIC_KEY_INIT_FALSE, \ + .key = STATIC_KEY_FALSE_INIT, \ .static_call_key = &STATIC_CALL_KEY(tp_func_##_name), \ .static_call_tramp = STATIC_CALL_TRAMP_ADDR(tp_func_##_name), \ .iterator = &__traceiter_##_name, \ .probestub = &__probestub_##_name, \ - .regfunc = _reg, \ - .unregfunc = _unreg, \ - .funcs = NULL }; \ + .funcs = NULL, \ + .ext = _ext, \ + }; \ __TRACEPOINT_ENTRY(_name); \ int __traceiter_##_name(void *__data, proto) \ { \ @@ -356,10 +354,27 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) void __probestub_##_name(void *__data, proto) \ { \ } \ - DEFINE_STATIC_CALL(tp_func_##_name, __traceiter_##_name); + DEFINE_STATIC_CALL(tp_func_##_name, __traceiter_##_name); \ + DEFINE_RUST_DO_TRACE(_name, TP_PROTO(proto), TP_ARGS(args)) -#define DEFINE_TRACE(name, proto, args) \ - DEFINE_TRACE_FN(name, NULL, NULL, PARAMS(proto), PARAMS(args)); +#define DEFINE_TRACE_FN(_name, _reg, _unreg, _proto, _args) \ + static struct tracepoint_ext __tracepoint_ext_##_name = { \ + .regfunc = _reg, \ + .unregfunc = _unreg, \ + .faultable = false, \ + }; \ + __DEFINE_TRACE_EXT(_name, &__tracepoint_ext_##_name, PARAMS(_proto), PARAMS(_args)); + +#define DEFINE_TRACE_SYSCALL(_name, _reg, _unreg, _proto, _args) \ + static struct tracepoint_ext __tracepoint_ext_##_name = { \ + .regfunc = _reg, \ + .unregfunc = _unreg, \ + .faultable = true, \ + }; \ + __DEFINE_TRACE_EXT(_name, &__tracepoint_ext_##_name, PARAMS(_proto), PARAMS(_args)); + +#define DEFINE_TRACE(_name, _proto, _args) \ + __DEFINE_TRACE_EXT(_name, NULL, PARAMS(_proto), PARAMS(_args)); #define EXPORT_TRACEPOINT_SYMBOL_GPL(name) \ EXPORT_SYMBOL_GPL(__tracepoint_##name); \ @@ -372,11 +387,9 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) #else /* !TRACEPOINTS_ENABLED */ -#define __DECLARE_TRACE(name, proto, args, cond, data_proto) \ +#define __DECLARE_TRACE_COMMON(name, proto, args, data_proto) \ static inline void trace_##name(proto) \ { } \ - static inline void trace_##name##_rcuidle(proto) \ - { } \ static inline int \ register_trace_##name(void (*probe)(data_proto), \ void *data) \ @@ -398,7 +411,14 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) return false; \ } +#define __DECLARE_TRACE(name, proto, args, cond, data_proto) \ + __DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto)) + +#define __DECLARE_TRACE_SYSCALL(name, proto, args, data_proto) \ + __DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto)) + #define DEFINE_TRACE_FN(name, reg, unreg, proto, args) +#define DEFINE_TRACE_SYSCALL(name, reg, unreg, proto, args) #define DEFINE_TRACE(name, proto, args) #define EXPORT_TRACEPOINT_SYMBOL_GPL(name) #define EXPORT_TRACEPOINT_SYMBOL(name) @@ -459,6 +479,10 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) 
cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \ PARAMS(void *__data, proto)) +#define DECLARE_TRACE_SYSCALL(name, proto, args) \ + __DECLARE_TRACE_SYSCALL(name, PARAMS(proto), PARAMS(args), \ + PARAMS(void *__data, proto)) + #define TRACE_EVENT_FLAGS(event, flag) #define TRACE_EVENT_PERF_PERM(event, expr...) @@ -596,6 +620,9 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) struct, assign, print) \ DECLARE_TRACE_CONDITION(name, PARAMS(proto), \ PARAMS(args), PARAMS(cond)) +#define TRACE_EVENT_SYSCALL(name, proto, args, struct, assign, \ + print, reg, unreg) \ + DECLARE_TRACE_SYSCALL(name, PARAMS(proto), PARAMS(args)) #define TRACE_EVENT_FLAGS(event, flag) diff --git a/include/linux/types.h b/include/linux/types.h index 2bc8766ba20c..2d7b9ae8714c 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -115,8 +115,9 @@ typedef u64 u_int64_t; typedef s64 int64_t; #endif -/* this is a special 64bit data type that is 8-byte aligned */ +/* These are the special 64-bit data types that are 8-byte aligned */ #define aligned_u64 __aligned_u64 +#define aligned_s64 __aligned_s64 #define aligned_be64 __aligned_be64 #define aligned_le64 __aligned_le64 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 43844510d5d0..e9c702c1908d 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -403,6 +403,103 @@ copy_struct_from_user(void *dst, size_t ksize, const void __user *src, return 0; } +/** + * copy_struct_to_user: copy a struct to userspace + * @dst: Destination address, in userspace. This buffer must be @ksize + * bytes long. + * @usize: (Alleged) size of @dst struct. + * @src: Source address, in kernel space. + * @ksize: Size of @src struct. + * @ignored_trailing: Set to %true if there was a non-zero byte in @src that + * userspace cannot see because they are using an smaller struct. + * + * Copies a struct from kernel space to userspace, in a way that guarantees + * backwards-compatibility for struct syscall arguments (as long as future + * struct extensions are made such that all new fields are *appended* to the + * old struct, and zeroed-out new fields have the same meaning as the old + * struct). + * + * Some syscalls may wish to make sure that userspace knows about everything in + * the struct, and if there is a non-zero value that userspce doesn't know + * about, they want to return an error (such as -EMSGSIZE) or have some other + * fallback (such as adding a "you're missing some information" flag). If + * @ignored_trailing is non-%NULL, it will be set to %true if there was a + * non-zero byte that could not be copied to userspace (ie. was past @usize). + * + * While unconditionally returning an error in this case is the simplest + * solution, for maximum backward compatibility you should try to only return + * -EMSGSIZE if the user explicitly requested the data that couldn't be copied. + * Note that structure sizes can change due to header changes and simple + * recompilations without code changes(!), so if you care about + * @ignored_trailing you probably want to make sure that any new field data is + * associated with a flag. Otherwise you might assume that a program knows + * about data it does not. + * + * @ksize is just sizeof(*src), and @usize should've been passed by userspace. 
+ * The recommended usage is something like the following: + * + * SYSCALL_DEFINE2(foobar, struct foo __user *, uarg, size_t, usize) + * { + * int err; + * bool ignored_trailing; + * struct foo karg = {}; + * + * if (usize > PAGE_SIZE) + * return -E2BIG; + * if (usize < FOO_SIZE_VER0) + * return -EINVAL; + * + * // ... modify karg somehow ... + * + * err = copy_struct_to_user(uarg, usize, &karg, sizeof(karg), + * &ignored_trailing); + * if (err) + * return err; + * if (ignored_trailing) + * return -EMSGSIZE: + * + * // ... + * } + * + * There are three cases to consider: + * * If @usize == @ksize, then it's copied verbatim. + * * If @usize < @ksize, then the kernel is trying to pass userspace a newer + * struct than it supports. Thus we only copy the interoperable portions + * (@usize) and ignore the rest (but @ignored_trailing is set to %true if + * any of the trailing (@ksize - @usize) bytes are non-zero). + * * If @usize > @ksize, then the kernel is trying to pass userspace an older + * struct than userspace supports. In order to make sure the + * unknown-to-the-kernel fields don't contain garbage values, we zero the + * trailing (@usize - @ksize) bytes. + * + * Returns (in all cases, some data may have been copied): + * * -EFAULT: access to userspace failed. + */ +static __always_inline __must_check int +copy_struct_to_user(void __user *dst, size_t usize, const void *src, + size_t ksize, bool *ignored_trailing) +{ + size_t size = min(ksize, usize); + size_t rest = max(ksize, usize) - size; + + /* Double check if ksize is larger than a known object size. */ + if (WARN_ON_ONCE(ksize > __builtin_object_size(src, 1))) + return -E2BIG; + + /* Deal with trailing bytes. */ + if (usize > ksize) { + if (clear_user(dst + size, rest)) + return -EFAULT; + } + if (ignored_trailing) + *ignored_trailing = ksize < usize && + memchr_inv(src + size, 0, rest) != NULL; + /* Copy the interoperable parts of the struct. */ + if (copy_to_user(dst, src, size)) + return -EFAULT; + return 0; +} + bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size); long copy_from_kernel_nofault(void *dst, const void *src, size_t size); diff --git a/include/linux/udp.h b/include/linux/udp.h index 3eb3f2b9a2a0..0807e21cfec9 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -56,6 +56,12 @@ struct udp_sock { int pending; /* Any pending frames ? */ __u8 encap_type; /* Is this an Encapsulation socket? */ +#if !IS_ENABLED(CONFIG_BASE_SMALL) + /* For UDP 4-tuple hash */ + __u16 udp_lrpa_hash; + struct hlist_nulls_node udp_lrpa_node; +#endif + /* * Following member retains the information to create a UDP header * when the socket is uncorked. 
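A minimal usage sketch for the new UDP 4-tuple hash members added above. The chain head, the hash value and the helper function are hypothetical and are not part of this diff; the real hashing and lookup logic lives in net/ipv4/udp.c. It relies on the udp_lrpa_for_each_entry_rcu() iterator introduced in the next hunk:

#if !IS_ENABLED(CONFIG_BASE_SMALL)
/* Hypothetical example: count sockets on one hash chain whose cached
 * 4-tuple hash matches, walking the nulls list under RCU protection. */
static unsigned int udp_lrpa_count_matches(struct hlist_nulls_head *chain, u16 hash)
{
	struct udp_sock *up;
	struct hlist_nulls_node *node;
	unsigned int matches = 0;

	rcu_read_lock();
	udp_lrpa_for_each_entry_rcu(up, node, chain)
		if (up->udp_lrpa_hash == hash)
			matches++;
	rcu_read_unlock();

	return matches;
}
#endif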
@@ -206,6 +212,11 @@ static inline void udp_allow_gso(struct sock *sk) #define udp_portaddr_for_each_entry_rcu(__sk, list) \ hlist_for_each_entry_rcu(__sk, list, __sk_common.skc_portaddr_node) +#if !IS_ENABLED(CONFIG_BASE_SMALL) +#define udp_lrpa_for_each_entry_rcu(__up, node, list) \ + hlist_nulls_for_each_entry_rcu(__up, node, list, udp_lrpa_node) +#endif + #define IS_UDPLITE(__sk) (__sk->sk_protocol == IPPROTO_UDPLITE) #endif /* _LINUX_UDP_H */ diff --git a/include/linux/unicode.h b/include/linux/unicode.h index 4d39e6e11a95..5e6b212a2aed 100644 --- a/include/linux/unicode.h +++ b/include/linux/unicode.h @@ -16,6 +16,8 @@ struct utf8data_table; ((unsigned int)(MIN) << UNICODE_MIN_SHIFT) | \ ((unsigned int)(REV))) +#define UTF8_LATEST UNICODE_AGE(12, 1, 0) + static inline u8 unicode_major(unsigned int age) { return (age >> UNICODE_MAJ_SHIFT) & 0xff; @@ -76,4 +78,6 @@ int utf8_casefold_hash(const struct unicode_map *um, const void *salt, struct unicode_map *utf8_load(unsigned int version); void utf8_unload(struct unicode_map *um); +int utf8_parse_version(char *version); + #endif /* _LINUX_UNICODE_H */ diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index 2b294bf1881f..e0a4c2082245 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -15,6 +15,7 @@ #include <linux/rbtree.h> #include <linux/types.h> #include <linux/wait.h> +#include <linux/timer.h> struct uprobe; struct vm_area_struct; @@ -23,8 +24,17 @@ struct inode; struct notifier_block; struct page; +/* + * Allowed return values from uprobe consumer's handler callback + * with following meaning: + * + * UPROBE_HANDLER_REMOVE + * - Remove the uprobe breakpoint from current->mm. + * UPROBE_HANDLER_IGNORE + * - Ignore ret_handler callback for this consumer. + */ #define UPROBE_HANDLER_REMOVE 1 -#define UPROBE_HANDLER_MASK 1 +#define UPROBE_HANDLER_IGNORE 2 #define MAX_URETPROBE_DEPTH 64 @@ -37,13 +47,15 @@ struct uprobe_consumer { * for the current process. If filter() is omitted or returns true, * UPROBE_HANDLER_REMOVE is effectively ignored. */ - int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs); + int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs, __u64 *data); int (*ret_handler)(struct uprobe_consumer *self, unsigned long func, - struct pt_regs *regs); + struct pt_regs *regs, __u64 *data); bool (*filter)(struct uprobe_consumer *self, struct mm_struct *mm); struct list_head cons_node; + + __u64 id; /* set when uprobe_consumer is registered */ }; #ifdef CONFIG_UPROBES @@ -56,12 +68,62 @@ enum uprobe_task_state { UTASK_SSTEP_TRAPPED, }; +/* The state of hybrid-lifetime uprobe inside struct return_instance */ +enum hprobe_state { + HPROBE_LEASED, /* uretprobes_srcu-protected uprobe */ + HPROBE_STABLE, /* refcounted uprobe */ + HPROBE_GONE, /* NULL uprobe, SRCU expired, refcount failed */ + HPROBE_CONSUMED, /* uprobe "consumed" by uretprobe handler */ +}; + +/* + * Hybrid lifetime uprobe. Represents a uprobe instance that could be either + * SRCU protected (with SRCU protection eventually potentially timing out), + * refcounted using uprobe->ref, or there could be no valid uprobe (NULL). + * + * hprobe's internal state is setup such that background timer thread can + * atomically "downgrade" temporarily RCU-protected uprobe into refcounted one + * (or no uprobe, if refcounting failed). + * + * *stable* pointer always point to the uprobe (or could be NULL if there is + * was no valid underlying uprobe to begin with). 
+ * + * *leased* pointer is the key to achieving race-free atomic lifetime state + * transition and can have three possible states: + * - either the same non-NULL value as *stable*, in which case uprobe is + * SRCU-protected; + * - NULL, in which case uprobe (if there is any) is refcounted; + * - special __UPROBE_DEAD value, which represents an uprobe that was SRCU + * protected initially, but SRCU period timed out and we attempted to + * convert it to refcounted, but refcount_inc_not_zero() failed, because + * uprobe effectively went away (the last consumer unsubscribed). In this + * case it's important to know that *stable* pointer (which still has + * non-NULL uprobe pointer) shouldn't be used, because lifetime of + * underlying uprobe is not guaranteed anymore. __UPROBE_DEAD is just an + * internal marker and is handled transparently by hprobe_fetch() helper. + * + * When uprobe is SRCU-protected, we also record srcu_idx value, necessary for + * SRCU unlocking. + * + * See hprobe_expire() and hprobe_fetch() for details of race-free uprobe + * state transitioning details. It all hinges on atomic xchg() over *leaded* + * pointer. *stable* pointer, once initially set, is not modified concurrently. + */ +struct hprobe { + enum hprobe_state state; + int srcu_idx; + struct uprobe *uprobe; +}; + /* * uprobe_task: Metadata of a task while it singlesteps. */ struct uprobe_task { enum uprobe_task_state state; + unsigned int depth; + struct return_instance *return_instances; + union { struct { struct arch_uprobe_task autask; @@ -75,23 +137,30 @@ struct uprobe_task { }; struct uprobe *active_uprobe; + struct timer_list ri_timer; unsigned long xol_vaddr; struct arch_uprobe *auprobe; +}; - struct return_instance *return_instances; - unsigned int depth; +struct return_consumer { + __u64 cookie; + __u64 id; }; struct return_instance { - struct uprobe *uprobe; + struct hprobe hprobe; unsigned long func; unsigned long stack; /* stack pointer */ unsigned long orig_ret_vaddr; /* original return address */ bool chained; /* true, if instance is nested */ + int consumers_cnt; struct return_instance *next; /* keep as stack */ -}; + struct rcu_head rcu; + + struct return_consumer consumers[] __counted_by(consumers_cnt); +} ____cacheline_aligned; enum rp_check { RP_CHECK_CALL, diff --git a/include/linux/usb.h b/include/linux/usb.h index 672d8fc2abdb..cfa8005e24f9 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -1129,8 +1129,8 @@ static inline int usb_make_path(struct usb_device *dev, char *buf, size_t size) /* ----------------------------------------------------------------------- */ /* Stuff for dynamic usb ids */ +extern struct mutex usb_dynids_lock; struct usb_dynids { - spinlock_t lock; struct list_head list; }; @@ -1243,7 +1243,7 @@ struct usb_driver { unsigned int disable_hub_initiated_lpm:1; unsigned int soft_unbind:1; }; -#define to_usb_driver(d) container_of(d, struct usb_driver, driver) +#define to_usb_driver(d) container_of_const(d, struct usb_driver, driver) /** * struct usb_device_driver - identifies USB device driver to usbcore @@ -1294,8 +1294,7 @@ struct usb_device_driver { unsigned int supports_autosuspend:1; unsigned int generic_subclass:1; }; -#define to_usb_device_driver(d) container_of(d, struct usb_device_driver, \ - driver) +#define to_usb_device_driver(d) container_of_const(d, struct usb_device_driver, driver) /** * struct usb_class_driver - identifies a USB driver that wants to use the USB major number diff --git a/include/linux/usb/chipidea.h 
b/include/linux/usb/chipidea.h index 5a7f96684ea2..ebdfef124b2b 100644 --- a/include/linux/usb/chipidea.h +++ b/include/linux/usb/chipidea.h @@ -65,6 +65,7 @@ struct ci_hdrc_platform_data { #define CI_HDRC_PHY_VBUS_CONTROL BIT(16) #define CI_HDRC_HAS_PORTSC_PEC_MISSED BIT(17) #define CI_HDRC_FORCE_VBUS_ACTIVE_ALWAYS BIT(18) +#define CI_HDRC_HAS_SHORT_PKT_LIMIT BIT(19) enum usb_dr_mode dr_mode; #define CI_HDRC_CONTROLLER_RESET_EVENT 0 #define CI_HDRC_CONTROLLER_STOPPED_EVENT 1 diff --git a/include/linux/usb/storage.h b/include/linux/usb/storage.h index 2827ce72e502..8539956bc2be 100644 --- a/include/linux/usb/storage.h +++ b/include/linux/usb/storage.h @@ -53,7 +53,7 @@ struct bulk_cb_wrap { __le32 Signature; /* contains 'USBC' */ __u32 Tag; /* unique per command id */ __le32 DataTransferLength; /* size of data */ - __u8 Flags; /* direction in bit 0 */ + __u8 Flags; /* direction in bit 7 */ __u8 Lun; /* LUN normally 0 */ __u8 Length; /* length of the CDB */ __u8 CDB[16]; /* max command */ diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h index 549275f8ac1b..d616b8807000 100644 --- a/include/linux/usb/typec.h +++ b/include/linux/usb/typec.h @@ -87,6 +87,17 @@ enum typec_orientation { TYPEC_ORIENTATION_REVERSE, }; +enum usb_mode { + USB_MODE_NONE, + USB_MODE_USB2, + USB_MODE_USB3, + USB_MODE_USB4 +}; + +#define USB_CAPABILITY_USB2 BIT(0) +#define USB_CAPABILITY_USB3 BIT(1) +#define USB_CAPABILITY_USB4 BIT(2) + /* * struct enter_usb_data - Enter_USB Message details * @eudo: Enter_USB Data Object @@ -209,6 +220,7 @@ struct typec_cable_desc { * @accessory: Audio, Debug or none. * @identity: Discover Identity command data * @pd_revision: USB Power Delivery Specification Revision if supported + * @usb_capability: Supported USB Modes * @attach: Notification about attached USB device * @deattach: Notification about removed USB device * @@ -226,6 +238,7 @@ struct typec_partner_desc { enum typec_accessory accessory; struct usb_pd_identity *identity; u16 pd_revision; /* 0300H = "3.0" */ + u8 usb_capability; void (*attach)(struct typec_partner *partner, struct device *dev); void (*deattach)(struct typec_partner *partner, struct device *dev); @@ -240,6 +253,8 @@ struct typec_partner_desc { * @port_type_set: Set port type * @pd_get: Get available USB Power Delivery Capabilities. * @pd_set: Set USB Power Delivery Capabilities. + * @default_usb_mode_set: USB Mode to be used by default with Enter_USB Message + * @enter_usb_mode: Change the active USB Mode */ struct typec_operations { int (*try_role)(struct typec_port *port, int role); @@ -250,6 +265,8 @@ struct typec_operations { enum typec_port_type type); struct usb_power_delivery **(*pd_get)(struct typec_port *port); int (*pd_set)(struct typec_port *port, struct usb_power_delivery *pd); + int (*default_usb_mode_set)(struct typec_port *port, enum usb_mode mode); + int (*enter_usb_mode)(struct typec_port *port, enum usb_mode mode); }; enum usb_pd_svdm_ver { @@ -267,6 +284,7 @@ enum usb_pd_svdm_ver { * @svdm_version: USB PD Structured VDM version if supported * @prefer_role: Initial role preference (DRP ports). 
* @accessory: Supported Accessory Modes + * @usb_capability: Supported USB Modes * @fwnode: Optional fwnode of the port * @driver_data: Private pointer for driver specific info * @pd: Optional USB Power Delivery Support @@ -283,6 +301,7 @@ struct typec_capability { int prefer_role; enum typec_accessory accessory[TYPEC_MAX_ACCESSORY]; unsigned int orientation_aware:1; + u8 usb_capability; struct fwnode_handle *fwnode; void *driver_data; @@ -350,6 +369,9 @@ int typec_port_set_usb_power_delivery(struct typec_port *port, struct usb_power_ int typec_partner_set_usb_power_delivery(struct typec_partner *partner, struct usb_power_delivery *pd); +void typec_partner_set_usb_mode(struct typec_partner *partner, enum usb_mode usb_mode); +void typec_port_set_usb_mode(struct typec_port *port, enum usb_mode mode); + /** * struct typec_connector - Representation of Type-C port for external drivers * @attach: notification about device removal diff --git a/include/linux/usb/uvc.h b/include/linux/usb/uvc.h index 88d96095bcb1..bce95153e5a6 100644 --- a/include/linux/usb/uvc.h +++ b/include/linux/usb/uvc.h @@ -118,6 +118,9 @@ #define UVC_GUID_FORMAT_Y12I \ { 'Y', '1', '2', 'I', 0x00, 0x00, 0x10, 0x00, \ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_Y16I \ + { 'Y', '1', '6', 'I', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} #define UVC_GUID_FORMAT_Z16 \ { 'Z', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} @@ -140,6 +143,9 @@ #define UVC_GUID_FORMAT_D3DFMT_L8 \ {0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, \ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_D3DFMT_R5G6B5 \ + {0x7b, 0xeb, 0x36, 0xe4, 0x4f, 0x52, 0xce, 0x11, \ + 0x9f, 0x53, 0x00, 0x20, 0xaf, 0x0b, 0xa7, 0x70} #define UVC_GUID_FORMAT_KSMEDIA_L8_IR \ {0x32, 0x00, 0x00, 0x00, 0x02, 0x00, 0x10, 0x00, \ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} diff --git a/include/linux/util_macros.h b/include/linux/util_macros.h index 6bb460c3e818..825487fb66fa 100644 --- a/include/linux/util_macros.h +++ b/include/linux/util_macros.h @@ -4,19 +4,6 @@ #include <linux/math.h> -#define __find_closest(x, a, as, op) \ -({ \ - typeof(as) __fc_i, __fc_as = (as) - 1; \ - typeof(x) __fc_x = (x); \ - typeof(*a) const *__fc_a = (a); \ - for (__fc_i = 0; __fc_i < __fc_as; __fc_i++) { \ - if (__fc_x op DIV_ROUND_CLOSEST(__fc_a[__fc_i] + \ - __fc_a[__fc_i + 1], 2)) \ - break; \ - } \ - (__fc_i); \ -}) - /** * find_closest - locate the closest element in a sorted array * @x: The reference value. @@ -25,8 +12,27 @@ * @as: Size of 'a'. * * Returns the index of the element closest to 'x'. + * Note: If using an array of negative numbers (or mixed positive numbers), + * then be sure that 'x' is of a signed-type to get good results. */ -#define find_closest(x, a, as) __find_closest(x, a, as, <=) +#define find_closest(x, a, as) \ +({ \ + typeof(as) __fc_i, __fc_as = (as) - 1; \ + long __fc_mid_x, __fc_x = (x); \ + long __fc_left, __fc_right; \ + typeof(*a) const *__fc_a = (a); \ + for (__fc_i = 0; __fc_i < __fc_as; __fc_i++) { \ + __fc_mid_x = (__fc_a[__fc_i] + __fc_a[__fc_i + 1]) / 2; \ + if (__fc_x <= __fc_mid_x) { \ + __fc_left = __fc_x - __fc_a[__fc_i]; \ + __fc_right = __fc_a[__fc_i + 1] - __fc_x; \ + if (__fc_right < __fc_left) \ + __fc_i++; \ + break; \ + } \ + } \ + (__fc_i); \ +}) /** * find_closest_descending - locate the closest element in a sorted array @@ -36,9 +42,27 @@ * @as: Size of 'a'. 
* * Similar to find_closest() but 'a' is expected to be sorted in descending - * order. + * order. The iteration is done in reverse order, so that the comparison + * of '__fc_right' & '__fc_left' also works for unsigned numbers. */ -#define find_closest_descending(x, a, as) __find_closest(x, a, as, >=) +#define find_closest_descending(x, a, as) \ +({ \ + typeof(as) __fc_i, __fc_as = (as) - 1; \ + long __fc_mid_x, __fc_x = (x); \ + long __fc_left, __fc_right; \ + typeof(*a) const *__fc_a = (a); \ + for (__fc_i = __fc_as; __fc_i >= 1; __fc_i--) { \ + __fc_mid_x = (__fc_a[__fc_i] + __fc_a[__fc_i - 1]) / 2; \ + if (__fc_x <= __fc_mid_x) { \ + __fc_left = __fc_x - __fc_a[__fc_i]; \ + __fc_right = __fc_a[__fc_i - 1] - __fc_x; \ + if (__fc_right < __fc_left) \ + __fc_i--; \ + break; \ + } \ + } \ + (__fc_i); \ +}) /** * is_insidevar - check if the @ptr points inside the @var memory range. diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h index a54046bf37e5..939ceabcaf06 100644 --- a/include/linux/vermagic.h +++ b/include/linux/vermagic.h @@ -15,10 +15,10 @@ #else #define MODULE_VERMAGIC_SMP "" #endif -#ifdef CONFIG_PREEMPT_BUILD -#define MODULE_VERMAGIC_PREEMPT "preempt " -#elif defined(CONFIG_PREEMPT_RT) +#ifdef CONFIG_PREEMPT_RT #define MODULE_VERMAGIC_PREEMPT "preempt_rt " +#elif defined(CONFIG_PREEMPT_BUILD) +#define MODULE_VERMAGIC_PREEMPT "preempt " #else #define MODULE_VERMAGIC_PREEMPT "" #endif diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 306137a15d07..dd88682e27e3 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -56,6 +56,17 @@ int virtqueue_add_inbuf_ctx(struct virtqueue *vq, void *ctx, gfp_t gfp); +int virtqueue_add_inbuf_premapped(struct virtqueue *vq, + struct scatterlist *sg, unsigned int num, + void *data, + void *ctx, + gfp_t gfp); + +int virtqueue_add_outbuf_premapped(struct virtqueue *vq, + struct scatterlist *sg, unsigned int num, + void *data, + gfp_t gfp); + int virtqueue_add_sgs(struct virtqueue *vq, struct scatterlist *sgs[], unsigned int out_sgs, @@ -82,8 +93,6 @@ bool virtqueue_enable_cb(struct virtqueue *vq); unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq); -int virtqueue_set_dma_premapped(struct virtqueue *_vq); - bool virtqueue_poll(struct virtqueue *vq, unsigned); bool virtqueue_enable_cb_delayed(struct virtqueue *vq); @@ -100,9 +109,11 @@ dma_addr_t virtqueue_get_avail_addr(const struct virtqueue *vq); dma_addr_t virtqueue_get_used_addr(const struct virtqueue *vq); int virtqueue_resize(struct virtqueue *vq, u32 num, - void (*recycle)(struct virtqueue *vq, void *buf)); + void (*recycle)(struct virtqueue *vq, void *buf), + void (*recycle_done)(struct virtqueue *vq)); int virtqueue_reset(struct virtqueue *vq, - void (*recycle)(struct virtqueue *vq, void *buf)); + void (*recycle)(struct virtqueue *vq, void *buf), + void (*recycle_done)(struct virtqueue *vq)); struct virtio_admin_cmd { __le16 opcode; @@ -111,6 +122,7 @@ struct virtio_admin_cmd { struct scatterlist *data_sg; struct scatterlist *result_sg; struct completion completion; + u32 result_sg_size; int ret; }; diff --git a/include/linux/virtio_pci_admin.h b/include/linux/virtio_pci_admin.h index f4a100a0fe2e..dffc92c17ad2 100644 --- a/include/linux/virtio_pci_admin.h +++ b/include/linux/virtio_pci_admin.h @@ -20,4 +20,15 @@ int virtio_pci_admin_legacy_io_notify_info(struct pci_dev *pdev, u64 *bar_offset); #endif +bool virtio_pci_admin_has_dev_parts(struct pci_dev *pdev); +int virtio_pci_admin_mode_set(struct pci_dev *pdev, u8 mode); +int 
virtio_pci_admin_obj_create(struct pci_dev *pdev, u16 obj_type, u8 operation_type, + u32 *obj_id); +int virtio_pci_admin_obj_destroy(struct pci_dev *pdev, u16 obj_type, u32 id); +int virtio_pci_admin_dev_parts_metadata_get(struct pci_dev *pdev, u16 obj_type, + u32 id, u8 metadata_type, u32 *out); +int virtio_pci_admin_dev_parts_get(struct pci_dev *pdev, u16 obj_type, u32 id, + u8 get_type, struct scatterlist *res_sg, u32 *res_size); +int virtio_pci_admin_dev_parts_set(struct pci_dev *pdev, struct scatterlist *data_sg); + #endif /* _LINUX_VIRTIO_PCI_ADMIN_H */ diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index aed952d04132..f70d0958095c 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -134,6 +134,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, #ifdef CONFIG_SWAP SWAP_RA, SWAP_RA_HIT, + SWPIN_ZERO, + SWPOUT_ZERO, #ifdef CONFIG_KSM KSM_SWPIN_COPY, #endif diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index ad2ce7a6ab7a..31e9ffd936e3 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -134,12 +134,6 @@ extern void vm_unmap_ram(const void *mem, unsigned int count); extern void *vm_map_ram(struct page **pages, unsigned int count, int node); extern void vm_unmap_aliases(void); -#ifdef CONFIG_MMU -extern unsigned long vmalloc_nr_pages(void); -#else -static inline unsigned long vmalloc_nr_pages(void) { return 0; } -#endif - extern void *vmalloc_noprof(unsigned long size) __alloc_size(1); #define vmalloc(...) alloc_hooks(vmalloc_noprof(__VA_ARGS__)) @@ -208,6 +202,9 @@ extern int remap_vmalloc_range_partial(struct vm_area_struct *vma, extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, unsigned long pgoff); +int vmap_pages_range(unsigned long addr, unsigned long end, pgprot_t prot, + struct page **pages, unsigned int page_shift); + /* * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings() @@ -266,12 +263,29 @@ static inline bool is_vm_area_hugepages(const void *addr) #endif } +/* for /proc/kcore */ +long vread_iter(struct iov_iter *iter, const char *addr, size_t count); + +/* + * Internals. Don't use.. + */ +__init void vm_area_add_early(struct vm_struct *vm); +__init void vm_area_register_early(struct vm_struct *vm, size_t align); + +int register_vmap_purge_notifier(struct notifier_block *nb); +int unregister_vmap_purge_notifier(struct notifier_block *nb); + #ifdef CONFIG_MMU +#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START) + +unsigned long vmalloc_nr_pages(void); + int vm_area_map_pages(struct vm_struct *area, unsigned long start, unsigned long end, struct page **pages); void vm_area_unmap_pages(struct vm_struct *area, unsigned long start, unsigned long end); void vunmap_range(unsigned long addr, unsigned long end); + static inline void set_vm_flush_reset_perms(void *addr) { struct vm_struct *vm = find_vm_area(addr); @@ -279,24 +293,14 @@ static inline void set_vm_flush_reset_perms(void *addr) if (vm) vm->flags |= VM_FLUSH_RESET_PERMS; } +#else /* !CONFIG_MMU */ +#define VMALLOC_TOTAL 0UL -#else -static inline void set_vm_flush_reset_perms(void *addr) -{ -} -#endif - -/* for /proc/kcore */ -extern long vread_iter(struct iov_iter *iter, const char *addr, size_t count); - -/* - * Internals. Don't use.. 
- */ -extern __init void vm_area_add_early(struct vm_struct *vm); -extern __init void vm_area_register_early(struct vm_struct *vm, size_t align); +static inline unsigned long vmalloc_nr_pages(void) { return 0; } +static inline void set_vm_flush_reset_perms(void *addr) {} +#endif /* CONFIG_MMU */ -#ifdef CONFIG_SMP -# ifdef CONFIG_MMU +#if defined(CONFIG_MMU) && defined(CONFIG_SMP) struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, const size_t *sizes, int nr_vms, size_t align); @@ -311,22 +315,9 @@ pcpu_get_vm_areas(const unsigned long *offsets, return NULL; } -static inline void -pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) -{ -} -# endif -#endif - -#ifdef CONFIG_MMU -#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START) -#else -#define VMALLOC_TOTAL 0UL +static inline void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) {} #endif -int register_vmap_purge_notifier(struct notifier_block *nb); -int unregister_vmap_purge_notifier(struct notifier_block *nb); - #if defined(CONFIG_MMU) && defined(CONFIG_PRINTK) bool vmalloc_dump_obj(void *object); #else diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index d2761bf8ff32..9f3a04345b86 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -515,7 +515,7 @@ static inline const char *node_stat_name(enum node_stat_item item) static inline const char *lru_list_name(enum lru_list lru) { - return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_" + return node_stat_name(NR_LRU_BASE + (enum node_stat_item)lru) + 3; // skip "nr_" } #if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG) diff --git a/include/linux/vt_buffer.h b/include/linux/vt_buffer.h index 919d999a8c1d..b6eeb8cb6070 100644 --- a/include/linux/vt_buffer.h +++ b/include/linux/vt_buffer.h @@ -28,45 +28,21 @@ #ifndef VT_BUF_HAVE_MEMSETW static inline void scr_memsetw(u16 *s, u16 c, unsigned int count) { -#ifdef VT_BUF_HAVE_RW - count /= 2; - while (count--) - scr_writew(c, s++); -#else memset16(s, c, count / 2); -#endif } #endif #ifndef VT_BUF_HAVE_MEMCPYW static inline void scr_memcpyw(u16 *d, const u16 *s, unsigned int count) { -#ifdef VT_BUF_HAVE_RW - count /= 2; - while (count--) - scr_writew(scr_readw(s++), d++); -#else memcpy(d, s, count); -#endif } #endif #ifndef VT_BUF_HAVE_MEMMOVEW static inline void scr_memmovew(u16 *d, const u16 *s, unsigned int count) { -#ifdef VT_BUF_HAVE_RW - if (d < s) - scr_memcpyw(d, s, count); - else { - count /= 2; - d += count; - s += count; - while (count--) - scr_writew(scr_readw(--s), --d); - } -#else memmove(d, s, count); -#endif } #endif diff --git a/include/linux/wait.h b/include/linux/wait.h index 8aa3372f21a0..6d90ad974408 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -221,6 +221,7 @@ void __wake_up_pollfree(struct wait_queue_head *wq_head); #define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL) #define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1) #define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0) +#define wake_up_sync(x) __wake_up_sync(x, TASK_NORMAL) #define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL) #define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL) @@ -541,8 +542,8 @@ do { \ int __ret = 0; \ struct hrtimer_sleeper __t; \ \ - hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC, \ - HRTIMER_MODE_REL); \ + hrtimer_setup_sleeper_on_stack(&__t, CLOCK_MONOTONIC, \ + HRTIMER_MODE_REL); \ if ((timeout) != KTIME_MAX) { \ hrtimer_set_expires_range_ns(&__t.timer, timeout, \ 
current->timer_slack_ns); \ diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h index 7725b7579b78..9e29d79fc790 100644 --- a/include/linux/wait_bit.h +++ b/include/linux/wait_bit.h @@ -8,7 +8,7 @@ #include <linux/wait.h> struct wait_bit_key { - void *flags; + unsigned long *flags; int bit_nr; unsigned long timeout; }; @@ -23,14 +23,14 @@ struct wait_bit_queue_entry { typedef int wait_bit_action_f(struct wait_bit_key *key, int mode); -void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit); +void __wake_up_bit(struct wait_queue_head *wq_head, unsigned long *word, int bit); int __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode); int __wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode); -void wake_up_bit(void *word, int bit); -int out_of_line_wait_on_bit(void *word, int, wait_bit_action_f *action, unsigned int mode); -int out_of_line_wait_on_bit_timeout(void *word, int, wait_bit_action_f *action, unsigned int mode, unsigned long timeout); -int out_of_line_wait_on_bit_lock(void *word, int, wait_bit_action_f *action, unsigned int mode); -struct wait_queue_head *bit_waitqueue(void *word, int bit); +void wake_up_bit(unsigned long *word, int bit); +int out_of_line_wait_on_bit(unsigned long *word, int, wait_bit_action_f *action, unsigned int mode); +int out_of_line_wait_on_bit_timeout(unsigned long *word, int, wait_bit_action_f *action, unsigned int mode, unsigned long timeout); +int out_of_line_wait_on_bit_lock(unsigned long *word, int, wait_bit_action_f *action, unsigned int mode); +struct wait_queue_head *bit_waitqueue(unsigned long *word, int bit); extern void __init wait_bit_init(void); int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key); @@ -49,23 +49,24 @@ int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync extern int bit_wait(struct wait_bit_key *key, int mode); extern int bit_wait_io(struct wait_bit_key *key, int mode); extern int bit_wait_timeout(struct wait_bit_key *key, int mode); -extern int bit_wait_io_timeout(struct wait_bit_key *key, int mode); /** * wait_on_bit - wait for a bit to be cleared - * @word: the word being waited on, a kernel virtual address - * @bit: the bit of the word being waited on + * @word: the address containing the bit being waited on + * @bit: the bit at that address being waited on * @mode: the task state to sleep in * - * There is a standard hashed waitqueue table for generic use. This - * is the part of the hashtable's accessor API that waits on a bit. - * For instance, if one were to have waiters on a bitflag, one would - * call wait_on_bit() in threads waiting for the bit to clear. - * One uses wait_on_bit() where one is waiting for the bit to clear, - * but has no intention of setting it. - * Returned value will be zero if the bit was cleared, or non-zero - * if the process received a signal and the mode permitted wakeup - * on that signal. + * Wait for the given bit in an unsigned long or bitmap (see DECLARE_BITMAP()) + * to be cleared. The clearing of the bit must be signalled with + * wake_up_bit(), often as clear_and_wake_up_bit(). + * + * The process will wait on a waitqueue selected by hash from a shared + * pool. It will only be woken on a wake_up for the target bit, even + * if other processes on the same queue are waiting for other bits. 
+ * + * Returned value will be zero if the bit was cleared in which case the + * call has ACQUIRE semantics, or %-EINTR if the process received a + * signal and the mode permitted wake up on that signal. */ static inline int wait_on_bit(unsigned long *word, int bit, unsigned mode) @@ -80,17 +81,20 @@ wait_on_bit(unsigned long *word, int bit, unsigned mode) /** * wait_on_bit_io - wait for a bit to be cleared - * @word: the word being waited on, a kernel virtual address - * @bit: the bit of the word being waited on + * @word: the address containing the bit being waited on + * @bit: the bit at that address being waited on * @mode: the task state to sleep in * - * Use the standard hashed waitqueue table to wait for a bit - * to be cleared. This is similar to wait_on_bit(), but calls - * io_schedule() instead of schedule() for the actual waiting. + * Wait for the given bit in an unsigned long or bitmap (see DECLARE_BITMAP()) + * to be cleared. The clearing of the bit must be signalled with + * wake_up_bit(), often as clear_and_wake_up_bit(). * - * Returned value will be zero if the bit was cleared, or non-zero - * if the process received a signal and the mode permitted wakeup - * on that signal. + * This is similar to wait_on_bit(), but calls io_schedule() instead of + * schedule() for the actual waiting. + * + * Returned value will be zero if the bit was cleared in which case the + * call has ACQUIRE semantics, or %-EINTR if the process received a + * signal and the mode permitted wake up on that signal. */ static inline int wait_on_bit_io(unsigned long *word, int bit, unsigned mode) @@ -104,19 +108,24 @@ wait_on_bit_io(unsigned long *word, int bit, unsigned mode) } /** - * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses - * @word: the word being waited on, a kernel virtual address - * @bit: the bit of the word being waited on + * wait_on_bit_timeout - wait for a bit to be cleared or a timeout to elapse + * @word: the address containing the bit being waited on + * @bit: the bit at that address being waited on * @mode: the task state to sleep in * @timeout: timeout, in jiffies * - * Use the standard hashed waitqueue table to wait for a bit - * to be cleared. This is similar to wait_on_bit(), except also takes a - * timeout parameter. + * Wait for the given bit in an unsigned long or bitmap (see + * DECLARE_BITMAP()) to be cleared, or for a timeout to expire. The + * clearing of the bit must be signalled with wake_up_bit(), often as + * clear_and_wake_up_bit(). + * + * This is similar to wait_on_bit(), except it also takes a timeout + * parameter. * - * Returned value will be zero if the bit was cleared before the - * @timeout elapsed, or non-zero if the @timeout elapsed or process - * received a signal and the mode permitted wakeup on that signal. + * Returned value will be zero if the bit was cleared in which case the + * call has ACQUIRE semantics, or %-EINTR if the process received a + * signal and the mode permitted wake up on that signal, or %-EAGAIN if the + * timeout elapsed. 
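To make the reworked unsigned long * wait_on_bit() prototypes concrete, here is a minimal usage sketch (not part of the patch); the my_flags word and MY_FLAG_BUSY bit are illustrative names only.

#include <linux/wait_bit.h>
#include <linux/sched.h>

#define MY_FLAG_BUSY    0                       /* illustrative bit number */
static unsigned long my_flags;                  /* illustrative flag word */

/* Waiter: sleep until the busy bit is cleared, allowing signals. */
static int my_wait_until_idle(void)
{
        return wait_on_bit(&my_flags, MY_FLAG_BUSY, TASK_INTERRUPTIBLE);
}

/* Waker: clear the bit with RELEASE semantics and wake any waiters. */
static void my_mark_idle(void)
{
        clear_and_wake_up_bit(MY_FLAG_BUSY, &my_flags);
}

With the stricter unsigned long * type, passing a pointer to some other integer type now draws a compiler warning instead of silently relying on layout.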
*/ static inline int wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, @@ -132,19 +141,21 @@ wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, /** * wait_on_bit_action - wait for a bit to be cleared - * @word: the word being waited on, a kernel virtual address - * @bit: the bit of the word being waited on + * @word: the address containing the bit waited on + * @bit: the bit at that address being waited on * @action: the function used to sleep, which may take special actions * @mode: the task state to sleep in * - * Use the standard hashed waitqueue table to wait for a bit - * to be cleared, and allow the waiting action to be specified. - * This is like wait_on_bit() but allows fine control of how the waiting - * is done. + * Wait for the given bit in an unsigned long or bitmap (see DECLARE_BITMAP()) + * to be cleared. The clearing of the bit must be signalled with + * wake_up_bit(), often as clear_and_wake_up_bit(). + * + * This is similar to wait_on_bit(), but calls @action() instead of + * schedule() for the actual waiting. * - * Returned value will be zero if the bit was cleared, or non-zero - * if the process received a signal and the mode permitted wakeup - * on that signal. + * Returned value will be zero if the bit was cleared in which case the + * call has ACQUIRE semantics, or the error code returned by @action if + * that call returned non-zero. */ static inline int wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action, @@ -157,23 +168,22 @@ wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action, } /** - * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it - * @word: the word being waited on, a kernel virtual address - * @bit: the bit of the word being waited on + * wait_on_bit_lock - wait for a bit to be cleared, then set it + * @word: the address containing the bit being waited on + * @bit: the bit of the word being waited on and set * @mode: the task state to sleep in * - * There is a standard hashed waitqueue table for generic use. This - * is the part of the hashtable's accessor API that waits on a bit - * when one intends to set it, for instance, trying to lock bitflags. - * For instance, if one were to have waiters trying to set bitflag - * and waiting for it to clear before setting it, one would call - * wait_on_bit() in threads waiting to be able to set the bit. - * One uses wait_on_bit_lock() where one is waiting for the bit to - * clear with the intention of setting it, and when done, clearing it. + * Wait for the given bit in an unsigned long or bitmap (see + * DECLARE_BITMAP()) to be cleared. The clearing of the bit must be + * signalled with wake_up_bit(), often as clear_and_wake_up_bit(). As + * soon as it is clear, atomically set it and return. * - * Returns zero if the bit was (eventually) found to be clear and was - * set. Returns non-zero if a signal was delivered to the process and - * the @mode allows that signal to wake the process. + * This is similar to wait_on_bit(), but sets the bit before returning. + * + * Returned value will be zero if the bit was successfully set in which + * case the call has the same memory sequencing semantics as + * test_and_clear_bit(), or %-EINTR if the process received a signal and + * the mode permitted wake up on that signal. 
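A hedged sketch of wait_on_bit_lock() used as a simple bit lock follows; the my_state word and MY_STATE_LOCKED bit are illustrative, not names from this patch.

#include <linux/wait_bit.h>
#include <linux/sched.h>

#define MY_STATE_LOCKED 0                       /* illustrative lock bit */
static unsigned long my_state;                  /* illustrative state word */

/* Acquire: wait for the bit to clear, then atomically set it. */
static void my_state_lock(void)
{
        wait_on_bit_lock(&my_state, MY_STATE_LOCKED, TASK_UNINTERRUPTIBLE);
}

/* Release: clear the bit and wake the next waiter. */
static void my_state_unlock(void)
{
        clear_and_wake_up_bit(MY_STATE_LOCKED, &my_state);
}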
*/ static inline int wait_on_bit_lock(unsigned long *word, int bit, unsigned mode) @@ -185,15 +195,18 @@ wait_on_bit_lock(unsigned long *word, int bit, unsigned mode) } /** - * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it - * @word: the word being waited on, a kernel virtual address - * @bit: the bit of the word being waited on + * wait_on_bit_lock_io - wait for a bit to be cleared, then set it + * @word: the address containing the bit being waited on + * @bit: the bit of the word being waited on and set * @mode: the task state to sleep in * - * Use the standard hashed waitqueue table to wait for a bit - * to be cleared and then to atomically set it. This is similar - * to wait_on_bit(), but calls io_schedule() instead of schedule() - * for the actual waiting. + * Wait for the given bit in an unsigned long or bitmap (see + * DECLARE_BITMAP()) to be cleared. The clearing of the bit must be + * signalled with wake_up_bit(), often as clear_and_wake_up_bit(). As + * soon as it is clear, atomically set it and return. + * + * This is similar to wait_on_bit_lock(), but calls io_schedule() instead + * of schedule(). * * Returns zero if the bit was (eventually) found to be clear and was * set. Returns non-zero if a signal was delivered to the process and @@ -209,21 +222,19 @@ wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode) } /** - * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it - * @word: the word being waited on, a kernel virtual address - * @bit: the bit of the word being waited on + * wait_on_bit_lock_action - wait for a bit to be cleared, then set it + * @word: the address containing the bit being waited on + * @bit: the bit of the word being waited on and set * @action: the function used to sleep, which may take special actions * @mode: the task state to sleep in * - * Use the standard hashed waitqueue table to wait for a bit - * to be cleared and then to set it, and allow the waiting action - * to be specified. - * This is like wait_on_bit() but allows fine control of how the waiting - * is done. + * This is similar to wait_on_bit_lock(), but calls @action() instead of + * schedule() for the actual waiting. * - * Returns zero if the bit was (eventually) found to be clear and was - * set. Returns non-zero if a signal was delivered to the process and - * the @mode allows that signal to wake the process. + * Returned value will be zero if the bit was successfully set in which + * case the call has the same memory sequencing semantics as + * test_and_clear_bit(), or the error code returned by @action if that + * call returned non-zero. */ static inline int wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action, @@ -269,7 +280,26 @@ __out: __ret; \ #define __wait_var_event(var, condition) \ ___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ schedule()) +#define __wait_var_event_io(var, condition) \ + ___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ + io_schedule()) +/** + * wait_var_event - wait for a variable to be updated and notified + * @var: the address of variable being waited on + * @condition: the condition to wait for + * + * Wait for a @condition to be true, only re-checking when a wake up is + * received for the given @var (an arbitrary kernel address which need + * not be directly related to the given condition, but usually is). + * + * The process will wait on a waitqueue selected by hash from a shared + * pool. 
It will only be woken on a wake_up for the given address. + * + * The condition should normally use smp_load_acquire() or a similarly + * ordered access to ensure that any changes to memory made before the + * condition became true will be visible after the wait completes. + */ #define wait_var_event(var, condition) \ do { \ might_sleep(); \ @@ -278,10 +308,56 @@ do { \ __wait_var_event(var, condition); \ } while (0) +/** + * wait_var_event_io - wait for a variable to be updated and notified + * @var: the address of variable being waited on + * @condition: the condition to wait for + * + * Wait for an IO related @condition to be true, only re-checking when a + * wake up is received for the given @var (an arbitrary kernel address + * which need not be directly related to the given condition, but + * usually is). + * + * The process will wait on a waitqueue selected by hash from a shared + * pool. It will only be woken on a wake_up for the given address. + * + * This is similar to wait_var_event(), but calls io_schedule() instead + * of schedule(). + * + * The condition should normally use smp_load_acquire() or a similarly + * ordered access to ensure that any changes to memory made before the + * condition became true will be visible after the wait completes. + */ +#define wait_var_event_io(var, condition) \ +do { \ + might_sleep(); \ + if (condition) \ + break; \ + __wait_var_event_io(var, condition); \ +} while (0) + #define __wait_var_event_killable(var, condition) \ ___wait_var_event(var, condition, TASK_KILLABLE, 0, 0, \ schedule()) +/** + * wait_var_event_killable - wait for a variable to be updated and notified + * @var: the address of variable being waited on + * @condition: the condition to wait for + * + * Wait for a @condition to be true or a fatal signal to be received, + * only re-checking the condition when a wake up is received for the given + * @var (an arbitrary kernel address which need not be directly related + * to the given condition, but usually is). + * + * This is similar to wait_var_event() but returns a value which is + * 0 if the condition became true, or %-ERESTARTSYS if a fatal signal + * was received. + * + * The condition should normally use smp_load_acquire() or a similarly + * ordered access to ensure that any changes to memory made before the + * condition became true will be visible after the wait completes. + */ #define wait_var_event_killable(var, condition) \ ({ \ int __ret = 0; \ @@ -296,6 +372,26 @@ do { \ TASK_UNINTERRUPTIBLE, 0, timeout, \ __ret = schedule_timeout(__ret)) +/** + * wait_var_event_timeout - wait for a variable to be updated or a timeout to expire + * @var: the address of variable being waited on + * @condition: the condition to wait for + * @timeout: maximum time to wait in jiffies + * + * Wait for a @condition to be true or a timeout to expire, only + * re-checking the condition when a wake up is received for the given + * @var (an arbitrary kernel address which need not be directly related + * to the given condition, but usually is). + * + * This is similar to wait_var_event() but returns a value which is 0 if + * the timeout expired and the condition was still false, or the + * remaining time left in the timeout (but at least 1) if the condition + * was found to be true. + * + * The condition should normally use smp_load_acquire() or a similarly + * ordered access to ensure that any changes to memory made before the + * condition became true will be visible after the wait completes. 
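The following minimal sketch pairs wait_var_event() with a release store on the waker side, as the comments above recommend; my_done is an illustrative variable, and the store_release_wake_up() helper added later in this hunk wraps the same waker-side pattern.

#include <linux/wait_bit.h>
#include <linux/atomic.h>

static int my_done;                             /* illustrative completion flag */

/* Waiter: sleep until my_done becomes non-zero. */
static void my_wait_done(void)
{
        wait_var_event(&my_done, smp_load_acquire(&my_done));
}

/* Waker: publish the new value, then wake anyone waiting on &my_done. */
static void my_set_done(void)
{
        smp_store_release(&my_done, 1);
        smp_mb();       /* order the store against the waiter's condition check */
        wake_up_var(&my_done);
}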
+ */ #define wait_var_event_timeout(var, condition, timeout) \ ({ \ long __ret = timeout; \ @@ -309,6 +405,23 @@ do { \ ___wait_var_event(var, condition, TASK_INTERRUPTIBLE, 0, 0, \ schedule()) +/** + * wait_var_event_interruptible - wait for a variable to be updated and notified + * @var: the address of variable being waited on + * @condition: the condition to wait for + * + * Wait for a @condition to be true or a signal to be received, only + * re-checking the condition when a wake up is received for the given + * @var (an arbitrary kernel address which need not be directly related + * to the given condition, but usually is). + * + * This is similar to wait_var_event() but returns a value which is 0 if + * the condition became true, or %-ERESTARTSYS if a signal was received. + * + * The condition should normally use smp_load_acquire() or a similarly + * ordered access to ensure that any changes to memory made before the + * condition became true will be visible after the wait completes. + */ #define wait_var_event_interruptible(var, condition) \ ({ \ int __ret = 0; \ @@ -319,15 +432,122 @@ do { \ }) /** - * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit + * wait_var_event_any_lock - wait for a variable to be updated under a lock + * @var: the address of the variable being waited on + * @condition: condition to wait for + * @lock: the object that is locked to protect updates to the variable + * @type: prefix on lock and unlock operations + * @state: waiting state, %TASK_UNINTERRUPTIBLE etc. + * + * Wait for a condition which can only be reliably tested while holding + * a lock. The variables assessed in the condition will normally be updated + * under the same lock, and the wake up should be signalled with + * wake_up_var_locked() under the same lock. + * + * This is similar to wait_var_event(), but assumes a lock is held + * while calling this function and while updating the variable. + * + * This must be called while the given lock is held and the lock will be + * dropped when schedule() is called to wait for a wake up, and will be + * reclaimed before testing the condition again. The functions used to + * unlock and lock the object are constructed by appending _unlock and _lock + * to @type. + * + * Return %-ERESTARTSYS if a signal arrives which is allowed to interrupt + * the wait according to @state. + */ +#define wait_var_event_any_lock(var, condition, lock, type, state) \ +({ \ + int __ret = 0; \ + if (!(condition)) \ + __ret = ___wait_var_event(var, condition, state, 0, 0, \ + type ## _unlock(lock); \ + schedule(); \ + type ## _lock(lock)); \ + __ret; \ +}) + +/** + * wait_var_event_spinlock - wait for a variable to be updated under a spinlock + * @var: the address of the variable being waited on + * @condition: condition to wait for + * @lock: the spinlock which protects updates to the variable + * + * Wait for a condition which can only be reliably tested while holding + * a spinlock. The variables assessed in the condition will normally be updated + * under the same spinlock, and the wake up should be signalled with + * wake_up_var_locked() under the same spinlock. + * + * This is similar to wait_var_event(), but assumes a spinlock is held + * while calling this function and while updating the variable. + * + * This must be called while the given lock is held and the lock will be + * dropped when schedule() is called to wait for a wake up, and will be + * reclaimed before testing the condition again.
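A minimal sketch of the lock-protected variant, assuming an illustrative my_lock/my_users pair; wake_up_var_locked(), defined just below in this hunk, needs no extra barriers because the spinlock already orders the update and the wake up.

#include <linux/wait_bit.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);                /* illustrative lock */
static int my_users;                            /* protected by my_lock */

/* Waiter: sleep until the user count protected by my_lock drops to zero. */
static void my_wait_unused(void)
{
        spin_lock(&my_lock);
        wait_var_event_spinlock(&my_users, my_users == 0, &my_lock);
        spin_unlock(&my_lock);
}

/* Waker: update under the lock and wake waiters; no extra barriers needed. */
static void my_put_user(void)
{
        spin_lock(&my_lock);
        if (--my_users == 0)
                wake_up_var_locked(&my_users, &my_lock);
        spin_unlock(&my_lock);
}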
+ */ +#define wait_var_event_spinlock(var, condition, lock) \ + wait_var_event_any_lock(var, condition, lock, spin, TASK_UNINTERRUPTIBLE) + +/** + * wait_var_event_mutex - wait for a variable to be updated under a mutex + * @var: the address of the variable being waited on + * @condition: condition to wait for + * @lock: the mutex which protects updates to the variable + * + * Wait for a condition which can only be reliably tested while holding + * a mutex. The variables assessed in the condition will normally be + * updated under the same mutex, and the wake up should be signalled + * with wake_up_var_locked() under the same mutex. + * + * This is similar to wait_var_event(), but assumes a mutex is held + * while calling this function and while updating the variable. + * + * This must be called while the given mutex is held and the mutex will be + * dropped when schedule() is called to wait for a wake up, and will be + * reclaimed before testing the condition again. + */ +#define wait_var_event_mutex(var, condition, lock) \ + wait_var_event_any_lock(var, condition, lock, mutex, TASK_UNINTERRUPTIBLE) + +/** + * wake_up_var_protected - wake up waiters for a variable asserting that it is safe + * @var: the address of the variable being waited on + * @cond: the condition which affirms this is safe + * + * When waking waiters which use wait_var_event_any_lock() the waker must be + * holding the relevant lock to avoid races. This version of wake_up_var() + * asserts that the relevant lock is held and so no barrier is needed. + * The @cond is only tested when CONFIG_LOCKDEP is enabled. + */ +#define wake_up_var_protected(var, cond) \ +do { \ + lockdep_assert(cond); \ + wake_up_var(var); \ +} while (0) + +/** + * wake_up_var_locked - wake up waiters for a variable while holding a spinlock or mutex + * @var: the address of the variable being waited on + * @lock: The spinlock or mutex that protects the variable + * + * Send a wake up for the given variable which should be waited for with + * wait_var_event_spinlock() or wait_var_event_mutex(). Unlike wake_up_var(), + * no extra barriers are needed as the locking provides sufficient sequencing. + */ +#define wake_up_var_locked(var, lock) \ + wake_up_var_protected(var, lockdep_is_held(lock)) + +/** + * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit * @bit: the bit of the word being waited on - * @word: the word being waited on, a kernel virtual address + * @word: the address containing the bit being waited on * - * You can use this helper if bitflags are manipulated atomically rather than - * non-atomically under a lock. + * The designated bit is cleared and any tasks waiting in wait_on_bit() + * or similar will be woken. This call has RELEASE semantics so that + * any changes to memory made before this call are guaranteed to be visible + * after the corresponding wait_on_bit() completes. */ -static inline void clear_and_wake_up_bit(int bit, void *word) +static inline void clear_and_wake_up_bit(int bit, unsigned long *word) { clear_bit_unlock(bit, word); /* See wake_up_bit() for which memory barrier you need to use.
*/ @@ -335,4 +555,64 @@ static inline void clear_and_wake_up_bit(int bit, void *word) wake_up_bit(word, bit); } +/** + * test_and_clear_wake_up_bit - clear a bit if it was set: wake up anyone waiting on that bit + * @bit: the bit of the word being waited on + * @word: the address of memory containing that bit + * + * If the bit is set and can be atomically cleared, any tasks waiting in + * wait_on_bit() or similar will be woken. This call has the same + * complete ordering semantics as test_and_clear_bit(). Any changes to + * memory made before this call are guaranteed to be visible after the + * corresponding wait_on_bit() completes. + * + * Returns %true if the bit was successfully cleared and the wake up was sent. + */ +static inline bool test_and_clear_wake_up_bit(int bit, unsigned long *word) +{ + if (!test_and_clear_bit(bit, word)) + return false; + /* no extra barrier required */ + wake_up_bit(word, bit); + return true; +} + +/** + * atomic_dec_and_wake_up - decrement an atomic_t and if zero, wake up waiters + * @var: the variable to dec and test + * + * Decrements the atomic variable and if it reaches zero, sends a wake_up to any + * processes waiting on the variable. + * + * This function has the same complete ordering semantics as atomic_dec_and_test(). + * + * Returns %true if the variable reaches zero and the wake up was sent. + */ + +static inline bool atomic_dec_and_wake_up(atomic_t *var) +{ + if (!atomic_dec_and_test(var)) + return false; + /* No extra barrier required */ + wake_up_var(var); + return true; +} + +/** + * store_release_wake_up - update a variable and send a wake_up + * @var: the address of the variable to be updated and woken + * @val: the value to store in the variable. + * + * Store the given value in the variable and send a wake up to any tasks + * waiting on the variable. All necessary barriers are included to ensure + * the task calling wait_var_event() sees the new value and all values + * written to memory before this call. + */ +#define store_release_wake_up(var, val) \ +do { \ + smp_store_release(var, val); \ + smp_mb(); \ + wake_up_var(var); \ +} while (0) + #endif /* _LINUX_WAIT_BIT_H */ diff --git a/include/linux/wireless.h b/include/linux/wireless.h index e6e34d74dda0..03e5d3fe226d 100644 --- a/include/linux/wireless.h +++ b/include/linux/wireless.h @@ -21,8 +21,7 @@ struct compat_iw_point { __u16 length; __u16 flags; }; -#endif -#ifdef CONFIG_COMPAT + struct __compat_iw_event { __u16 len; /* Real length of this stuff */ __u16 cmd; /* Wireless IOCTL */ @@ -49,5 +48,5 @@ struct __compat_iw_event { #define IW_EV_COMPAT_POINT_LEN \ (IW_EV_COMPAT_LCP_LEN + sizeof(struct compat_iw_point) - \ IW_EV_COMPAT_POINT_OFF) -#endif +#endif /* CONFIG_COMPAT */ #endif /* _LINUX_WIRELESS_H */ diff --git a/include/linux/wmi.h b/include/linux/wmi.h index 3275470b5531..10751c8e5e6a 100644 --- a/include/linux/wmi.h +++ b/include/linux/wmi.h @@ -34,7 +34,7 @@ struct wmi_device { * * Cast a struct device to a struct wmi_device.
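To illustrate the new wait_bit.h helpers added in the hunk above, here is a minimal refcount-style sketch; my_refs and the function names are illustrative only. store_release_wake_up() covers the same waker-side pattern when the flag is an ordinary variable rather than an atomic_t.

#include <linux/wait_bit.h>
#include <linux/atomic.h>

static atomic_t my_refs = ATOMIC_INIT(1);       /* illustrative reference count */

/* Waiter: block until every reference has been dropped. */
static void my_wait_for_release(void)
{
        wait_var_event(&my_refs, atomic_read(&my_refs) == 0);
}

/* Putter: drop one reference; the last put wakes the waiter. */
static void my_put(void)
{
        atomic_dec_and_wake_up(&my_refs);
}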
*/ -#define to_wmi_device(device) container_of(device, struct wmi_device, dev) +#define to_wmi_device(device) container_of_const(device, struct wmi_device, dev) extern acpi_status wmidev_evaluate_method(struct wmi_device *wdev, u8 instance, u32 method_id, @@ -56,6 +56,7 @@ u8 wmidev_instance_count(struct wmi_device *wdev); * @no_singleton: Driver can be instantiated multiple times * @probe: Callback for device binding * @remove: Callback for device unbinding + * @shutdown: Callback for device shutdown * @notify: Callback for receiving WMI events * * This represents WMI drivers which handle WMI devices. @@ -68,9 +69,18 @@ struct wmi_driver { int (*probe)(struct wmi_device *wdev, const void *context); void (*remove)(struct wmi_device *wdev); + void (*shutdown)(struct wmi_device *wdev); void (*notify)(struct wmi_device *device, union acpi_object *data); }; +/** + * to_wmi_driver() - Helper macro to cast a driver to a wmi_driver + * @drv: driver struct + * + * Cast a struct device_driver to a struct wmi_driver. + */ +#define to_wmi_driver(drv) container_of_const(drv, struct wmi_driver, driver) + extern int __must_check __wmi_driver_register(struct wmi_driver *driver, struct module *owner); extern void wmi_driver_unregister(struct wmi_driver *driver); diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 59c2695e12e7..b0dc957c3e56 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -412,7 +412,7 @@ enum wq_flags { }; enum wq_consts { - WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ + WQ_MAX_ACTIVE = 2048, /* I like 2048, better ideas? */ WQ_UNBOUND_MAX_ACTIVE = WQ_MAX_ACTIVE, WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2, diff --git a/include/linux/writeback.h b/include/linux/writeback.h index d6db822e4bb3..d11b903c2edb 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -213,11 +213,8 @@ static inline void wait_on_inode(struct inode *inode) #include <linux/bio.h> void __inode_attach_wb(struct inode *inode, struct folio *folio); -void wbc_attach_and_unlock_inode(struct writeback_control *wbc, - struct inode *inode) - __releases(&inode->i_lock); void wbc_detach_inode(struct writeback_control *wbc); -void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page, +void wbc_account_cgroup_owner(struct writeback_control *wbc, struct folio *folio, size_t bytes); int cgroup_writeback_by_id(u64 bdi_id, int memcg_id, enum wb_reason reason, struct wb_completion *done); @@ -254,22 +251,8 @@ static inline void inode_detach_wb(struct inode *inode) } } -/** - * wbc_attach_fdatawrite_inode - associate wbc and inode for fdatawrite - * @wbc: writeback_control of interest - * @inode: target inode - * - * This function is to be used by __filemap_fdatawrite_range(), which is an - * alternative entry point into writeback code, and first ensures @inode is - * associated with a bdi_writeback and attaches it to @wbc. 
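A hedged sketch of how a filesystem writeback path might call the folio-based wbc_account_cgroup_owner() from the writeback.h hunk above; my_account_folio is an illustrative helper, not part of the patch.

#include <linux/writeback.h>
#include <linux/mm.h>

/* Charge a whole folio to its cgroup owner while it is under writeback. */
static void my_account_folio(struct writeback_control *wbc, struct folio *folio)
{
        wbc_account_cgroup_owner(wbc, folio, folio_size(folio));
}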
- */ -static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc, - struct inode *inode) -{ - spin_lock(&inode->i_lock); - inode_attach_wb(inode, NULL); - wbc_attach_and_unlock_inode(wbc, inode); -} +void wbc_attach_fdatawrite_inode(struct writeback_control *wbc, + struct inode *inode); /** * wbc_init_bio - writeback specific initializtion of bio @@ -303,13 +286,6 @@ static inline void inode_detach_wb(struct inode *inode) { } -static inline void wbc_attach_and_unlock_inode(struct writeback_control *wbc, - struct inode *inode) - __releases(&inode->i_lock) -{ - spin_unlock(&inode->i_lock); -} - static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc, struct inode *inode) { @@ -324,7 +300,7 @@ static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio) } static inline void wbc_account_cgroup_owner(struct writeback_control *wbc, - struct page *page, size_t bytes) + struct folio *folio, size_t bytes) { } diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h index bb763085479a..45ff6f7a872b 100644 --- a/include/linux/ww_mutex.h +++ b/include/linux/ww_mutex.h @@ -65,6 +65,16 @@ struct ww_acquire_ctx { #endif #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; + /** + * @first_lock_dep_map: fake lockdep_map for first locked ww_mutex. + * + * lockdep requires the lockdep_map for the first locked ww_mutex + * in a ww transaction to remain in memory until all ww_mutexes of + * the transaction have been unlocked. Ensure this by keeping a + * fake locked ww_mutex lockdep map between ww_acquire_init() and + * ww_acquire_fini(). + */ + struct lockdep_map first_lock_dep_map; #endif #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH unsigned int deadlock_inject_interval; @@ -146,7 +156,10 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx, debug_check_no_locks_freed((void *)ctx, sizeof(*ctx)); lockdep_init_map(&ctx->dep_map, ww_class->acquire_name, &ww_class->acquire_key, 0); + lockdep_init_map_wait(&ctx->first_lock_dep_map, ww_class->mutex_name, + &ww_class->mutex_key, 0, LD_WAIT_SLEEP); mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_); + mutex_acquire_nest(&ctx->first_lock_dep_map, 0, 0, &ctx->dep_map, _RET_IP_); #endif #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH ctx->deadlock_inject_interval = 1; @@ -185,6 +198,7 @@ static inline void ww_acquire_done(struct ww_acquire_ctx *ctx) static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx) { #ifdef CONFIG_DEBUG_LOCK_ALLOC + mutex_release(&ctx->first_lock_dep_map, _THIS_IP_); mutex_release(&ctx->dep_map, _THIS_IP_); #endif #ifdef DEBUG_WW_MUTEXES diff --git a/include/linux/wwan.h b/include/linux/wwan.h index 170fdee6339c..79c781875c09 100644 --- a/include/linux/wwan.h +++ b/include/linux/wwan.h @@ -17,6 +17,8 @@ * @WWAN_PORT_FIREHOSE: XML based command protocol * @WWAN_PORT_XMMRPC: Control protocol for Intel XMM modems * @WWAN_PORT_FASTBOOT: Fastboot protocol control + * @WWAN_PORT_ADB: ADB protocol control + * @WWAN_PORT_MIPC: MTK MIPC diagnostic interface * * @WWAN_PORT_MAX: Highest supported port types * @WWAN_PORT_UNKNOWN: Special value to indicate an unknown port type @@ -30,6 +32,8 @@ enum wwan_port_type { WWAN_PORT_FIREHOSE, WWAN_PORT_XMMRPC, WWAN_PORT_FASTBOOT, + WWAN_PORT_ADB, + WWAN_PORT_MIPC, /* Add new port types above this line */ diff --git a/include/linux/xattr.h b/include/linux/xattr.h index d20051865800..86b0d47984a1 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h @@ -19,6 +19,10 @@ #include <linux/user_namespace.h> #include <uapi/linux/xattr.h> +/* List 
of all xattr_args "versions". */ +#define XATTR_ARGS_SIZE_VER0 16 /* sizeof first published struct */ +#define XATTR_ARGS_SIZE_LATEST XATTR_ARGS_SIZE_VER0 + struct inode; struct dentry; diff --git a/include/linux/zswap.h b/include/linux/zswap.h index 9cd1beef0654..d961ead91bf1 100644 --- a/include/linux/zswap.h +++ b/include/linux/zswap.h @@ -7,7 +7,7 @@ struct lruvec; -extern atomic_t zswap_stored_pages; +extern atomic_long_t zswap_stored_pages; #ifdef CONFIG_ZSWAP