Diffstat (limited to 'include')
70 files changed, 718 insertions, 280 deletions
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h index fedc0dfa4877..4f07afacbc23 100644 --- a/include/asm-generic/cacheflush.h +++ b/include/asm-generic/cacheflush.h @@ -50,13 +50,7 @@ static inline void flush_dcache_page(struct page *page) { } -static inline void flush_dcache_folio(struct folio *folio) { } #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 -#define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO -#endif - -#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO -void flush_dcache_folio(struct folio *folio); #endif #ifndef flush_dcache_mmap_lock diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h index 604b1d1b2d72..9923c7a6885e 100644 --- a/include/drm/drm_device.h +++ b/include/drm/drm_device.h @@ -6,16 +6,13 @@ #include <linux/mutex.h> #include <linux/idr.h> -#include <drm/drm_hashtab.h> +#include <drm/drm_legacy.h> #include <drm/drm_mode_config.h> struct drm_driver; struct drm_minor; struct drm_master; -struct drm_device_dma; struct drm_vblank_crtc; -struct drm_sg_mem; -struct drm_local_map; struct drm_vma_offset_manager; struct drm_vram_mm; struct drm_fb_helper; diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 8b2ed4199284..30359e434c3f 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -1784,6 +1784,13 @@ drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) } static inline bool +drm_dp_max_downspread(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DPCD_REV] >= 0x11 || + dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5; +} + +static inline bool drm_dp_tps4_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) { return dpcd[DP_DPCD_REV] >= 0x14 && diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index a84eb4028e5b..f6159acb8856 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -291,8 +291,9 @@ struct drm_driver { /** * @gem_create_object: constructor for gem objects * - * Hook for allocating the GEM object struct, for use by the CMA and - * SHMEM GEM helpers. + * Hook for allocating the GEM object struct, for use by the CMA + * and SHMEM GEM helpers. Returns a GEM object on success, or an + * ERR_PTR()-encoded error code otherwise. 
*/ struct drm_gem_object *(*gem_create_object)(struct drm_device *dev, size_t size); @@ -601,5 +602,6 @@ static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev) int drm_dev_set_unique(struct drm_device *dev, const char *name); +extern bool drm_firmware_drivers_only(void); #endif diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h index 97e4c3223af3..b30ed5de0a33 100644 --- a/include/drm/drm_format_helper.h +++ b/include/drm/drm_format_helper.h @@ -33,6 +33,9 @@ void drm_fb_xrgb8888_to_rgb888(void *dst, unsigned int dst_pitch, const void *sr void drm_fb_xrgb8888_to_rgb888_toio(void __iomem *dst, unsigned int dst_pitch, const void *vaddr, const struct drm_framebuffer *fb, const struct drm_rect *clip); +void drm_fb_xrgb8888_to_xrgb2101010_toio(void __iomem *dst, unsigned int dst_pitch, + const void *vaddr, const struct drm_framebuffer *fb, + const struct drm_rect *clip); void drm_fb_xrgb8888_to_gray8(void *dst, unsigned int dst_pitch, const void *vaddr, const struct drm_framebuffer *fb, const struct drm_rect *clip); diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h index cd13508acbc1..adb507a9dbf0 100644 --- a/include/drm/drm_gem_cma_helper.h +++ b/include/drm/drm_gem_cma_helper.h @@ -32,42 +32,108 @@ struct drm_gem_cma_object { #define to_drm_gem_cma_obj(gem_obj) \ container_of(gem_obj, struct drm_gem_cma_object, base) -#ifndef CONFIG_MMU -#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS \ - .get_unmapped_area = drm_gem_cma_get_unmapped_area, -#else -#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS -#endif +struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, + size_t size); +void drm_gem_cma_free(struct drm_gem_cma_object *cma_obj); +void drm_gem_cma_print_info(const struct drm_gem_cma_object *cma_obj, + struct drm_printer *p, unsigned int indent); +struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_cma_object *cma_obj); +int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj, struct dma_buf_map *map); +int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *vma); + +extern const struct vm_operations_struct drm_gem_cma_vm_ops; + +/* + * GEM object functions + */ /** - * DEFINE_DRM_GEM_CMA_FOPS() - macro to generate file operations for CMA drivers - * @name: name for the generated structure + * drm_gem_cma_object_free - GEM object function for drm_gem_cma_free() + * @obj: GEM object to free * - * This macro autogenerates a suitable &struct file_operations for CMA based - * drivers, which can be assigned to &drm_driver.fops. Note that this structure - * cannot be shared between drivers, because it contains a reference to the - * current module using THIS_MODULE. + * This function wraps drm_gem_cma_free(). Drivers that employ the CMA helpers + * should use it as their &drm_gem_object_funcs.free handler. + */ +static inline void drm_gem_cma_object_free(struct drm_gem_object *obj) +{ + struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); + + drm_gem_cma_free(cma_obj); +} + +/** + * drm_gem_cma_object_print_info() - Print &drm_gem_cma_object info for debugfs + * @p: DRM printer + * @indent: Tab indentation level + * @obj: GEM object * - * Note that the declaration is already marked as static - if you need a - * non-static version of this you're probably doing it wrong and will break the - * THIS_MODULE reference by accident. + * This function wraps drm_gem_cma_print_info(). 
Drivers that employ the CMA helpers + * should use this function as their &drm_gem_object_funcs.print_info handler. */ -#define DEFINE_DRM_GEM_CMA_FOPS(name) \ - static const struct file_operations name = {\ - .owner = THIS_MODULE,\ - .open = drm_open,\ - .release = drm_release,\ - .unlocked_ioctl = drm_ioctl,\ - .compat_ioctl = drm_compat_ioctl,\ - .poll = drm_poll,\ - .read = drm_read,\ - .llseek = noop_llseek,\ - .mmap = drm_gem_mmap,\ - DRM_GEM_CMA_UNMAPPED_AREA_FOPS \ - } +static inline void drm_gem_cma_object_print_info(struct drm_printer *p, unsigned int indent, + const struct drm_gem_object *obj) +{ + const struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); + + drm_gem_cma_print_info(cma_obj, p, indent); +} + +/** + * drm_gem_cma_object_get_sg_table - GEM object function for drm_gem_cma_get_sg_table() + * @obj: GEM object + * + * This function wraps drm_gem_cma_get_sg_table(). Drivers that employ the CMA helpers should + * use it as their &drm_gem_object_funcs.get_sg_table handler. + * + * Returns: + * A pointer to the scatter/gather table of pinned pages or NULL on failure. + */ +static inline struct sg_table *drm_gem_cma_object_get_sg_table(struct drm_gem_object *obj) +{ + struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); + + return drm_gem_cma_get_sg_table(cma_obj); +} + +/** + * drm_gem_cma_object_vmap - GEM object function for drm_gem_cma_vmap() + * @obj: GEM object + * @map: Returns the kernel virtual address of the CMA GEM object's backing store. + * + * This function wraps drm_gem_cma_vmap(). Drivers that employ the CMA helpers should + * use it as their &drm_gem_object_funcs.vmap handler. + * + * Returns: + * 0 on success or a negative error code on failure. + */ +static inline int drm_gem_cma_object_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) +{ + struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); + + return drm_gem_cma_vmap(cma_obj, map); +} + +/** + * drm_gem_cma_object_mmap - GEM object function for drm_gem_cma_mmap() + * @obj: GEM object + * @vma: VMA for the area to be mapped + * + * This function wraps drm_gem_cma_mmap(). Drivers that employ the CMA helpers should + * use it as their &drm_gem_object_funcs.mmap handler. + * + * Returns: + * 0 on success or a negative error code on failure. 
+ */ +static inline int drm_gem_cma_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); + + return drm_gem_cma_mmap(cma_obj, vma); +} -/* free GEM object */ -void drm_gem_cma_free_object(struct drm_gem_object *gem_obj); +/* + * Driver ops + */ /* create memory region for DRM framebuffer */ int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv, @@ -79,30 +145,10 @@ int drm_gem_cma_dumb_create(struct drm_file *file_priv, struct drm_device *drm, struct drm_mode_create_dumb *args); -/* allocate physical memory */ -struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, - size_t size); - -extern const struct vm_operations_struct drm_gem_cma_vm_ops; - -#ifndef CONFIG_MMU -unsigned long drm_gem_cma_get_unmapped_area(struct file *filp, - unsigned long addr, - unsigned long len, - unsigned long pgoff, - unsigned long flags); -#endif - -void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent, - const struct drm_gem_object *obj); - -struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_object *obj); struct drm_gem_object * drm_gem_cma_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sgt); -int drm_gem_cma_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); -int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); /** * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE - CMA GEM driver operations @@ -185,4 +231,47 @@ drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *drm, struct dma_buf_attachment *attach, struct sg_table *sgt); +/* + * File ops + */ + +#ifndef CONFIG_MMU +unsigned long drm_gem_cma_get_unmapped_area(struct file *filp, + unsigned long addr, + unsigned long len, + unsigned long pgoff, + unsigned long flags); +#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS \ + .get_unmapped_area = drm_gem_cma_get_unmapped_area, +#else +#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS +#endif + +/** + * DEFINE_DRM_GEM_CMA_FOPS() - macro to generate file operations for CMA drivers + * @name: name for the generated structure + * + * This macro autogenerates a suitable &struct file_operations for CMA based + * drivers, which can be assigned to &drm_driver.fops. Note that this structure + * cannot be shared between drivers, because it contains a reference to the + * current module using THIS_MODULE. + * + * Note that the declaration is already marked as static - if you need a + * non-static version of this you're probably doing it wrong and will break the + * THIS_MODULE reference by accident. 
+ */ +#define DEFINE_DRM_GEM_CMA_FOPS(name) \ + static const struct file_operations name = {\ + .owner = THIS_MODULE,\ + .open = drm_open,\ + .release = drm_release,\ + .unlocked_ioctl = drm_ioctl,\ + .compat_ioctl = drm_compat_ioctl,\ + .poll = drm_poll,\ + .read = drm_read,\ + .llseek = noop_llseek,\ + .mmap = drm_gem_mmap,\ + DRM_GEM_CMA_UNMAPPED_AREA_FOPS \ + } + #endif /* __DRM_GEM_CMA_HELPER_H__ */ diff --git a/include/drm/drm_gem_ttm_helper.h b/include/drm/drm_gem_ttm_helper.h index c1aa02bd4c89..78040f6cc6f3 100644 --- a/include/drm/drm_gem_ttm_helper.h +++ b/include/drm/drm_gem_ttm_helper.h @@ -3,7 +3,7 @@ #ifndef DRM_GEM_TTM_HELPER_H #define DRM_GEM_TTM_HELPER_H -#include <linux/kernel.h> +#include <linux/container_of.h> #include <drm/drm_device.h> #include <drm/drm_gem.h> diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h index d3cf06c9af65..b4ce27a72773 100644 --- a/include/drm/drm_gem_vram_helper.h +++ b/include/drm/drm_gem_vram_helper.h @@ -11,8 +11,8 @@ #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> +#include <linux/container_of.h> #include <linux/dma-buf-map.h> -#include <linux/kernel.h> /* for container_of() */ struct drm_mode_create_dumb; struct drm_plane; diff --git a/include/drm/drm_hashtab.h b/include/drm/drm_hashtab.h deleted file mode 100644 index bb95ff011baf..000000000000 --- a/include/drm/drm_hashtab.h +++ /dev/null @@ -1,79 +0,0 @@ -/************************************************************************** - * - * Copyright 2006 Tungsten Graphics, Inc., Bismack, ND. USA. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - * - **************************************************************************/ -/* - * Simple open hash tab implementation. 
- * - * Authors: - * Thomas Hellström <thomas-at-tungstengraphics-dot-com> - */ - -#ifndef DRM_HASHTAB_H -#define DRM_HASHTAB_H - -#include <linux/list.h> - -#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member) - -struct drm_hash_item { - struct hlist_node head; - unsigned long key; -}; - -struct drm_open_hash { - struct hlist_head *table; - u8 order; -}; - -int drm_ht_create(struct drm_open_hash *ht, unsigned int order); -int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item); -int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item, - unsigned long seed, int bits, int shift, - unsigned long add); -int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item); - -void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key); -int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key); -int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item); -void drm_ht_remove(struct drm_open_hash *ht); - -/* - * RCU-safe interface - * - * The user of this API needs to make sure that two or more instances of the - * hash table manipulation functions are never run simultaneously. - * The lookup function drm_ht_find_item_rcu may, however, run simultaneously - * with any of the manipulation functions as long as it's called from within - * an RCU read-locked section. - */ -#define drm_ht_insert_item_rcu drm_ht_insert_item -#define drm_ht_just_insert_please_rcu drm_ht_just_insert_please -#define drm_ht_remove_key_rcu drm_ht_remove_key -#define drm_ht_remove_item_rcu drm_ht_remove_item -#define drm_ht_find_item_rcu drm_ht_find_item - -#endif diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h index 58dc8d8cc907..0fc85418aad8 100644 --- a/include/drm/drm_legacy.h +++ b/include/drm/drm_legacy.h @@ -37,7 +37,6 @@ #include <drm/drm.h> #include <drm/drm_auth.h> -#include <drm/drm_hashtab.h> struct drm_device; struct drm_driver; @@ -51,6 +50,20 @@ struct pci_driver; * you're doing it terribly wrong. */ +/* + * Hash-table Support + */ + +struct drm_hash_item { + struct hlist_node head; + unsigned long key; +}; + +struct drm_open_hash { + struct hlist_head *table; + u8 order; +}; + /** * DMA buffer. 
*/ diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index 9b4292f229c6..ac33ba1b18bc 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -39,13 +39,15 @@ */ #include <linux/bug.h> #include <linux/rbtree.h> -#include <linux/kernel.h> +#include <linux/limits.h> #include <linux/mm_types.h> #include <linux/list.h> #include <linux/spinlock.h> #ifdef CONFIG_DRM_DEBUG_MM #include <linux/stackdepot.h> #endif +#include <linux/types.h> + #include <drm/drm_print.h> #ifdef CONFIG_DRM_DEBUG_MM diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index c00ac54692d7..baf3d1d3d566 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -666,4 +666,13 @@ INTEL_VGA_DEVICE(0x46C2, info), \ INTEL_VGA_DEVICE(0x46C3, info) +/* RPL-S */ +#define INTEL_RPLS_IDS(info) \ + INTEL_VGA_DEVICE(0xA780, info), \ + INTEL_VGA_DEVICE(0xA781, info), \ + INTEL_VGA_DEVICE(0xA782, info), \ + INTEL_VGA_DEVICE(0xA783, info), \ + INTEL_VGA_DEVICE(0xA788, info), \ + INTEL_VGA_DEVICE(0xA789, info) + #endif /* _I915_PCIIDS_H */ diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h index abfefaaf897a..67530bfef129 100644 --- a/include/drm/intel-gtt.h +++ b/include/drm/intel-gtt.h @@ -4,9 +4,11 @@ #ifndef _DRM_INTEL_GTT_H #define _DRM_INTEL_GTT_H -#include <linux/agp_backend.h> -#include <linux/intel-iommu.h> -#include <linux/kernel.h> +#include <linux/types.h> + +struct agp_bridge_data; +struct pci_dev; +struct sg_table; void intel_gtt_get(u64 *gtt_total, phys_addr_t *mappable_base, diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index cd785cfa3123..c17b2df9178b 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -32,7 +32,6 @@ #define _TTM_BO_API_H_ #include <drm/drm_gem.h> -#include <drm/drm_hashtab.h> #include <drm/drm_vma_manager.h> #include <linux/kref.h> #include <linux/list.h> diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h index 76d1b9119a2b..8074d0f6cae5 100644 --- a/include/drm/ttm/ttm_placement.h +++ b/include/drm/ttm/ttm_placement.h @@ -35,6 +35,17 @@ /* * Memory regions for data placement. + * + * Buffers placed in TTM_PL_SYSTEM are considered under TTM's control and can + * be swapped out whenever TTM thinks it is a good idea. + * In cases where drivers would like to use TTM_PL_SYSTEM as a valid + * placement, they need to be able to handle the issues that arise due to the + * above manually. + * + * For BOs which reside in system memory but for which the accelerator + * requires direct access (i.e. their usage needs to be synchronized + * between the CPU and accelerator via fences), a new, driver-private + * placement that can handle such scenarios is a good idea. 
*/ #define TTM_PL_SYSTEM 0 diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 668d007f0917..b28f8790192a 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -1182,7 +1182,6 @@ int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname, struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode, struct fwnode_handle *child); -struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode); struct acpi_probe_entry; typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *, @@ -1288,12 +1287,6 @@ acpi_get_next_subnode(const struct fwnode_handle *fwnode, } static inline struct fwnode_handle * -acpi_node_get_parent(const struct fwnode_handle *fwnode) -{ - return NULL; -} - -static inline struct fwnode_handle * acpi_graph_get_next_endpoint(const struct fwnode_handle *fwnode, struct fwnode_handle *prev) { diff --git a/include/linux/bpf.h b/include/linux/bpf.h index e7a163a3146b..755f38e893be 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -732,6 +732,7 @@ int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr) struct bpf_trampoline *bpf_trampoline_get(u64 key, struct bpf_attach_target_info *tgt_info); void bpf_trampoline_put(struct bpf_trampoline *tr); +int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs); #define BPF_DISPATCHER_INIT(_name) { \ .mutex = __MUTEX_INITIALIZER(_name.mutex), \ .func = &_name##_func, \ @@ -1352,28 +1353,16 @@ extern struct mutex bpf_stats_enabled_mutex; * kprobes, tracepoints) to prevent deadlocks on map operations as any of * these events can happen inside a region which holds a map bucket lock * and can deadlock on it. - * - * Use the preemption safe inc/dec variants on RT because migrate disable - * is preemptible on RT and preemption in the middle of the RMW operation - * might lead to inconsistent state. Use the raw variants for non RT - * kernels as migrate_disable() maps to preempt_disable() so the slightly - * more expensive save operation can be avoided. 
*/ static inline void bpf_disable_instrumentation(void) { migrate_disable(); - if (IS_ENABLED(CONFIG_PREEMPT_RT)) - this_cpu_inc(bpf_prog_active); - else - __this_cpu_inc(bpf_prog_active); + this_cpu_inc(bpf_prog_active); } static inline void bpf_enable_instrumentation(void) { - if (IS_ENABLED(CONFIG_PREEMPT_RT)) - this_cpu_dec(bpf_prog_active); - else - __this_cpu_dec(bpf_prog_active); + this_cpu_dec(bpf_prog_active); migrate_enable(); } diff --git a/include/linux/btf.h b/include/linux/btf.h index 203eef993d76..0e1b6281fd8f 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -245,7 +245,10 @@ struct kfunc_btf_id_set { struct module *owner; }; -struct kfunc_btf_id_list; +struct kfunc_btf_id_list { + struct list_head list; + struct mutex mutex; +}; #ifdef CONFIG_DEBUG_INFO_BTF_MODULES void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l, @@ -254,6 +257,9 @@ void unregister_kfunc_btf_id_set(struct kfunc_btf_id_list *l, struct kfunc_btf_id_set *s); bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id, struct module *owner); + +extern struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list; +extern struct kfunc_btf_id_list prog_test_kfunc_list; #else static inline void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l, struct kfunc_btf_id_set *s) @@ -268,13 +274,13 @@ static inline bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, { return false; } + +static struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list __maybe_unused; +static struct kfunc_btf_id_list prog_test_kfunc_list __maybe_unused; #endif #define DEFINE_KFUNC_BTF_ID_SET(set, name) \ struct kfunc_btf_id_set name = { LIST_HEAD_INIT(name.list), (set), \ THIS_MODULE } -extern struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list; -extern struct kfunc_btf_id_list prog_test_kfunc_list; - #endif diff --git a/include/linux/cacheflush.h b/include/linux/cacheflush.h new file mode 100644 index 000000000000..fef8b607f97e --- /dev/null +++ b/include/linux/cacheflush.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CACHEFLUSH_H +#define _LINUX_CACHEFLUSH_H + +#include <asm/cacheflush.h> + +#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO +void flush_dcache_folio(struct folio *folio); +#endif +#else +static inline void flush_dcache_folio(struct folio *folio) +{ +} +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO 0 +#endif /* ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE */ + +#endif /* _LINUX_CACHEFLUSH_H */ diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index 2f909ed084c6..4ff37cb763ae 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h @@ -3,7 +3,6 @@ #define _LINUX_CACHEINFO_H #include <linux/bitops.h> -#include <linux/cpu.h> #include <linux/cpumask.h> #include <linux/smp.h> diff --git a/include/linux/console.h b/include/linux/console.h index a97f277cfdfa..7cd758a4f44e 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -219,12 +219,6 @@ extern atomic_t ignore_console_lock_warning; #define VESA_HSYNC_SUSPEND 2 #define VESA_POWERDOWN 3 -#ifdef CONFIG_VGA_CONSOLE -extern bool vgacon_text_force(void); -#else -static inline bool vgacon_text_force(void) { return false; } -#endif - extern void console_init(void); /* For deferred console takeover */ diff --git a/include/linux/delay.h b/include/linux/delay.h index 8eacf67eb212..039e7e0c7378 100644 --- a/include/linux/delay.h +++ b/include/linux/delay.h @@ -20,6 +20,7 @@ */ #include <linux/math.h> +#include <linux/sched.h> extern unsigned long loops_per_jiffy; @@ -58,7 
+59,18 @@ void calibrate_delay(void); void __attribute__((weak)) calibration_delay_done(void); void msleep(unsigned int msecs); unsigned long msleep_interruptible(unsigned int msecs); -void usleep_range(unsigned long min, unsigned long max); +void usleep_range_state(unsigned long min, unsigned long max, + unsigned int state); + +static inline void usleep_range(unsigned long min, unsigned long max) +{ + usleep_range_state(min, max, TASK_UNINTERRUPTIBLE); +} + +static inline void usleep_idle_range(unsigned long min, unsigned long max) +{ + usleep_range_state(min, max, TASK_IDLE); +} static inline void ssleep(unsigned int seconds) { diff --git a/include/linux/device/driver.h b/include/linux/device/driver.h index a498ebcf4993..15e7c5e15d62 100644 --- a/include/linux/device/driver.h +++ b/include/linux/device/driver.h @@ -18,6 +18,7 @@ #include <linux/klist.h> #include <linux/pm.h> #include <linux/device/bus.h> +#include <linux/module.h> /** * enum probe_type - device driver probe type to try diff --git a/include/linux/filter.h b/include/linux/filter.h index 24b7ed2677af..7f1e88e3e2b5 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -6,6 +6,7 @@ #define __LINUX_FILTER_H__ #include <linux/atomic.h> +#include <linux/bpf.h> #include <linux/refcount.h> #include <linux/compat.h> #include <linux/skbuff.h> @@ -26,7 +27,6 @@ #include <asm/byteorder.h> #include <uapi/linux/filter.h> -#include <uapi/linux/bpf.h> struct sk_buff; struct sock; @@ -640,9 +640,6 @@ static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void * This uses migrate_disable/enable() explicitly to document that the * invocation of a BPF program does not require reentrancy protection * against a BPF program which is invoked from a preempting task. - * - * For non RT enabled kernels migrate_disable/enable() maps to - * preempt_disable/enable(), i.e. it disables also preemption. */ static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog, const void *ctx) diff --git a/include/linux/fs.h b/include/linux/fs.h index 1cb616fc1105..bbf812ce89a8 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2518,7 +2518,6 @@ struct file_system_type { #define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */ #define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */ #define FS_ALLOW_IDMAP 32 /* FS has been updated to handle vfs idmappings. */ -#define FS_THP_SUPPORT 8192 /* Remove once all fs converted */ #define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. 
*/ int (*init_fs_context)(struct fs_context *); const struct fs_parameter_spec *parameters; diff --git a/include/linux/hid.h b/include/linux/hid.h index 9e067f937dbc..f453be385bd4 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -840,6 +840,11 @@ static inline bool hid_is_using_ll_driver(struct hid_device *hdev, return hdev->ll_driver == driver; } +static inline bool hid_is_usb(struct hid_device *hdev) +{ + return hid_is_using_ll_driver(hdev, &usb_hid_driver); +} + #define PM_HINT_FULLON 1<<5 #define PM_HINT_NORMAL 1<<1 diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 25aff0f2ed0b..39bb9b47fa9c 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -5,12 +5,11 @@ #include <linux/fs.h> #include <linux/kernel.h> #include <linux/bug.h> +#include <linux/cacheflush.h> #include <linux/mm.h> #include <linux/uaccess.h> #include <linux/hardirq.h> -#include <asm/cacheflush.h> - #include "highmem-internal.h" /** @@ -231,10 +230,10 @@ static inline void tag_clear_highpage(struct page *page) * If we pass in a base or tail page, we can zero up to PAGE_SIZE. * If we pass in a head page, we can zero up to the size of the compound page. */ -#if defined(CONFIG_HIGHMEM) && defined(CONFIG_TRANSPARENT_HUGEPAGE) +#ifdef CONFIG_HIGHMEM void zero_user_segments(struct page *page, unsigned start1, unsigned end1, unsigned start2, unsigned end2); -#else /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */ +#else static inline void zero_user_segments(struct page *page, unsigned start1, unsigned end1, unsigned start2, unsigned end2) @@ -254,7 +253,7 @@ static inline void zero_user_segments(struct page *page, for (i = 0; i < compound_nr(page); i++) flush_dcache_page(page + i); } -#endif /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */ +#endif static inline void zero_user_segment(struct page *page, unsigned start, unsigned end) @@ -364,4 +363,42 @@ static inline void memzero_page(struct page *page, size_t offset, size_t len) kunmap_local(addr); } +/** + * folio_zero_segments() - Zero two byte ranges in a folio. + * @folio: The folio to write to. + * @start1: The first byte to zero. + * @xend1: One more than the last byte in the first range. + * @start2: The first byte to zero in the second range. + * @xend2: One more than the last byte in the second range. + */ +static inline void folio_zero_segments(struct folio *folio, + size_t start1, size_t xend1, size_t start2, size_t xend2) +{ + zero_user_segments(&folio->page, start1, xend1, start2, xend2); +} + +/** + * folio_zero_segment() - Zero a byte range in a folio. + * @folio: The folio to write to. + * @start: The first byte to zero. + * @xend: One more than the last byte to zero. + */ +static inline void folio_zero_segment(struct folio *folio, + size_t start, size_t xend) +{ + zero_user_segments(&folio->page, start, xend, 0, 0); +} + +/** + * folio_zero_range() - Zero a byte range in a folio. + * @folio: The folio to write to. + * @start: The first byte to zero. + * @length: The number of bytes to zero. 
+ */ +static inline void folio_zero_range(struct folio *folio, + size_t start, size_t length) +{ + zero_user_segments(&folio->page, start, start + length, 0, 0); +} + #endif /* _LINUX_HIGHMEM_H */ diff --git a/include/linux/host1x.h b/include/linux/host1x.h index 7bccf589aba7..e8dc5bc41f79 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h @@ -7,6 +7,8 @@ #define __LINUX_HOST1X_H #include <linux/device.h> +#include <linux/dma-direction.h> +#include <linux/spinlock.h> #include <linux/types.h> enum host1x_class { @@ -15,6 +17,8 @@ enum host1x_class { HOST1X_CLASS_GR2D_SB = 0x52, HOST1X_CLASS_VIC = 0x5D, HOST1X_CLASS_GR3D = 0x60, + HOST1X_CLASS_NVDEC = 0xF0, + HOST1X_CLASS_NVDEC1 = 0xF5, }; struct host1x; @@ -24,6 +28,28 @@ struct iommu_group; u64 host1x_get_dma_mask(struct host1x *host1x); /** + * struct host1x_bo_cache - host1x buffer object cache + * @mappings: list of mappings + * @lock: synchronizes accesses to the list of mappings + */ +struct host1x_bo_cache { + struct list_head mappings; + struct mutex lock; +}; + +static inline void host1x_bo_cache_init(struct host1x_bo_cache *cache) +{ + INIT_LIST_HEAD(&cache->mappings); + mutex_init(&cache->lock); +} + +static inline void host1x_bo_cache_destroy(struct host1x_bo_cache *cache) +{ + /* XXX warn if not empty? */ + mutex_destroy(&cache->lock); +} + +/** * struct host1x_client_ops - host1x client operations * @early_init: host1x client early initialization code * @init: host1x client initialization code @@ -73,6 +99,8 @@ struct host1x_client { struct host1x_client *parent; unsigned int usecount; struct mutex lock; + + struct host1x_bo_cache cache; }; /* @@ -82,23 +110,48 @@ struct host1x_client { struct host1x_bo; struct sg_table; +struct host1x_bo_mapping { + struct kref ref; + struct dma_buf_attachment *attach; + enum dma_data_direction direction; + struct list_head list; + struct host1x_bo *bo; + struct sg_table *sgt; + unsigned int chunks; + struct device *dev; + dma_addr_t phys; + size_t size; + + struct host1x_bo_cache *cache; + struct list_head entry; +}; + +static inline struct host1x_bo_mapping *to_host1x_bo_mapping(struct kref *ref) +{ + return container_of(ref, struct host1x_bo_mapping, ref); +} + struct host1x_bo_ops { struct host1x_bo *(*get)(struct host1x_bo *bo); void (*put)(struct host1x_bo *bo); - struct sg_table *(*pin)(struct device *dev, struct host1x_bo *bo, - dma_addr_t *phys); - void (*unpin)(struct device *dev, struct sg_table *sgt); + struct host1x_bo_mapping *(*pin)(struct device *dev, struct host1x_bo *bo, + enum dma_data_direction dir); + void (*unpin)(struct host1x_bo_mapping *map); void *(*mmap)(struct host1x_bo *bo); void (*munmap)(struct host1x_bo *bo, void *addr); }; struct host1x_bo { const struct host1x_bo_ops *ops; + struct list_head mappings; + spinlock_t lock; }; static inline void host1x_bo_init(struct host1x_bo *bo, const struct host1x_bo_ops *ops) { + INIT_LIST_HEAD(&bo->mappings); + spin_lock_init(&bo->lock); bo->ops = ops; } @@ -112,18 +165,10 @@ static inline void host1x_bo_put(struct host1x_bo *bo) bo->ops->put(bo); } -static inline struct sg_table *host1x_bo_pin(struct device *dev, - struct host1x_bo *bo, - dma_addr_t *phys) -{ - return bo->ops->pin(dev, bo, phys); -} - -static inline void host1x_bo_unpin(struct device *dev, struct host1x_bo *bo, - struct sg_table *sgt) -{ - bo->ops->unpin(dev, sgt); -} +struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo, + enum dma_data_direction dir, + struct host1x_bo_cache *cache); +void host1x_bo_unpin(struct 
host1x_bo_mapping *map); static inline void *host1x_bo_mmap(struct host1x_bo *bo) { @@ -181,6 +226,7 @@ struct host1x_job; struct host1x_channel *host1x_channel_request(struct host1x_client *client); struct host1x_channel *host1x_channel_get(struct host1x_channel *channel); +void host1x_channel_stop(struct host1x_channel *channel); void host1x_channel_put(struct host1x_channel *channel); int host1x_job_submit(struct host1x_job *job); diff --git a/include/linux/intel-ish-client-if.h b/include/linux/intel-ish-client-if.h index aee8ff4739b1..f45f13304add 100644 --- a/include/linux/intel-ish-client-if.h +++ b/include/linux/intel-ish-client-if.h @@ -9,7 +9,7 @@ #define _INTEL_ISH_CLIENT_IF_H_ #include <linux/device.h> -#include <linux/uuid.h> +#include <linux/mod_devicetable.h> struct ishtp_cl_device; struct ishtp_device; @@ -40,7 +40,7 @@ enum cl_state { struct ishtp_cl_driver { struct device_driver driver; const char *name; - const guid_t *guid; + const struct ishtp_device_id *id; int (*probe)(struct ishtp_cl_device *dev); void (*remove)(struct ishtp_cl_device *dev); int (*reset)(struct ishtp_cl_device *dev); diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index e974caf39d3e..8c8f7a4d93af 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -153,6 +153,8 @@ struct kretprobe { struct kretprobe_holder *rph; }; +#define KRETPROBE_MAX_DATA_SIZE 4096 + struct kretprobe_instance { union { struct freelist_node freelist; diff --git a/include/linux/mhi.h b/include/linux/mhi.h index 723985879035..a5cc4cdf9cc8 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -664,6 +664,19 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl); int mhi_pm_resume(struct mhi_controller *mhi_cntrl); /** + * mhi_pm_resume_force - Force resume MHI from suspended state + * @mhi_cntrl: MHI controller + * + * Resume the device irrespective of its MHI state. As per the MHI spec, devices + * have to be in M3 state during resume. But some devices seem to be in an + * MHI state other than M3, yet they continue working fine if allowed. + * This API is intended to be used for such devices. + * + * Return: 0 if the resume succeeds, a negative error code otherwise + */ +int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl); + +/** * mhi_download_rddm_image - Download ramdump image from device for * debugging purpose. * @mhi_cntrl: MHI controller diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 3636df90899a..fbaab440a484 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -9698,7 +9698,10 @@ struct mlx5_ifc_mcam_access_reg_bits { u8 regs_84_to_68[0x11]; u8 tracer_registers[0x4]; - u8 regs_63_to_32[0x20]; + u8 regs_63_to_46[0x12]; + u8 mrtc[0x1]; + u8 regs_44_to_32[0xd]; + u8 regs_31_to_0[0x20]; }; diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index ae2e75d15b21..4bb71979a8fd 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -895,4 +895,18 @@ struct dfl_device_id { kernel_ulong_t driver_data; }; +/* ISHTP (Integrated Sensor Hub Transport Protocol) */ + +#define ISHTP_MODULE_PREFIX "ishtp:" + +/** + * struct ishtp_device_id - ISHTP device identifier + * @guid: GUID of the device. 
+ * @driver_data: Driver specific data. + */ +struct ishtp_device_id { + guid_t guid; + kernel_ulong_t driver_data; +}; + #endif /* LINUX_MOD_DEVICETABLE_H */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3ec42495a43a..be5cb3360b94 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -4404,7 +4404,8 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) { spin_lock(&txq->_xmit_lock); - txq->xmit_lock_owner = cpu; + /* Pairs with READ_ONCE() in __dev_queue_xmit() */ + WRITE_ONCE(txq->xmit_lock_owner, cpu); } static inline bool __netif_tx_acquire(struct netdev_queue *txq) @@ -4421,26 +4422,32 @@ static inline void __netif_tx_release(struct netdev_queue *txq) static inline void __netif_tx_lock_bh(struct netdev_queue *txq) { spin_lock_bh(&txq->_xmit_lock); - txq->xmit_lock_owner = smp_processor_id(); + /* Pairs with READ_ONCE() in __dev_queue_xmit() */ + WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); } static inline bool __netif_tx_trylock(struct netdev_queue *txq) { bool ok = spin_trylock(&txq->_xmit_lock); - if (likely(ok)) - txq->xmit_lock_owner = smp_processor_id(); + + if (likely(ok)) { + /* Pairs with READ_ONCE() in __dev_queue_xmit() */ + WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); + } return ok; } static inline void __netif_tx_unlock(struct netdev_queue *txq) { - txq->xmit_lock_owner = -1; + /* Pairs with READ_ONCE() in __dev_queue_xmit() */ + WRITE_ONCE(txq->xmit_lock_owner, -1); spin_unlock(&txq->_xmit_lock); } static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) { - txq->xmit_lock_owner = -1; + /* Pairs with READ_ONCE() in __dev_queue_xmit() */ + WRITE_ONCE(txq->xmit_lock_owner, -1); spin_unlock_bh(&txq->_xmit_lock); } diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 52ec4b5e5615..b5f14d581113 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -686,13 +686,13 @@ static inline bool test_set_page_writeback(struct page *page) __PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY) -/* Whether there are one or multiple pages in a folio */ -static inline bool folio_test_single(struct folio *folio) -{ - return !folio_test_head(folio); -} - -static inline bool folio_test_multi(struct folio *folio) +/** + * folio_test_large() - Does this folio contain more than one page? + * @folio: The folio to test. + * + * Return: True if the folio is larger than one page. + */ +static inline bool folio_test_large(struct folio *folio) { return folio_test_head(folio); } diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 1a0c646eb6ff..605246452305 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -84,7 +84,7 @@ enum mapping_flags { AS_EXITING = 4, /* final truncate in progress */ /* writeback related tags are not used */ AS_NO_WRITEBACK_TAGS = 5, - AS_THP_SUPPORT = 6, /* THPs supported */ + AS_LARGE_FOLIO_SUPPORT = 6, }; /** @@ -176,9 +176,25 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) m->gfp_mask = mask; } -static inline bool mapping_thp_support(struct address_space *mapping) +/** + * mapping_set_large_folios() - Indicate the file supports large folios. + * @mapping: The file. + * + * The filesystem should call this function in its inode constructor to + * indicate that the VFS can use large folios to cache the contents of + * the file. 
+ * + * Context: This should not be called while the inode is active as it + * is non-atomic. + */ +static inline void mapping_set_large_folios(struct address_space *mapping) +{ + __set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags); +} + +static inline bool mapping_large_folio_support(struct address_space *mapping) { - return test_bit(AS_THP_SUPPORT, &mapping->flags); + return test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags); } static inline int filemap_nr_thps(struct address_space *mapping) @@ -193,7 +209,7 @@ static inline int filemap_nr_thps(struct address_space *mapping) static inline void filemap_nr_thps_inc(struct address_space *mapping) { #ifdef CONFIG_READ_ONLY_THP_FOR_FS - if (!mapping_thp_support(mapping)) + if (!mapping_large_folio_support(mapping)) atomic_inc(&mapping->nr_thps); #else WARN_ON_ONCE(1); @@ -203,7 +219,7 @@ static inline void filemap_nr_thps_inc(struct address_space *mapping) static inline void filemap_nr_thps_dec(struct address_space *mapping) { #ifdef CONFIG_READ_ONLY_THP_FOR_FS - if (!mapping_thp_support(mapping)) + if (!mapping_large_folio_support(mapping)) atomic_dec(&mapping->nr_thps); #else WARN_ON_ONCE(1); diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index b31d3f3312ce..d73a1c08c3e3 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -51,9 +51,9 @@ #define _LINUX_PERCPU_REFCOUNT_H #include <linux/atomic.h> -#include <linux/kernel.h> #include <linux/percpu.h> #include <linux/rcupdate.h> +#include <linux/types.h> #include <linux/gfp.h> struct percpu_ref; diff --git a/include/linux/phy.h b/include/linux/phy.h index 96e43fbb2dd8..cbf03a5f9cf5 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -538,11 +538,12 @@ struct macsec_ops; * @mac_managed_pm: Set true if MAC driver takes of suspending/resuming PHY * @state: State of the PHY for management purposes * @dev_flags: Device-specific flags used by the PHY driver. - * Bits [15:0] are free to use by the PHY driver to communicate - * driver specific behavior. - * Bits [23:16] are currently reserved for future use. - * Bits [31:24] are reserved for defining generic - * PHY driver behavior. + * + * - Bits [15:0] are free to use by the PHY driver to communicate + * driver specific behavior. + * - Bits [23:16] are currently reserved for future use. + * - Bits [31:24] are reserved for defining generic + * PHY driver behavior. * @irq: IRQ number of the PHY's interrupt (-1 if none) * @phy_timer: The timer for handling the state machine * @phylink: Pointer to phylink instance for this PHY diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 222da43b7096..eddd66d426ca 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -129,7 +129,7 @@ static inline bool pm_runtime_suspended(struct device *dev) * pm_runtime_active - Check whether or not a device is runtime-active. * @dev: Target device. * - * Return %true if runtime PM is enabled for @dev and its runtime PM status is + * Return %true if runtime PM is disabled for @dev or its runtime PM status is * %RPM_ACTIVE, or %false otherwise. 
* * Note that the return value of this function can only be trusted if it is diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h index ae04968a3a47..9afd34a2d36c 100644 --- a/include/linux/ptp_classify.h +++ b/include/linux/ptp_classify.h @@ -37,6 +37,7 @@ #define PTP_MSGTYPE_PDELAY_RESP 0x3 #define PTP_EV_PORT 319 +#define PTP_GEN_PORT 320 #define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */ #define OFF_PTP_SOURCE_UUID 22 /* PTPv1 only */ diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index bd7a73db2e66..54cf566616ae 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -499,7 +499,8 @@ struct regulator_irq_data { * best to shut-down regulator(s) or reboot the SOC if error * handling is repeatedly failing. If fatal_cnt is given the IRQ * handling is aborted if it fails for fatal_cnt times and die() - * callback (if populated) or BUG() is called to try to prevent + * callback (if populated) is called. If die() is not populated + * poweroff for the system is attempted in order to prevent any * further damage. * @reread_ms: The time which is waited before attempting to re-read status * at the worker if IC reading fails. Immediate re-read is done @@ -516,11 +517,12 @@ struct regulator_irq_data { * @data: Driver private data pointer which will be passed as such to * the renable, map_event and die callbacks in regulator_irq_data. * @die: Protection callback. If IC status reading or recovery actions - * fail fatal_cnt times this callback or BUG() is called. This - * callback should implement a final protection attempt like - * disabling the regulator. If protection succeeded this may - * return 0. If anything else is returned the core assumes final - * protection failed and calls BUG() as a last resort. + * fail fatal_cnt times this callback is called or system is + * powered off. This callback should implement a final protection + * attempt like disabling the regulator. If protection succeeded + * die() may return 0. If anything else is returned the core + * assumes final protection failed and attempts to perform a + * poweroff as a last resort. * @map_event: Driver callback to map IRQ status into regulator devices with * events / errors. 
NOTE: callback MUST initialize both the * errors and notifs for all rdevs which it signals having diff --git a/include/linux/sched/cputime.h b/include/linux/sched/cputime.h index 6c9f19a33865..ce3c58286062 100644 --- a/include/linux/sched/cputime.h +++ b/include/linux/sched/cputime.h @@ -18,15 +18,16 @@ #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN -extern void task_cputime(struct task_struct *t, +extern bool task_cputime(struct task_struct *t, u64 *utime, u64 *stime); extern u64 task_gtime(struct task_struct *t); #else -static inline void task_cputime(struct task_struct *t, +static inline bool task_cputime(struct task_struct *t, u64 *utime, u64 *stime) { *utime = t->utime; *stime = t->stime; + return false; } static inline u64 task_gtime(struct task_struct *t) diff --git a/include/linux/siphash.h b/include/linux/siphash.h index bf21591a9e5e..0cda61855d90 100644 --- a/include/linux/siphash.h +++ b/include/linux/siphash.h @@ -27,9 +27,7 @@ static inline bool siphash_key_is_zero(const siphash_key_t *key) } u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key); -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key); -#endif u64 siphash_1u64(const u64 a, const siphash_key_t *key); u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key); @@ -82,10 +80,9 @@ static inline u64 ___siphash_aligned(const __le64 *data, size_t len, static inline u64 siphash(const void *data, size_t len, const siphash_key_t *key) { -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS - if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT)) + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || + !IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT)) return __siphash_unaligned(data, len, key); -#endif return ___siphash_aligned(data, len, key); } @@ -96,10 +93,8 @@ typedef struct { u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key); -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u32 __hsiphash_unaligned(const void *data, size_t len, const hsiphash_key_t *key); -#endif u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key); u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key); @@ -135,10 +130,9 @@ static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len, static inline u32 hsiphash(const void *data, size_t len, const hsiphash_key_t *key) { -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS - if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT)) + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || + !IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT)) return __hsiphash_unaligned(data, len, key); -#endif return ___hsiphash_aligned(data, len, key); } diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 44d0e09da2d9..41edbc01ffa4 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -152,7 +152,6 @@ size_t virtio_max_dma_size(struct virtio_device *vdev); * @feature_table_size: number of entries in the feature table array. * @feature_table_legacy: same as feature_table but when working in legacy mode. * @feature_table_size_legacy: number of entries in feature table legacy array. - * @suppress_used_validation: set to not have core validate used length * @probe: the function to call when a device is found. Returns 0 or -errno. * @scan: optional function to call after successful probe; intended * for virtio-scsi to invoke a scan. 
@@ -169,7 +168,6 @@ struct virtio_driver { unsigned int feature_table_size; const unsigned int *feature_table_legacy; unsigned int feature_table_size_legacy; - bool suppress_used_validation; int (*validate)(struct virtio_device *dev); int (*probe)(struct virtio_device *dev); void (*scan)(struct virtio_device *dev); diff --git a/include/linux/wait.h b/include/linux/wait.h index 2d0df57c9902..851e07da2583 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -217,6 +217,7 @@ void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key); void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr); void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode); +void __wake_up_pollfree(struct wait_queue_head *wq_head); #define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL) #define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL) @@ -245,6 +246,31 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode); #define wake_up_interruptible_sync_poll_locked(x, m) \ __wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m)) +/** + * wake_up_pollfree - signal that a polled waitqueue is going away + * @wq_head: the wait queue head + * + * In the very rare cases where a ->poll() implementation uses a waitqueue whose + * lifetime is tied to a task rather than to the 'struct file' being polled, + * this function must be called before the waitqueue is freed so that + * non-blocking polls (e.g. epoll) are notified that the queue is going away. + * + * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via + * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU. + */ +static inline void wake_up_pollfree(struct wait_queue_head *wq_head) +{ + /* + * For performance reasons, we don't always take the queue lock here. + * Therefore, we might race with someone removing the last entry from + * the queue, and proceed while they still hold the queue lock. + * However, rcu_read_lock() is required to be held in such cases, so we + * can safely proceed with an RCU-delayed free. + */ + if (waitqueue_active(wq_head)) + __wake_up_pollfree(wq_head); +} + #define ___wait_cond_timeout(condition) \ ({ \ bool __cond = (condition); \ diff --git a/include/net/bond_alb.h b/include/net/bond_alb.h index f6af76c87a6c..191c36afa1f4 100644 --- a/include/net/bond_alb.h +++ b/include/net/bond_alb.h @@ -126,7 +126,7 @@ struct tlb_slave_info { struct alb_bond_info { struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */ u32 unbalanced_load; - int tx_rebalance_counter; + atomic_t tx_rebalance_counter; int lp_counter; /* -------- rlb parameters -------- */ int rlb_enabled; diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index 4202c609bb0b..c4898fcbf923 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -133,6 +133,19 @@ static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb) if (unlikely(READ_ONCE(sk->sk_napi_id) != skb->napi_id)) WRITE_ONCE(sk->sk_napi_id, skb->napi_id); #endif + sk_rx_queue_update(sk, skb); +} + +/* Variant of sk_mark_napi_id() for passive flow setup, + * as sk->sk_napi_id and sk->sk_rx_queue_mapping content + * needs to be set. 
+ */ +static inline void sk_mark_napi_id_set(struct sock *sk, + const struct sk_buff *skb) +{ +#ifdef CONFIG_NET_RX_BUSY_POLL + WRITE_ONCE(sk->sk_napi_id, skb->napi_id); +#endif sk_rx_queue_set(sk, skb); } diff --git a/include/net/dst_cache.h b/include/net/dst_cache.h index 67634675e919..df6622a5fe98 100644 --- a/include/net/dst_cache.h +++ b/include/net/dst_cache.h @@ -80,6 +80,17 @@ static inline void dst_cache_reset(struct dst_cache *dst_cache) } /** + * dst_cache_reset_now - invalidate the cache contents immediately + * @dst_cache: the cache + * + * The caller must be sure there are no concurrent users, as this frees + * all dst_cache users immediately, rather than waiting for the next + * per-cpu usage like dst_cache_reset does. Most callers should use the + * higher speed lazily-freed dst_cache_reset function instead. + */ +void dst_cache_reset_now(struct dst_cache *dst_cache); + +/** * dst_cache_init - initialize the cache, allocating the required storage * @dst_cache: the cache * @gfp: allocation flags diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index 4b10676c69d1..bd07484ab9dd 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h @@ -69,7 +69,7 @@ struct fib_rules_ops { int (*action)(struct fib_rule *, struct flowi *, int, struct fib_lookup_arg *); - bool (*suppress)(struct fib_rule *, + bool (*suppress)(struct fib_rule *, int, struct fib_lookup_arg *); int (*match)(struct fib_rule *, struct flowi *, int); @@ -218,7 +218,9 @@ INDIRECT_CALLABLE_DECLARE(int fib4_rule_action(struct fib_rule *rule, struct fib_lookup_arg *arg)); INDIRECT_CALLABLE_DECLARE(bool fib6_rule_suppress(struct fib_rule *rule, + int flags, struct fib_lookup_arg *arg)); INDIRECT_CALLABLE_DECLARE(bool fib4_rule_suppress(struct fib_rule *rule, + int flags, struct fib_lookup_arg *arg)); #endif diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index c412dde4d67d..83b8070d1cc9 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -485,6 +485,7 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh, struct fib6_config *cfg, gfp_t gfp_flags, struct netlink_ext_ack *extack); void fib6_nh_release(struct fib6_nh *fib6_nh); +void fib6_nh_release_dsts(struct fib6_nh *fib6_nh); int call_fib6_entry_notifiers(struct net *net, enum fib_event_type event_type, diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index ab5348e57db1..3417ba2d27ad 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -438,7 +438,7 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, #ifdef CONFIG_IP_ROUTE_CLASSID static inline int fib_num_tclassid_users(struct net *net) { - return net->ipv4.fib_num_tclassid_users; + return atomic_read(&net->ipv4.fib_num_tclassid_users); } #else static inline int fib_num_tclassid_users(struct net *net) diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h index afbce90c4480..45e0339be6fa 100644 --- a/include/net/ipv6_stubs.h +++ b/include/net/ipv6_stubs.h @@ -47,6 +47,7 @@ struct ipv6_stub { struct fib6_config *cfg, gfp_t gfp_flags, struct netlink_ext_ack *extack); void (*fib6_nh_release)(struct fib6_nh *fib6_nh); + void (*fib6_nh_release_dsts)(struct fib6_nh *fib6_nh); void (*fib6_update_sernum)(struct net *net, struct fib6_info *rt); int (*ip6_del_rt)(struct net *net, struct fib6_info *rt, bool skip_notify); void (*fib6_rt_update)(struct net *net, struct fib6_info *rt, diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index cc663c68ddc4..d24b0a34c8f0 100644 --- 
a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -276,14 +276,14 @@ static inline bool nf_is_loopback_packet(const struct sk_buff *skb) /* jiffies until ct expires, 0 if already expired */ static inline unsigned long nf_ct_expires(const struct nf_conn *ct) { - s32 timeout = ct->timeout - nfct_time_stamp; + s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp; return timeout > 0 ? timeout : 0; } static inline bool nf_ct_is_expired(const struct nf_conn *ct) { - return (__s32)(ct->timeout - nfct_time_stamp) <= 0; + return (__s32)(READ_ONCE(ct->timeout) - nfct_time_stamp) <= 0; } /* use after obtaining a reference count */ @@ -302,7 +302,7 @@ static inline bool nf_ct_should_gc(const struct nf_conn *ct) static inline void nf_ct_offload_timeout(struct nf_conn *ct) { if (nf_ct_expires(ct) < NF_CT_DAY / 2) - ct->timeout = nfct_time_stamp + NF_CT_DAY; + WRITE_ONCE(ct->timeout, nfct_time_stamp + NF_CT_DAY); } struct kernel_param; diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 2f65701a43c9..6c5b2efc4f17 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -65,7 +65,7 @@ struct netns_ipv4 { bool fib_has_custom_local_routes; bool fib_offload_disabled; #ifdef CONFIG_IP_ROUTE_CLASSID - int fib_num_tclassid_users; + atomic_t fib_num_tclassid_users; #endif struct hlist_head *fib_table_hash; struct sock *fibnl; diff --git a/include/net/nl802154.h b/include/net/nl802154.h index ddcee128f5d9..145acb8f2509 100644 --- a/include/net/nl802154.h +++ b/include/net/nl802154.h @@ -19,6 +19,8 @@ * */ +#include <linux/types.h> + #define NL802154_GENL_NAME "nl802154" enum nl802154_commands { @@ -150,10 +152,9 @@ enum nl802154_attrs { }; enum nl802154_iftype { - /* for backwards compatibility TODO */ - NL802154_IFTYPE_UNSPEC = -1, + NL802154_IFTYPE_UNSPEC = (~(__u32)0), - NL802154_IFTYPE_NODE, + NL802154_IFTYPE_NODE = 0, NL802154_IFTYPE_MONITOR, NL802154_IFTYPE_COORD, diff --git a/include/net/sock.h b/include/net/sock.h index b32906e1ab55..bea21ff70e74 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1913,18 +1913,31 @@ static inline int sk_tx_queue_get(const struct sock *sk) return -1; } -static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb) +static inline void __sk_rx_queue_set(struct sock *sk, + const struct sk_buff *skb, + bool force_set) { #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING if (skb_rx_queue_recorded(skb)) { u16 rx_queue = skb_get_rx_queue(skb); - if (unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue)) + if (force_set || + unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue)) WRITE_ONCE(sk->sk_rx_queue_mapping, rx_queue); } #endif } +static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb) +{ + __sk_rx_queue_set(sk, skb, true); +} + +static inline void sk_rx_queue_update(struct sock *sk, const struct sk_buff *skb) +{ + __sk_rx_queue_set(sk, skb, false); +} + static inline void sk_rx_queue_clear(struct sock *sk) { #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING @@ -2430,19 +2443,22 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk) * @sk: socket * * Use the per task page_frag instead of the per socket one for - * optimization when we know that we're in the normal context and owns + * optimization when we know that we're in process context and own * everything that's associated with %current. 
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 2f65701a43c9..6c5b2efc4f17 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -65,7 +65,7 @@ struct netns_ipv4 { bool fib_has_custom_local_routes; bool fib_offload_disabled; #ifdef CONFIG_IP_ROUTE_CLASSID - int fib_num_tclassid_users; + atomic_t fib_num_tclassid_users; #endif struct hlist_head *fib_table_hash; struct sock *fibnl; diff --git a/include/net/nl802154.h b/include/net/nl802154.h index ddcee128f5d9..145acb8f2509 100644 --- a/include/net/nl802154.h +++ b/include/net/nl802154.h @@ -19,6 +19,8 @@ * */ +#include <linux/types.h> + #define NL802154_GENL_NAME "nl802154" enum nl802154_commands { @@ -150,10 +152,9 @@ enum nl802154_attrs { }; enum nl802154_iftype { - /* for backwards compatibility TODO */ - NL802154_IFTYPE_UNSPEC = -1, + NL802154_IFTYPE_UNSPEC = (~(__u32)0), - NL802154_IFTYPE_NODE, + NL802154_IFTYPE_NODE = 0, NL802154_IFTYPE_MONITOR, NL802154_IFTYPE_COORD, diff --git a/include/net/sock.h b/include/net/sock.h index b32906e1ab55..bea21ff70e74 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1913,18 +1913,31 @@ static inline int sk_tx_queue_get(const struct sock *sk) return -1; } -static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb) +static inline void __sk_rx_queue_set(struct sock *sk, + const struct sk_buff *skb, + bool force_set) { #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING if (skb_rx_queue_recorded(skb)) { u16 rx_queue = skb_get_rx_queue(skb); - if (unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue)) + if (force_set || + unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue)) WRITE_ONCE(sk->sk_rx_queue_mapping, rx_queue); } #endif } +static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb) +{ + __sk_rx_queue_set(sk, skb, true); +} + +static inline void sk_rx_queue_update(struct sock *sk, const struct sk_buff *skb) +{ + __sk_rx_queue_set(sk, skb, false); +} + static inline void sk_rx_queue_clear(struct sock *sk) { #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING @@ -2430,19 +2443,22 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk) * @sk: socket * * Use the per task page_frag instead of the per socket one for - * optimization when we know that we're in the normal context and owns + * optimization when we know that we're in process context and own * everything that's associated with %current. * - * gfpflags_allow_blocking() isn't enough here as direct reclaim may nest - * inside other socket operations and end up recursing into sk_page_frag() - * while it's already in use. + * Both direct reclaim and page faults can nest inside other + * socket operations and end up recursing into sk_page_frag() + * while it's already in use: explicitly avoid task page_frag + * usage if the caller is potentially doing any of them. + * This assumes that page fault handlers use the GFP_NOFS flags. * * Return: a per task page_frag if context allows that, * otherwise a per socket one. */ static inline struct page_frag *sk_page_frag(struct sock *sk) { - if (gfpflags_normal_context(sk->sk_allocation)) + if ((sk->sk_allocation & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC | __GFP_FS)) == + (__GFP_DIRECT_RECLAIM | __GFP_FS)) return &current->task_frag; return &sk->sk_frag;
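The open-coded mask test that replaces gfpflags_normal_context() in sk_page_frag() admits the task page_frag only when the socket's allocation mode allows direct reclaim and filesystem calls and is not a memalloc socket. An editorial illustration of how that predicate resolves for common modes; the helper name is hypothetical:

#include <linux/gfp.h>

/* Mirrors the new test in sk_page_frag():
 *
 *   GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
 *       has __GFP_DIRECT_RECLAIM and __GFP_FS, lacks __GFP_MEMALLOC
 *       -> true: the per-task page_frag is safe.
 *   GFP_ATOMIC
 *       lacks __GFP_DIRECT_RECLAIM -> false: use the socket's frag.
 *   GFP_NOFS (__GFP_RECLAIM | __GFP_IO)
 *       lacks __GFP_FS -> false, which is what keeps a page fault
 *       handler (assumed to allocate with GFP_NOFS) from recursing
 *       into the task page_frag.
 */
static bool sk_allocation_allows_task_frag(gfp_t allocation)
{
        const gfp_t mask = __GFP_DIRECT_RECLAIM | __GFP_MEMALLOC | __GFP_FS;

        return (allocation & mask) == (__GFP_DIRECT_RECLAIM | __GFP_FS);
}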
diff --git a/include/soc/mscc/ocelot_vcap.h b/include/soc/mscc/ocelot_vcap.h index eeb1142aa1b1..4d1dfa1136b2 100644 --- a/include/soc/mscc/ocelot_vcap.h +++ b/include/soc/mscc/ocelot_vcap.h @@ -703,6 +703,8 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot, struct netlink_ext_ack *extack); int ocelot_vcap_filter_del(struct ocelot *ocelot, struct ocelot_vcap_filter *rule); +int ocelot_vcap_filter_replace(struct ocelot *ocelot, + struct ocelot_vcap_filter *filter); struct ocelot_vcap_filter * ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, unsigned long cookie, bool tc_offload); diff --git a/include/soc/tegra/common.h b/include/soc/tegra/common.h index af41ad80ec21..8ec1ac07fc85 100644 --- a/include/soc/tegra/common.h +++ b/include/soc/tegra/common.h @@ -39,4 +39,19 @@ devm_tegra_core_dev_init_opp_table(struct device *dev, } #endif +static inline int +devm_tegra_core_dev_init_opp_table_common(struct device *dev) +{ + struct tegra_core_opp_params opp_params = {}; + int err; + + opp_params.init_state = true; + + err = devm_tegra_core_dev_init_opp_table(dev, &opp_params); + if (err != -ENODEV) + return err; + + return 0; +} + #endif /* __SOC_TEGRA_COMMON_H__ */ diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h index 31f4c4f9aeea..ac0893df9c76 100644 --- a/include/sound/soc-acpi.h +++ b/include/sound/soc-acpi.h @@ -147,7 +147,7 @@ struct snd_soc_acpi_link_adr { */ /* Descriptor for SST ASoC machine driver */ struct snd_soc_acpi_mach { - const u8 id[ACPI_ID_LEN]; + u8 id[ACPI_ID_LEN]; const struct snd_soc_acpi_codecs *comp_ids; const u32 link_mask; const struct snd_soc_acpi_link_adr *links; diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h index 3ba63319af3c..c9048f3e471b 100644 --- a/include/trace/events/rpcgss.h +++ b/include/trace/events/rpcgss.h @@ -8,7 +8,7 @@ #undef TRACE_SYSTEM #define TRACE_SYSTEM rpcgss -#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ) +#if !defined(_TRACE_RPCGSS_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_RPCGSS_H #include <linux/tracepoint.h> diff --git a/include/uapi/asm-generic/poll.h b/include/uapi/asm-generic/poll.h index 41b509f410bf..f9c520ce4bf4 100644 --- a/include/uapi/asm-generic/poll.h +++ b/include/uapi/asm-generic/poll.h @@ -29,7 +29,7 @@ #define POLLRDHUP 0x2000 #endif -#define POLLFREE (__force __poll_t)0x4000 /* currently only for epoll */ +#define POLLFREE (__force __poll_t)0x4000 #define POLL_BUSY_LOOP (__force __poll_t)0x8000 diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 26e45fc5eb1a..0b94ec7b73e7 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -80,7 +80,7 @@ extern "C" { * * %AMDGPU_GEM_DOMAIN_GTT GPU accessible system memory, mapped into the * GPU's virtual address space via gart. Gart memory linearizes non-contiguous - * pages of system memory, allows GPU access system memory in a linezrized + * pages of system memory, allows GPU access system memory in a linearized * fashion. * * %AMDGPU_GEM_DOMAIN_VRAM Local video memory. For APUs, it is memory diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 3b810b53ba8b..642808520d92 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -1096,6 +1096,24 @@ extern "C" { #define DRM_IOCTL_SYNCOBJ_TRANSFER DRM_IOWR(0xCC, struct drm_syncobj_transfer) #define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL DRM_IOWR(0xCD, struct drm_syncobj_timeline_array) +/** + * DRM_IOCTL_MODE_GETFB2 - Get framebuffer metadata. + * + * This queries metadata about a framebuffer. User-space fills + * &drm_mode_fb_cmd2.fb_id as the input, and the kernel fills the rest of the + * struct as the output. + * + * If the client is DRM master or has &CAP_SYS_ADMIN, &drm_mode_fb_cmd2.handles + * will be filled with GEM buffer handles. Planes are valid until one has a + * zero handle -- this can be used to compute the number of planes. + * + * Otherwise, &drm_mode_fb_cmd2.handles will be zeroed and planes are valid + * until one has a zero &drm_mode_fb_cmd2.pitches. + * + * If the framebuffer has a format modifier, &DRM_MODE_FB_MODIFIERS will be set + * in &drm_mode_fb_cmd2.flags and &drm_mode_fb_cmd2.modifier will contain the + * modifier. Otherwise, user-space must ignore &drm_mode_fb_cmd2.modifier. + */ #define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2) /*
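The plane-validity rule in the new DRM_IOCTL_MODE_GETFB2 documentation reduces to a short user-space scan: privileged (master or CAP_SYS_ADMIN) callers stop at the first zero handle, everyone else at the first zero pitch. A hedged sketch; the helper name is illustrative:

#include <stdbool.h>
#include <drm/drm_mode.h>

static int getfb2_plane_count(const struct drm_mode_fb_cmd2 *fb,
                              bool privileged)
{
        int n;

        /* handles[] is populated only for privileged callers; for
         * everyone else the pitches[] array terminates the planes.
         */
        for (n = 0; n < 4; n++)
                if (privileged ? !fb->handles[n] : !fb->pitches[n])
                        break;

        return n;
}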
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index 7f652c96845b..fc0c1454d275 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -314,6 +314,13 @@ extern "C" { */ #define DRM_FORMAT_P016 fourcc_code('P', '0', '1', '6') /* 2x2 subsampled Cr:Cb plane 16 bits per channel */ +/* 2 plane YCbCr420. + * 3 10 bit components and 2 padding bits packed into 4 bytes. + * index 0 = Y plane, [31:0] x:Y2:Y1:Y0 2:10:10:10 little endian + * index 1 = Cr:Cb plane, [63:0] x:Cr2:Cb2:Cr1:x:Cb1:Cr0:Cb0 [2:10:10:10:2:10:10:10] little endian + */ +#define DRM_FORMAT_P030 fourcc_code('P', '0', '3', '0') /* 2x2 subsampled Cr:Cb plane 10 bits per channel packed */ + /* 3 plane non-subsampled (444) YCbCr * 16 bits per component, but only 10 bits are used and 6 bits are padded * index 0: Y plane, [15:0] Y:x [10:6] little endian @@ -854,6 +861,10 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier) * and UV. Some SAND-using hardware stores UV in a separate tiled * image from Y to reduce the column height, which is not supported * with these modifiers. + * + * The DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT modifier is also + * supported for DRM_FORMAT_P030 where the columns remain as 128 bytes + * wide, but as this is a 10 bpp format that translates to 96 pixels. */ #define DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(v) \ diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h index a13e20cc66b4..0512fde5e697 100644 --- a/include/uapi/drm/virtgpu_drm.h +++ b/include/uapi/drm/virtgpu_drm.h @@ -196,6 +196,13 @@ struct drm_virtgpu_context_init { __u64 ctx_set_params; }; +/* + * Event code that's given when VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK is in + * effect. The event size is sizeof(drm_event), since there is no additional + * payload. + */ +#define VIRTGPU_EVENT_FENCE_SIGNALED 0x90000000 + #define DRM_IOCTL_VIRTGPU_MAP \ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map) diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h index 9078775feb51..8277644c1144 100644 --- a/include/uapi/drm/vmwgfx_drm.h +++ b/include/uapi/drm/vmwgfx_drm.h @@ -110,6 +110,7 @@ extern "C" { #define DRM_VMW_PARAM_HW_CAPS2 13 #define DRM_VMW_PARAM_SM4_1 14 #define DRM_VMW_PARAM_SM5 15 +#define DRM_VMW_PARAM_GL43 16 /** * enum drm_vmw_handle_type - handle type for ref ioctls diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h index 5da4ee234e0b..c0c2f3ed5729 100644 --- a/include/uapi/linux/if_ether.h +++ b/include/uapi/linux/if_ether.h @@ -117,7 +117,7 @@ #define ETH_P_IFE 0xED3E /* ForCES inter-FE LFB type */ #define ETH_P_AF_IUCV 0xFBFB /* IBM af_iucv [ NOT AN OFFICIALLY REGISTERED ID ] */ -#define ETH_P_802_3_MIN 0x0600 /* If the value in the ethernet type is less than this value +#define ETH_P_802_3_MIN 0x0600 /* If the value in the ethernet type is more than this value * then the frame is Ethernet II. Else it is 802.3 */ /*
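The corrected if_ether.h comment still compresses the rule slightly; the precise test is greater-or-equal: a length/type field of at least ETH_P_802_3_MIN (0x0600) marks an Ethernet II frame, anything smaller is an 802.3 length field. An illustrative one-liner, not part of the patch:

#include <linux/if_ether.h>
#include <asm/byteorder.h>

static bool frame_is_ethernet_ii(__be16 len_or_type)
{
        /* 0x0600 and above: EtherType; below: 802.3 payload length. */
        return be16_to_cpu(len_or_type) >= ETH_P_802_3_MIN;
}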
diff --git a/include/uapi/linux/kfd_sysfs.h b/include/uapi/linux/kfd_sysfs.h new file mode 100644 index 000000000000..3e330f368917 --- /dev/null +++ b/include/uapi/linux/kfd_sysfs.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef KFD_SYSFS_H_INCLUDED +#define KFD_SYSFS_H_INCLUDED + +/* Capability bits in node properties */ +#define HSA_CAP_HOT_PLUGGABLE 0x00000001 +#define HSA_CAP_ATS_PRESENT 0x00000002 +#define HSA_CAP_SHARED_WITH_GRAPHICS 0x00000004 +#define HSA_CAP_QUEUE_SIZE_POW2 0x00000008 +#define HSA_CAP_QUEUE_SIZE_32BIT 0x00000010 +#define HSA_CAP_QUEUE_IDLE_EVENT 0x00000020 +#define HSA_CAP_VA_LIMIT 0x00000040 +#define HSA_CAP_WATCH_POINTS_SUPPORTED 0x00000080 +#define HSA_CAP_WATCH_POINTS_TOTALBITS_MASK 0x00000f00 +#define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT 8 +#define HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK 0x00003000 +#define HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT 12 + +#define HSA_CAP_DOORBELL_TYPE_PRE_1_0 0x0 +#define HSA_CAP_DOORBELL_TYPE_1_0 0x1 +#define HSA_CAP_DOORBELL_TYPE_2_0 0x2 +#define HSA_CAP_AQL_QUEUE_DOUBLE_MAP 0x00004000 + +/* Old buggy user mode depends on this being 0 */ +#define HSA_CAP_RESERVED_WAS_SRAM_EDCSUPPORTED 0x00080000 + +#define HSA_CAP_MEM_EDCSUPPORTED 0x00100000 +#define HSA_CAP_RASEVENTNOTIFY 0x00200000 +#define HSA_CAP_ASIC_REVISION_MASK 0x03c00000 +#define HSA_CAP_ASIC_REVISION_SHIFT 22 +#define HSA_CAP_SRAM_EDCSUPPORTED 0x04000000 +#define HSA_CAP_SVMAPI_SUPPORTED 0x08000000 +#define HSA_CAP_FLAGS_COHERENTHOSTACCESS 0x10000000 +#define HSA_CAP_RESERVED 0xe00f8000 + +/* Heap types in memory properties */ +#define HSA_MEM_HEAP_TYPE_SYSTEM 0 +#define HSA_MEM_HEAP_TYPE_FB_PUBLIC 1 +#define HSA_MEM_HEAP_TYPE_FB_PRIVATE 2 +#define HSA_MEM_HEAP_TYPE_GPU_GDS 3 +#define HSA_MEM_HEAP_TYPE_GPU_LDS 4 +#define HSA_MEM_HEAP_TYPE_GPU_SCRATCH 5 + +/* Flag bits in memory properties */ +#define HSA_MEM_FLAGS_HOT_PLUGGABLE 0x00000001 +#define HSA_MEM_FLAGS_NON_VOLATILE 0x00000002 +#define HSA_MEM_FLAGS_RESERVED 0xfffffffc + +/* Cache types in cache properties */ +#define HSA_CACHE_TYPE_DATA 0x00000001 +#define HSA_CACHE_TYPE_INSTRUCTION 0x00000002 +#define HSA_CACHE_TYPE_CPU 0x00000004 +#define HSA_CACHE_TYPE_HSACU 0x00000008 +#define HSA_CACHE_TYPE_RESERVED 0xfffffff0 + +/* Link types in IO link properties (matches CRAT link types) */ +#define HSA_IOLINK_TYPE_UNDEFINED 0 +#define HSA_IOLINK_TYPE_HYPERTRANSPORT 1 +#define HSA_IOLINK_TYPE_PCIEXPRESS 2 +#define HSA_IOLINK_TYPE_AMBA 3 +#define HSA_IOLINK_TYPE_MIPI 4 +#define HSA_IOLINK_TYPE_QPI_1_1 5 +#define HSA_IOLINK_TYPE_RESERVED1 6 +#define HSA_IOLINK_TYPE_RESERVED2 7 +#define HSA_IOLINK_TYPE_RAPID_IO 8 +#define HSA_IOLINK_TYPE_INFINIBAND 9 +#define HSA_IOLINK_TYPE_RESERVED3 10 +#define HSA_IOLINK_TYPE_XGMI 11 +#define HSA_IOLINK_TYPE_XGOP 12 +#define HSA_IOLINK_TYPE_GZ 13 +#define HSA_IOLINK_TYPE_ETHERNET_RDMA 14 +#define HSA_IOLINK_TYPE_RDMA_OTHER 15 +#define HSA_IOLINK_TYPE_OTHER 16 + +/* Flag bits in IO link properties (matches CRAT flags, excluding the + * bi-directional flag, which is not officially part of the CRAT spec, and + * only used internally in KFD) + */ +#define HSA_IOLINK_FLAGS_ENABLED (1 << 0) +#define HSA_IOLINK_FLAGS_NON_COHERENT (1 << 1) +#define HSA_IOLINK_FLAGS_NO_ATOMICS_32_BIT (1 << 2) +#define HSA_IOLINK_FLAGS_NO_ATOMICS_64_BIT (1 << 3) +#define HSA_IOLINK_FLAGS_NO_PEER_TO_PEER_DMA (1 << 4) +#define HSA_IOLINK_FLAGS_RESERVED 0xffffffe0 + +#endif
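Several of the new kfd_sysfs.h node properties pack a multi-bit field into the capability word behind a MASK/SHIFT pair. A small illustrative decoder (the function names are not part of the header):

#include <linux/kfd_sysfs.h>

static unsigned int hsa_asic_revision(unsigned int caps)
{
        return (caps & HSA_CAP_ASIC_REVISION_MASK) >>
               HSA_CAP_ASIC_REVISION_SHIFT;
}

static unsigned int hsa_watch_point_bits(unsigned int caps)
{
        return (caps & HSA_CAP_WATCH_POINTS_TOTALBITS_MASK) >>
               HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT;
}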
diff --git a/include/uapi/linux/resource.h b/include/uapi/linux/resource.h index 74ef57b38f9f..ac5d6a3031db 100644 --- a/include/uapi/linux/resource.h +++ b/include/uapi/linux/resource.h @@ -66,10 +66,17 @@ struct rlimit64 { #define _STK_LIM (8*1024*1024) /* - * GPG2 wants 64kB of mlocked memory, to make sure pass phrases - * and other sensitive information are never written to disk. + * Limit the amount of locked memory by some sane default: + * root can always increase this limit if needed. + * + * The main use-cases are (1) preventing sensitive memory + * from being swapped; (2) real-time operations; (3) via + * IORING_REGISTER_BUFFERS. + * + * The first two don't need much. The latter will take as + * much as it can get. 8MB is a reasonably sane default. */ -#define MLOCK_LIMIT ((PAGE_SIZE > 64*1024) ? PAGE_SIZE : 64*1024) +#define MLOCK_LIMIT (8*1024*1024) /* * Due to binary compatibility, the actual resource numbers diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h index b94074c82772..b13eb86395e0 100644 --- a/include/xen/xenbus.h +++ b/include/xen/xenbus.h @@ -112,6 +112,7 @@ struct xenbus_driver { const char *name; /* defaults to ids[0].devicetype */ const struct xenbus_device_id *ids; bool allow_rebind; /* avoid setting xenstore closed during remove */ + bool not_essential; /* is not mandatory for boot progress */ int (*probe)(struct xenbus_device *dev, const struct xenbus_device_id *id); void (*otherend_changed)(struct xenbus_device *dev,
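A frontend driver that should not hold up the boot-time wait for devices would opt in to the new xenbus not_essential flag roughly as below; the driver name, ID table, and callback bodies are illustrative:

#include <xen/xenbus.h>

static const struct xenbus_device_id example_ids[] = {
        { "example" },
        { "" }
};

static int example_probe(struct xenbus_device *dev,
                         const struct xenbus_device_id *id)
{
        return 0;       /* stub */
}

static void example_otherend_changed(struct xenbus_device *dev,
                                     enum xenbus_state backend_state)
{
        /* stub */
}

static struct xenbus_driver example_driver = {
        .ids = example_ids,
        .not_essential = true,  /* never required for boot progress */
        .probe = example_probe,
        .otherend_changed = example_otherend_changed,
};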