diff options
Diffstat (limited to 'include/uapi/linux')
27 files changed, 1060 insertions, 25 deletions
| diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index c994ff5b157c..60a9d59beeab 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1273,6 +1273,9 @@ enum {  /* Create a map that will be registered/unregesitered by the backed bpf_link */  	BPF_F_LINK		= (1U << 13), + +/* Get path from provided FD in BPF_OBJ_PIN/BPF_OBJ_GET commands */ +	BPF_F_PATH_FD		= (1U << 14),  };  /* Flags for BPF_PROG_QUERY. */ @@ -1421,6 +1424,13 @@ union bpf_attr {  		__aligned_u64	pathname;  		__u32		bpf_fd;  		__u32		file_flags; +		/* Same as dirfd in openat() syscall; see openat(2) +		 * manpage for details of path FD and pathname semantics; +		 * path_fd should accompanied by BPF_F_PATH_FD flag set in +		 * file_flags field, otherwise it should be set to zero; +		 * if BPF_F_PATH_FD flag is not set, AT_FDCWD is assumed. +		 */ +		__s32		path_fd;  	};  	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */ @@ -3168,6 +3178,10 @@ union bpf_attr {   *		**BPF_FIB_LOOKUP_DIRECT**   *			Do a direct table lookup vs full lookup using FIB   *			rules. + *		**BPF_FIB_LOOKUP_TBID** + *			Used with BPF_FIB_LOOKUP_DIRECT. + *			Use the routing table ID present in *params*->tbid + *			for the fib lookup.   *		**BPF_FIB_LOOKUP_OUTPUT**   *			Perform lookup from an egress perspective (default is   *			ingress). @@ -6822,6 +6836,7 @@ enum {  	BPF_FIB_LOOKUP_DIRECT  = (1U << 0),  	BPF_FIB_LOOKUP_OUTPUT  = (1U << 1),  	BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2), +	BPF_FIB_LOOKUP_TBID    = (1U << 3),  };  enum { @@ -6882,9 +6897,19 @@ struct bpf_fib_lookup {  		__u32		ipv6_dst[4];  /* in6_addr; network order */  	}; -	/* output */ -	__be16	h_vlan_proto; -	__be16	h_vlan_TCI; +	union { +		struct { +			/* output */ +			__be16	h_vlan_proto; +			__be16	h_vlan_TCI; +		}; +		/* input: when accompanied with the +		 * 'BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID` flags, a +		 * specific routing table to use for the fib lookup. 
+		 */ +		__u32	tbid; +	}; +  	__u8	smac[6];     /* ETH_ALEN */  	__u8	dmac[6];     /* ETH_ALEN */  }; diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h index dd645ea72306..939db2388208 100644 --- a/include/uapi/linux/can.h +++ b/include/uapi/linux/can.h @@ -285,6 +285,5 @@ struct can_filter {  };  #define CAN_INV_FILTER 0x20000000U /* to be set in can_filter.can_id */ -#define CAN_RAW_FILTER_MAX 512 /* maximum number of can_filter set via setsockopt() */  #endif /* !_UAPI_CAN_H */ diff --git a/include/uapi/linux/can/raw.h b/include/uapi/linux/can/raw.h index ff12f525c37c..31622c9b7988 100644 --- a/include/uapi/linux/can/raw.h +++ b/include/uapi/linux/can/raw.h @@ -49,6 +49,8 @@  #include <linux/can.h>  #define SOL_CAN_RAW (SOL_CAN_BASE + CAN_RAW) +#define CAN_RAW_FILTER_MAX 512 /* maximum number of can_filter set via setsockopt() */ +  enum {  	SCM_CAN_RAW_ERRQUEUE = 1,  }; diff --git a/include/uapi/linux/counter.h b/include/uapi/linux/counter.h index 8ab12d731e3b..fc248ef00e86 100644 --- a/include/uapi/linux/counter.h +++ b/include/uapi/linux/counter.h @@ -127,6 +127,12 @@ enum counter_count_mode {  	COUNTER_COUNT_MODE_RANGE_LIMIT,  	COUNTER_COUNT_MODE_NON_RECYCLE,  	COUNTER_COUNT_MODE_MODULO_N, +	COUNTER_COUNT_MODE_INTERRUPT_ON_TERMINAL_COUNT, +	COUNTER_COUNT_MODE_HARDWARE_RETRIGGERABLE_ONESHOT, +	COUNTER_COUNT_MODE_RATE_GENERATOR, +	COUNTER_COUNT_MODE_SQUARE_WAVE_MODE, +	COUNTER_COUNT_MODE_SOFTWARE_TRIGGERED_STROBE, +	COUNTER_COUNT_MODE_HARDWARE_TRIGGERED_STROBE,  };  /* Count function values */ diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index 4d1c8d46e7f0..0c8cf359ea5b 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -404,6 +404,8 @@ typedef struct elf64_shdr {  #define NT_PPC_TM_CPPR	0x10e		/* TM checkpointed Program Priority Register */  #define NT_PPC_TM_CDSCR	0x10f		/* TM checkpointed Data Stream Control Register */  #define NT_PPC_PKEY	0x110		/* Memory Protection Keys registers */ +#define 
NT_PPC_DEXCR	0x111		/* PowerPC DEXCR registers */ +#define NT_PPC_HASHKEYR	0x112		/* PowerPC HASHKEYR register */  #define NT_386_TLS	0x200		/* i386 TLS slots (struct user_desc) */  #define NT_386_IOPERM	0x201		/* x86 io permission bitmap (1=deny) */  #define NT_X86_XSTATE	0x202		/* x86 extended state using xsave */ @@ -441,6 +443,7 @@ typedef struct elf64_shdr {  #define NT_MIPS_DSP	0x800		/* MIPS DSP ASE registers */  #define NT_MIPS_FP_MODE	0x801		/* MIPS floating-point mode */  #define NT_MIPS_MSA	0x802		/* MIPS SIMD registers */ +#define NT_RISCV_VECTOR	0x900		/* RISC-V vector registers */  #define NT_LOONGARCH_CPUCFG	0xa00	/* LoongArch CPU config registers */  #define NT_LOONGARCH_CSR	0xa01	/* LoongArch control and status registers */  #define NT_LOONGARCH_LSX	0xa02	/* LoongArch Loongson SIMD Extension registers */ diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h index e8c07da58c9f..6c80f96049bd 100644 --- a/include/uapi/linux/fcntl.h +++ b/include/uapi/linux/fcntl.h @@ -112,4 +112,9 @@  #define AT_RECURSIVE		0x8000	/* Apply to the entire subtree */ +/* Flags for name_to_handle_at(2). We reuse AT_ flag space to save bits... 
*/ +#define AT_HANDLE_FID		AT_REMOVEDIR	/* file handle is needed to +					compare object identity and may not +					be usable to open_by_handle_at(2) */ +  #endif /* _UAPI_LINUX_FCNTL_H */ diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 4ac1000b0ef2..0f6a0fe09bdb 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -828,6 +828,7 @@ enum {  	IFLA_VXLAN_TTL_INHERIT,  	IFLA_VXLAN_DF,  	IFLA_VXLAN_VNIFILTER, /* only applicable with COLLECT_METADATA mode */ +	IFLA_VXLAN_LOCALBYPASS,  	__IFLA_VXLAN_MAX  };  #define IFLA_VXLAN_MAX	(__IFLA_VXLAN_MAX - 1) diff --git a/include/uapi/linux/ioprio.h b/include/uapi/linux/ioprio.h index f70f2596a6bf..99440b2e8c35 100644 --- a/include/uapi/linux/ioprio.h +++ b/include/uapi/linux/ioprio.h @@ -2,22 +2,23 @@  #ifndef _UAPI_LINUX_IOPRIO_H  #define _UAPI_LINUX_IOPRIO_H +#include <linux/stddef.h> +#include <linux/types.h> +  /*   * Gives us 8 prio classes with 13-bits of data for each class   */  #define IOPRIO_CLASS_SHIFT	13 -#define IOPRIO_CLASS_MASK	0x07 +#define IOPRIO_NR_CLASSES	8 +#define IOPRIO_CLASS_MASK	(IOPRIO_NR_CLASSES - 1)  #define IOPRIO_PRIO_MASK	((1UL << IOPRIO_CLASS_SHIFT) - 1)  #define IOPRIO_PRIO_CLASS(ioprio)	\  	(((ioprio) >> IOPRIO_CLASS_SHIFT) & IOPRIO_CLASS_MASK)  #define IOPRIO_PRIO_DATA(ioprio)	((ioprio) & IOPRIO_PRIO_MASK) -#define IOPRIO_PRIO_VALUE(class, data)	\ -	((((class) & IOPRIO_CLASS_MASK) << IOPRIO_CLASS_SHIFT) | \ -	 ((data) & IOPRIO_PRIO_MASK))  /* - * These are the io priority groups as implemented by the BFQ and mq-deadline + * These are the io priority classes as implemented by the BFQ and mq-deadline   * schedulers. RT is the realtime class, it always gets premium service. For   * ATA disks supporting NCQ IO priority, RT class IOs will be processed using   * high priority NCQ commands. BE is the best-effort scheduling class, the @@ -25,18 +26,30 @@   * served when no one else is using the disk.   
*/  enum { -	IOPRIO_CLASS_NONE, -	IOPRIO_CLASS_RT, -	IOPRIO_CLASS_BE, -	IOPRIO_CLASS_IDLE, +	IOPRIO_CLASS_NONE	= 0, +	IOPRIO_CLASS_RT		= 1, +	IOPRIO_CLASS_BE		= 2, +	IOPRIO_CLASS_IDLE	= 3, + +	/* Special class to indicate an invalid ioprio value */ +	IOPRIO_CLASS_INVALID	= 7,  };  /* - * The RT and BE priority classes both support up to 8 priority levels. + * The RT and BE priority classes both support up to 8 priority levels that + * can be specified using the lower 3-bits of the priority data.   */ -#define IOPRIO_NR_LEVELS	8 -#define IOPRIO_BE_NR		IOPRIO_NR_LEVELS +#define IOPRIO_LEVEL_NR_BITS		3 +#define IOPRIO_NR_LEVELS		(1 << IOPRIO_LEVEL_NR_BITS) +#define IOPRIO_LEVEL_MASK		(IOPRIO_NR_LEVELS - 1) +#define IOPRIO_PRIO_LEVEL(ioprio)	((ioprio) & IOPRIO_LEVEL_MASK) + +#define IOPRIO_BE_NR			IOPRIO_NR_LEVELS +/* + * Possible values for the "which" argument of the ioprio_get() and + * ioprio_set() system calls (see "man ioprio_set"). + */  enum {  	IOPRIO_WHO_PROCESS = 1,  	IOPRIO_WHO_PGRP, @@ -44,9 +57,70 @@ enum {  };  /* - * Fallback BE priority level. + * Fallback BE class priority level.   */  #define IOPRIO_NORM	4  #define IOPRIO_BE_NORM	IOPRIO_NORM +/* + * The 10 bits between the priority class and the priority level are used to + * optionally define I/O hints for any combination of I/O priority class and + * level. Depending on the kernel configuration, I/O scheduler being used and + * the target I/O device being used, hints can influence how I/Os are processed + * without affecting the I/O scheduling ordering defined by the I/O priority + * class and level. + */ +#define IOPRIO_HINT_SHIFT		IOPRIO_LEVEL_NR_BITS +#define IOPRIO_HINT_NR_BITS		10 +#define IOPRIO_NR_HINTS			(1 << IOPRIO_HINT_NR_BITS) +#define IOPRIO_HINT_MASK		(IOPRIO_NR_HINTS - 1) +#define IOPRIO_PRIO_HINT(ioprio)	\ +	(((ioprio) >> IOPRIO_HINT_SHIFT) & IOPRIO_HINT_MASK) + +/* + * I/O hints. 
+ */ +enum { +	/* No hint */ +	IOPRIO_HINT_NONE = 0, + +	/* +	 * Device command duration limits: indicate to the device a desired +	 * duration limit for the commands that will be used to process an I/O. +	 * These will currently only be effective for SCSI and ATA devices that +	 * support the command duration limits feature. If this feature is +	 * enabled, then the commands issued to the device to process an I/O with +	 * one of these hints set will have the duration limit index (dld field) +	 * set to the value of the hint. +	 */ +	IOPRIO_HINT_DEV_DURATION_LIMIT_1 = 1, +	IOPRIO_HINT_DEV_DURATION_LIMIT_2 = 2, +	IOPRIO_HINT_DEV_DURATION_LIMIT_3 = 3, +	IOPRIO_HINT_DEV_DURATION_LIMIT_4 = 4, +	IOPRIO_HINT_DEV_DURATION_LIMIT_5 = 5, +	IOPRIO_HINT_DEV_DURATION_LIMIT_6 = 6, +	IOPRIO_HINT_DEV_DURATION_LIMIT_7 = 7, +}; + +#define IOPRIO_BAD_VALUE(val, max) ((val) < 0 || (val) >= (max)) + +/* + * Return an I/O priority value based on a class, a level and a hint. + */ +static __always_inline __u16 ioprio_value(int class, int level, int hint) +{ +	if (IOPRIO_BAD_VALUE(class, IOPRIO_NR_CLASSES) || +	    IOPRIO_BAD_VALUE(level, IOPRIO_NR_LEVELS) || +	    IOPRIO_BAD_VALUE(hint, IOPRIO_NR_HINTS)) +		return IOPRIO_CLASS_INVALID << IOPRIO_CLASS_SHIFT; + +	return (class << IOPRIO_CLASS_SHIFT) | +		(hint << IOPRIO_HINT_SHIFT) | level; +} + +#define IOPRIO_PRIO_VALUE(class, level)			\ +	ioprio_value(class, level, IOPRIO_HINT_NONE) +#define IOPRIO_PRIO_VALUE_HINT(class, level, hint)	\ +	ioprio_value(class, level, hint) +  #endif /* _UAPI_LINUX_IOPRIO_H */ diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index 2da5c3ad71bd..eeb2fdcbdcb7 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -38,9 +38,11 @@   * - 1.10 - Add SMI profiler event log   * - 1.11 - Add unified memory for ctx save/restore area   * - 1.12 - Add DMA buf export ioctl + * - 1.13 - Add debugger API + * - 1.14 - Update kfd_event_data   */  #define 
KFD_IOCTL_MAJOR_VERSION 1 -#define KFD_IOCTL_MINOR_VERSION 12 +#define KFD_IOCTL_MINOR_VERSION 14  struct kfd_ioctl_get_version_args {  	__u32 major_version;	/* from KFD */ @@ -110,6 +112,32 @@ struct kfd_ioctl_get_available_memory_args {  	__u32 pad;  }; +struct kfd_dbg_device_info_entry { +	__u64 exception_status; +	__u64 lds_base; +	__u64 lds_limit; +	__u64 scratch_base; +	__u64 scratch_limit; +	__u64 gpuvm_base; +	__u64 gpuvm_limit; +	__u32 gpu_id; +	__u32 location_id; +	__u32 vendor_id; +	__u32 device_id; +	__u32 revision_id; +	__u32 subsystem_vendor_id; +	__u32 subsystem_device_id; +	__u32 fw_version; +	__u32 gfx_target_version; +	__u32 simd_count; +	__u32 max_waves_per_simd; +	__u32 array_count; +	__u32 simd_arrays_per_engine; +	__u32 num_xcc; +	__u32 capability; +	__u32 debug_prop; +}; +  /* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */  #define KFD_IOC_CACHE_POLICY_COHERENT 0  #define KFD_IOC_CACHE_POLICY_NONCOHERENT 1 @@ -293,12 +321,20 @@ struct kfd_hsa_hw_exception_data {  	__u32 gpu_id;  }; +/* hsa signal event data */ +struct kfd_hsa_signal_event_data { +	__u64 last_event_age;	/* to and from KFD */ +}; +  /* Event data */  struct kfd_event_data {  	union { +		/* From KFD */  		struct kfd_hsa_memory_exception_data memory_exception_data;  		struct kfd_hsa_hw_exception_data hw_exception_data; -	};				/* From KFD */ +		/* To and From KFD */ +		struct kfd_hsa_signal_event_data signal_event_data; +	};  	__u64 kfd_event_data_ext;	/* pointer to an extension structure  					   for future exception types */  	__u32 event_id;		/* to KFD */ @@ -773,6 +809,640 @@ struct kfd_ioctl_set_xnack_mode_args {  	__s32 xnack_enabled;  }; +/* Wave launch override modes */ +enum kfd_dbg_trap_override_mode { +	KFD_DBG_TRAP_OVERRIDE_OR = 0, +	KFD_DBG_TRAP_OVERRIDE_REPLACE = 1 +}; + +/* Wave launch overrides */ +enum kfd_dbg_trap_mask { +	KFD_DBG_TRAP_MASK_FP_INVALID = 1, +	KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL = 2, +	
KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO = 4, +	KFD_DBG_TRAP_MASK_FP_OVERFLOW = 8, +	KFD_DBG_TRAP_MASK_FP_UNDERFLOW = 16, +	KFD_DBG_TRAP_MASK_FP_INEXACT = 32, +	KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO = 64, +	KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH = 128, +	KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION = 256, +	KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START = (1 << 30), +	KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END = (1 << 31) +}; + +/* Wave launch modes */ +enum kfd_dbg_trap_wave_launch_mode { +	KFD_DBG_TRAP_WAVE_LAUNCH_MODE_NORMAL = 0, +	KFD_DBG_TRAP_WAVE_LAUNCH_MODE_HALT = 1, +	KFD_DBG_TRAP_WAVE_LAUNCH_MODE_DEBUG = 3 +}; + +/* Address watch modes */ +enum kfd_dbg_trap_address_watch_mode { +	KFD_DBG_TRAP_ADDRESS_WATCH_MODE_READ = 0, +	KFD_DBG_TRAP_ADDRESS_WATCH_MODE_NONREAD = 1, +	KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ATOMIC = 2, +	KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ALL = 3 +}; + +/* Additional wave settings */ +enum kfd_dbg_trap_flags { +	KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP = 1, +}; + +/* Trap exceptions */ +enum kfd_dbg_trap_exception_code { +	EC_NONE = 0, +	/* per queue */ +	EC_QUEUE_WAVE_ABORT = 1, +	EC_QUEUE_WAVE_TRAP = 2, +	EC_QUEUE_WAVE_MATH_ERROR = 3, +	EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION = 4, +	EC_QUEUE_WAVE_MEMORY_VIOLATION = 5, +	EC_QUEUE_WAVE_APERTURE_VIOLATION = 6, +	EC_QUEUE_PACKET_DISPATCH_DIM_INVALID = 16, +	EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID = 17, +	EC_QUEUE_PACKET_DISPATCH_CODE_INVALID = 18, +	EC_QUEUE_PACKET_RESERVED = 19, +	EC_QUEUE_PACKET_UNSUPPORTED = 20, +	EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID = 21, +	EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID = 22, +	EC_QUEUE_PACKET_VENDOR_UNSUPPORTED = 23, +	EC_QUEUE_PREEMPTION_ERROR = 30, +	EC_QUEUE_NEW = 31, +	/* per device */ +	EC_DEVICE_QUEUE_DELETE = 32, +	EC_DEVICE_MEMORY_VIOLATION = 33, +	EC_DEVICE_RAS_ERROR = 34, +	EC_DEVICE_FATAL_HALT = 35, +	EC_DEVICE_NEW = 36, +	/* per process */ +	EC_PROCESS_RUNTIME = 48, +	EC_PROCESS_DEVICE_REMOVE = 49, +	EC_MAX +}; + +/* Mask generated by ecode in kfd_dbg_trap_exception_code */ 
+#define KFD_EC_MASK(ecode)	(1ULL << (ecode - 1)) + +/* Masks for exception code type checks below */ +#define KFD_EC_MASK_QUEUE	(KFD_EC_MASK(EC_QUEUE_WAVE_ABORT) |	\ +				 KFD_EC_MASK(EC_QUEUE_WAVE_TRAP) |	\ +				 KFD_EC_MASK(EC_QUEUE_WAVE_MATH_ERROR) |	\ +				 KFD_EC_MASK(EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION) |	\ +				 KFD_EC_MASK(EC_QUEUE_WAVE_MEMORY_VIOLATION) |	\ +				 KFD_EC_MASK(EC_QUEUE_WAVE_APERTURE_VIOLATION) |	\ +				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) |	\ +				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) |	\ +				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) |	\ +				 KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) |	\ +				 KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) |	\ +				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) |	\ +				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) |	\ +				 KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED)	|	\ +				 KFD_EC_MASK(EC_QUEUE_PREEMPTION_ERROR)	|	\ +				 KFD_EC_MASK(EC_QUEUE_NEW)) +#define KFD_EC_MASK_DEVICE	(KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE) |		\ +				 KFD_EC_MASK(EC_DEVICE_RAS_ERROR) |		\ +				 KFD_EC_MASK(EC_DEVICE_FATAL_HALT) |		\ +				 KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION) |	\ +				 KFD_EC_MASK(EC_DEVICE_NEW)) +#define KFD_EC_MASK_PROCESS	(KFD_EC_MASK(EC_PROCESS_RUNTIME) |	\ +				 KFD_EC_MASK(EC_PROCESS_DEVICE_REMOVE)) + +/* Checks for exception code types for KFD search */ +#define KFD_DBG_EC_TYPE_IS_QUEUE(ecode)					\ +			(!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_QUEUE)) +#define KFD_DBG_EC_TYPE_IS_DEVICE(ecode)				\ +			(!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_DEVICE)) +#define KFD_DBG_EC_TYPE_IS_PROCESS(ecode)				\ +			(!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PROCESS)) + + +/* Runtime enable states */ +enum kfd_dbg_runtime_state { +	DEBUG_RUNTIME_STATE_DISABLED = 0, +	DEBUG_RUNTIME_STATE_ENABLED = 1, +	DEBUG_RUNTIME_STATE_ENABLED_BUSY = 2, +	DEBUG_RUNTIME_STATE_ENABLED_ERROR = 3 +}; + +/* Runtime enable status */ +struct kfd_runtime_info { +	__u64 r_debug; +	__u32 
runtime_state; +	__u32 ttmp_setup; +}; + +/* Enable modes for runtime enable */ +#define KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK	1 +#define KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK	2 + +/** + * kfd_ioctl_runtime_enable_args - Arguments for runtime enable + * + * Coordinates debug exception signalling and debug device enablement with runtime. + * + * @r_debug - pointer to user struct for sharing information between ROCr and the debuggger + * @mode_mask - mask to set mode + *	KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK - enable runtime for debugging, otherwise disable + *	KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK - enable trap temporary setup (ignore on disable) + * @capabilities_mask - mask to notify runtime on what KFD supports + * + * Return - 0 on SUCCESS. + *	  - EBUSY if runtime enable call already pending. + *	  - EEXIST if user queues already active prior to call. + *	    If process is debug enabled, runtime enable will enable debug devices and + *	    wait for debugger process to send runtime exception EC_PROCESS_RUNTIME + *	    to unblock - see kfd_ioctl_dbg_trap_args. 
+ * + */ +struct kfd_ioctl_runtime_enable_args { +	__u64 r_debug; +	__u32 mode_mask; +	__u32 capabilities_mask; +}; + +/* Queue information */ +struct kfd_queue_snapshot_entry { +	__u64 exception_status; +	__u64 ring_base_address; +	__u64 write_pointer_address; +	__u64 read_pointer_address; +	__u64 ctx_save_restore_address; +	__u32 queue_id; +	__u32 gpu_id; +	__u32 ring_size; +	__u32 queue_type; +	__u32 ctx_save_restore_area_size; +	__u32 reserved; +}; + +/* Queue status return for suspend/resume */ +#define KFD_DBG_QUEUE_ERROR_BIT		30 +#define KFD_DBG_QUEUE_INVALID_BIT	31 +#define KFD_DBG_QUEUE_ERROR_MASK	(1 << KFD_DBG_QUEUE_ERROR_BIT) +#define KFD_DBG_QUEUE_INVALID_MASK	(1 << KFD_DBG_QUEUE_INVALID_BIT) + +/* Context save area header information */ +struct kfd_context_save_area_header { +	struct { +		__u32 control_stack_offset; +		__u32 control_stack_size; +		__u32 wave_state_offset; +		__u32 wave_state_size; +	} wave_state; +	__u32 debug_offset; +	__u32 debug_size; +	__u64 err_payload_addr; +	__u32 err_event_id; +	__u32 reserved1; +}; + +/* + * Debug operations + * + * For specifics on usage and return values, see documentation per operation + * below.  Otherwise, generic error returns apply: + *	- ESRCH if the process to debug does not exist. + * + *	- EINVAL (with KFD_IOC_DBG_TRAP_ENABLE exempt) if operation + *		 KFD_IOC_DBG_TRAP_ENABLE has not succeeded prior. + *		 Also returns this error if GPU hardware scheduling is not supported. + * + *	- EPERM (with KFD_IOC_DBG_TRAP_DISABLE exempt) if target process is not + *		 PTRACE_ATTACHED.  KFD_IOC_DBG_TRAP_DISABLE is exempt to allow + *		 clean up of debug mode as long as process is debug enabled. + * + *	- EACCES if any DBG_HW_OP (debug hardware operation) is requested when + *		 AMDKFD_IOC_RUNTIME_ENABLE has not succeeded prior. + * + *	- ENODEV if any GPU does not support debugging on a DBG_HW_OP call. + * + *	- Other errors may be returned when a DBG_HW_OP occurs while the GPU + *	  is in a fatal state. 
+ * + */ +enum kfd_dbg_trap_operations { +	KFD_IOC_DBG_TRAP_ENABLE = 0, +	KFD_IOC_DBG_TRAP_DISABLE = 1, +	KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT = 2, +	KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED = 3, +	KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE = 4,  /* DBG_HW_OP */ +	KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE = 5,      /* DBG_HW_OP */ +	KFD_IOC_DBG_TRAP_SUSPEND_QUEUES = 6,		/* DBG_HW_OP */ +	KFD_IOC_DBG_TRAP_RESUME_QUEUES = 7,		/* DBG_HW_OP */ +	KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH = 8,	/* DBG_HW_OP */ +	KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH = 9,	/* DBG_HW_OP */ +	KFD_IOC_DBG_TRAP_SET_FLAGS = 10, +	KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT = 11, +	KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO = 12, +	KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT = 13, +	KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT = 14 +}; + +/** + * kfd_ioctl_dbg_trap_enable_args + * + *     Arguments for KFD_IOC_DBG_TRAP_ENABLE. + * + *     Enables debug session for target process. Call @op KFD_IOC_DBG_TRAP_DISABLE in + *     kfd_ioctl_dbg_trap_args to disable debug session. + * + *     @exception_mask (IN)	- exceptions to raise to the debugger + *     @rinfo_ptr      (IN)	- pointer to runtime info buffer (see kfd_runtime_info) + *     @rinfo_size     (IN/OUT)	- size of runtime info buffer in bytes + *     @dbg_fd	       (IN)	- fd the KFD will nofify the debugger with of raised + *				  exceptions set in exception_mask. + * + *     Generic errors apply (see kfd_dbg_trap_operations). + *     Return - 0 on SUCCESS. + *		Copies KFD saved kfd_runtime_info to @rinfo_ptr on enable. + *		Size of kfd_runtime saved by the KFD returned to @rinfo_size. + *            - EBADF if KFD cannot get a reference to dbg_fd. + *            - EFAULT if KFD cannot copy runtime info to rinfo_ptr. + *            - EINVAL if target process is already debug enabled. 
+ * + */ +struct kfd_ioctl_dbg_trap_enable_args { +	__u64 exception_mask; +	__u64 rinfo_ptr; +	__u32 rinfo_size; +	__u32 dbg_fd; +}; + +/** + * kfd_ioctl_dbg_trap_send_runtime_event_args + * + * + *     Arguments for KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT. + *     Raises exceptions to runtime. + * + *     @exception_mask (IN) - exceptions to raise to runtime + *     @gpu_id	       (IN) - target device id + *     @queue_id       (IN) - target queue id + * + *     Generic errors apply (see kfd_dbg_trap_operations). + *     Return - 0 on SUCCESS. + *	      - ENODEV if gpu_id not found. + *		If exception_mask contains EC_PROCESS_RUNTIME, unblocks pending + *		AMDKFD_IOC_RUNTIME_ENABLE call - see kfd_ioctl_runtime_enable_args. + *		All other exceptions are raised to runtime through err_payload_addr. + *		See kfd_context_save_area_header. + */ +struct kfd_ioctl_dbg_trap_send_runtime_event_args { +	__u64 exception_mask; +	__u32 gpu_id; +	__u32 queue_id; +}; + +/** + * kfd_ioctl_dbg_trap_set_exceptions_enabled_args + * + *     Arguments for KFD_IOC_SET_EXCEPTIONS_ENABLED + *     Set new exceptions to be raised to the debugger. + * + *     @exception_mask (IN) - new exceptions to raise the debugger + * + *     Generic errors apply (see kfd_dbg_trap_operations). + *     Return - 0 on SUCCESS. + */ +struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args { +	__u64 exception_mask; +}; + +/** + * kfd_ioctl_dbg_trap_set_wave_launch_override_args + * + *     Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE + *     Enable HW exceptions to raise trap. + * + *     @override_mode	     (IN)     - see kfd_dbg_trap_override_mode + *     @enable_mask	     (IN/OUT) - reference kfd_dbg_trap_mask. + *					IN is the override modes requested to be enabled. + *					OUT is referenced in Return below. + *     @support_request_mask (IN/OUT) - reference kfd_dbg_trap_mask. + *					IN is the override modes requested for support check. + *					OUT is referenced in Return below. 
+ * + *     Generic errors apply (see kfd_dbg_trap_operations). + *     Return - 0 on SUCCESS. + *		Previous enablement is returned in @enable_mask. + *		Actual override support is returned in @support_request_mask. + *	      - EINVAL if override mode is not supported. + *	      - EACCES if trap support requested is not actually supported. + *		i.e. enable_mask (IN) is not a subset of support_request_mask (OUT). + *		Otherwise it is considered a generic error (see kfd_dbg_trap_operations). + */ +struct kfd_ioctl_dbg_trap_set_wave_launch_override_args { +	__u32 override_mode; +	__u32 enable_mask; +	__u32 support_request_mask; +	__u32 pad; +}; + +/** + * kfd_ioctl_dbg_trap_set_wave_launch_mode_args + * + *     Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE + *     Set wave launch mode. + * + *     @mode (IN) - see kfd_dbg_trap_wave_launch_mode + * + *     Generic errors apply (see kfd_dbg_trap_operations). + *     Return - 0 on SUCCESS. + */ +struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args { +	__u32 launch_mode; +	__u32 pad; +}; + +/** + * kfd_ioctl_dbg_trap_suspend_queues_ags + * + *     Arguments for KFD_IOC_DBG_TRAP_SUSPEND_QUEUES + *     Suspend queues. + * + *     @exception_mask	(IN) - raised exceptions to clear + *     @queue_array_ptr (IN) - pointer to array of queue ids (u32 per queue id) + *			       to suspend + *     @num_queues	(IN) - number of queues to suspend in @queue_array_ptr + *     @grace_period	(IN) - wave time allowance before preemption + *			       per 1K GPU clock cycle unit + * + *     Generic errors apply (see kfd_dbg_trap_operations). + *     Destruction of a suspended queue is blocked until the queue is + *     resumed.  This allows the debugger to access queue information and + *     the its context save area without running into a race condition on + *     queue destruction. 
+ *     Automatically copies per queue context save area header information + *     into the save area base + *     (see kfd_queue_snapshot_entry and kfd_context_save_area_header). + * + *     Return - Number of queues suspended on SUCCESS. + *	.	KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK masked + *		for each queue id in @queue_array_ptr array reports unsuccessful + *		suspend reason. + *		KFD_DBG_QUEUE_ERROR_MASK = HW failure. + *		KFD_DBG_QUEUE_INVALID_MASK = queue does not exist, is new or + *		is being destroyed. + */ +struct kfd_ioctl_dbg_trap_suspend_queues_args { +	__u64 exception_mask; +	__u64 queue_array_ptr; +	__u32 num_queues; +	__u32 grace_period; +}; + +/** + * kfd_ioctl_dbg_trap_resume_queues_args + * + *     Arguments for KFD_IOC_DBG_TRAP_RESUME_QUEUES + *     Resume queues. + * + *     @queue_array_ptr (IN) - pointer to array of queue ids (u32 per queue id) + *			       to resume + *     @num_queues	(IN) - number of queues to resume in @queue_array_ptr + * + *     Generic errors apply (see kfd_dbg_trap_operations). + *     Return - Number of queues resumed on SUCCESS. + *		KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK mask + *		for each queue id in @queue_array_ptr array reports unsuccessful + *		resume reason. + *		KFD_DBG_QUEUE_ERROR_MASK = HW failure. + *		KFD_DBG_QUEUE_INVALID_MASK = queue does not exist. + */ +struct kfd_ioctl_dbg_trap_resume_queues_args { +	__u64 queue_array_ptr; +	__u32 num_queues; +	__u32 pad; +}; + +/** + * kfd_ioctl_dbg_trap_set_node_address_watch_args + * + *     Arguments for KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH + *     Sets address watch for device. + * + *     @address	(IN)  - watch address to set + *     @mode    (IN)  - see kfd_dbg_trap_address_watch_mode + *     @mask    (IN)  - watch address mask + *     @gpu_id  (IN)  - target gpu to set watch point + *     @id      (OUT) - watch id allocated + * + *     Generic errors apply (see kfd_dbg_trap_operations). + *     Return - 0 on SUCCESS. 
+ *		Allocated watch ID returned to @id. + *	      - ENODEV if gpu_id not found. + *	      - ENOMEM if watch IDs can be allocated + */ +struct kfd_ioctl_dbg_trap_set_node_address_watch_args { +	__u64 address; +	__u32 mode; +	__u32 mask; +	__u32 gpu_id; +	__u32 id; +}; + +/** + * kfd_ioctl_dbg_trap_clear_node_address_watch_args + * + *     Arguments for KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH + *     Clear address watch for device. + * + *     @gpu_id  (IN)  - target device to clear watch point + *     @id      (IN) - allocated watch id to clear + * + *     Generic errors apply (see kfd_dbg_trap_operations). + *     Return - 0 on SUCCESS. + *	      - ENODEV if gpu_id not found. + *	      - EINVAL if watch ID has not been allocated. + */ +struct kfd_ioctl_dbg_trap_clear_node_address_watch_args { +	__u32 gpu_id; +	__u32 id; +}; + +/** + * kfd_ioctl_dbg_trap_set_flags_args + * + *     Arguments for KFD_IOC_DBG_TRAP_SET_FLAGS + *     Sets flags for wave behaviour. + * + *     @flags (IN/OUT) - IN = flags to enable, OUT = flags previously enabled + * + *     Generic errors apply (see kfd_dbg_trap_operations). + *     Return - 0 on SUCCESS. + *	      - EACCESS if any debug device does not allow flag options. + */ +struct kfd_ioctl_dbg_trap_set_flags_args { +	__u32 flags; +	__u32 pad; +}; + +/** + * kfd_ioctl_dbg_trap_query_debug_event_args + * + *     Arguments for KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT + * + *     Find one or more raised exceptions. This function can return multiple + *     exceptions from a single queue or a single device with one call. To find + *     all raised exceptions, this function must be called repeatedly until it + *     returns -EAGAIN. Returned exceptions can optionally be cleared by + *     setting the corresponding bit in the @exception_mask input parameter. + *     However, clearing an exception prevents retrieving further information + *     about it with KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO. 
+ * + *     @exception_mask (IN/OUT) - exception to clear (IN) and raised (OUT) + *     @gpu_id	       (OUT)    - gpu id of exceptions raised + *     @queue_id       (OUT)    - queue id of exceptions raised + * + *     Generic errors apply (see kfd_dbg_trap_operations). + *     Return - 0 on raised exception found + *              Raised exceptions found are returned in @exception mask + *              with reported source id returned in @gpu_id or @queue_id. + *            - EAGAIN if no raised exception has been found + */ +struct kfd_ioctl_dbg_trap_query_debug_event_args { +	__u64 exception_mask; +	__u32 gpu_id; +	__u32 queue_id; +}; + +/** + * kfd_ioctl_dbg_trap_query_exception_info_args + * + *     Arguments KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO + *     Get additional info on raised exception. + * + *     @info_ptr	(IN)	 - pointer to exception info buffer to copy to + *     @info_size	(IN/OUT) - exception info buffer size (bytes) + *     @source_id	(IN)     - target gpu or queue id + *     @exception_code	(IN)     - target exception + *     @clear_exception	(IN)     - clear raised @exception_code exception + *				   (0 = false, 1 = true) + * + *     Generic errors apply (see kfd_dbg_trap_operations). + *     Return - 0 on SUCCESS. + *              If @exception_code is EC_DEVICE_MEMORY_VIOLATION, copy @info_size(OUT) + *		bytes of memory exception data to @info_ptr. + *              If @exception_code is EC_PROCESS_RUNTIME, copy saved + *              kfd_runtime_info to @info_ptr. + *              Actual required @info_ptr size (bytes) is returned in @info_size. + */ +struct kfd_ioctl_dbg_trap_query_exception_info_args { +	__u64 info_ptr; +	__u32 info_size; +	__u32 source_id; +	__u32 exception_code; +	__u32 clear_exception; +}; + +/** + * kfd_ioctl_dbg_trap_get_queue_snapshot_args + * + *     Arguments KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT + *     Get queue information. 
+ * + *     @exception_mask	 (IN)	  - exceptions raised to clear + *     @snapshot_buf_ptr (IN)	  - queue snapshot entry buffer (see kfd_queue_snapshot_entry) + *     @num_queues	 (IN/OUT) - number of queue snapshot entries + *         The debugger specifies the size of the array allocated in @num_queues. + *         KFD returns the number of queues that actually existed. If this is + *         larger than the size specified by the debugger, KFD will not overflow + *         the array allocated by the debugger. + * + *     @entry_size	 (IN/OUT) - size per entry in bytes + *         The debugger specifies sizeof(struct kfd_queue_snapshot_entry) in + *         @entry_size. KFD returns the number of bytes actually populated per + *         entry. The debugger should use the KFD_IOCTL_MINOR_VERSION to determine, + *         which fields in struct kfd_queue_snapshot_entry are valid. This allows + *         growing the ABI in a backwards compatible manner. + *         Note that entry_size(IN) should still be used to stride the snapshot buffer in the + *         event that it's larger than actual kfd_queue_snapshot_entry. + * + *     Generic errors apply (see kfd_dbg_trap_operations). + *     Return - 0 on SUCCESS. + *              Copies @num_queues(IN) queue snapshot entries of size @entry_size(IN) + *              into @snapshot_buf_ptr if @num_queues(IN) > 0. + *              Otherwise return @num_queues(OUT) queue snapshot entries that exist. + */ +struct kfd_ioctl_dbg_trap_queue_snapshot_args { +	__u64 exception_mask; +	__u64 snapshot_buf_ptr; +	__u32 num_queues; +	__u32 entry_size; +}; + +/** + * kfd_ioctl_dbg_trap_get_device_snapshot_args + * + *     Arguments for KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT + *     Get device information. 
+ * + *     @exception_mask	 (IN)	  - exceptions raised to clear + *     @snapshot_buf_ptr (IN)	  - pointer to snapshot buffer (see kfd_dbg_device_info_entry) + *     @num_devices	 (IN/OUT) - number of debug devices to snapshot + *         The debugger specifies the size of the array allocated in @num_devices. + *         KFD returns the number of devices that actually existed. If this is + *         larger than the size specified by the debugger, KFD will not overflow + *         the array allocated by the debugger. + * + *     @entry_size	 (IN/OUT) - size per entry in bytes + *         The debugger specifies sizeof(struct kfd_dbg_device_info_entry) in + *         @entry_size. KFD returns the number of bytes actually populated. The + *         debugger should use KFD_IOCTL_MINOR_VERSION to determine which fields + *         in struct kfd_dbg_device_info_entry are valid. This allows growing the + *         ABI in a backwards compatible manner. + *         Note that entry_size(IN) should still be used to stride the snapshot buffer in the + *         event that it's larger than actual kfd_dbg_device_info_entry. + * + *     Generic errors apply (see kfd_dbg_trap_operations). + *     Return - 0 on SUCCESS. + *              Copies @num_devices(IN) device snapshot entries of size @entry_size(IN) + *              into @snapshot_buf_ptr if @num_devices(IN) > 0. + *              Otherwise return @num_devices(OUT) device snapshot entries that exist. + */ +struct kfd_ioctl_dbg_trap_device_snapshot_args { +	__u64 exception_mask; +	__u64 snapshot_buf_ptr; +	__u32 num_devices; +	__u32 entry_size; +}; + +/** + * kfd_ioctl_dbg_trap_args + * + * Arguments to debug target process. + * + *     @pid - target process to debug + *     @op  - debug operation (see kfd_dbg_trap_operations) + * + *     @op determines which union struct args to use. + *     Refer to kern docs for each kfd_ioctl_dbg_trap_*_args struct. 
+ */ +struct kfd_ioctl_dbg_trap_args { +	__u32 pid; +	__u32 op; + +	union { +		struct kfd_ioctl_dbg_trap_enable_args enable; +		struct kfd_ioctl_dbg_trap_send_runtime_event_args send_runtime_event; +		struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args set_exceptions_enabled; +		struct kfd_ioctl_dbg_trap_set_wave_launch_override_args launch_override; +		struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args launch_mode; +		struct kfd_ioctl_dbg_trap_suspend_queues_args suspend_queues; +		struct kfd_ioctl_dbg_trap_resume_queues_args resume_queues; +		struct kfd_ioctl_dbg_trap_set_node_address_watch_args set_node_address_watch; +		struct kfd_ioctl_dbg_trap_clear_node_address_watch_args clear_node_address_watch; +		struct kfd_ioctl_dbg_trap_set_flags_args set_flags; +		struct kfd_ioctl_dbg_trap_query_debug_event_args query_debug_event; +		struct kfd_ioctl_dbg_trap_query_exception_info_args query_exception_info; +		struct kfd_ioctl_dbg_trap_queue_snapshot_args queue_snapshot; +		struct kfd_ioctl_dbg_trap_device_snapshot_args device_snapshot; +	}; +}; +  #define AMDKFD_IOCTL_BASE 'K'  #define AMDKFD_IO(nr)			_IO(AMDKFD_IOCTL_BASE, nr)  #define AMDKFD_IOR(nr, type)		_IOR(AMDKFD_IOCTL_BASE, nr, type) @@ -887,7 +1557,13 @@ struct kfd_ioctl_set_xnack_mode_args {  #define AMDKFD_IOC_EXPORT_DMABUF		\  		AMDKFD_IOWR(0x24, struct kfd_ioctl_export_dmabuf_args) +#define AMDKFD_IOC_RUNTIME_ENABLE		\ +		AMDKFD_IOWR(0x25, struct kfd_ioctl_runtime_enable_args) + +#define AMDKFD_IOC_DBG_TRAP			\ +		AMDKFD_IOWR(0x26, struct kfd_ioctl_dbg_trap_args) +  #define AMDKFD_COMMAND_START		0x01 -#define AMDKFD_COMMAND_END		0x25 +#define AMDKFD_COMMAND_END		0x27  #endif diff --git a/include/uapi/linux/kfd_sysfs.h b/include/uapi/linux/kfd_sysfs.h index 3e330f368917..a51b7331e0b4 100644 --- a/include/uapi/linux/kfd_sysfs.h +++ b/include/uapi/linux/kfd_sysfs.h @@ -43,6 +43,11 @@  #define HSA_CAP_DOORBELL_TYPE_2_0		0x2  #define HSA_CAP_AQL_QUEUE_DOUBLE_MAP		0x00004000 +#define 
HSA_CAP_TRAP_DEBUG_SUPPORT              0x00008000 +#define HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_TRAP_OVERRIDE_SUPPORTED  0x00010000 +#define HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_MODE_SUPPORTED           0x00020000 +#define HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED  0x00040000 +  /* Old buggy user mode depends on this being 0 */  #define HSA_CAP_RESERVED_WAS_SRAM_EDCSUPPORTED	0x00080000 @@ -53,8 +58,18 @@  #define HSA_CAP_SRAM_EDCSUPPORTED		0x04000000  #define HSA_CAP_SVMAPI_SUPPORTED		0x08000000  #define HSA_CAP_FLAGS_COHERENTHOSTACCESS	0x10000000 +#define HSA_CAP_TRAP_DEBUG_FIRMWARE_SUPPORTED   0x20000000  #define HSA_CAP_RESERVED			0xe00f8000 +/* debug_prop bits in node properties */ +#define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_MASK     0x0000000f +#define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_SHIFT    0 +#define HSA_DBG_WATCH_ADDR_MASK_HI_BIT_MASK     0x000003f0 +#define HSA_DBG_WATCH_ADDR_MASK_HI_BIT_SHIFT    4 +#define HSA_DBG_DISPATCH_INFO_ALWAYS_VALID      0x00000400 +#define HSA_DBG_WATCHPOINTS_EXCLUSIVE           0x00000800 +#define HSA_DBG_RESERVED                0xfffffffffffff000ull +  /* Heap types in memory properties */  #define HSA_MEM_HEAP_TYPE_SYSTEM	0  #define HSA_MEM_HEAP_TYPE_FB_PUBLIC	1 diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 737318b1c1d9..f089ab290978 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1190,6 +1190,8 @@ struct kvm_ppc_resize_hpt {  #define KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP 225  #define KVM_CAP_PMU_EVENT_MASKED_EVENTS 226  #define KVM_CAP_COUNTER_OFFSET 227 +#define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228 +#define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229  #ifdef KVM_CAP_IRQ_ROUTING @@ -1442,6 +1444,8 @@ enum kvm_device_type {  #define KVM_DEV_TYPE_XIVE		KVM_DEV_TYPE_XIVE  	KVM_DEV_TYPE_ARM_PV_TIME,  #define KVM_DEV_TYPE_ARM_PV_TIME	KVM_DEV_TYPE_ARM_PV_TIME +	KVM_DEV_TYPE_RISCV_AIA, +#define KVM_DEV_TYPE_RISCV_AIA		KVM_DEV_TYPE_RISCV_AIA  	KVM_DEV_TYPE_MAX,  }; @@ -1613,7 +1617,7 @@ struct 
kvm_s390_ucas_mapping {  #define KVM_GET_DEBUGREGS         _IOR(KVMIO,  0xa1, struct kvm_debugregs)  #define KVM_SET_DEBUGREGS         _IOW(KVMIO,  0xa2, struct kvm_debugregs)  /* - * vcpu version available with KVM_ENABLE_CAP + * vcpu version available with KVM_CAP_ENABLE_CAP   * vm version available with KVM_CAP_ENABLE_CAP_VM   */  #define KVM_ENABLE_CAP            _IOW(KVMIO,  0xa3, struct kvm_enable_cap) diff --git a/include/uapi/linux/mdio.h b/include/uapi/linux/mdio.h index 256b463e47a6..b826598d1e94 100644 --- a/include/uapi/linux/mdio.h +++ b/include/uapi/linux/mdio.h @@ -231,6 +231,30 @@  #define MDIO_PMA_EXTABLE_BT1		0x0800	/* BASE-T1 ability */  #define MDIO_PMA_EXTABLE_NBT		0x4000  /* 2.5/5GBASE-T ability */ +/* AN Clause 73 linkword */ +#define MDIO_AN_C73_0_S_MASK		GENMASK(4, 0) +#define MDIO_AN_C73_0_E_MASK		GENMASK(9, 5) +#define MDIO_AN_C73_0_PAUSE		BIT(10) +#define MDIO_AN_C73_0_ASM_DIR		BIT(11) +#define MDIO_AN_C73_0_C2		BIT(12) +#define MDIO_AN_C73_0_RF		BIT(13) +#define MDIO_AN_C73_0_ACK		BIT(14) +#define MDIO_AN_C73_0_NP		BIT(15) +#define MDIO_AN_C73_1_T_MASK		GENMASK(4, 0) +#define MDIO_AN_C73_1_1000BASE_KX	BIT(5) +#define MDIO_AN_C73_1_10GBASE_KX4	BIT(6) +#define MDIO_AN_C73_1_10GBASE_KR	BIT(7) +#define MDIO_AN_C73_1_40GBASE_KR4	BIT(8) +#define MDIO_AN_C73_1_40GBASE_CR4	BIT(9) +#define MDIO_AN_C73_1_100GBASE_CR10	BIT(10) +#define MDIO_AN_C73_1_100GBASE_KP4	BIT(11) +#define MDIO_AN_C73_1_100GBASE_KR4	BIT(12) +#define MDIO_AN_C73_1_100GBASE_CR4	BIT(13) +#define MDIO_AN_C73_1_25GBASE_R_S	BIT(14) +#define MDIO_AN_C73_1_25GBASE_R		BIT(15) +#define MDIO_AN_C73_2_2500BASE_KX	BIT(0) +#define MDIO_AN_C73_2_5GBASE_KR		BIT(1) +  /* PHY XGXS lane state register. 
*/  #define MDIO_PHYXS_LNSTAT_SYNC0		0x0001  #define MDIO_PHYXS_LNSTAT_SYNC1		0x0002 diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h index 32af2d278cb4..ee9c49f949a2 100644 --- a/include/uapi/linux/mptcp.h +++ b/include/uapi/linux/mptcp.h @@ -123,6 +123,11 @@ struct mptcp_info {  	__u8	mptcpi_local_addr_used;  	__u8	mptcpi_local_addr_max;  	__u8	mptcpi_csum_enabled; +	__u32	mptcpi_retransmits; +	__u64	mptcpi_bytes_retrans; +	__u64	mptcpi_bytes_sent; +	__u64	mptcpi_bytes_received; +	__u64	mptcpi_bytes_acked;  };  /* @@ -244,9 +249,33 @@ struct mptcp_subflow_addrs {  	};  }; +struct mptcp_subflow_info { +	__u32				id; +	struct mptcp_subflow_addrs	addrs; +}; + +struct mptcp_full_info { +	__u32		size_tcpinfo_kernel;	/* must be 0, set by kernel */ +	__u32		size_tcpinfo_user; +	__u32		size_sfinfo_kernel;	/* must be 0, set by kernel */ +	__u32		size_sfinfo_user; +	__u32		num_subflows;		/* must be 0, set by kernel (real subflow count) */ +	__u32		size_arrays_user;	/* max subflows that userspace is interested in; +						 * the buffers at subflow_info/tcp_info +						 * are respectively at least: +						 *  size_arrays * size_sfinfo_user +						 *  size_arrays * size_tcpinfo_user +						 * bytes wide +						 */ +	__aligned_u64		subflow_info; +	__aligned_u64		tcp_info; +	struct mptcp_info	mptcp_info; +}; +  /* MPTCP socket options */  #define MPTCP_INFO		1  #define MPTCP_TCPINFO		2  #define MPTCP_SUBFLOW_ADDRS	3 +#define MPTCP_FULL_INFO		4  #endif /* _UAPI_MPTCP_H */ diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index c4d4d8e42dc8..8466c2a9938f 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -105,6 +105,7 @@ enum nft_verdicts {   * @NFT_MSG_DESTROYSETELEM: destroy a set element (enum nft_set_elem_attributes)   * @NFT_MSG_DESTROYOBJ: destroy a stateful object (enum nft_object_attributes)   * @NFT_MSG_DESTROYFLOWTABLE: destroy flow table (enum 
nft_flowtable_attributes) + * @NFT_MSG_GETSETELEM_RESET: get set elements and reset attached stateful expressions (enum nft_set_elem_attributes)   */  enum nf_tables_msg_types {  	NFT_MSG_NEWTABLE, @@ -140,6 +141,7 @@ enum nf_tables_msg_types {  	NFT_MSG_DESTROYSETELEM,  	NFT_MSG_DESTROYOBJ,  	NFT_MSG_DESTROYFLOWTABLE, +	NFT_MSG_GETSETELEM_RESET,  	NFT_MSG_MAX,  }; @@ -859,12 +861,14 @@ enum nft_exthdr_flags {   * @NFT_EXTHDR_OP_TCP: match against tcp options   * @NFT_EXTHDR_OP_IPV4: match against ipv4 options   * @NFT_EXTHDR_OP_SCTP: match against sctp chunks + * @NFT_EXTHDR_OP_DCCP: match against dccp options   */  enum nft_exthdr_op {  	NFT_EXTHDR_OP_IPV6,  	NFT_EXTHDR_OP_TCPOPT,  	NFT_EXTHDR_OP_IPV4,  	NFT_EXTHDR_OP_SCTP, +	NFT_EXTHDR_OP_DCCP,  	__NFT_EXTHDR_OP_MAX  };  #define NFT_EXTHDR_OP_MAX	(__NFT_EXTHDR_OP_MAX - 1) diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index c59fec406da5..88eb85c63029 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -11,7 +11,7 @@   * Copyright 2008 Jouni Malinen <[email protected]>   * Copyright 2008 Colin McCabe <[email protected]>   * Copyright 2015-2017	Intel Deutschland GmbH - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation   *   * Permission to use, copy, modify, and/or distribute this software for any   * purpose with or without fee is hereby granted, provided that the above @@ -1309,6 +1309,11 @@   *	The number of peers that HW timestamping can be enabled for concurrently   *	is indicated by %NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS.   * + * @NL80211_CMD_LINKS_REMOVED: Notify userspace about the removal of STA MLD + *	setup links due to AP MLD removing the corresponding affiliated APs with + *	Multi-Link reconfiguration. %NL80211_ATTR_MLO_LINKS is used to provide + *	information about the removed STA MLD setup links. 
+ *   * @NL80211_CMD_MAX: highest used command number   * @__NL80211_CMD_AFTER_LAST: internal use   */ @@ -1562,6 +1567,8 @@ enum nl80211_commands {  	NL80211_CMD_SET_HW_TIMESTAMP, +	NL80211_CMD_LINKS_REMOVED, +  	/* add new commands above here */  	/* used to define NL80211_CMD_MAX below */ @@ -2805,6 +2812,9 @@ enum nl80211_commands {   *	index. If the userspace includes more RNR elements than number of   *	MBSSID elements then these will be added in every EMA beacon.   * + * @NL80211_ATTR_MLO_LINK_DISABLED: Flag attribute indicating that the link is + *	disabled. + *   * @NUM_NL80211_ATTR: total number of nl80211_attrs available   * @NL80211_ATTR_MAX: highest attribute number currently defined   * @__NL80211_ATTR_AFTER_LAST: internal use @@ -3341,6 +3351,8 @@ enum nl80211_attrs {  	NL80211_ATTR_EMA_RNR_ELEMS, +	NL80211_ATTR_MLO_LINK_DISABLED, +  	/* add attributes here, update the policy in nl80211.c */  	__NL80211_ATTR_AFTER_LAST, @@ -3667,6 +3679,13 @@ enum nl80211_eht_ru_alloc {   *	(u8, see &enum nl80211_eht_gi)   * @NL80211_RATE_INFO_EHT_RU_ALLOC: EHT RU allocation, if not present then   *	non-OFDMA was used (u8, see &enum nl80211_eht_ru_alloc) + * @NL80211_RATE_INFO_S1G_MCS: S1G MCS index (u8, 0-10) + * @NL80211_RATE_INFO_S1G_NSS: S1G NSS value (u8, 1-4) + * @NL80211_RATE_INFO_1_MHZ_WIDTH: 1 MHz S1G rate + * @NL80211_RATE_INFO_2_MHZ_WIDTH: 2 MHz S1G rate + * @NL80211_RATE_INFO_4_MHZ_WIDTH: 4 MHz S1G rate + * @NL80211_RATE_INFO_8_MHZ_WIDTH: 8 MHz S1G rate + * @NL80211_RATE_INFO_16_MHZ_WIDTH: 16 MHz S1G rate   * @__NL80211_RATE_INFO_AFTER_LAST: internal use   */  enum nl80211_rate_info { @@ -3693,6 +3712,13 @@ enum nl80211_rate_info {  	NL80211_RATE_INFO_EHT_NSS,  	NL80211_RATE_INFO_EHT_GI,  	NL80211_RATE_INFO_EHT_RU_ALLOC, +	NL80211_RATE_INFO_S1G_MCS, +	NL80211_RATE_INFO_S1G_NSS, +	NL80211_RATE_INFO_1_MHZ_WIDTH, +	NL80211_RATE_INFO_2_MHZ_WIDTH, +	NL80211_RATE_INFO_4_MHZ_WIDTH, +	NL80211_RATE_INFO_8_MHZ_WIDTH, +	NL80211_RATE_INFO_16_MHZ_WIDTH,  	/* keep last 
*/  	__NL80211_RATE_INFO_AFTER_LAST, @@ -4424,6 +4450,7 @@ enum nl80211_sched_scan_match_attr {   * @NL80211_RRF_NO_160MHZ: 160MHz operation not allowed   * @NL80211_RRF_NO_HE: HE operation not allowed   * @NL80211_RRF_NO_320MHZ: 320MHz operation not allowed + * @NL80211_RRF_NO_EHT: EHT operation not allowed   */  enum nl80211_reg_rule_flags {  	NL80211_RRF_NO_OFDM		= 1<<0, @@ -4443,6 +4470,7 @@ enum nl80211_reg_rule_flags {  	NL80211_RRF_NO_160MHZ		= 1<<16,  	NL80211_RRF_NO_HE		= 1<<17,  	NL80211_RRF_NO_320MHZ		= 1<<18, +	NL80211_RRF_NO_EHT		= 1<<19,  };  #define NL80211_RRF_PASSIVE_SCAN	NL80211_RRF_NO_IR diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index c5d62ee82567..e94870e77ee9 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -765,6 +765,7 @@ struct ovs_action_push_vlan {   */  enum ovs_hash_alg {  	OVS_HASH_ALG_L4, +	OVS_HASH_ALG_SYM_L4,  };  /* diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index dc2000e0fe3a..e5f558d96493 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -738,6 +738,7 @@  #define PCI_EXT_CAP_ID_DVSEC	0x23	/* Designated Vendor-Specific */  #define PCI_EXT_CAP_ID_DLF	0x25	/* Data Link Feature */  #define PCI_EXT_CAP_ID_PL_16GT	0x26	/* Physical Layer 16.0 GT/s */ +#define PCI_EXT_CAP_ID_PL_32GT  0x2A    /* Physical Layer 32.0 GT/s */  #define PCI_EXT_CAP_ID_DOE	0x2E	/* Data Object Exchange */  #define PCI_EXT_CAP_ID_MAX	PCI_EXT_CAP_ID_DOE diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 648a82f32666..7865f5a9885b 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -594,6 +594,10 @@ enum {  	TCA_FLOWER_KEY_L2TPV3_SID,	/* be32 */ +	TCA_FLOWER_L2_MISS,		/* u8 */ + +	TCA_FLOWER_KEY_CFM,		/* nested */ +  	__TCA_FLOWER_MAX,  }; @@ -702,6 +706,13 @@ enum {  	TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1),  }; +enum { +	TCA_FLOWER_KEY_CFM_OPT_UNSPEC, +	
TCA_FLOWER_KEY_CFM_MD_LEVEL, +	TCA_FLOWER_KEY_CFM_OPCODE, +	TCA_FLOWER_KEY_CFM_OPT_MAX, +}; +  #define TCA_FLOWER_MASK_FLAGS_RANGE	(1 << 0) /* Range-based match */  /* Match-all classifier */ diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index 51a7addc56c6..00f6ff0aff1f 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -1260,6 +1260,16 @@ enum {  };  enum { +	TCA_TAPRIO_OFFLOAD_STATS_PAD = 1,	/* u64 */ +	TCA_TAPRIO_OFFLOAD_STATS_WINDOW_DROPS,	/* u64 */ +	TCA_TAPRIO_OFFLOAD_STATS_TX_OVERRUNS,	/* u64 */ + +	/* add new constants above here */ +	__TCA_TAPRIO_OFFLOAD_STATS_CNT, +	TCA_TAPRIO_OFFLOAD_STATS_MAX = (__TCA_TAPRIO_OFFLOAD_STATS_CNT - 1) +}; + +enum {  	TCA_TAPRIO_ATTR_UNSPEC,  	TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */  	TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST, /* nested of entry */ diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index f23d9a16507f..3c36aeade991 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -294,4 +294,15 @@ struct prctl_mm_map {  #define PR_SET_MEMORY_MERGE		67  #define PR_GET_MEMORY_MERGE		68 + +#define PR_RISCV_V_SET_CONTROL		69 +#define PR_RISCV_V_GET_CONTROL		70 +# define PR_RISCV_V_VSTATE_CTRL_DEFAULT		0 +# define PR_RISCV_V_VSTATE_CTRL_OFF		1 +# define PR_RISCV_V_VSTATE_CTRL_ON		2 +# define PR_RISCV_V_VSTATE_CTRL_INHERIT		(1 << 4) +# define PR_RISCV_V_VSTATE_CTRL_CUR_MASK	0x3 +# define PR_RISCV_V_VSTATE_CTRL_NEXT_MASK	0xc +# define PR_RISCV_V_VSTATE_CTRL_MASK		0x1f +  #endif /* _LINUX_PRCTL_H */ diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h index 1d108d597f66..05cc35fc94ac 100644 --- a/include/uapi/linux/ptp_clock.h +++ b/include/uapi/linux/ptp_clock.h @@ -95,7 +95,8 @@ struct ptp_clock_caps {  	int cross_timestamping;  	/* Whether the clock supports adjust phase */  	int adjust_phase; -	int rsv[12];   /* Reserved for future use. 
*/ +	int max_phase_adj; /* Maximum phase adjustment in nanoseconds. */ +	int rsv[11];       /* Reserved for future use. */  };  struct ptp_extts_request { diff --git a/include/uapi/linux/tps6594_pfsm.h b/include/uapi/linux/tps6594_pfsm.h new file mode 100644 index 000000000000..c69569e0a7a2 --- /dev/null +++ b/include/uapi/linux/tps6594_pfsm.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Userspace ABI for TPS6594 PMIC Pre-configurable Finite State Machine + * + * Copyright (C) 2023 BayLibre Incorporated - https://www.baylibre.com/ + */ + +#ifndef __TPS6594_PFSM_H +#define __TPS6594_PFSM_H + +#include <linux/const.h> +#include <linux/ioctl.h> +#include <linux/types.h> + +/** + * struct pmic_state_opt - PMIC state options + * @gpio_retention: if enabled, power rails associated with GPIO retention remain active + * @ddr_retention: if enabled, power rails associated with DDR retention remain active + * @mcu_only_startup_dest: if enabled, startup destination state is MCU_ONLY + */ +struct pmic_state_opt { +	__u8 gpio_retention; +	__u8 ddr_retention; +	__u8 mcu_only_startup_dest; +}; + +/* Commands */ +#define PMIC_BASE			'P' + +#define PMIC_GOTO_STANDBY		_IO(PMIC_BASE, 0) +#define PMIC_GOTO_LP_STANDBY		_IO(PMIC_BASE, 1) +#define PMIC_UPDATE_PGM			_IO(PMIC_BASE, 2) +#define PMIC_SET_ACTIVE_STATE		_IO(PMIC_BASE, 3) +#define PMIC_SET_MCU_ONLY_STATE		_IOW(PMIC_BASE, 4, struct pmic_state_opt) +#define PMIC_SET_RETENTION_STATE	_IOW(PMIC_BASE, 5, struct pmic_state_opt) + +#endif /*  __TPS6594_PFSM_H */ diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index b17e3a21b15f..82ec6af71a1d 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -376,7 +376,10 @@ struct usb_string_descriptor {  	__u8  bLength;  	__u8  bDescriptorType; -	__le16 wData[1];		/* UTF-16LE encoded */ +	union { +		__le16 legacy_padding; +		__DECLARE_FLEX_ARRAY(__le16, wData);	/* UTF-16LE encoded */ +	};  } 
__attribute__ ((packed));  /* note that "string" zero is special, it holds language codes that diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index b71276bd7f91..20c804bdc09c 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -213,6 +213,7 @@ struct vfio_device_info {  #define VFIO_DEVICE_FLAGS_AP	(1 << 5)	/* vfio-ap device */  #define VFIO_DEVICE_FLAGS_FSL_MC (1 << 6)	/* vfio-fsl-mc device */  #define VFIO_DEVICE_FLAGS_CAPS	(1 << 7)	/* Info supports caps */ +#define VFIO_DEVICE_FLAGS_CDX	(1 << 8)	/* vfio-cdx device */  	__u32	num_regions;	/* Max region index + 1 */  	__u32	num_irqs;	/* Max IRQ index + 1 */  	__u32   cap_offset;	/* Offset within info struct of first cap */ @@ -240,6 +241,20 @@ struct vfio_device_info {  #define VFIO_DEVICE_INFO_CAP_ZPCI_UTIL		3  #define VFIO_DEVICE_INFO_CAP_ZPCI_PFIP		4 +/* + * The following VFIO_DEVICE_INFO capability reports support for PCIe AtomicOp + * completion to the root bus with supported widths provided via flags. + */ +#define VFIO_DEVICE_INFO_CAP_PCI_ATOMIC_COMP	5 +struct vfio_device_info_cap_pci_atomic_comp { +	struct vfio_info_cap_header header; +	__u32 flags; +#define VFIO_PCI_ATOMIC_COMP32	(1 << 0) +#define VFIO_PCI_ATOMIC_COMP64	(1 << 1) +#define VFIO_PCI_ATOMIC_COMP128	(1 << 2) +	__u32 reserved; +}; +  /**   * VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8,   *				       struct vfio_region_info) @@ -511,6 +526,9 @@ struct vfio_region_info_cap_nvlink2_lnkspd {   * then add and unmask vectors, it's up to userspace to make the decision   * whether to allocate the maximum supported number of vectors or tear   * down setup and incrementally increase the vectors as each is enabled. + * Absence of the NORESIZE flag indicates that vectors can be enabled + * and disabled dynamically without impacting other vectors within the + * index.   
*/  struct vfio_irq_info {  	__u32	argsz; diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h index 92e1b700b51c..f5c48b61ab62 100644 --- a/include/uapi/linux/vhost.h +++ b/include/uapi/linux/vhost.h @@ -45,6 +45,25 @@  #define VHOST_SET_LOG_BASE _IOW(VHOST_VIRTIO, 0x04, __u64)  /* Specify an eventfd file descriptor to signal on log write. */  #define VHOST_SET_LOG_FD _IOW(VHOST_VIRTIO, 0x07, int) +/* By default, a device gets one vhost_worker that its virtqueues share. This + * command allows the owner of the device to create an additional vhost_worker + * for the device. It can later be bound to 1 or more of its virtqueues using + * the VHOST_ATTACH_VRING_WORKER command. + * + * This must be called after VHOST_SET_OWNER and the caller must be the owner + * of the device. The new thread will inherit caller's cgroups and namespaces, + * and will share the caller's memory space. The new thread will also be + * counted against the caller's RLIMIT_NPROC value. + * + * The worker's ID used in other commands will be returned in + * vhost_worker_state. + */ +#define VHOST_NEW_WORKER _IOR(VHOST_VIRTIO, 0x8, struct vhost_worker_state) +/* Free a worker created with VHOST_NEW_WORKER if it's not attached to any + * virtqueue. If userspace is not able to call this for workers it has created, + * the kernel will free all the device's workers when the device is closed. + */ +#define VHOST_FREE_WORKER _IOW(VHOST_VIRTIO, 0x9, struct vhost_worker_state)  /* Ring setup. */  /* Set number of descriptors in ring. This parameter can not @@ -70,6 +89,18 @@  #define VHOST_VRING_BIG_ENDIAN 1  #define VHOST_SET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x13, struct vhost_vring_state)  #define VHOST_GET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x14, struct vhost_vring_state) +/* Attach a vhost_worker created with VHOST_NEW_WORKER to one of the device's + * virtqueues. + * + * This will replace the virtqueue's existing worker. 
If the replaced worker + * is no longer attached to any virtqueues, it can be freed with + * VHOST_FREE_WORKER. + */ +#define VHOST_ATTACH_VRING_WORKER _IOW(VHOST_VIRTIO, 0x15,		\ +				       struct vhost_vring_worker) +/* Return the vring worker's ID */ +#define VHOST_GET_VRING_WORKER _IOWR(VHOST_VIRTIO, 0x16,		\ +				     struct vhost_vring_worker)  /* The following ioctls use eventfd file descriptors to signal and poll   * for events. */ diff --git a/include/uapi/linux/vhost_types.h b/include/uapi/linux/vhost_types.h index c5690a8992d8..d3aad12ad1fa 100644 --- a/include/uapi/linux/vhost_types.h +++ b/include/uapi/linux/vhost_types.h @@ -47,6 +47,22 @@ struct vhost_vring_addr {  	__u64 log_guest_addr;  }; +struct vhost_worker_state { +	/* +	 * For VHOST_NEW_WORKER the kernel will return the new vhost_worker id. +	 * For VHOST_FREE_WORKER this must be set to the id of the vhost_worker +	 * to free. +	 */ +	unsigned int worker_id; +}; + +struct vhost_vring_worker { +	/* vring index */ +	unsigned int index; +	/* The id of the vhost_worker returned from VHOST_NEW_WORKER */ +	unsigned int worker_id; +}; +  /* no alignment requirement */  struct vhost_iotlb_msg {  	__u64 iova; diff --git a/include/uapi/linux/wireless.h b/include/uapi/linux/wireless.h index 08967b3f19c8..3c2ad5fae17f 100644 --- a/include/uapi/linux/wireless.h +++ b/include/uapi/linux/wireless.h @@ -835,7 +835,7 @@ struct iw_encode_ext {  			       * individual keys */  	__u16		alg; /* IW_ENCODE_ALG_* */  	__u16		key_len; -	__u8		key[0]; +	__u8		key[];  };  /* SIOCSIWMLME data */ |