Diffstat (limited to 'include/uapi/linux/bpf.h')
 include/uapi/linux/bpf.h | 326 +++++++++++++++++++++++++++++++++++++--------
 1 file changed, 257 insertions(+), 69 deletions(-)
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 22f235260a3a..7bbf1b65be10 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -73,7 +73,7 @@ struct bpf_insn {
 /* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */
 struct bpf_lpm_trie_key {
 	__u32	prefixlen;	/* up to 32 for AF_INET, 128 for AF_INET6 */
-	__u8	data[0];	/* Arbitrary size */
+	__u8	data[];	/* Arbitrary size */
 };
 
 struct bpf_cgroup_storage_key {
@@ -111,6 +111,8 @@ enum bpf_cmd {
 	BPF_MAP_LOOKUP_AND_DELETE_BATCH,
 	BPF_MAP_UPDATE_BATCH,
 	BPF_MAP_DELETE_BATCH,
+	BPF_LINK_CREATE,
+	BPF_LINK_UPDATE,
 };
 
 enum bpf_map_type {
@@ -181,6 +183,7 @@ enum bpf_prog_type {
 	BPF_PROG_TYPE_TRACING,
 	BPF_PROG_TYPE_STRUCT_OPS,
 	BPF_PROG_TYPE_EXT,
+	BPF_PROG_TYPE_LSM,
 };
 
 enum bpf_attach_type {
@@ -210,6 +213,8 @@ enum bpf_attach_type {
 	BPF_TRACE_RAW_TP,
 	BPF_TRACE_FENTRY,
 	BPF_TRACE_FEXIT,
+	BPF_MODIFY_RETURN,
+	BPF_LSM_MAC,
 	__MAX_BPF_ATTACH_TYPE
 };
@@ -325,44 +330,46 @@ enum bpf_attach_type {
 #define BPF_PSEUDO_CALL		1
 
 /* flags for BPF_MAP_UPDATE_ELEM command */
-#define BPF_ANY		0 /* create new element or update existing */
-#define BPF_NOEXIST	1 /* create new element if it didn't exist */
-#define BPF_EXIST	2 /* update existing element */
-#define BPF_F_LOCK	4 /* spin_lock-ed map_lookup/map_update */
+enum {
+	BPF_ANY		= 0, /* create new element or update existing */
+	BPF_NOEXIST	= 1, /* create new element if it didn't exist */
+	BPF_EXIST	= 2, /* update existing element */
+	BPF_F_LOCK	= 4, /* spin_lock-ed map_lookup/map_update */
+};
 
 /* flags for BPF_MAP_CREATE command */
-#define BPF_F_NO_PREALLOC	(1U << 0)
+enum {
+	BPF_F_NO_PREALLOC	= (1U << 0),
 /* Instead of having one common LRU list in the
  * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
  * which can scale and perform better.
  * Note, the LRU nodes (including free nodes) cannot be moved
  * across different LRU lists.
  */
-#define BPF_F_NO_COMMON_LRU	(1U << 1)
+	BPF_F_NO_COMMON_LRU	= (1U << 1),
 /* Specify numa node during map creation */
-#define BPF_F_NUMA_NODE		(1U << 2)
-
-#define BPF_OBJ_NAME_LEN 16U
+	BPF_F_NUMA_NODE		= (1U << 2),
 
 /* Flags for accessing BPF object from syscall side. */
-#define BPF_F_RDONLY		(1U << 3)
-#define BPF_F_WRONLY		(1U << 4)
+	BPF_F_RDONLY		= (1U << 3),
+	BPF_F_WRONLY		= (1U << 4),
 
 /* Flag for stack_map, store build_id+offset instead of pointer */
-#define BPF_F_STACK_BUILD_ID	(1U << 5)
+	BPF_F_STACK_BUILD_ID	= (1U << 5),
 
 /* Zero-initialize hash function seed. This should only be used for testing. */
-#define BPF_F_ZERO_SEED		(1U << 6)
+	BPF_F_ZERO_SEED		= (1U << 6),
 
 /* Flags for accessing BPF object from program side. */
-#define BPF_F_RDONLY_PROG	(1U << 7)
-#define BPF_F_WRONLY_PROG	(1U << 8)
+	BPF_F_RDONLY_PROG	= (1U << 7),
+	BPF_F_WRONLY_PROG	= (1U << 8),
 
 /* Clone map from listener for newly accepted socket */
-#define BPF_F_CLONE		(1U << 9)
+	BPF_F_CLONE		= (1U << 9),
 
 /* Enable memory-mapping BPF map */
-#define BPF_F_MMAPABLE		(1U << 10)
+	BPF_F_MMAPABLE		= (1U << 10),
+};
 
 /* Flags for BPF_PROG_QUERY. */
@@ -391,6 +398,8 @@ struct bpf_stack_build_id {
 	};
 };
 
+#define BPF_OBJ_NAME_LEN 16U
+
 union bpf_attr {
 	struct { /* anonymous struct used by BPF_MAP_CREATE command */
 		__u32	map_type;	/* one of enum bpf_map_type */
@@ -534,7 +543,7 @@ union bpf_attr {
 		__u32		prog_cnt;
 	} query;
 
-	struct {
+	struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
 		__u64 name;
 		__u32 prog_fd;
 	} raw_tracepoint;
@@ -562,6 +571,24 @@ union bpf_attr {
 		__u64		probe_offset;	/* output: probe_offset */
 		__u64		probe_addr;	/* output: probe_addr */
 	} task_fd_query;
+
+	struct { /* struct used by BPF_LINK_CREATE command */
+		__u32		prog_fd;	/* eBPF program to attach */
+		__u32		target_fd;	/* object to attach to */
+		__u32		attach_type;	/* attach type */
+		__u32		flags;		/* extra flags */
+	} link_create;
+
+	struct { /* struct used by BPF_LINK_UPDATE command */
+		__u32		link_fd;	/* link fd */
+		/* new program fd to update link with */
+		__u32		new_prog_fd;
+		__u32		flags;		/* extra flags */
+		/* expected link's program fd; is specified only if
+		 * BPF_F_REPLACE flag is set in flags */
+		__u32		old_prog_fd;
+	} link_update;
+
 } __attribute__((aligned(8)));
 
 /* The description below is an attempt at providing documentation to eBPF
@@ -1615,7 +1642,7 @@ union bpf_attr {
 * 		ifindex, but doesn't require a map to do so.
 * 	Return
 * 		**XDP_REDIRECT** on success, or the value of the two lower bits
- * 		of the **flags* argument on error.
+ * 		of the *flags* argument on error.
 *
 * int bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)
 * 	Description
@@ -2890,6 +2917,114 @@ union bpf_attr {
 *		Obtain the 64bit jiffies
 *	Return
 *		The 64 bit jiffies
+ *
+ * int bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags)
+ *	Description
+ *		For an eBPF program attached to a perf event, retrieve the
+ *		branch records (struct perf_branch_entry) associated to *ctx*
+ *		and store it in	the buffer pointed by *buf* up to size
+ *		*size* bytes.
+ *	Return
+ *		On success, number of bytes written to *buf*. On error, a
+ *		negative value.
+ *
+ *		The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to
+ *		instead	return the number of bytes required to store all the
+ *		branch entries. If this flag is set, *buf* may be NULL.
+ *
+ *		**-EINVAL** if arguments invalid or **size** not a multiple
+ *		of sizeof(struct perf_branch_entry).
+ *
+ *		**-ENOENT** if architecture does not support branch records.
+ *
+ * int bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size)
+ *	Description
+ *		Returns 0 on success, values for *pid* and *tgid* as seen from the current
+ *		*namespace* will be returned in *nsdata*.
+ *
+ *		On failure, the returned value is one of the following:
+ *
+ *		**-EINVAL** if dev and inum supplied don't match dev_t and inode number
+ *              with nsfs of current task, or if dev conversion to dev_t lost high bits.
+ *
+ *		**-ENOENT** if pidns does not exists for the current task.
+ *
+ * int bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
+ *	Description
+ *		Write raw *data* blob into a special BPF perf event held by
+ *		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
+ *		event must have the following attributes: **PERF_SAMPLE_RAW**
+ *		as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
+ *		**PERF_COUNT_SW_BPF_OUTPUT** as **config**.
+ *
+ *		The *flags* are used to indicate the index in *map* for which
+ *		the value must be put, masked with **BPF_F_INDEX_MASK**.
+ *		Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
+ *		to indicate that the index of the current CPU core should be
+ *		used.
+ *
+ *		The value to write, of *size*, is passed through eBPF stack and
+ *		pointed by *data*.
+ *
+ *		*ctx* is a pointer to in-kernel struct xdp_buff.
+ *
+ *		This helper is similar to **bpf_perf_eventoutput**\ () but
+ *		restricted to raw_tracepoint bpf programs.
+ *	Return
+ *		0 on success, or a negative error in case of failure.
+ *
+ * u64 bpf_get_netns_cookie(void *ctx)
+ * 	Description
+ * 		Retrieve the cookie (generated by the kernel) of the network
+ * 		namespace the input *ctx* is associated with. The network
+ * 		namespace cookie remains stable for its lifetime and provides
+ * 		a global identifier that can be assumed unique. If *ctx* is
+ * 		NULL, then the helper returns the cookie for the initial
+ * 		network namespace. The cookie itself is very similar to that
+ * 		of bpf_get_socket_cookie() helper, but for network namespaces
+ * 		instead of sockets.
+ * 	Return
+ * 		A 8-byte long opaque number.
+ *
+ * u64 bpf_get_current_ancestor_cgroup_id(int ancestor_level)
+ * 	Description
+ * 		Return id of cgroup v2 that is ancestor of the cgroup associated
+ * 		with the current task at the *ancestor_level*. The root cgroup
+ * 		is at *ancestor_level* zero and each step down the hierarchy
+ * 		increments the level. If *ancestor_level* == level of cgroup
+ * 		associated with the current task, then return value will be the
+ * 		same as that of **bpf_get_current_cgroup_id**\ ().
+ *
+ * 		The helper is useful to implement policies based on cgroups
+ * 		that are upper in hierarchy than immediate cgroup associated
+ * 		with the current task.
+ *
+ * 		The format of returned id and helper limitations are same as in
+ * 		**bpf_get_current_cgroup_id**\ ().
+ * 	Return
+ * 		The id is returned or 0 in case the id could not be retrieved.
+ *
+ * int bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags)
+ *	Description
+ *		Assign the *sk* to the *skb*. When combined with appropriate
+ *		routing configuration to receive the packet towards the socket,
+ *		will cause *skb* to be delivered to the specified socket.
+ *		Subsequent redirection of *skb* via  **bpf_redirect**\ (),
+ *		**bpf_clone_redirect**\ () or other methods outside of BPF may
+ *		interfere with successful delivery to the socket.
+ *
+ *		This operation is only valid from TC ingress path.
+ *
+ *		The *flags* argument must be zero.
+ *	Return
+ *		0 on success, or a negative errno in case of failure.
+ *
+ *		* **-EINVAL**		Unsupported flags specified.
+ *		* **-ENOENT**		Socket is unavailable for assignment.
+ *		* **-ENETUNREACH**	Socket is unreachable (wrong netns).
+ *		* **-EOPNOTSUPP**	Unsupported operation, for example a
+ *					call from outside of TC ingress.
+ *		* **-ESOCKTNOSUPPORT**	Socket type not supported (reuseport).
 */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -3010,7 +3145,13 @@ union bpf_attr {
 	FN(probe_read_kernel_str),	\
 	FN(tcp_send_ack),		\
 	FN(send_signal_thread),		\
-	FN(jiffies64),
+	FN(jiffies64),			\
+	FN(read_branch_records),	\
+	FN(get_ns_current_pid_tgid),	\
+	FN(xdp_output),			\
+	FN(get_netns_cookie),		\
+	FN(get_current_ancestor_cgroup_id),	\
+	FN(sk_assign),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
 * function eBPF program intends to call
@@ -3025,69 +3166,100 @@ enum bpf_func_id {
 
 /* All flags used by eBPF helper functions, placed here. */
 
 /* BPF_FUNC_skb_store_bytes flags. */
-#define BPF_F_RECOMPUTE_CSUM		(1ULL << 0)
-#define BPF_F_INVALIDATE_HASH		(1ULL << 1)
+enum {
+	BPF_F_RECOMPUTE_CSUM		= (1ULL << 0),
+	BPF_F_INVALIDATE_HASH		= (1ULL << 1),
+};
 
 /* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
 * First 4 bits are for passing the header field size.
 */
-#define BPF_F_HDR_FIELD_MASK		0xfULL
+enum {
+	BPF_F_HDR_FIELD_MASK		= 0xfULL,
+};
 
 /* BPF_FUNC_l4_csum_replace flags. */
-#define BPF_F_PSEUDO_HDR		(1ULL << 4)
-#define BPF_F_MARK_MANGLED_0		(1ULL << 5)
-#define BPF_F_MARK_ENFORCE		(1ULL << 6)
+enum {
+	BPF_F_PSEUDO_HDR		= (1ULL << 4),
+	BPF_F_MARK_MANGLED_0		= (1ULL << 5),
+	BPF_F_MARK_ENFORCE		= (1ULL << 6),
+};
 
 /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
-#define BPF_F_INGRESS			(1ULL << 0)
+enum {
+	BPF_F_INGRESS			= (1ULL << 0),
+};
 
 /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
-#define BPF_F_TUNINFO_IPV6		(1ULL << 0)
+enum {
+	BPF_F_TUNINFO_IPV6		= (1ULL << 0),
+};
 
 /* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */
-#define BPF_F_SKIP_FIELD_MASK		0xffULL
-#define BPF_F_USER_STACK		(1ULL << 8)
+enum {
+	BPF_F_SKIP_FIELD_MASK		= 0xffULL,
+	BPF_F_USER_STACK		= (1ULL << 8),
 /* flags used by BPF_FUNC_get_stackid only. */
-#define BPF_F_FAST_STACK_CMP		(1ULL << 9)
-#define BPF_F_REUSE_STACKID		(1ULL << 10)
+	BPF_F_FAST_STACK_CMP		= (1ULL << 9),
+	BPF_F_REUSE_STACKID		= (1ULL << 10),
 /* flags used by BPF_FUNC_get_stack only. */
-#define BPF_F_USER_BUILD_ID		(1ULL << 11)
+	BPF_F_USER_BUILD_ID		= (1ULL << 11),
+};
 
 /* BPF_FUNC_skb_set_tunnel_key flags. */
-#define BPF_F_ZERO_CSUM_TX		(1ULL << 1)
-#define BPF_F_DONT_FRAGMENT		(1ULL << 2)
-#define BPF_F_SEQ_NUMBER		(1ULL << 3)
+enum {
+	BPF_F_ZERO_CSUM_TX		= (1ULL << 1),
+	BPF_F_DONT_FRAGMENT		= (1ULL << 2),
+	BPF_F_SEQ_NUMBER		= (1ULL << 3),
+};
 
 /* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
 * BPF_FUNC_perf_event_read_value flags.
 */
-#define BPF_F_INDEX_MASK		0xffffffffULL
-#define BPF_F_CURRENT_CPU		BPF_F_INDEX_MASK
+enum {
+	BPF_F_INDEX_MASK		= 0xffffffffULL,
+	BPF_F_CURRENT_CPU		= BPF_F_INDEX_MASK,
 /* BPF_FUNC_perf_event_output for sk_buff input context. */
-#define BPF_F_CTXLEN_MASK		(0xfffffULL << 32)
+	BPF_F_CTXLEN_MASK		= (0xfffffULL << 32),
+};
 
 /* Current network namespace */
-#define BPF_F_CURRENT_NETNS		(-1L)
+enum {
+	BPF_F_CURRENT_NETNS		= (-1L),
+};
 
 /* BPF_FUNC_skb_adjust_room flags. */
-#define BPF_F_ADJ_ROOM_FIXED_GSO	(1ULL << 0)
+enum {
+	BPF_F_ADJ_ROOM_FIXED_GSO	= (1ULL << 0),
+	BPF_F_ADJ_ROOM_ENCAP_L3_IPV4	= (1ULL << 1),
+	BPF_F_ADJ_ROOM_ENCAP_L3_IPV6	= (1ULL << 2),
+	BPF_F_ADJ_ROOM_ENCAP_L4_GRE	= (1ULL << 3),
+	BPF_F_ADJ_ROOM_ENCAP_L4_UDP	= (1ULL << 4),
+};
 
-#define BPF_ADJ_ROOM_ENCAP_L2_MASK	0xff
-#define BPF_ADJ_ROOM_ENCAP_L2_SHIFT	56
+enum {
+	BPF_ADJ_ROOM_ENCAP_L2_MASK	= 0xff,
+	BPF_ADJ_ROOM_ENCAP_L2_SHIFT	= 56,
+};
 
-#define BPF_F_ADJ_ROOM_ENCAP_L3_IPV4	(1ULL << 1)
-#define BPF_F_ADJ_ROOM_ENCAP_L3_IPV6	(1ULL << 2)
-#define BPF_F_ADJ_ROOM_ENCAP_L4_GRE	(1ULL << 3)
-#define BPF_F_ADJ_ROOM_ENCAP_L4_UDP	(1ULL << 4)
 #define BPF_F_ADJ_ROOM_ENCAP_L2(len)	(((__u64)len & \
 					  BPF_ADJ_ROOM_ENCAP_L2_MASK) \
 					 << BPF_ADJ_ROOM_ENCAP_L2_SHIFT)
 
 /* BPF_FUNC_sysctl_get_name flags. */
-#define BPF_F_SYSCTL_BASE_NAME		(1ULL << 0)
+enum {
+	BPF_F_SYSCTL_BASE_NAME		= (1ULL << 0),
+};
 
 /* BPF_FUNC_sk_storage_get flags */
-#define BPF_SK_STORAGE_GET_F_CREATE	(1ULL << 0)
+enum {
+	BPF_SK_STORAGE_GET_F_CREATE	= (1ULL << 0),
+};
+
+/* BPF_FUNC_read_branch_records flags. */
+enum {
+	BPF_F_GET_BRANCH_RECORDS_SIZE	= (1ULL << 0),
+};
 
 /* Mode for BPF_FUNC_skb_adjust_room helper. */
 enum bpf_adj_room_mode {
@@ -3153,6 +3325,7 @@ struct __sk_buff {
 	__u32 wire_len;
 	__u32 gso_segs;
 	__bpf_md_ptr(struct bpf_sock *, sk);
+	__u32 gso_size;
 };
 
 struct bpf_tunnel_key {
@@ -3505,13 +3678,14 @@ struct bpf_sock_ops {
 };
 
 /* Definitions for bpf_sock_ops_cb_flags */
-#define BPF_SOCK_OPS_RTO_CB_FLAG	(1<<0)
-#define BPF_SOCK_OPS_RETRANS_CB_FLAG	(1<<1)
-#define BPF_SOCK_OPS_STATE_CB_FLAG	(1<<2)
-#define BPF_SOCK_OPS_RTT_CB_FLAG	(1<<3)
-#define BPF_SOCK_OPS_ALL_CB_FLAGS       0xF		/* Mask of all currently
-							 * supported cb flags
-							 */
+enum {
+	BPF_SOCK_OPS_RTO_CB_FLAG	= (1<<0),
+	BPF_SOCK_OPS_RETRANS_CB_FLAG	= (1<<1),
+	BPF_SOCK_OPS_STATE_CB_FLAG	= (1<<2),
+	BPF_SOCK_OPS_RTT_CB_FLAG	= (1<<3),
+/* Mask of all currently supported cb flags */
+	BPF_SOCK_OPS_ALL_CB_FLAGS       = 0xF,
+};
 
 /* List of known BPF sock_ops operators.
 * New entries can only be added at the end
@@ -3590,8 +3764,10 @@ enum {
 	BPF_TCP_MAX_STATES	/* Leave at the end! */
 };
 
-#define TCP_BPF_IW		1001	/* Set TCP initial congestion window */
-#define TCP_BPF_SNDCWND_CLAMP	1002	/* Set sndcwnd_clamp */
+enum {
+	TCP_BPF_IW		= 1001,	/* Set TCP initial congestion window */
+	TCP_BPF_SNDCWND_CLAMP	= 1002,	/* Set sndcwnd_clamp */
+};
 
 struct bpf_perf_event_value {
 	__u64 counter;
@@ -3599,12 +3775,16 @@ struct bpf_perf_event_value {
 	__u64 running;
 };
 
-#define BPF_DEVCG_ACC_MKNOD	(1ULL << 0)
-#define BPF_DEVCG_ACC_READ	(1ULL << 1)
-#define BPF_DEVCG_ACC_WRITE	(1ULL << 2)
+enum {
+	BPF_DEVCG_ACC_MKNOD	= (1ULL << 0),
+	BPF_DEVCG_ACC_READ	= (1ULL << 1),
+	BPF_DEVCG_ACC_WRITE	= (1ULL << 2),
+};
 
-#define BPF_DEVCG_DEV_BLOCK	(1ULL << 0)
-#define BPF_DEVCG_DEV_CHAR	(1ULL << 1)
+enum {
+	BPF_DEVCG_DEV_BLOCK	= (1ULL << 0),
+	BPF_DEVCG_DEV_CHAR	= (1ULL << 1),
+};
 
 struct bpf_cgroup_dev_ctx {
 	/* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
@@ -3620,8 +3800,10 @@ struct bpf_raw_tracepoint_args {
 /* DIRECT:  Skip the FIB rules and go to FIB table associated with device
 * OUTPUT:  Do lookup from egress perspective; default is ingress
 */
-#define BPF_FIB_LOOKUP_DIRECT  (1U << 0)
-#define BPF_FIB_LOOKUP_OUTPUT  (1U << 1)
+enum {
+	BPF_FIB_LOOKUP_DIRECT  = (1U << 0),
+	BPF_FIB_LOOKUP_OUTPUT  = (1U << 1),
+};
 
 enum {
 	BPF_FIB_LKUP_RET_SUCCESS,      /* lookup successful */
@@ -3693,9 +3875,11 @@ enum bpf_task_fd_type {
 	BPF_FD_TYPE_URETPROBE,		/* filename + offset */
 };
 
-#define BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG		(1U << 0)
-#define BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL		(1U << 1)
-#define BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP		(1U << 2)
+enum {
+	BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG		= (1U << 0),
+	BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL		= (1U << 1),
+	BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP		= (1U << 2),
+};
 
 struct bpf_flow_keys {
 	__u16	nhoff;
@@ -3761,4 +3945,8 @@ struct bpf_sockopt {
 	__s32	retval;
 };
 
+struct bpf_pidns_info {
+	__u32 pid;
+	__u32 tgid;
+};
 #endif /* _UAPI__LINUX_BPF_H__ */
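The new BPF_LINK_CREATE and BPF_LINK_UPDATE commands are driven through the regular bpf(2) syscall using the link_create and link_update members of union bpf_attr shown above. A minimal userspace sketch, with error handling elided; prog_fd, new_prog_fd and cgroup_fd are assumed to come from earlier BPF_PROG_LOAD and open(2) calls:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Attach prog_fd to a cgroup via a bpf_link; returns the new link fd. */
static int link_create(int prog_fd, int cgroup_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.target_fd = cgroup_fd;
	attr.link_create.attach_type = BPF_CGROUP_INET_INGRESS;

	return syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
}

/* Atomically swap the link's program, but only if it still runs
 * old_prog_fd; BPF_F_REPLACE is what makes old_prog_fd meaningful.
 */
static int link_update(int link_fd, int new_prog_fd, int old_prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.link_update.link_fd = link_fd;
	attr.link_update.new_prog_fd = new_prog_fd;
	attr.link_update.flags = BPF_F_REPLACE;
	attr.link_update.old_prog_fd = old_prog_fd;

	return syscall(__NR_bpf, BPF_LINK_UPDATE, &attr, sizeof(attr));
}

The attachment lives as long as the returned link fd, which is what makes the compare-and-swap style update in the second call possible.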
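bpf_read_branch_records() suggests a two-call pattern: query the required size with BPF_F_GET_BRANCH_RECORDS_SIZE, then copy into a buffer whose size is a multiple of sizeof(struct perf_branch_entry). A sketch from a perf_event program, assuming a libbpf with up-to-date helper definitions; the 16-entry buffer is an arbitrary choice:

#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/perf_event.h>
#include <bpf/bpf_helpers.h>

struct perf_branch_entry entries[16];

SEC("perf_event")
int branch_prog(struct bpf_perf_event_data *ctx)
{
	int total, copied;

	/* With BPF_F_GET_BRANCH_RECORDS_SIZE, buf may be NULL and the
	 * return value is the number of bytes needed for all entries.
	 */
	total = bpf_read_branch_records(ctx, NULL, 0,
					BPF_F_GET_BRANCH_RECORDS_SIZE);
	if (total < 0)
		return 0; /* e.g. -ENOENT: no branch record support */

	/* size must be a multiple of sizeof(struct perf_branch_entry) */
	copied = bpf_read_branch_records(ctx, entries, sizeof(entries), 0);
	if (copied < 0)
		return 0;

	/* entries[0 .. copied / sizeof(entries[0]) - 1] are now valid */
	return 0;
}

char _license[] SEC("license") = "GPL";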
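bpf_get_ns_current_pid_tgid() fills the new struct bpf_pidns_info declared at the bottom of the header. A sketch; PIDNS_DEV and PIDNS_INO are hypothetical constants standing in for the st_dev/st_ino values userspace would obtain by stat(2)-ing /proc/self/ns/pid before loading the program:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Hypothetical values; in practice supplied from userspace. */
#define PIDNS_DEV 4ULL
#define PIDNS_INO 0xeffffffcULL

SEC("tracepoint/syscalls/sys_enter_getpid")
int ns_pid_prog(void *ctx)
{
	struct bpf_pidns_info ns = {};

	/* -EINVAL/-ENOENT if dev/ino don't name the task's pid namespace */
	if (bpf_get_ns_current_pid_tgid(PIDNS_DEV, PIDNS_INO, &ns,
					sizeof(ns)))
		return 0;

	bpf_printk("pid=%u tgid=%u in target pidns", ns.pid, ns.tgid);
	return 0;
}

char _license[] SEC("license") = "GPL";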
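One plausible use of bpf_get_netns_cookie() is keying per-namespace policy from a cgroup sock_addr program; because the cookie is stable and globally unique, it works as a hash map key. A sketch in which the map layout and the allow/deny logic are illustrative only:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 64);
	__type(key, __u64);	/* netns cookie */
	__type(value, __u32);	/* nonzero = connects allowed */
} netns_policy SEC(".maps");

SEC("cgroup/connect4")
int connect4_prog(struct bpf_sock_addr *ctx)
{
	__u64 cookie = bpf_get_netns_cookie(ctx);
	__u32 *allowed = bpf_map_lookup_elem(&netns_policy, &cookie);

	return allowed && *allowed ? 1 : 0;	/* 1 permits the connect */
}

char _license[] SEC("license") = "GPL";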
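bpf_sk_assign() pairs with the socket lookup helpers on the TC ingress path. A sketch of the intended flow; parsing the packet into the tuple is elided, and the reference taken by the lookup must be released regardless of the assignment result:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int steer_to_socket(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};
	struct bpf_sock *sk;
	int err;

	/* ... fill tuple.ipv4 from the parsed IPv4/TCP headers ... */

	sk = bpf_skc_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
				BPF_F_CURRENT_NETNS, 0);
	if (!sk)
		return TC_ACT_SHOT;

	err = bpf_sk_assign(skb, sk, 0);	/* flags must be zero */
	bpf_sk_release(sk);

	return err ? TC_ACT_SHOT : TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";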
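The regrouped BPF_F_ADJ_ROOM_* values are used exactly as before from program code. For instance, a GRE-in-IPv4 encapsulation sketch that reserves outer header room while telling the kernel what is being added, so GSO accounting stays correct; writing the actual outer headers is left out:

#include <linux/bpf.h>
#include <linux/ip.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int encap_gre(struct __sk_buff *skb)
{
	/* Outer IPv4 header plus the 4-byte GRE base header. */
	int len_diff = sizeof(struct iphdr) + 4;
	__u64 flags = BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 |
		      BPF_F_ADJ_ROOM_ENCAP_L4_GRE;

	if (bpf_skb_adjust_room(skb, len_diff, BPF_ADJ_ROOM_MAC, flags))
		return TC_ACT_SHOT;

	/* ... write the outer headers with bpf_skb_store_bytes() ... */
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";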