Diffstat (limited to 'include/linux/bpf.h')
-rw-r--r--	include/linux/bpf.h	73
1 file changed, 66 insertions, 7 deletions
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 8ee27b8afe81..11134238417d 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -11,14 +11,17 @@
 #include <linux/workqueue.h>
 #include <linux/file.h>
 #include <linux/percpu.h>
+#include <linux/err.h>
 
+struct perf_event;
 struct bpf_map;
 
 /* map is generic key/value storage optionally accesible by eBPF programs */
 struct bpf_map_ops {
 	/* funcs callable from userspace (via syscall) */
 	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
-	void (*map_free)(struct bpf_map *);
+	void (*map_release)(struct bpf_map *map, struct file *map_file);
+	void (*map_free)(struct bpf_map *map);
 	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
 
 	/* funcs callable from userspace and from eBPF programs */
@@ -27,8 +30,9 @@ struct bpf_map_ops {
 	int (*map_delete_elem)(struct bpf_map *map, void *key);
 
 	/* funcs called by prog_array and perf_event_array map */
-	void *(*map_fd_get_ptr) (struct bpf_map *map, int fd);
-	void (*map_fd_put_ptr) (void *ptr);
+	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
+				int fd);
+	void (*map_fd_put_ptr)(void *ptr);
 };
 
 struct bpf_map {
@@ -111,6 +115,31 @@ enum bpf_access_type {
 	BPF_WRITE = 2
 };
 
+/* types of values stored in eBPF registers */
+enum bpf_reg_type {
+	NOT_INIT = 0,		 /* nothing was written into register */
+	UNKNOWN_VALUE,		 /* reg doesn't contain a valid pointer */
+	PTR_TO_CTX,		 /* reg points to bpf_context */
+	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
+	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
+	PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
+	FRAME_PTR,		 /* reg == frame_pointer */
+	PTR_TO_STACK,		 /* reg == frame_pointer + imm */
+	CONST_IMM,		 /* constant integer value */
+
+	/* PTR_TO_PACKET represents:
+	 * skb->data
+	 * skb->data + imm
+	 * skb->data + (u16) var
+	 * skb->data + (u16) var + imm
+	 * if (range > 0) then [ptr, ptr + range - off) is safe to access
+	 * if (id > 0) means that some 'var' was added
+	 * if (off > 0) menas that 'imm' was added
+	 */
+	PTR_TO_PACKET,
+	PTR_TO_PACKET_END,	 /* skb->data + headlen */
+};
+
 struct bpf_prog;
 
 struct bpf_verifier_ops {
@@ -120,7 +149,8 @@ struct bpf_verifier_ops {
 	/* return true if 'size' wide access at offset 'off' within bpf_context
 	 * with 'type' (read or write) is allowed
 	 */
-	bool (*is_valid_access)(int off, int size, enum bpf_access_type type);
+	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
+				enum bpf_reg_type *reg_type);
 
 	u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
 				  int src_reg, int ctx_off,
@@ -163,15 +193,28 @@ struct bpf_array {
 		void __percpu *pptrs[0] __aligned(8);
 	};
 };
+
 #define MAX_TAIL_CALL_CNT 32
 
+struct bpf_event_entry {
+	struct perf_event *event;
+	struct file *perf_file;
+	struct file *map_file;
+	struct rcu_head rcu;
+};
+
 u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
-void bpf_fd_array_map_clear(struct bpf_map *map);
+
 bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
 const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
-const struct bpf_func_proto *bpf_get_event_output_proto(void);
+
+typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
+					unsigned long off, unsigned long len);
+
+u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
+		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);
 
 #ifdef CONFIG_BPF_SYSCALL
 DECLARE_PER_CPU(int, bpf_prog_active);
@@ -180,9 +223,10 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl);
 void bpf_register_map_type(struct bpf_map_type_list *tl);
 
 struct bpf_prog *bpf_prog_get(u32 ufd);
+struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type);
+struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i);
 struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
 void bpf_prog_put(struct bpf_prog *prog);
-void bpf_prog_put_rcu(struct bpf_prog *prog);
 
 struct bpf_map *bpf_map_get_with_uref(u32 ufd);
 struct bpf_map *__bpf_map_get(struct fd f);
@@ -205,8 +249,13 @@ int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
 			   u64 flags);
 int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
 			    u64 flags);
+
 int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
+int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
+				 void *key, void *value, u64 map_flags);
+void bpf_fd_array_map_clear(struct bpf_map *map);
+
 /* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
  * forced to use 'long' read/writes to try to atomically copy long counters.
  * Best-effort only.  No barriers here, since it _will_ race with concurrent
@@ -235,6 +284,16 @@ static inline struct bpf_prog *bpf_prog_get(u32 ufd)
 	return ERR_PTR(-EOPNOTSUPP);
 }
 
+static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
+						 enum bpf_prog_type type)
+{
+	return ERR_PTR(-EOPNOTSUPP);
+}
+static inline struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
+{
+	return ERR_PTR(-EOPNOTSUPP);
+}
+
 static inline void bpf_prog_put(struct bpf_prog *prog)
 {
 }
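
For illustration only (not part of the commit): the extended map_fd_get_ptr()/map_fd_put_ptr() callbacks together with the new struct bpf_event_entry suggest roughly the following usage pattern for a perf event array. The function names, error handling and the fd-to-perf-event lookup below are assumptions made for this sketch, not the in-tree kernel/bpf/arraymap.c code.

#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

/* Sketch of a map_fd_get_ptr() callback: hold a reference on the perf event
 * fd and remember which map file the element was installed through, so that
 * map_release() can later flush entries that belong to that file.
 */
static void *example_fd_array_get_ptr(struct bpf_map *map,
				      struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct file *perf_file;

	perf_file = fget(fd);
	if (!perf_file)
		return ERR_PTR(-EBADF);
	/* a real implementation must also verify that 'perf_file' really is
	 * a perf event file before touching its private_data
	 */
	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (!ee) {
		fput(perf_file);
		return ERR_PTR(-ENOMEM);
	}

	ee->event = perf_file->private_data;
	ee->perf_file = perf_file;
	ee->map_file = map_file;
	return ee;
}

/* RCU callback: drop the perf fd reference only after a grace period, so
 * eBPF programs still dereferencing the entry cannot see a freed event.
 */
static void example_event_entry_free_rcu(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

/* Sketch of the matching map_fd_put_ptr() callback */
static void example_fd_array_put_ptr(void *ptr)
{
	struct bpf_event_entry *ee = ptr;

	call_rcu(&ee->rcu, example_event_entry_free_rcu);
}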
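
Likewise, the extra enum bpf_reg_type *reg_type argument to is_valid_access() lets a program type report what kind of value a context load produces. The hypothetical callback below is a sketch only, not the in-tree socket filter/tc code: it marks loads of skb->data and skb->data_end as packet pointers so the verifier can range-check subsequent packet accesses.

#include <linux/bpf.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <uapi/linux/bpf.h>

/* Sketch of an is_valid_access() callback using the new reg_type output:
 * reads of __sk_buff fields are 4 bytes wide, and loads of data/data_end
 * hand the verifier a PTR_TO_PACKET / PTR_TO_PACKET_END register.
 */
static bool example_is_valid_access(int off, int size,
				    enum bpf_access_type type,
				    enum bpf_reg_type *reg_type)
{
	if (type != BPF_READ)
		return false;
	if (off < 0 || off >= sizeof(struct __sk_buff))
		return false;
	if (size != sizeof(__u32))
		return false;

	switch (off) {
	case offsetof(struct __sk_buff, data):
		*reg_type = PTR_TO_PACKET;	/* register will hold skb->data */
		break;
	case offsetof(struct __sk_buff, data_end):
		*reg_type = PTR_TO_PACKET_END;	/* register will hold skb->data + headlen */
		break;
	}

	return true;
}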