Diffstat (limited to 'include/linux/skbuff.h')
-rw-r--r--  include/linux/skbuff.h | 109
1 file changed, 86 insertions, 23 deletions
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 1c2902eaebd3..29c3ea5b6e93 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -706,6 +706,13 @@ typedef unsigned int sk_buff_data_t;
 typedef unsigned char *sk_buff_data_t;
 #endif
 
+enum skb_tstamp_type {
+	SKB_CLOCK_REALTIME,
+	SKB_CLOCK_MONOTONIC,
+	SKB_CLOCK_TAI,
+	__SKB_CLOCK_MAX = SKB_CLOCK_TAI,
+};
+
 /**
  * DOC: Basic sk_buff geometry
  *
@@ -823,10 +830,8 @@ typedef unsigned char *sk_buff_data_t;
  *	@dst_pending_confirm: need to confirm neighbour
  *	@decrypted: Decrypted SKB
  *	@slow_gro: state present at GRO time, slower prepare step required
- *	@mono_delivery_time: When set, skb->tstamp has the
- *		delivery_time in mono clock base (i.e. EDT).  Otherwise, the
- *		skb->tstamp has the (rcv) timestamp at ingress and
- *		delivery_time at egress.
+ *	@tstamp_type: When set, skb->tstamp has the
+ *		delivery_time clock base of skb->tstamp.
  *	@napi_id: id of the NAPI struct this skb came from
  *	@sender_cpu: (aka @napi_id) source CPU in XPS
  *	@alloc_cpu: CPU which did the skb allocation.
@@ -954,7 +959,7 @@ struct sk_buff {
 	/* private: */
 	__u8			__mono_tc_offset[0];
 	/* public: */
-	__u8			mono_delivery_time:1;	/* See SKB_MONO_DELIVERY_TIME_MASK */
+	__u8			tstamp_type:2;	/* See skb_tstamp_type */
 #ifdef CONFIG_NET_XGRESS
 	__u8			tc_at_ingress:1;	/* See TC_AT_INGRESS_MASK */
 	__u8			tc_skip_classify:1;
@@ -1084,15 +1089,16 @@
 #endif
 #define PKT_TYPE_OFFSET		offsetof(struct sk_buff, __pkt_type_offset)
 
-/* if you move tc_at_ingress or mono_delivery_time
+/* if you move tc_at_ingress or tstamp_type
  * around, you also must adapt these constants.
  */
 #ifdef __BIG_ENDIAN_BITFIELD
-#define SKB_MONO_DELIVERY_TIME_MASK	(1 << 7)
-#define TC_AT_INGRESS_MASK		(1 << 6)
+#define SKB_TSTAMP_TYPE_MASK		(3 << 6)
+#define SKB_TSTAMP_TYPE_RSHIFT		(6)
+#define TC_AT_INGRESS_MASK		(1 << 5)
 #else
-#define SKB_MONO_DELIVERY_TIME_MASK	(1 << 0)
-#define TC_AT_INGRESS_MASK		(1 << 1)
+#define SKB_TSTAMP_TYPE_MASK		(3)
+#define TC_AT_INGRESS_MASK		(1 << 2)
 #endif
 
 #define SKB_BF_MONO_TC_OFFSET		offsetof(struct sk_buff, __mono_tc_offset)
@@ -1245,8 +1251,14 @@ static inline bool skb_data_unref(const struct sk_buff *skb,
 	return true;
 }
 
-void __fix_address
-kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason);
+void __fix_address sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb,
+				      enum skb_drop_reason reason);
+
+static inline void
+kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason)
+{
+	sk_skb_reason_drop(NULL, skb, reason);
+}
 
 /**
  *	kfree_skb - free an sk_buff with 'NOT_SPECIFIED' reason
@@ -1492,8 +1504,14 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
 	__skb_set_hash(skb, hash, true, is_l4);
 }
 
-void __skb_get_hash(struct sk_buff *skb);
-u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
+u32 __skb_get_hash_symmetric_net(const struct net *net, const struct sk_buff *skb);
+
+static inline u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
+{
+	return __skb_get_hash_symmetric_net(NULL, skb);
+}
+
+void __skb_get_hash_net(const struct net *net, struct sk_buff *skb);
 u32 skb_get_poff(const struct sk_buff *skb);
 u32 __skb_get_poff(const struct sk_buff *skb, const void *data,
 		   const struct flow_keys_basic *keys, int hlen);
@@ -1572,10 +1590,18 @@ void skb_flow_dissect_hash(const struct sk_buff *skb,
 			   struct flow_dissector *flow_dissector,
 			   void *target_container);
 
+static inline __u32 skb_get_hash_net(const struct net *net, struct sk_buff *skb)
+{
+	if (!skb->l4_hash && !skb->sw_hash)
+		__skb_get_hash_net(net, skb);
+
+	return skb->hash;
+}
+
 static inline __u32 skb_get_hash(struct sk_buff *skb)
 {
 	if (!skb->l4_hash && !skb->sw_hash)
-		__skb_get_hash(skb);
+		__skb_get_hash_net(NULL, skb);
 
 	return skb->hash;
 }
@@ -1677,6 +1703,9 @@ int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
 			    struct sk_buff *skb, struct iov_iter *from,
 			    size_t length);
 
+int zerocopy_fill_skb_from_iter(struct sk_buff *skb,
+				struct iov_iter *from, size_t length);
+
 static inline int skb_zerocopy_iter_dgram(struct sk_buff *skb,
 					  struct msghdr *msg, int len)
 {
@@ -3400,6 +3429,10 @@ static inline struct page *__dev_alloc_pages_noprof(gfp_t gfp_mask,
 }
 #define __dev_alloc_pages(...)	alloc_hooks(__dev_alloc_pages_noprof(__VA_ARGS__))
 
+/*
+ * This specialized allocator has to be a macro for its allocations to be
+ * accounted separately (to have a separate alloc_tag).
+ */
 #define dev_alloc_pages(_order) __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, _order)
 
 /**
@@ -3416,6 +3449,10 @@ static inline struct page *__dev_alloc_page_noprof(gfp_t gfp_mask)
 }
 #define __dev_alloc_page(...)	alloc_hooks(__dev_alloc_page_noprof(__VA_ARGS__))
 
+/*
+ * This specialized allocator has to be a macro for its allocations to be
+ * accounted separately (to have a separate alloc_tag).
+ */
 #define dev_alloc_page()	dev_alloc_pages(0)
 
 /**
@@ -4179,7 +4216,7 @@ static inline void skb_get_new_timestampns(const struct sk_buff *skb,
 static inline void __net_timestamp(struct sk_buff *skb)
 {
 	skb->tstamp = ktime_get_real();
-	skb->mono_delivery_time = 0;
+	skb->tstamp_type = SKB_CLOCK_REALTIME;
 }
 
 static inline ktime_t net_timedelta(ktime_t t)
@@ -4188,10 +4225,36 @@ static inline ktime_t net_timedelta(ktime_t t)
 }
 
 static inline void skb_set_delivery_time(struct sk_buff *skb, ktime_t kt,
-					 bool mono)
+					 u8 tstamp_type)
 {
 	skb->tstamp = kt;
-	skb->mono_delivery_time = kt && mono;
+
+	if (kt)
+		skb->tstamp_type = tstamp_type;
+	else
+		skb->tstamp_type = SKB_CLOCK_REALTIME;
+}
+
+static inline void skb_set_delivery_type_by_clockid(struct sk_buff *skb,
+						    ktime_t kt, clockid_t clockid)
+{
+	u8 tstamp_type = SKB_CLOCK_REALTIME;
+
+	switch (clockid) {
+	case CLOCK_REALTIME:
+		break;
+	case CLOCK_MONOTONIC:
+		tstamp_type = SKB_CLOCK_MONOTONIC;
+		break;
+	case CLOCK_TAI:
+		tstamp_type = SKB_CLOCK_TAI;
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		kt = 0;
+	}
+
+	skb_set_delivery_time(skb, kt, tstamp_type);
 }
 
 DECLARE_STATIC_KEY_FALSE(netstamp_needed_key);
@@ -4201,8 +4264,8 @@ DECLARE_STATIC_KEY_FALSE(netstamp_needed_key);
  */
 static inline void skb_clear_delivery_time(struct sk_buff *skb)
 {
-	if (skb->mono_delivery_time) {
-		skb->mono_delivery_time = 0;
+	if (skb->tstamp_type) {
+		skb->tstamp_type = SKB_CLOCK_REALTIME;
 		if (static_branch_unlikely(&netstamp_needed_key))
 			skb->tstamp = ktime_get_real();
 		else
@@ -4212,7 +4275,7 @@ static inline void skb_clear_delivery_time(struct sk_buff *skb)
 
 static inline void skb_clear_tstamp(struct sk_buff *skb)
 {
-	if (skb->mono_delivery_time)
+	if (skb->tstamp_type)
 		return;
 
 	skb->tstamp = 0;
@@ -4220,7 +4283,7 @@
 
 static inline ktime_t skb_tstamp(const struct sk_buff *skb)
 {
-	if (skb->mono_delivery_time)
+	if (skb->tstamp_type)
 		return 0;
 
 	return skb->tstamp;
@@ -4228,7 +4291,7 @@
 
 static inline ktime_t skb_tstamp_cond(const struct sk_buff *skb, bool cond)
 {
-	if (!skb->mono_delivery_time && skb->tstamp)
+	if (skb->tstamp_type != SKB_CLOCK_MONOTONIC && skb->tstamp)
 		return skb->tstamp;
 
 	if (static_branch_unlikely(&netstamp_needed_key) || cond)
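
The bitfield change above is easiest to see in isolation. The sketch below is illustrative only, not kernel code: a userspace mock, assuming a little-endian host and GCC-style bit allocation, where a made-up struct mock_skb stands in for the byte at SKB_BF_MONO_TC_OFFSET. It reuses the SKB_TSTAMP_TYPE_MASK and TC_AT_INGRESS_MASK values from the patch to read the 2-bit tstamp_type and the relocated tc_at_ingress bit back out of the raw byte, and the clockid mapping loosely mirrors skb_set_delivery_type_by_clockid(); the struct and helper names here are hypothetical.

/*
 * tstamp_demo.c - illustrative only, not kernel code.
 * Assumes a little-endian host; anonymous structs/unions need a
 * reasonably recent GCC or Clang.
 * Build: gcc -o tstamp_demo tstamp_demo.c
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Mirrored from the patch above. */
enum skb_tstamp_type {
	SKB_CLOCK_REALTIME,
	SKB_CLOCK_MONOTONIC,
	SKB_CLOCK_TAI,
};

#define SKB_TSTAMP_TYPE_MASK	(3)		/* little-endian layout */
#define TC_AT_INGRESS_MASK	(1 << 2)

/* Hypothetical stand-in for the byte at SKB_BF_MONO_TC_OFFSET. */
struct mock_skb {
	union {
		uint8_t mono_tc_byte;
		struct {
			uint8_t tstamp_type:2;	/* see enum skb_tstamp_type */
			uint8_t tc_at_ingress:1;
		};
	};
};

/* Clockid mapping in the spirit of skb_set_delivery_type_by_clockid(). */
static uint8_t tstamp_type_from_clockid(clockid_t clockid)
{
	switch (clockid) {
	case CLOCK_MONOTONIC:
		return SKB_CLOCK_MONOTONIC;
	case CLOCK_REALTIME:
	default:
		return SKB_CLOCK_REALTIME;
	}
}

int main(void)
{
	struct mock_skb skb = { .mono_tc_byte = 0 };

	skb.tstamp_type = tstamp_type_from_clockid(CLOCK_MONOTONIC);
	skb.tc_at_ingress = 1;

	/* Read both flags back through the raw byte, the way the mask
	 * constants are intended to be used.
	 * Prints: tstamp_type=1 tc_at_ingress=1
	 */
	printf("tstamp_type=%d tc_at_ingress=%d\n",
	       skb.mono_tc_byte & SKB_TSTAMP_TYPE_MASK,
	       !!(skb.mono_tc_byte & TC_AT_INGRESS_MASK));
	return 0;
}

Widening the field from mono_delivery_time:1 to tstamp_type:2 is also why TC_AT_INGRESS_MASK moves from (1 << 1) to (1 << 2) in the little-endian branch and from (1 << 6) to (1 << 5) on big-endian: tc_at_ingress now sits one bit further along in the same byte.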