Diffstat (limited to 'include/linux/skbuff.h')
-rw-r--r--	include/linux/skbuff.h	132
1 file changed, 85 insertions, 47 deletions
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ff7ad331fb82..738776ab8838 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -37,7 +37,7 @@
 #include <linux/netfilter/nf_conntrack_common.h>
 #endif
 #include <net/net_debug.h>
-#include <net/dropreason.h>
+#include <net/dropreason-core.h>
 
 /**
  * DOC: skb checksums
@@ -294,6 +294,7 @@ struct nf_bridge_info {
 	u8			pkt_otherhost:1;
 	u8			in_prerouting:1;
 	u8			bridged_dnat:1;
+	u8			sabotage_in_done:1;
 	__u16			frag_max_size;
 	struct net_device	*physindev;
 
@@ -345,18 +346,12 @@ struct sk_buff_head {
 
 struct sk_buff;
 
-/* To allow 64K frame to be packed as single skb without frag_list we
- * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
- * buffers which do not start on a page boundary.
- *
- * Since GRO uses frags we allocate at least 16 regardless of page
- * size.
- */
-#if (65536/PAGE_SIZE + 1) < 16
-#define MAX_SKB_FRAGS 16UL
-#else
-#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
+#ifndef CONFIG_MAX_SKB_FRAGS
+# define CONFIG_MAX_SKB_FRAGS 17
 #endif
+
+#define MAX_SKB_FRAGS CONFIG_MAX_SKB_FRAGS
+
 extern int sysctl_max_skb_frags;
 
 /* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
@@ -810,7 +805,6 @@ typedef unsigned char *sk_buff_data_t;
  *	@csum_level: indicates the number of consecutive checksums found in
  *		the packet minus one that have been verified as
  *		CHECKSUM_UNNECESSARY (max 3)
- *	@scm_io_uring: SKB holds io_uring registered files
  *	@dst_pending_confirm: need to confirm neighbour
  *	@decrypted: Decrypted SKB
  *	@slow_gro: state present at GRO time, slower prepare step required
@@ -941,38 +935,44 @@ struct sk_buff {
 	/* public: */
 	__u8			pkt_type:3; /* see PKT_TYPE_MAX */
 	__u8			ignore_df:1;
-	__u8			nf_trace:1;
+	__u8			dst_pending_confirm:1;
 	__u8			ip_summed:2;
 	__u8			ooo_okay:1;
 
+	/* private: */
+	__u8			__mono_tc_offset[0];
+	/* public: */
+	__u8			mono_delivery_time:1;	/* See SKB_MONO_DELIVERY_TIME_MASK */
+#ifdef CONFIG_NET_CLS_ACT
+	__u8			tc_at_ingress:1;	/* See TC_AT_INGRESS_MASK */
+	__u8			tc_skip_classify:1;
+#endif
+	__u8			remcsum_offload:1;
+	__u8			csum_complete_sw:1;
+	__u8			csum_level:2;
+	__u8			inner_protocol_type:1;
+
 	__u8			l4_hash:1;
 	__u8			sw_hash:1;
+#ifdef CONFIG_WIRELESS
 	__u8			wifi_acked_valid:1;
 	__u8			wifi_acked:1;
+#endif
 	__u8			no_fcs:1;
 	/* Indicates the inner headers are valid in the skbuff. */
 	__u8			encapsulation:1;
 	__u8			encap_hdr_csum:1;
 	__u8			csum_valid:1;
-
-	/* private: */
-	__u8			__pkt_vlan_present_offset[0];
-	/* public: */
-	__u8			remcsum_offload:1;
-	__u8			csum_complete_sw:1;
-	__u8			csum_level:2;
-	__u8			dst_pending_confirm:1;
-	__u8			mono_delivery_time:1;	/* See SKB_MONO_DELIVERY_TIME_MASK */
-#ifdef CONFIG_NET_CLS_ACT
-	__u8			tc_skip_classify:1;
-	__u8			tc_at_ingress:1;	/* See TC_AT_INGRESS_MASK */
-#endif
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
 	__u8			ndisc_nodetype:2;
 #endif
 
+#if IS_ENABLED(CONFIG_IP_VS)
 	__u8			ipvs_property:1;
-	__u8			inner_protocol_type:1;
+#endif
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES)
+	__u8			nf_trace:1;
+#endif
 #ifdef CONFIG_NET_SWITCHDEV
 	__u8			offload_fwd_mark:1;
 	__u8			offload_l3_fwd_mark:1;
@@ -988,13 +988,16 @@ struct sk_buff {
 	__u8			decrypted:1;
 #endif
 	__u8			slow_gro:1;
+#if IS_ENABLED(CONFIG_IP_SCTP)
 	__u8			csum_not_inet:1;
-	__u8			scm_io_uring:1;
+#endif
 
 #ifdef CONFIG_NET_SCHED
 	__u16			tc_index;	/* traffic control index */
 #endif
 
+	u16			alloc_cpu;
+
 	union {
 		__wsum		csum;
 		struct {
@@ -1018,7 +1021,6 @@
 		unsigned int	sender_cpu;
 	};
 #endif
-	u16			alloc_cpu;
 #ifdef CONFIG_NETWORK_SECMARK
 	__u32		secmark;
 #endif
@@ -1074,13 +1076,13 @@
  * around, you also must adapt these constants.
  */
 #ifdef __BIG_ENDIAN_BITFIELD
-#define TC_AT_INGRESS_MASK		(1 << 0)
-#define SKB_MONO_DELIVERY_TIME_MASK	(1 << 2)
+#define SKB_MONO_DELIVERY_TIME_MASK	(1 << 7)
+#define TC_AT_INGRESS_MASK		(1 << 6)
 #else
-#define TC_AT_INGRESS_MASK		(1 << 7)
-#define SKB_MONO_DELIVERY_TIME_MASK	(1 << 5)
+#define SKB_MONO_DELIVERY_TIME_MASK	(1 << 0)
+#define TC_AT_INGRESS_MASK		(1 << 1)
 #endif
-#define PKT_VLAN_PRESENT_OFFSET	offsetof(struct sk_buff, __pkt_vlan_present_offset)
+#define SKB_BF_MONO_TC_OFFSET		offsetof(struct sk_buff, __mono_tc_offset)
 
 #ifdef __KERNEL__
 /*
@@ -1195,6 +1197,15 @@ static inline unsigned int skb_napi_id(const struct sk_buff *skb)
 #endif
 }
 
+static inline bool skb_wifi_acked_valid(const struct sk_buff *skb)
+{
+#ifdef CONFIG_WIRELESS
+	return skb->wifi_acked_valid;
+#else
+	return 0;
+#endif
+}
+
 /**
  * skb_unref - decrement the skb's reference count
  * @skb: buffer
@@ -3242,7 +3253,7 @@ static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
 void napi_consume_skb(struct sk_buff *skb, int budget);
 
 void napi_skb_free_stolen_head(struct sk_buff *skb);
-void __kfree_skb_defer(struct sk_buff *skb);
+void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason);
 
 /**
  * __dev_alloc_pages - allocate page for network Rx
@@ -3394,6 +3405,18 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f)
 	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
 }
 
+static inline void
+napi_frag_unref(skb_frag_t *frag, bool recycle, bool napi_safe)
+{
+	struct page *page = skb_frag_page(frag);
+
+#ifdef CONFIG_PAGE_POOL
+	if (recycle && page_pool_return_skb_page(page, napi_safe))
+		return;
+#endif
+	put_page(page);
+}
+
 /**
  * __skb_frag_unref - release a reference on a paged fragment.
  * @frag: the paged fragment
@@ -3404,13 +3427,7 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f)
  */
 static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle)
 {
-	struct page *page = skb_frag_page(frag);
-
-#ifdef CONFIG_PAGE_POOL
-	if (recycle && page_pool_return_skb_page(page))
-		return;
-#endif
-	put_page(page);
+	napi_frag_unref(frag, recycle, false);
 }
 
 /**
@@ -4712,7 +4729,7 @@ static inline void nf_reset_ct(struct sk_buff *skb)
 
 static inline void nf_reset_trace(struct sk_buff *skb)
 {
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES)
 	skb->nf_trace = 0;
 #endif
 }
@@ -4732,7 +4749,7 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
 	dst->_nfct = src->_nfct;
 	nf_conntrack_get(skb_nfct(src));
 #endif
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES)
 	if (copy)
 		dst->nf_trace = src->nf_trace;
 #endif
@@ -5049,9 +5066,30 @@ static inline void skb_reset_redirect(struct sk_buff *skb)
 	skb->redirected = 0;
 }
 
+static inline void skb_set_redirected_noclear(struct sk_buff *skb,
+					      bool from_ingress)
+{
+	skb->redirected = 1;
+#ifdef CONFIG_NET_REDIRECT
+	skb->from_ingress = from_ingress;
+#endif
+}
+
 static inline bool skb_csum_is_sctp(struct sk_buff *skb)
 {
+#if IS_ENABLED(CONFIG_IP_SCTP)
 	return skb->csum_not_inet;
+#else
+	return 0;
+#endif
+}
+
+static inline void skb_reset_csum_not_inet(struct sk_buff *skb)
+{
+	skb->ip_summed = CHECKSUM_NONE;
+#if IS_ENABLED(CONFIG_IP_SCTP)
+	skb->csum_not_inet = 0;
+#endif
 }
 
 static inline void skb_set_kcov_handle(struct sk_buff *skb,
@@ -5071,12 +5109,12 @@ static inline u64 skb_get_kcov_handle(struct sk_buff *skb)
 #endif
 }
 
-#ifdef CONFIG_PAGE_POOL
 static inline void skb_mark_for_recycle(struct sk_buff *skb)
 {
+#ifdef CONFIG_PAGE_POOL
 	skb->pp_recycle = 1;
-}
 #endif
+}
 
 #endif	/* __KERNEL__ */
 #endif	/* _LINUX_SKBUFF_H */
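A minimal sketch of what the new bitfield layout enables (illustration only, not part of this commit): mono_delivery_time and tc_at_ingress now sit at the start of the byte marked by __mono_tc_offset, so code that cannot name the bitfields directly can reach them through SKB_BF_MONO_TC_OFFSET plus the masks defined in the hunk at line 1074. The helper name below is hypothetical.

/* Illustration only, not from this patch: test the delivery-time bit
 * through the offset marker instead of naming the bitfield. The masks
 * are defined per bit order, so this works on both endiannesses.
 */
static inline bool example_skb_tstamp_is_mono(const struct sk_buff *skb)
{
	const u8 *bf = (const u8 *)skb + SKB_BF_MONO_TC_OFFSET;

	return *bf & SKB_MONO_DELIVERY_TIME_MASK;
}

This is also why both mask values change in that hunk: the two flags moved from the middle of the old __pkt_vlan_present_offset byte to the front of the new __mono_tc_offset byte, so the bit positions (and their big-endian mirrors) had to follow.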