| author | Mark Brown <[email protected]> | 2015-10-12 18:09:27 +0100 |
|---|---|---|
| committer | Mark Brown <[email protected]> | 2015-10-12 18:09:27 +0100 |
| commit | 79828b4fa835f73cdaf4bffa48696abdcbea9d02 | |
| tree | 5e0fa7156acb75ba603022bc807df8f2fedb97a8 /include/linux/skbuff.h | |
| parent | 721b51fcf91898299d96f4b72cb9434cda29dce6 | |
| parent | 8c1a9d6323abf0fb1e5dad96cf3f1c783505ea5a | |
Merge remote-tracking branch 'asoc/fix/rt5645' into asoc-fix-rt5645
Diffstat (limited to 'include/linux/skbuff.h')
-rw-r--r--	include/linux/skbuff.h | 173
1 file changed, 135 insertions, 38 deletions
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index d6cdd6e87d53..2738d355cdf9 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -37,6 +37,7 @@
 #include <net/flow_dissector.h>
 #include <linux/splice.h>
 #include <linux/in6.h>
+#include <net/flow.h>
 
 /* A. Checksumming of received packets by device.
  *
@@ -173,17 +174,24 @@ struct nf_bridge_info {
 		BRNF_PROTO_8021Q,
 		BRNF_PROTO_PPPOE
 	} orig_proto:8;
-	bool			pkt_otherhost;
+	u8			pkt_otherhost:1;
+	u8			in_prerouting:1;
+	u8			bridged_dnat:1;
 	__u16			frag_max_size;
-	unsigned int		mask;
 	struct net_device	*physindev;
 	union {
-		struct net_device *physoutdev;
-		char neigh_header[8];
-	};
-	union {
+		/* prerouting: detect dnat in orig/reply direction */
 		__be32          ipv4_daddr;
 		struct in6_addr ipv6_daddr;
+
+		/* after prerouting + nat detected: store original source
+		 * mac since neigh resolution overwrites it, only used while
+		 * skb is out in neigh layer.
+		 */
+		char neigh_header[8];
+
+		/* always valid & non-NULL from FORWARD on, for physdev match */
+		struct net_device *physoutdev;
 	};
 };
 #endif
@@ -506,6 +514,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
  *	@no_fcs:  Request NIC to treat last 4 bytes as Ethernet FCS
  *	@napi_id: id of the NAPI struct this skb came from
  *	@secmark: security marking
+ *	@offload_fwd_mark: fwding offload mark
  *	@mark: Generic packet mark
  *	@vlan_proto: vlan encapsulation protocol
  *	@vlan_tci: vlan tag control information
@@ -650,9 +659,15 @@ struct sk_buff {
 		unsigned int	sender_cpu;
 	};
 #endif
+	union {
 #ifdef CONFIG_NETWORK_SECMARK
-	__u32			secmark;
+		__u32		secmark;
+#endif
+#ifdef CONFIG_NET_SWITCHDEV
+		__u32		offload_fwd_mark;
 #endif
+	};
+
 	union {
 		__u32		mark;
 		__u32		reserved_tailroom;
@@ -922,14 +937,90 @@ enum pkt_hash_types {
 	PKT_HASH_TYPE_L4,	/* Input: src_IP, dst_IP, src_port, dst_port */
 };
 
-static inline void
-skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
+static inline void skb_clear_hash(struct sk_buff *skb)
 {
-	skb->l4_hash = (type == PKT_HASH_TYPE_L4);
+	skb->hash = 0;
 	skb->sw_hash = 0;
+	skb->l4_hash = 0;
+}
+
+static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
+{
+	if (!skb->l4_hash)
+		skb_clear_hash(skb);
+}
+
+static inline void
+__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
+{
+	skb->l4_hash = is_l4;
+	skb->sw_hash = is_sw;
 	skb->hash = hash;
 }
 
+static inline void
+skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
+{
+	/* Used by drivers to set hash from HW */
+	__skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
+}
+
+static inline void
+__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
+{
+	__skb_set_hash(skb, hash, true, is_l4);
+}
+
+void __skb_get_hash(struct sk_buff *skb);
+u32 skb_get_poff(const struct sk_buff *skb);
+u32 __skb_get_poff(const struct sk_buff *skb, void *data,
+		   const struct flow_keys *keys, int hlen);
+__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
+			    void *data, int hlen_proto);
+
+static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
+					int thoff, u8 ip_proto)
+{
+	return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
+}
+
+void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
+			     const struct flow_dissector_key *key,
+			     unsigned int key_count);
+
+bool __skb_flow_dissect(const struct sk_buff *skb,
+			struct flow_dissector *flow_dissector,
+			void *target_container,
+			void *data, __be16 proto, int nhoff, int hlen,
+			unsigned int flags);
+
+static inline bool skb_flow_dissect(const struct sk_buff *skb,
+				    struct flow_dissector *flow_dissector,
+				    void *target_container, unsigned int flags)
+{
+	return __skb_flow_dissect(skb, flow_dissector, target_container,
+				  NULL, 0, 0, 0, flags);
+}
+
+static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
+					      struct flow_keys *flow,
+					      unsigned int flags)
+{
+	memset(flow, 0, sizeof(*flow));
+	return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
+				  NULL, 0, 0, 0, flags);
+}
+
+static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
+						  void *data, __be16 proto,
+						  int nhoff, int hlen,
+						  unsigned int flags)
+{
+	memset(flow, 0, sizeof(*flow));
+	return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
+				  data, proto, nhoff, hlen, flags);
+}
+
 static inline __u32 skb_get_hash(struct sk_buff *skb)
 {
 	if (!skb->l4_hash && !skb->sw_hash)
@@ -938,24 +1029,39 @@ static inline __u32 skb_get_hash(struct sk_buff *skb)
 	return skb->hash;
 }
 
-__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
+__u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6);
 
-static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
+static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
 {
+	if (!skb->l4_hash && !skb->sw_hash) {
+		struct flow_keys keys;
+		__u32 hash = __get_hash_from_flowi6(fl6, &keys);
+
+		__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
+	}
+
 	return skb->hash;
 }
 
-static inline void skb_clear_hash(struct sk_buff *skb)
+__u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl);
+
+static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4)
 {
-	skb->hash = 0;
-	skb->sw_hash = 0;
-	skb->l4_hash = 0;
+	if (!skb->l4_hash && !skb->sw_hash) {
+		struct flow_keys keys;
+		__u32 hash = __get_hash_from_flowi4(fl4, &keys);
+
+		__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
+	}
+
+	return skb->hash;
 }
 
-static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
+__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
+
+static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
 {
-	if (!skb->l4_hash)
-		skb_clear_hash(skb);
+	return skb->hash;
 }
 
 static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
@@ -1602,20 +1708,16 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
 	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 	/*
-	 * Propagate page->pfmemalloc to the skb if we can. The problem is
-	 * that not all callers have unique ownership of the page. If
-	 * pfmemalloc is set, we check the mapping as a mapping implies
-	 * page->index is set (index and pfmemalloc share space).
-	 * If it's a valid mapping, we cannot use page->pfmemalloc but we
-	 * do not lose pfmemalloc information as the pages would not be
-	 * allocated using __GFP_MEMALLOC.
+	 * Propagate page pfmemalloc to the skb if we can. The problem is
+	 * that not all callers have unique ownership of the page but rely
+	 * on page_is_pfmemalloc doing the right thing(tm).
 	 */
 	frag->page.p		  = page;
 	frag->page_offset	  = off;
 	skb_frag_size_set(frag, size);
 
 	page = compound_head(page);
-	if (page->pfmemalloc && !page->mapping)
+	if (page_is_pfmemalloc(page))
 		skb->pfmemalloc	= true;
 }
 
@@ -1947,7 +2049,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
 
 	if (skb_transport_header_was_set(skb))
 		return;
-	else if (skb_flow_dissect_flow_keys(skb, &keys))
+	else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
 		skb_set_transport_header(skb, keys.control.thoff);
 	else
 		skb_set_transport_header(skb, offset_hint);
@@ -2263,7 +2365,7 @@ static inline struct page *dev_alloc_page(void)
 static inline void skb_propagate_pfmemalloc(struct page *page,
 					     struct sk_buff *skb)
 {
-	if (page && page->pfmemalloc)
+	if (page_is_pfmemalloc(page))
 		skb->pfmemalloc = true;
 }
 
@@ -2671,12 +2773,6 @@ static inline void skb_frag_list_init(struct sk_buff *skb)
 	skb_shinfo(skb)->frag_list = NULL;
 }
 
-static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
-{
-	frag->next = skb_shinfo(skb)->frag_list;
-	skb_shinfo(skb)->frag_list = frag;
-}
-
 #define skb_walk_frags(skb, iter)	\
 	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
 
@@ -2884,11 +2980,11 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
  *
  * PHY drivers may accept clones of transmitted packets for
  * timestamping via their phy_driver.txtstamp method. These drivers
- * must call this function to return the skb back to the stack, with
- * or without a timestamp.
+ * must call this function to return the skb back to the stack with a
+ * timestamp.
  *
  * @skb: clone of the the original outgoing packet
- * @hwtstamps: hardware time stamps, may be NULL if not available
+ * @hwtstamps: hardware time stamps
 *
  */
 void skb_complete_tx_timestamp(struct sk_buff *skb,
@@ -3468,5 +3564,6 @@ static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
 			       skb_network_header(skb);
 	return hdr_len + skb_gso_transport_seglen(skb);
 }
+
 #endif	/* __KERNEL__ */
 #endif	/* _LINUX_SKBUFF_H */
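Usage note (not part of the commit): the sketch below shows how the reworked helpers in the diff are typically exercised, assuming a hypothetical driver context: recording a hardware-computed hash with skb_set_hash(), locating the transport offset through the flow dissector's new flags argument, and propagating page pfmemalloc state via the page_is_pfmemalloc()-based helper. The example_* function names and their parameters (hw_hash, l4_valid, offset_hint) are hypothetical.

/* Illustrative sketch only; not from this commit. */
#include <linux/skbuff.h>
#include <net/flow_dissector.h>

/* Driver RX path: record the RSS hash reported by hardware. skb_set_hash()
 * now funnels through __skb_set_hash() with is_sw == false.
 */
static void example_rx_record_hash(struct sk_buff *skb, u32 hw_hash,
				   bool l4_valid)
{
	skb_set_hash(skb, hw_hash,
		     l4_valid ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}

/* Software path: dissect the packet to find the transport offset, mirroring
 * skb_probe_transport_header() in the diff; the new third argument carries
 * dissection flags, and 0 keeps the previous behaviour.
 */
static void example_probe_thoff(struct sk_buff *skb, int offset_hint)
{
	struct flow_keys keys;

	if (skb_flow_dissect_flow_keys(skb, &keys, 0))
		skb_set_transport_header(skb, keys.control.thoff);
	else
		skb_set_transport_header(skb, offset_hint);
}

/* RX buffer setup: pfmemalloc state is now derived from
 * page_is_pfmemalloc() (via skb_propagate_pfmemalloc()) rather than by
 * peeking at page->pfmemalloc directly.
 */
static struct page *example_alloc_rx_page(struct sk_buff *skb)
{
	struct page *page = dev_alloc_page();

	if (page)
		skb_propagate_pfmemalloc(page, skb);
	return page;
}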