| field | value | date |
|---|---|---|
| author | Mark Brown <[email protected]> | 2015-10-12 18:09:27 +0100 |
| committer | Mark Brown <[email protected]> | 2015-10-12 18:09:27 +0100 |
| commit | 79828b4fa835f73cdaf4bffa48696abdcbea9d02 | |
| tree | 5e0fa7156acb75ba603022bc807df8f2fedb97a8 /net/openvswitch/actions.c | |
| parent | 721b51fcf91898299d96f4b72cb9434cda29dce6 | |
| parent | 8c1a9d6323abf0fb1e5dad96cf3f1c783505ea5a | |
Merge remote-tracking branch 'asoc/fix/rt5645' into asoc-fix-rt5645
Diffstat (limited to 'net/openvswitch/actions.c')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | net/openvswitch/actions.c | 276 |
1 file changed, 228 insertions, 48 deletions
```diff
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 8a8c0b8b4f63..315f5330b6e5 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -22,6 +22,7 @@
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/openvswitch.h>
+#include <linux/netfilter_ipv6.h>
 #include <linux/sctp.h>
 #include <linux/tcp.h>
 #include <linux/udp.h>
@@ -29,8 +30,10 @@
 #include <linux/if_arp.h>
 #include <linux/if_vlan.h>
 
+#include <net/dst.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
+#include <net/ip6_fib.h>
 #include <net/checksum.h>
 #include <net/dsfield.h>
 #include <net/mpls.h>
@@ -38,6 +41,7 @@
 
 #include "datapath.h"
 #include "flow.h"
+#include "conntrack.h"
 #include "vport.h"
 
 static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
@@ -52,6 +56,20 @@ struct deferred_action {
 	struct sw_flow_key pkt_key;
 };
 
+#define MAX_L2_LEN	(VLAN_ETH_HLEN + 3 * MPLS_HLEN)
+struct ovs_frag_data {
+	unsigned long dst;
+	struct vport *vport;
+	struct ovs_skb_cb cb;
+	__be16 inner_protocol;
+	__u16 vlan_tci;
+	__be16 vlan_proto;
+	unsigned int l2_len;
+	u8 l2_data[MAX_L2_LEN];
+};
+
+static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);
+
 #define DEFERRED_ACTION_FIFO_SIZE 10
 struct action_fifo {
 	int head;
@@ -185,10 +203,6 @@ static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
 	return 0;
 }
 
-/* 'KEY' must not have any bits set outside of the 'MASK' */
-#define MASKED(OLD, KEY, MASK) ((KEY) | ((OLD) & ~(MASK)))
-#define SET_MASKED(OLD, KEY, MASK) ((OLD) = MASKED(OLD, KEY, MASK))
-
 static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
 		    const __be32 *mpls_lse, const __be32 *mask)
 {
@@ -201,7 +215,7 @@ static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
 		return err;
 
 	stack = (__be32 *)skb_mpls_header(skb);
-	lse = MASKED(*stack, *mpls_lse, *mask);
+	lse = OVS_MASKED(*stack, *mpls_lse, *mask);
 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
 		__be32 diff[] = { ~(*stack), lse };
 
@@ -244,9 +258,9 @@ static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
 	const u16 *src = (const u16 *)src_;
 	const u16 *mask = (const u16 *)mask_;
 
-	SET_MASKED(dst[0], src[0], mask[0]);
-	SET_MASKED(dst[1], src[1], mask[1]);
-	SET_MASKED(dst[2], src[2], mask[2]);
+	OVS_SET_MASKED(dst[0], src[0], mask[0]);
+	OVS_SET_MASKED(dst[1], src[1], mask[1]);
+	OVS_SET_MASKED(dst[2], src[2], mask[2]);
 }
 
 static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
@@ -273,28 +287,36 @@ static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
 	return 0;
 }
 
-static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
-			__be32 *addr, __be32 new_addr)
+static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
+				  __be32 addr, __be32 new_addr)
 {
 	int transport_len = skb->len - skb_transport_offset(skb);
 
+	if (nh->frag_off & htons(IP_OFFSET))
+		return;
+
 	if (nh->protocol == IPPROTO_TCP) {
 		if (likely(transport_len >= sizeof(struct tcphdr)))
 			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
-						 *addr, new_addr, 1);
+						 addr, new_addr, true);
 	} else if (nh->protocol == IPPROTO_UDP) {
 		if (likely(transport_len >= sizeof(struct udphdr))) {
 			struct udphdr *uh = udp_hdr(skb);
 
 			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
 				inet_proto_csum_replace4(&uh->check, skb,
-							 *addr, new_addr, 1);
+							 addr, new_addr, true);
 				if (!uh->check)
 					uh->check = CSUM_MANGLED_0;
 			}
 		}
 	}
+}
 
+static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
+			__be32 *addr, __be32 new_addr)
+{
+	update_ip_l4_checksum(skb, nh, *addr, new_addr);
 	csum_replace4(&nh->check, *addr, new_addr);
 	skb_clear_hash(skb);
 	*addr = new_addr;
@@ -308,14 +330,14 @@ static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
 	if (l4_proto == NEXTHDR_TCP) {
 		if (likely(transport_len >= sizeof(struct tcphdr)))
 			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
-						  addr, new_addr, 1);
+						  addr, new_addr, true);
 	} else if (l4_proto == NEXTHDR_UDP) {
 		if (likely(transport_len >= sizeof(struct udphdr))) {
 			struct udphdr *uh = udp_hdr(skb);
 
 			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
 				inet_proto_csum_replace16(&uh->check, skb,
-							  addr, new_addr, 1);
+							  addr, new_addr, true);
 				if (!uh->check)
 					uh->check = CSUM_MANGLED_0;
 			}
@@ -323,17 +345,17 @@
 	} else if (l4_proto == NEXTHDR_ICMP) {
 		if (likely(transport_len >= sizeof(struct icmp6hdr)))
 			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
-						  skb, addr, new_addr, 1);
+						  skb, addr, new_addr, true);
 	}
 }
 
 static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
 			   const __be32 mask[4], __be32 masked[4])
 {
-	masked[0] = MASKED(old[0], addr[0], mask[0]);
-	masked[1] = MASKED(old[1], addr[1], mask[1]);
-	masked[2] = MASKED(old[2], addr[2], mask[2]);
-	masked[3] = MASKED(old[3], addr[3], mask[3]);
+	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
+	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
+	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
+	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
 }
 
 static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
@@ -350,15 +372,15 @@ static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
 static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
 {
 	/* Bits 21-24 are always unmasked, so this retains their values. */
-	SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
-	SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
-	SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
+	OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
+	OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
+	OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
 }
 
 static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
 		       u8 mask)
 {
-	new_ttl = MASKED(nh->ttl, new_ttl, mask);
+	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);
 
 	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
 	nh->ttl = new_ttl;
@@ -384,7 +406,7 @@ static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
 	 * makes sense to check if the value actually changed.
 	 */
 	if (mask->ipv4_src) {
-		new_addr = MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);
+		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);
 
 		if (unlikely(new_addr != nh->saddr)) {
 			set_ip_addr(skb, nh, &nh->saddr, new_addr);
@@ -392,7 +414,7 @@ static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
 		}
 	}
 	if (mask->ipv4_dst) {
-		new_addr = MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);
+		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);
 
 		if (unlikely(new_addr != nh->daddr)) {
 			set_ip_addr(skb, nh, &nh->daddr, new_addr);
@@ -480,7 +502,8 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
 		    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
 	}
 	if (mask->ipv6_hlimit) {
-		SET_MASKED(nh->hop_limit, key->ipv6_hlimit, mask->ipv6_hlimit);
+		OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
+			       mask->ipv6_hlimit);
 		flow_key->ip.ttl = nh->hop_limit;
 	}
 	return 0;
@@ -490,7 +513,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
 static void set_tp_port(struct sk_buff *skb, __be16 *port,
 			__be16 new_port, __sum16 *check)
 {
-	inet_proto_csum_replace2(check, skb, *port, new_port, 0);
+	inet_proto_csum_replace2(check, skb, *port, new_port, false);
 	*port = new_port;
 }
 
@@ -509,8 +532,8 @@ static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
 
 	uh = udp_hdr(skb);
 	/* Either of the masks is non-zero, so do not bother checking them. */
-	src = MASKED(uh->source, key->udp_src, mask->udp_src);
-	dst = MASKED(uh->dest, key->udp_dst, mask->udp_dst);
+	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
+	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);
 
 	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
 		if (likely(src != uh->source)) {
@@ -550,12 +573,12 @@ static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
 		return err;
 
 	th = tcp_hdr(skb);
-	src = MASKED(th->source, key->tcp_src, mask->tcp_src);
+	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
 	if (likely(src != th->source)) {
 		set_tp_port(skb, &th->source, src, &th->check);
 		flow_key->tp.src = src;
 	}
-	dst = MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
+	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
 	if (likely(dst != th->dest)) {
 		set_tp_port(skb, &th->dest, dst, &th->check);
 		flow_key->tp.dst = dst;
@@ -582,8 +605,8 @@ static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
 	old_csum = sh->checksum;
 	old_correct_csum = sctp_compute_cksum(skb, sctphoff);
 
-	sh->source = MASKED(sh->source, key->sctp_src, mask->sctp_src);
-	sh->dest = MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);
+	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
+	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);
 
 	new_csum = sctp_compute_cksum(skb, sctphoff);
 
@@ -597,27 +620,159 @@ static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
 	return 0;
 }
 
-static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
+static int ovs_vport_output(struct sock *sock, struct sk_buff *skb)
+{
+	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
+	struct vport *vport = data->vport;
+
+	if (skb_cow_head(skb, data->l2_len) < 0) {
+		kfree_skb(skb);
+		return -ENOMEM;
+	}
+
+	__skb_dst_copy(skb, data->dst);
+	*OVS_CB(skb) = data->cb;
+	skb->inner_protocol = data->inner_protocol;
+	skb->vlan_tci = data->vlan_tci;
+	skb->vlan_proto = data->vlan_proto;
+
+	/* Reconstruct the MAC header.  */
+	skb_push(skb, data->l2_len);
+	memcpy(skb->data, &data->l2_data, data->l2_len);
+	ovs_skb_postpush_rcsum(skb, skb->data, data->l2_len);
+	skb_reset_mac_header(skb);
+
+	ovs_vport_send(vport, skb);
+	return 0;
+}
+
+static unsigned int
+ovs_dst_get_mtu(const struct dst_entry *dst)
+{
+	return dst->dev->mtu;
+}
+
+static struct dst_ops ovs_dst_ops = {
+	.family = AF_UNSPEC,
+	.mtu = ovs_dst_get_mtu,
+};
+
+/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
+ * ovs_vport_output(), which is called once per fragmented packet.
+ */
+static void prepare_frag(struct vport *vport, struct sk_buff *skb)
+{
+	unsigned int hlen = skb_network_offset(skb);
+	struct ovs_frag_data *data;
+
+	data = this_cpu_ptr(&ovs_frag_data_storage);
+	data->dst = skb->_skb_refdst;
+	data->vport = vport;
+	data->cb = *OVS_CB(skb);
+	data->inner_protocol = skb->inner_protocol;
+	data->vlan_tci = skb->vlan_tci;
+	data->vlan_proto = skb->vlan_proto;
+	data->l2_len = hlen;
+	memcpy(&data->l2_data, skb->data, hlen);
+
+	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+	skb_pull(skb, hlen);
+}
+
+static void ovs_fragment(struct vport *vport, struct sk_buff *skb, u16 mru,
+			 __be16 ethertype)
+{
+	if (skb_network_offset(skb) > MAX_L2_LEN) {
+		OVS_NLERR(1, "L2 header too long to fragment");
+		return;
+	}
+
+	if (ethertype == htons(ETH_P_IP)) {
+		struct dst_entry ovs_dst;
+		unsigned long orig_dst;
+
+		prepare_frag(vport, skb);
+		dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
+			 DST_OBSOLETE_NONE, DST_NOCOUNT);
+		ovs_dst.dev = vport->dev;
+
+		orig_dst = skb->_skb_refdst;
+		skb_dst_set_noref(skb, &ovs_dst);
+		IPCB(skb)->frag_max_size = mru;
+
+		ip_do_fragment(skb->sk, skb, ovs_vport_output);
+		refdst_drop(orig_dst);
+	} else if (ethertype == htons(ETH_P_IPV6)) {
+		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
+		unsigned long orig_dst;
+		struct rt6_info ovs_rt;
+
+		if (!v6ops) {
+			kfree_skb(skb);
+			return;
+		}
+
+		prepare_frag(vport, skb);
+		memset(&ovs_rt, 0, sizeof(ovs_rt));
+		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
+			 DST_OBSOLETE_NONE, DST_NOCOUNT);
+		ovs_rt.dst.dev = vport->dev;
+
+		orig_dst = skb->_skb_refdst;
+		skb_dst_set_noref(skb, &ovs_rt.dst);
+		IP6CB(skb)->frag_max_size = mru;
+
+		v6ops->fragment(skb->sk, skb, ovs_vport_output);
+		refdst_drop(orig_dst);
+	} else {
+		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
+			  ovs_vport_name(vport), ntohs(ethertype), mru,
+			  vport->dev->mtu);
+		kfree_skb(skb);
+	}
+}
+
+static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
+		      struct sw_flow_key *key)
 {
 	struct vport *vport = ovs_vport_rcu(dp, out_port);
 
-	if (likely(vport))
-		ovs_vport_send(vport, skb);
-	else
+	if (likely(vport)) {
+		u16 mru = OVS_CB(skb)->mru;
+
+		if (likely(!mru || (skb->len <= mru + ETH_HLEN))) {
+			ovs_vport_send(vport, skb);
+		} else if (mru <= vport->dev->mtu) {
+			__be16 ethertype = key->eth.type;
+
+			if (!is_flow_key_valid(key)) {
+				if (eth_p_mpls(skb->protocol))
+					ethertype = skb->inner_protocol;
+				else
					ethertype = vlan_get_protocol(skb);
+			}
+
+			ovs_fragment(vport, skb, mru, ethertype);
+		} else {
+			kfree_skb(skb);
+		}
+	} else {
 		kfree_skb(skb);
+	}
 }
 
 static int output_userspace(struct datapath *dp, struct sk_buff *skb,
 			    struct sw_flow_key *key, const struct nlattr *attr,
 			    const struct nlattr *actions, int actions_len)
 {
-	struct ovs_tunnel_info info;
+	struct ip_tunnel_info info;
 	struct dp_upcall_info upcall;
 	const struct nlattr *a;
 	int rem;
 
 	memset(&upcall, 0, sizeof(upcall));
 	upcall.cmd = OVS_PACKET_CMD_ACTION;
+	upcall.mru = OVS_CB(skb)->mru;
 
 	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
 	     a = nla_next(a, &rem)) {
@@ -638,11 +793,13 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
 			if (vport) {
 				int err;
 
+				upcall.egress_tun_info = &info;
 				err = ovs_vport_get_egress_tun_info(vport, skb,
-								    &info);
-				if (!err)
-					upcall.egress_tun_info = &info;
+								    &upcall);
+				if (err)
+					upcall.egress_tun_info = NULL;
 			}
+
 			break;
 		}
 
@@ -669,9 +826,12 @@ static int sample(struct datapath *dp, struct sk_buff *skb,
 
 	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
 	     a = nla_next(a, &rem)) {
+		u32 probability;
+
 		switch (nla_type(a)) {
 		case OVS_SAMPLE_ATTR_PROBABILITY:
-			if (prandom_u32() >= nla_get_u32(a))
+			probability = nla_get_u32(a);
+			if (!probability || prandom_u32() > probability)
 				return 0;
 			break;
 
@@ -733,7 +893,11 @@ static int execute_set_action(struct sk_buff *skb,
 {
 	/* Only tunnel set execution is supported without a mask. */
 	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
-		OVS_CB(skb)->egress_tun_info = nla_data(a);
+		struct ovs_tunnel_info *tun = nla_data(a);
+
+		skb_dst_drop(skb);
+		dst_hold((struct dst_entry *)tun->tun_dst);
+		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
 		return 0;
 	}
 
@@ -751,12 +915,13 @@ static int execute_masked_set_action(struct sk_buff *skb,
 
 	switch (nla_type(a)) {
 	case OVS_KEY_ATTR_PRIORITY:
-		SET_MASKED(skb->priority, nla_get_u32(a), *get_mask(a, u32 *));
+		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
+			       *get_mask(a, u32 *));
 		flow_key->phy.priority = skb->priority;
 		break;
 
 	case OVS_KEY_ATTR_SKB_MARK:
-		SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
+		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
 		flow_key->phy.skb_mark = skb->mark;
 		break;
 
@@ -799,6 +964,13 @@ static int execute_masked_set_action(struct sk_buff *skb,
 		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
 								    __be32 *));
 		break;
+
+	case OVS_KEY_ATTR_CT_STATE:
+	case OVS_KEY_ATTR_CT_ZONE:
+	case OVS_KEY_ATTR_CT_MARK:
+	case OVS_KEY_ATTR_CT_LABEL:
+		err = -EINVAL;
+		break;
 	}
 
 	return err;
@@ -868,7 +1040,7 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
 			struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);
 
 			if (out_skb)
-				do_output(dp, out_skb, prev_port);
+				do_output(dp, out_skb, prev_port, key);
 
 			prev_port = -1;
 		}
@@ -925,6 +1097,15 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
 		case OVS_ACTION_ATTR_SAMPLE:
 			err = sample(dp, skb, key, a, attr, len);
 			break;
+
+		case OVS_ACTION_ATTR_CT:
+			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
+					     nla_data(a));
+
+			/* Hide stolen IP fragments from user space. */
+			if (err == -EINPROGRESS)
+				return 0;
+			break;
 		}
 
 		if (unlikely(err)) {
@@ -934,7 +1115,7 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
 	}
 
 	if (prev_port != -1)
-		do_output(dp, skb, prev_port);
+		do_output(dp, skb, prev_port, key);
 	else
 		consume_skb(skb);
 
@@ -976,7 +1157,6 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
 	int err;
 
 	this_cpu_inc(exec_actions_level);
-	OVS_CB(skb)->egress_tun_info = NULL;
 	err = do_execute_actions(dp, skb, key,
 				 acts->actions, acts->actions_len);
 
```
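Throughout the hunks above, MASKED()/SET_MASKED() call sites become OVS_MASKED()/OVS_SET_MASKED() while the local definitions are dropped from this file. The removed definition documents the contract: `(KEY) | ((OLD) & ~(MASK))`, i.e. bits outside the mask keep their old value, and KEY must not set bits outside the mask. The sketch below is not part of the commit; it is a minimal, stand-alone user-space restatement of that contract (macro bodies copied from the removed lines, test values invented) to make the masked `set_*` helpers easier to follow.

```c
/* Stand-alone illustration only -- not from the kernel tree. The macro
 * bodies mirror the MASKED()/SET_MASKED() definitions removed in the diff
 * above; the test values are made up for the example.
 */
#include <assert.h>
#include <stdint.h>

/* Bits where MASK is 0 keep their old value; KEY supplies the masked bits
 * and must not have bits set outside MASK.
 */
#define OVS_MASKED(OLD, KEY, MASK) ((KEY) | ((OLD) & ~(MASK)))
#define OVS_SET_MASKED(OLD, KEY, MASK) ((OLD) = OVS_MASKED(OLD, KEY, MASK))

int main(void)
{
	uint8_t ttl = 0x40;

	/* Masked write: only the low nibble is touched, the high nibble
	 * of the original 0x40 survives.
	 */
	OVS_SET_MASKED(ttl, 0x3f & 0x0f, 0x0f);
	assert(ttl == 0x4f);
	return 0;
}
```

This is also why set_ipv4(), set_tcp() and the other masked setters in the diff compare the OVS_MASKED() result against the current header field before rewriting anything: if the mask leaves the field unchanged, no header or checksum update is needed.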