Diffstat (limited to 'drivers/net/gtp.c')
 drivers/net/gtp.c (-rw-r--r--) | 865
1 file changed, 735 insertions(+), 130 deletions(-)
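A note on the addressing model the diff below relies on: per the 3GPP TS 29.060 / TS 29.061 / RFC 4291 reasoning quoted next to the new ipv6_pdp_find() helper, a mobile subscriber is identified by its /64 IPv6 prefix only, so PDP context lookups compare just the upper 64 bits of the address. The following standalone userspace sketch is illustrative only (it is not part of the patch; the function name and the memcmp-based comparison are assumptions), but it shows the same rule:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Illustrative only: compare the upper 64 bits (the PDN prefix) and ignore
 * the interface ID, mirroring what ipv6_pdp_addr_equal() in the patch does
 * with s6_addr32[0]/[1].
 */
static bool pdp_prefix_equal(const struct in6_addr *a,
			     const struct in6_addr *b)
{
	return memcmp(a->s6_addr, b->s6_addr, 8) == 0;
}

int main(void)
{
	struct in6_addr ctx_addr, pkt_addr;

	inet_pton(AF_INET6, "2001:db8:1:1::1", &ctx_addr);    /* address stored in the PDP context */
	inet_pton(AF_INET6, "2001:db8:1:1::beef", &pkt_addr); /* address seen in a tunnelled packet */

	printf("same PDN connection: %s\n",
	       pdp_prefix_equal(&ctx_addr, &pkt_addr) ? "yes" : "no");
	return 0;
}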
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index e62d6cbdf9bc..427b91aca50d 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -24,6 +24,7 @@  #include <net/net_namespace.h>  #include <net/protocol.h>  #include <net/ip.h> +#include <net/ipv6.h>  #include <net/udp.h>  #include <net/udp_tunnel.h>  #include <net/icmp.h> @@ -50,8 +51,14 @@ struct pdp_ctx {  	u8			gtp_version;  	u16			af; -	struct in_addr		ms_addr_ip4; -	struct in_addr		peer_addr_ip4; +	union { +		struct in_addr	addr; +		struct in6_addr	addr6; +	} ms; +	union { +		struct in_addr	addr; +		struct in6_addr	addr6; +	} peer;  	struct sock		*sk;  	struct net_device       *dev; @@ -80,9 +87,15 @@ struct gtp_dev {  };  struct echo_info { -	struct in_addr		ms_addr_ip4; -	struct in_addr		peer_addr_ip4; +	u16			af;  	u8			gtp_version; + +	union { +		struct in_addr	addr; +	} ms; +	union { +		struct in_addr	addr; +	} peer;  };  static unsigned int gtp_net_id __read_mostly; @@ -121,8 +134,14 @@ static inline u32 ipv4_hashfn(__be32 ip)  	return jhash_1word((__force u32)ip, gtp_h_initval);  } +static u32 ipv6_hashfn(const struct in6_addr *ip6) +{ +	return jhash_2words((__force u32)ip6->s6_addr32[0], +			    (__force u32)ip6->s6_addr32[1], gtp_h_initval); +} +  /* Resolve a PDP context structure based on the 64bit TID. */ -static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid) +static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid, u16 family)  {  	struct hlist_head *head;  	struct pdp_ctx *pdp; @@ -130,7 +149,8 @@ static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid)  	head = >p->tid_hash[gtp0_hashfn(tid) % gtp->hash_size];  	hlist_for_each_entry_rcu(pdp, head, hlist_tid) { -		if (pdp->gtp_version == GTP_V0 && +		if (pdp->af == family && +		    pdp->gtp_version == GTP_V0 &&  		    pdp->u.v0.tid == tid)  			return pdp;  	} @@ -138,7 +158,7 @@ static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid)  }  /* Resolve a PDP context structure based on the 32bit TEI. */ -static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid) +static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid, u16 family)  {  	struct hlist_head *head;  	struct pdp_ctx *pdp; @@ -146,7 +166,8 @@ static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid)  	head = >p->tid_hash[gtp1u_hashfn(tid) % gtp->hash_size];  	hlist_for_each_entry_rcu(pdp, head, hlist_tid) { -		if (pdp->gtp_version == GTP_V1 && +		if (pdp->af == family && +		    pdp->gtp_version == GTP_V1 &&  		    pdp->u.v1.i_tei == tid)  			return pdp;  	} @@ -163,7 +184,42 @@ static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr)  	hlist_for_each_entry_rcu(pdp, head, hlist_addr) {  		if (pdp->af == AF_INET && -		    pdp->ms_addr_ip4.s_addr == ms_addr) +		    pdp->ms.addr.s_addr == ms_addr) +			return pdp; +	} + +	return NULL; +} + +/* 3GPP TS 29.060: PDN Connection: the association between a MS represented by + * [...] one IPv6 *prefix* and a PDN represented by an APN. + * + * Then, 3GPP TS 29.061, Section 11.2.1.3 says: The size of the prefix shall be + * according to the maximum prefix length for a global IPv6 address as + * specified in the IPv6 Addressing Architecture, see RFC 4291. + * + * Finally, RFC 4291 section 2.5.4 states: All Global Unicast addresses other + * than those that start with binary 000 have a 64-bit interface ID field + * (i.e., n + m = 64). 
+ */ +static bool ipv6_pdp_addr_equal(const struct in6_addr *a, +				const struct in6_addr *b) +{ +	return a->s6_addr32[0] == b->s6_addr32[0] && +	       a->s6_addr32[1] == b->s6_addr32[1]; +} + +static struct pdp_ctx *ipv6_pdp_find(struct gtp_dev *gtp, +				     const struct in6_addr *ms_addr) +{ +	struct hlist_head *head; +	struct pdp_ctx *pdp; + +	head = >p->addr_hash[ipv6_hashfn(ms_addr) % gtp->hash_size]; + +	hlist_for_each_entry_rcu(pdp, head, hlist_addr) { +		if (pdp->af == AF_INET6 && +		    ipv6_pdp_addr_equal(&pdp->ms.addr6, ms_addr))  			return pdp;  	} @@ -181,34 +237,85 @@ static bool gtp_check_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,  	iph = (struct iphdr *)(skb->data + hdrlen);  	if (role == GTP_ROLE_SGSN) -		return iph->daddr == pctx->ms_addr_ip4.s_addr; +		return iph->daddr == pctx->ms.addr.s_addr;  	else -		return iph->saddr == pctx->ms_addr_ip4.s_addr; +		return iph->saddr == pctx->ms.addr.s_addr; +} + +static bool gtp_check_ms_ipv6(struct sk_buff *skb, struct pdp_ctx *pctx, +			      unsigned int hdrlen, unsigned int role) +{ +	struct ipv6hdr *ip6h; +	int ret; + +	if (!pskb_may_pull(skb, hdrlen + sizeof(struct ipv6hdr))) +		return false; + +	ip6h = (struct ipv6hdr *)(skb->data + hdrlen); + +	if ((ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL) || +	    (ipv6_addr_type(&ip6h->daddr) & IPV6_ADDR_LINKLOCAL)) +		return false; + +	if (role == GTP_ROLE_SGSN) { +		ret = ipv6_pdp_addr_equal(&ip6h->daddr, &pctx->ms.addr6); +	} else { +		ret = ipv6_pdp_addr_equal(&ip6h->saddr, &pctx->ms.addr6); +	} + +	return ret;  }  /* Check if the inner IP address in this packet is assigned to any   * existing mobile subscriber.   */  static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx, -			     unsigned int hdrlen, unsigned int role) +			 unsigned int hdrlen, unsigned int role, +			 __u16 inner_proto)  { -	switch (ntohs(skb->protocol)) { +	switch (inner_proto) {  	case ETH_P_IP:  		return gtp_check_ms_ipv4(skb, pctx, hdrlen, role); +	case ETH_P_IPV6: +		return gtp_check_ms_ipv6(skb, pctx, hdrlen, role);  	}  	return false;  } +static int gtp_inner_proto(struct sk_buff *skb, unsigned int hdrlen, +			   __u16 *inner_proto) +{ +	__u8 *ip_version, _ip_version; + +	ip_version = skb_header_pointer(skb, hdrlen, sizeof(*ip_version), +					&_ip_version); +	if (!ip_version) +		return -1; + +	switch (*ip_version & 0xf0) { +	case 0x40: +		*inner_proto = ETH_P_IP; +		break; +	case 0x60: +		*inner_proto = ETH_P_IPV6; +		break; +	default: +		return -1; +	} + +	return 0; +} +  static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb, -			unsigned int hdrlen, unsigned int role) +		  unsigned int hdrlen, unsigned int role, __u16 inner_proto)  { -	if (!gtp_check_ms(skb, pctx, hdrlen, role)) { +	if (!gtp_check_ms(skb, pctx, hdrlen, role, inner_proto)) {  		netdev_dbg(pctx->dev, "No PDP ctx for this MS\n");  		return 1;  	}  	/* Get rid of the GTP + UDP headers. 
*/ -	if (iptunnel_pull_header(skb, hdrlen, skb->protocol, +	if (iptunnel_pull_header(skb, hdrlen, htons(inner_proto),  			 !net_eq(sock_net(pctx->sk), dev_net(pctx->dev)))) {  		pctx->dev->stats.rx_length_errors++;  		goto err; @@ -250,6 +357,27 @@ static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,  	return ip_route_output_key(sock_net(sk), fl4);  } +static struct rt6_info *ip6_route_output_gtp(struct net *net, +					     struct flowi6 *fl6, +					     const struct sock *sk, +					     const struct in6_addr *daddr, +					     struct in6_addr *saddr) +{ +	struct dst_entry *dst; + +	memset(fl6, 0, sizeof(*fl6)); +	fl6->flowi6_oif		= sk->sk_bound_dev_if; +	fl6->daddr		= *daddr; +	fl6->saddr		= *saddr; +	fl6->flowi6_proto	= sk->sk_protocol; + +	dst = ipv6_stub->ipv6_dst_lookup_flow(net, sk, fl6, NULL); +	if (IS_ERR(dst)) +		return ERR_PTR(-ENETUNREACH); + +	return (struct rt6_info *)dst; +} +  /* GSM TS 09.60. 7.3   * In all Path Management messages:   * - TID: is not used and shall be set to 0. @@ -292,13 +420,39 @@ static void gtp0_build_echo_msg(struct gtp0_header *hdr, __u8 msg_type)  		hdr->length = 0;  } +static int gtp0_send_echo_resp_ip(struct gtp_dev *gtp, struct sk_buff *skb) +{ +	struct iphdr *iph = ip_hdr(skb); +	struct flowi4 fl4; +	struct rtable *rt; + +	/* find route to the sender, +	 * src address becomes dst address and vice versa. +	 */ +	rt = ip4_route_output_gtp(&fl4, gtp->sk0, iph->saddr, iph->daddr); +	if (IS_ERR(rt)) { +		netdev_dbg(gtp->dev, "no route for echo response from %pI4\n", +			   &iph->saddr); +		return -1; +	} + +	udp_tunnel_xmit_skb(rt, gtp->sk0, skb, +			    fl4.saddr, fl4.daddr, +			    iph->tos, +			    ip4_dst_hoplimit(&rt->dst), +			    0, +			    htons(GTP0_PORT), htons(GTP0_PORT), +			    !net_eq(sock_net(gtp->sk1u), +				    dev_net(gtp->dev)), +			    false); + +	return 0; +} +  static int gtp0_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)  {  	struct gtp0_packet *gtp_pkt;  	struct gtp0_header *gtp0; -	struct rtable *rt; -	struct flowi4 fl4; -	struct iphdr *iph;  	__be16 seq;  	gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr)); @@ -325,27 +479,15 @@ static int gtp0_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)  	gtp_pkt->ie.tag = GTPIE_RECOVERY;  	gtp_pkt->ie.val = gtp->restart_count; -	iph = ip_hdr(skb); - -	/* find route to the sender, -	 * src address becomes dst address and vice versa. 
-	 */ -	rt = ip4_route_output_gtp(&fl4, gtp->sk0, iph->saddr, iph->daddr); -	if (IS_ERR(rt)) { -		netdev_dbg(gtp->dev, "no route for echo response from %pI4\n", -			   &iph->saddr); +	switch (gtp->sk0->sk_family) { +	case AF_INET: +		if (gtp0_send_echo_resp_ip(gtp, skb) < 0) +			return -1; +		break; +	case AF_INET6:  		return -1;  	} -	udp_tunnel_xmit_skb(rt, gtp->sk0, skb, -			    fl4.saddr, fl4.daddr, -			    iph->tos, -			    ip4_dst_hoplimit(&rt->dst), -			    0, -			    htons(GTP0_PORT), htons(GTP0_PORT), -			    !net_eq(sock_net(gtp->sk1u), -				    dev_net(gtp->dev)), -			    false);  	return 0;  } @@ -360,8 +502,8 @@ static int gtp_genl_fill_echo(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,  		goto failure;  	if (nla_put_u32(skb, GTPA_VERSION, echo.gtp_version) || -	    nla_put_be32(skb, GTPA_PEER_ADDRESS, echo.peer_addr_ip4.s_addr) || -	    nla_put_be32(skb, GTPA_MS_ADDRESS, echo.ms_addr_ip4.s_addr)) +	    nla_put_be32(skb, GTPA_PEER_ADDRESS, echo.peer.addr.s_addr) || +	    nla_put_be32(skb, GTPA_MS_ADDRESS, echo.ms.addr.s_addr))  		goto failure;  	genlmsg_end(skb, genlh); @@ -372,12 +514,20 @@ failure:  	return -EMSGSIZE;  } +static void gtp0_handle_echo_resp_ip(struct sk_buff *skb, struct echo_info *echo) +{ +	struct iphdr *iph = ip_hdr(skb); + +	echo->ms.addr.s_addr = iph->daddr; +	echo->peer.addr.s_addr = iph->saddr; +	echo->gtp_version = GTP_V0; +} +  static int gtp0_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)  {  	struct gtp0_header *gtp0;  	struct echo_info echo;  	struct sk_buff *msg; -	struct iphdr *iph;  	int ret;  	gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr)); @@ -385,10 +535,13 @@ static int gtp0_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)  	if (!gtp0_validate_echo_hdr(gtp0))  		return -1; -	iph = ip_hdr(skb); -	echo.ms_addr_ip4.s_addr = iph->daddr; -	echo.peer_addr_ip4.s_addr = iph->saddr; -	echo.gtp_version = GTP_V0; +	switch (gtp->sk0->sk_family) { +	case AF_INET: +		gtp0_handle_echo_resp_ip(skb, &echo); +		break; +	case AF_INET6: +		return -1; +	}  	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);  	if (!msg) @@ -404,6 +557,21 @@ static int gtp0_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)  				       msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC);  } +static int gtp_proto_to_family(__u16 proto) +{ +	switch (proto) { +	case ETH_P_IP: +		return AF_INET; +	case ETH_P_IPV6: +		return AF_INET6; +	default: +		WARN_ON_ONCE(1); +		break; +	} + +	return AF_UNSPEC; +} +  /* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. 
*/  static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)  { @@ -411,6 +579,7 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)  			      sizeof(struct gtp0_header);  	struct gtp0_header *gtp0;  	struct pdp_ctx *pctx; +	__u16 inner_proto;  	if (!pskb_may_pull(skb, hdrlen))  		return -1; @@ -433,13 +602,19 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)  	if (gtp0->type != GTP_TPDU)  		return 1; -	pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid)); +	if (gtp_inner_proto(skb, hdrlen, &inner_proto) < 0) { +		netdev_dbg(gtp->dev, "GTP packet does not encapsulate an IP packet\n"); +		return -1; +	} + +	pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid), +			     gtp_proto_to_family(inner_proto));  	if (!pctx) {  		netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);  		return 1;  	} -	return gtp_rx(pctx, skb, hdrlen, gtp->role); +	return gtp_rx(pctx, skb, hdrlen, gtp->role, inner_proto);  }  /* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */ @@ -549,8 +724,8 @@ static int gtp1u_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)  		return -1;  	iph = ip_hdr(skb); -	echo.ms_addr_ip4.s_addr = iph->daddr; -	echo.peer_addr_ip4.s_addr = iph->saddr; +	echo.ms.addr.s_addr = iph->daddr; +	echo.peer.addr.s_addr = iph->saddr;  	echo.gtp_version = GTP_V1;  	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); @@ -567,12 +742,50 @@ static int gtp1u_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)  				       msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC);  } +static int gtp_parse_exthdrs(struct sk_buff *skb, unsigned int *hdrlen) +{ +	struct gtp_ext_hdr *gtp_exthdr, _gtp_exthdr; +	unsigned int offset = *hdrlen; +	__u8 *next_type, _next_type; + +	/* From 29.060: "The Extension Header Length field specifies the length +	 * of the particular Extension header in 4 octets units." +	 * +	 * This length field includes length field size itself (1 byte), +	 * payload (variable length) and next type (1 byte). The extension +	 * header is aligned to to 4 bytes. +	 */ + +	do { +		gtp_exthdr = skb_header_pointer(skb, offset, sizeof(*gtp_exthdr), +						&_gtp_exthdr); +		if (!gtp_exthdr || !gtp_exthdr->len) +			return -1; + +		offset += gtp_exthdr->len * 4; + +		/* From 29.060: "If no such Header follows, then the value of +		 * the Next Extension Header Type shall be 0." 
+		 */ +		next_type = skb_header_pointer(skb, offset - 1, +					       sizeof(_next_type), &_next_type); +		if (!next_type) +			return -1; + +	} while (*next_type != 0); + +	*hdrlen = offset; + +	return 0; +} +  static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)  {  	unsigned int hdrlen = sizeof(struct udphdr) +  			      sizeof(struct gtp1_header);  	struct gtp1_header *gtp1;  	struct pdp_ctx *pctx; +	__u16 inner_proto;  	if (!pskb_may_pull(skb, hdrlen))  		return -1; @@ -608,15 +821,25 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)  	if (!pskb_may_pull(skb, hdrlen))  		return -1; +	if (gtp_inner_proto(skb, hdrlen, &inner_proto) < 0) { +		netdev_dbg(gtp->dev, "GTP packet does not encapsulate an IP packet\n"); +		return -1; +	} +  	gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr)); -	pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid)); +	pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid), +			     gtp_proto_to_family(inner_proto));  	if (!pctx) {  		netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);  		return 1;  	} -	return gtp_rx(pctx, skb, hdrlen, gtp->role); +	if (gtp1->flags & GTP1_F_EXTHDR && +	    gtp_parse_exthdrs(skb, &hdrlen) < 0) +		return -1; + +	return gtp_rx(pctx, skb, hdrlen, gtp->role, inner_proto);  }  static void __gtp_encap_destroy(struct sock *sk) @@ -760,11 +983,17 @@ static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)  struct gtp_pktinfo {  	struct sock		*sk; -	struct iphdr		*iph; -	struct flowi4		fl4; -	struct rtable		*rt; +	union { +		struct flowi4	fl4; +		struct flowi6	fl6; +	}; +	union { +		struct rtable	*rt; +		struct rt6_info	*rt6; +	};  	struct pdp_ctx		*pctx;  	struct net_device	*dev; +	__u8			tos;  	__be16			gtph_port;  }; @@ -783,64 +1012,61 @@ static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo)  }  static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo, -					struct sock *sk, struct iphdr *iph, +					struct sock *sk, __u8 tos,  					struct pdp_ctx *pctx, struct rtable *rt,  					struct flowi4 *fl4,  					struct net_device *dev)  {  	pktinfo->sk	= sk; -	pktinfo->iph	= iph; +	pktinfo->tos	= tos;  	pktinfo->pctx	= pctx;  	pktinfo->rt	= rt;  	pktinfo->fl4	= *fl4;  	pktinfo->dev	= dev;  } -static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev, -			     struct gtp_pktinfo *pktinfo) +static void gtp_set_pktinfo_ipv6(struct gtp_pktinfo *pktinfo, +				 struct sock *sk, __u8 tos, +				 struct pdp_ctx *pctx, struct rt6_info *rt6, +				 struct flowi6 *fl6, +				 struct net_device *dev) +{ +	pktinfo->sk	= sk; +	pktinfo->tos	= tos; +	pktinfo->pctx	= pctx; +	pktinfo->rt6	= rt6; +	pktinfo->fl6	= *fl6; +	pktinfo->dev	= dev; +} + +static int gtp_build_skb_outer_ip4(struct sk_buff *skb, struct net_device *dev, +				   struct gtp_pktinfo *pktinfo, +				   struct pdp_ctx *pctx, __u8 tos, +				   __be16 frag_off)  { -	struct gtp_dev *gtp = netdev_priv(dev); -	struct pdp_ctx *pctx;  	struct rtable *rt;  	struct flowi4 fl4; -	struct iphdr *iph;  	__be16 df;  	int mtu; -	/* Read the IP destination address and resolve the PDP context. -	 * Prepend PDP header with TEI/TID from PDP ctx. 
-	 */ -	iph = ip_hdr(skb); -	if (gtp->role == GTP_ROLE_SGSN) -		pctx = ipv4_pdp_find(gtp, iph->saddr); -	else -		pctx = ipv4_pdp_find(gtp, iph->daddr); - -	if (!pctx) { -		netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n", -			   &iph->daddr); -		return -ENOENT; -	} -	netdev_dbg(dev, "found PDP context %p\n", pctx); - -	rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr, +	rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer.addr.s_addr,  				  inet_sk(pctx->sk)->inet_saddr);  	if (IS_ERR(rt)) {  		netdev_dbg(dev, "no route to SSGN %pI4\n", -			   &pctx->peer_addr_ip4.s_addr); +			   &pctx->peer.addr.s_addr);  		dev->stats.tx_carrier_errors++;  		goto err;  	}  	if (rt->dst.dev == dev) {  		netdev_dbg(dev, "circular route to SSGN %pI4\n", -			   &pctx->peer_addr_ip4.s_addr); +			   &pctx->peer.addr.s_addr);  		dev->stats.collisions++;  		goto err_rt;  	}  	/* This is similar to tnl_update_pmtu(). */ -	df = iph->frag_off; +	df = frag_off;  	if (df) {  		mtu = dst_mtu(&rt->dst) - dev->hard_header_len -  			sizeof(struct iphdr) - sizeof(struct udphdr); @@ -858,7 +1084,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,  	skb_dst_update_pmtu_no_confirm(skb, mtu); -	if (iph->frag_off & htons(IP_DF) && +	if (frag_off & htons(IP_DF) &&  	    ((!skb_is_gso(skb) && skb->len > mtu) ||  	     (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu)))) {  		netdev_dbg(dev, "packet too big, fragmentation needed\n"); @@ -867,7 +1093,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,  		goto err_rt;  	} -	gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, iph, pctx, rt, &fl4, dev); +	gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, tos, pctx, rt, &fl4, dev);  	gtp_push_header(skb, pktinfo);  	return 0; @@ -877,6 +1103,162 @@ err:  	return -EBADMSG;  } +static int gtp_build_skb_outer_ip6(struct net *net, struct sk_buff *skb, +				   struct net_device *dev, +				   struct gtp_pktinfo *pktinfo, +				   struct pdp_ctx *pctx, __u8 tos) +{ +	struct dst_entry *dst; +	struct rt6_info *rt; +	struct flowi6 fl6; +	int mtu; + +	rt = ip6_route_output_gtp(net, &fl6, pctx->sk, &pctx->peer.addr6, +				  &inet6_sk(pctx->sk)->saddr); +	if (IS_ERR(rt)) { +		netdev_dbg(dev, "no route to SSGN %pI6\n", +			   &pctx->peer.addr6); +		dev->stats.tx_carrier_errors++; +		goto err; +	} +	dst = &rt->dst; + +	if (rt->dst.dev == dev) { +		netdev_dbg(dev, "circular route to SSGN %pI6\n", +			   &pctx->peer.addr6); +		dev->stats.collisions++; +		goto err_rt; +	} + +	mtu = dst_mtu(&rt->dst) - dev->hard_header_len - +		sizeof(struct ipv6hdr) - sizeof(struct udphdr); +	switch (pctx->gtp_version) { +	case GTP_V0: +		mtu -= sizeof(struct gtp0_header); +		break; +	case GTP_V1: +		mtu -= sizeof(struct gtp1_header); +		break; +	} + +	skb_dst_update_pmtu_no_confirm(skb, mtu); + +	if ((!skb_is_gso(skb) && skb->len > mtu) || +	    (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu))) { +		netdev_dbg(dev, "packet too big, fragmentation needed\n"); +		icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); +		goto err_rt; +	} + +	gtp_set_pktinfo_ipv6(pktinfo, pctx->sk, tos, pctx, rt, &fl6, dev); +	gtp_push_header(skb, pktinfo); + +	return 0; +err_rt: +	dst_release(dst); +err: +	return -EBADMSG; +} + +static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev, +			     struct gtp_pktinfo *pktinfo) +{ +	struct gtp_dev *gtp = netdev_priv(dev); +	struct net *net = gtp->net; +	struct pdp_ctx *pctx; +	struct iphdr *iph; +	int ret; + +	/* Read the IP destination address and resolve 
the PDP context. +	 * Prepend PDP header with TEI/TID from PDP ctx. +	 */ +	iph = ip_hdr(skb); +	if (gtp->role == GTP_ROLE_SGSN) +		pctx = ipv4_pdp_find(gtp, iph->saddr); +	else +		pctx = ipv4_pdp_find(gtp, iph->daddr); + +	if (!pctx) { +		netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n", +			   &iph->daddr); +		return -ENOENT; +	} +	netdev_dbg(dev, "found PDP context %p\n", pctx); + +	switch (pctx->sk->sk_family) { +	case AF_INET: +		ret = gtp_build_skb_outer_ip4(skb, dev, pktinfo, pctx, +					      iph->tos, iph->frag_off); +		break; +	case AF_INET6: +		ret = gtp_build_skb_outer_ip6(net, skb, dev, pktinfo, pctx, +					      iph->tos); +		break; +	default: +		ret = -1; +		WARN_ON_ONCE(1); +		break; +	} + +	if (ret < 0) +		return ret; + +	netdev_dbg(dev, "gtp -> IP src: %pI4 dst: %pI4\n", +		   &iph->saddr, &iph->daddr); + +	return 0; +} + +static int gtp_build_skb_ip6(struct sk_buff *skb, struct net_device *dev, +			     struct gtp_pktinfo *pktinfo) +{ +	struct gtp_dev *gtp = netdev_priv(dev); +	struct net *net = gtp->net; +	struct pdp_ctx *pctx; +	struct ipv6hdr *ip6h; +	__u8 tos; +	int ret; + +	/* Read the IP destination address and resolve the PDP context. +	 * Prepend PDP header with TEI/TID from PDP ctx. +	 */ +	ip6h = ipv6_hdr(skb); +	if (gtp->role == GTP_ROLE_SGSN) +		pctx = ipv6_pdp_find(gtp, &ip6h->saddr); +	else +		pctx = ipv6_pdp_find(gtp, &ip6h->daddr); + +	if (!pctx) { +		netdev_dbg(dev, "no PDP ctx found for %pI6, skip\n", +			   &ip6h->daddr); +		return -ENOENT; +	} +	netdev_dbg(dev, "found PDP context %p\n", pctx); + +	tos = ipv6_get_dsfield(ip6h); + +	switch (pctx->sk->sk_family) { +	case AF_INET: +		ret = gtp_build_skb_outer_ip4(skb, dev, pktinfo, pctx, tos, 0); +		break; +	case AF_INET6: +		ret = gtp_build_skb_outer_ip6(net, skb, dev, pktinfo, pctx, tos); +		break; +	default: +		ret = -1; +		WARN_ON_ONCE(1); +		break; +	} + +	if (ret < 0) +		return ret; + +	netdev_dbg(dev, "gtp -> IP src: %pI6 dst: %pI6\n", +		   &ip6h->saddr, &ip6h->daddr); + +	return 0; +} +  static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)  {  	unsigned int proto = ntohs(skb->protocol); @@ -895,6 +1277,9 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)  	case ETH_P_IP:  		err = gtp_build_skb_ip4(skb, dev, &pktinfo);  		break; +	case ETH_P_IPV6: +		err = gtp_build_skb_ip6(skb, dev, &pktinfo); +		break;  	default:  		err = -EOPNOTSUPP;  		break; @@ -904,13 +1289,11 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)  	if (err < 0)  		goto tx_err; -	switch (proto) { -	case ETH_P_IP: -		netdev_dbg(pktinfo.dev, "gtp -> IP src: %pI4 dst: %pI4\n", -			   &pktinfo.iph->saddr, &pktinfo.iph->daddr); +	switch (pktinfo.pctx->sk->sk_family) { +	case AF_INET:  		udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,  				    pktinfo.fl4.saddr, pktinfo.fl4.daddr, -				    pktinfo.iph->tos, +				    pktinfo.tos,  				    ip4_dst_hoplimit(&pktinfo.rt->dst),  				    0,  				    pktinfo.gtph_port, pktinfo.gtph_port, @@ -918,6 +1301,19 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)  					    dev_net(dev)),  				    false);  		break; +	case AF_INET6: +#if IS_ENABLED(CONFIG_IPV6) +		udp_tunnel6_xmit_skb(&pktinfo.rt6->dst, pktinfo.sk, skb, dev, +				     &pktinfo.fl6.saddr, &pktinfo.fl6.daddr, +				     pktinfo.tos, +				     ip6_dst_hoplimit(&pktinfo.rt->dst), +				     0, +				     pktinfo.gtph_port, pktinfo.gtph_port, +				     false); +#else +		goto tx_err; +#endif +		break;  	}  	return NETDEV_TX_OK; 
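The transmit paths above subtract the full outer-header overhead from the path MTU, and the link setup code in the following hunks does the same for dev->mtu (GTP_IPV4_MAXLEN here, GTP_IPV6_MAXLEN for an IPv6 outer socket). A quick back-of-the-envelope check, as a standalone sketch assuming the usual 20-byte GTPv0 header as the worst case (which is what GTP_TH_MAXLEN encodes):

#include <stdio.h>

/* Illustrative constants; the kernel uses sizeof() on the real structs. */
#define ETH_DATA_LEN	1500
#define UDP_HLEN	8
#define GTP0_HLEN	20	/* worst case; the GTPv1 base header is 8 bytes */
#define IPV4_HLEN	20
#define IPV6_HLEN	40

int main(void)
{
	/* Corresponds to dev->mtu = ETH_DATA_LEN - GTP_IPV4_MAXLEN: 1452 */
	printf("IPv4 outer headers: mtu = %d\n",
	       ETH_DATA_LEN - (IPV4_HLEN + UDP_HLEN + GTP0_HLEN));
	/* Corresponds to the IPv6 case, GTP_IPV6_MAXLEN: 1432 */
	printf("IPv6 outer headers: mtu = %d\n",
	       ETH_DATA_LEN - (IPV6_HLEN + UDP_HLEN + GTP0_HLEN));
	return 0;
}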
@@ -936,11 +1332,11 @@ static const struct device_type gtp_type = {  	.name = "gtp",  }; +#define GTP_TH_MAXLEN	(sizeof(struct udphdr) + sizeof(struct gtp0_header)) +#define GTP_IPV4_MAXLEN	(sizeof(struct iphdr) + GTP_TH_MAXLEN) +  static void gtp_link_setup(struct net_device *dev)  { -	unsigned int max_gtp_header_len = sizeof(struct iphdr) + -					  sizeof(struct udphdr) + -					  sizeof(struct gtp0_header);  	struct gtp_dev *gtp = netdev_priv(dev);  	dev->netdev_ops		= >p_netdev_ops; @@ -949,7 +1345,7 @@ static void gtp_link_setup(struct net_device *dev)  	dev->hard_header_len = 0;  	dev->addr_len = 0; -	dev->mtu = ETH_DATA_LEN - max_gtp_header_len; +	dev->mtu = ETH_DATA_LEN - GTP_IPV4_MAXLEN;  	/* Zero header length. */  	dev->type = ARPHRD_NONE; @@ -960,7 +1356,7 @@ static void gtp_link_setup(struct net_device *dev)  	dev->features	|= NETIF_F_LLTX;  	netif_keep_dst(dev); -	dev->needed_headroom	= LL_MAX_HEADER + max_gtp_header_len; +	dev->needed_headroom	= LL_MAX_HEADER + GTP_IPV4_MAXLEN;  	gtp->dev = dev;  } @@ -975,17 +1371,45 @@ static void gtp_destructor(struct net_device *dev)  	kfree(gtp->tid_hash);  } -static struct sock *gtp_create_sock(int type, struct gtp_dev *gtp) +static int gtp_sock_udp_config(struct udp_port_cfg *udp_conf, +			       const struct nlattr *nla, int family) +{ +	udp_conf->family = family; + +	switch (udp_conf->family) { +	case AF_INET: +		udp_conf->local_ip.s_addr = nla_get_be32(nla); +		break; +#if IS_ENABLED(CONFIG_IPV6) +	case AF_INET6: +		udp_conf->local_ip6 = nla_get_in6_addr(nla); +		break; +#endif +	default: +		return -EOPNOTSUPP; +	} + +	return 0; +} + +static struct sock *gtp_create_sock(int type, struct gtp_dev *gtp, +				    const struct nlattr *nla, int family)  {  	struct udp_tunnel_sock_cfg tuncfg = {}; -	struct udp_port_cfg udp_conf = { -		.local_ip.s_addr	= htonl(INADDR_ANY), -		.family			= AF_INET, -	}; +	struct udp_port_cfg udp_conf = {};  	struct net *net = gtp->net;  	struct socket *sock;  	int err; +	if (nla) { +		err = gtp_sock_udp_config(&udp_conf, nla, family); +		if (err < 0) +			return ERR_PTR(err); +	} else { +		udp_conf.local_ip.s_addr = htonl(INADDR_ANY); +		udp_conf.family = AF_INET; +	} +  	if (type == UDP_ENCAP_GTP0)  		udp_conf.local_udp_port = htons(GTP0_PORT);  	else if (type == UDP_ENCAP_GTP1U) @@ -1007,16 +1431,17 @@ static struct sock *gtp_create_sock(int type, struct gtp_dev *gtp)  	return sock->sk;  } -static int gtp_create_sockets(struct gtp_dev *gtp, struct nlattr *data[]) +static int gtp_create_sockets(struct gtp_dev *gtp, const struct nlattr *nla, +			      int family)  { -	struct sock *sk1u = NULL; -	struct sock *sk0 = NULL; +	struct sock *sk1u; +	struct sock *sk0; -	sk0 = gtp_create_sock(UDP_ENCAP_GTP0, gtp); +	sk0 = gtp_create_sock(UDP_ENCAP_GTP0, gtp, nla, family);  	if (IS_ERR(sk0))  		return PTR_ERR(sk0); -	sk1u = gtp_create_sock(UDP_ENCAP_GTP1U, gtp); +	sk1u = gtp_create_sock(UDP_ENCAP_GTP1U, gtp, nla, family);  	if (IS_ERR(sk1u)) {  		udp_tunnel_sock_release(sk0->sk_socket);  		return PTR_ERR(sk1u); @@ -1029,6 +1454,9 @@ static int gtp_create_sockets(struct gtp_dev *gtp, struct nlattr *data[])  	return 0;  } +#define GTP_TH_MAXLEN	(sizeof(struct udphdr) + sizeof(struct gtp0_header)) +#define GTP_IPV6_MAXLEN	(sizeof(struct ipv6hdr) + GTP_TH_MAXLEN) +  static int gtp_newlink(struct net *src_net, struct net_device *dev,  		       struct nlattr *tb[], struct nlattr *data[],  		       struct netlink_ext_ack *extack) @@ -1038,6 +1466,11 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,  	
struct gtp_net *gn;  	int hashsize, err; +#if !IS_ENABLED(CONFIG_IPV6) +	if (data[IFLA_GTP_LOCAL6]) +		return -EAFNOSUPPORT; +#endif +  	gtp = netdev_priv(dev);  	if (!data[IFLA_GTP_PDP_HASHSIZE]) { @@ -1066,13 +1499,24 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,  	if (err < 0)  		return err; -	if (data[IFLA_GTP_CREATE_SOCKETS]) -		err = gtp_create_sockets(gtp, data); -	else +	if (data[IFLA_GTP_CREATE_SOCKETS]) { +		if (data[IFLA_GTP_LOCAL6]) +			err = gtp_create_sockets(gtp, data[IFLA_GTP_LOCAL6], AF_INET6); +		else +			err = gtp_create_sockets(gtp, data[IFLA_GTP_LOCAL], AF_INET); +	} else {  		err = gtp_encap_enable(gtp, data); +	} +  	if (err < 0)  		goto out_hashtable; +	if ((gtp->sk0 && gtp->sk0->sk_family == AF_INET6) || +	    (gtp->sk1u && gtp->sk1u->sk_family == AF_INET6)) { +		dev->mtu = ETH_DATA_LEN - GTP_IPV6_MAXLEN; +		dev->needed_headroom = LL_MAX_HEADER + GTP_IPV6_MAXLEN; +	} +  	err = register_netdevice(dev);  	if (err < 0) {  		netdev_dbg(dev, "failed to register new netdev %d\n", err); @@ -1117,6 +1561,8 @@ static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {  	[IFLA_GTP_ROLE]			= { .type = NLA_U32 },  	[IFLA_GTP_CREATE_SOCKETS]	= { .type = NLA_U8 },  	[IFLA_GTP_RESTART_COUNT]	= { .type = NLA_U8 }, +	[IFLA_GTP_LOCAL]		= { .type = NLA_U32 }, +	[IFLA_GTP_LOCAL6]		= { .len = sizeof(struct in6_addr) },  };  static int gtp_validate(struct nlattr *tb[], struct nlattr *data[], @@ -1216,6 +1662,12 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,  		goto out_sock;  	} +	if (sk->sk_family == AF_INET6 && +	    !sk->sk_ipv6only) { +		sk = ERR_PTR(-EADDRNOTAVAIL); +		goto out_sock; +	} +  	lock_sock(sk);  	if (sk->sk_user_data) {  		sk = ERR_PTR(-EBUSY); @@ -1267,6 +1719,13 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])  	gtp->sk0 = sk0;  	gtp->sk1u = sk1u; +	if (sk0 && sk1u && +	    sk0->sk_family != sk1u->sk_family) { +		gtp_encap_disable_sock(sk0); +		gtp_encap_disable_sock(sk1u); +		return -EINVAL; +	} +  	return 0;  } @@ -1296,14 +1755,9 @@ static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[])  	return gtp;  } -static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info) +static void gtp_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)  {  	pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]); -	pctx->af = AF_INET; -	pctx->peer_addr_ip4.s_addr = -		nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]); -	pctx->ms_addr_ip4.s_addr = -		nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);  	switch (pctx->gtp_version) {  	case GTP_V0: @@ -1323,29 +1777,102 @@ static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)  	}  } +static void ip_pdp_peer_fill(struct pdp_ctx *pctx, struct genl_info *info) +{ +	if (info->attrs[GTPA_PEER_ADDRESS]) { +		pctx->peer.addr.s_addr = +			nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]); +	} else if (info->attrs[GTPA_PEER_ADDR6]) { +		pctx->peer.addr6 = nla_get_in6_addr(info->attrs[GTPA_PEER_ADDR6]); +	} +} + +static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info) +{ +	ip_pdp_peer_fill(pctx, info); +	pctx->ms.addr.s_addr = +		nla_get_be32(info->attrs[GTPA_MS_ADDRESS]); +	gtp_pdp_fill(pctx, info); +} + +static bool ipv6_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info) +{ +	ip_pdp_peer_fill(pctx, info); +	pctx->ms.addr6 = nla_get_in6_addr(info->attrs[GTPA_MS_ADDR6]); +	if (pctx->ms.addr6.s6_addr32[2] || +	    pctx->ms.addr6.s6_addr32[3]) +		return false; + +	gtp_pdp_fill(pctx, info); + +	return true; +} +  
static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,  				   struct genl_info *info)  {  	struct pdp_ctx *pctx, *pctx_tid = NULL;  	struct net_device *dev = gtp->dev;  	u32 hash_ms, hash_tid = 0; +	struct in6_addr ms_addr6;  	unsigned int version;  	bool found = false;  	__be32 ms_addr; +	int family; -	ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]); -	hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;  	version = nla_get_u32(info->attrs[GTPA_VERSION]); -	pctx = ipv4_pdp_find(gtp, ms_addr); +	if (info->attrs[GTPA_FAMILY]) +		family = nla_get_u8(info->attrs[GTPA_FAMILY]); +	else +		family = AF_INET; + +#if !IS_ENABLED(CONFIG_IPV6) +	if (family == AF_INET6) +		return ERR_PTR(-EAFNOSUPPORT); +#endif +	if (!info->attrs[GTPA_PEER_ADDRESS] && +	    !info->attrs[GTPA_PEER_ADDR6]) +		return ERR_PTR(-EINVAL); + +	if ((info->attrs[GTPA_PEER_ADDRESS] && +	     sk->sk_family == AF_INET6) || +	    (info->attrs[GTPA_PEER_ADDR6] && +	     sk->sk_family == AF_INET)) +		return ERR_PTR(-EAFNOSUPPORT); + +	switch (family) { +	case AF_INET: +		if (!info->attrs[GTPA_MS_ADDRESS] || +		    info->attrs[GTPA_MS_ADDR6]) +			return ERR_PTR(-EINVAL); + +		ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]); +		hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size; +		pctx = ipv4_pdp_find(gtp, ms_addr); +		break; +	case AF_INET6: +		if (!info->attrs[GTPA_MS_ADDR6] || +		    info->attrs[GTPA_MS_ADDRESS]) +			return ERR_PTR(-EINVAL); + +		ms_addr6 = nla_get_in6_addr(info->attrs[GTPA_MS_ADDR6]); +		hash_ms = ipv6_hashfn(&ms_addr6) % gtp->hash_size; +		pctx = ipv6_pdp_find(gtp, &ms_addr6); +		break; +	default: +		return ERR_PTR(-EAFNOSUPPORT); +	}  	if (pctx)  		found = true;  	if (version == GTP_V0)  		pctx_tid = gtp0_pdp_find(gtp, -					 nla_get_u64(info->attrs[GTPA_TID])); +					 nla_get_u64(info->attrs[GTPA_TID]), +					 family);  	else if (version == GTP_V1)  		pctx_tid = gtp1_pdp_find(gtp, -					 nla_get_u32(info->attrs[GTPA_I_TEI])); +					 nla_get_u32(info->attrs[GTPA_I_TEI]), +					 family);  	if (pctx_tid)  		found = true; @@ -1360,7 +1887,15 @@ static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,  		if (!pctx)  			pctx = pctx_tid; -		ipv4_pdp_fill(pctx, info); +		switch (pctx->af) { +		case AF_INET: +			ipv4_pdp_fill(pctx, info); +			break; +		case AF_INET6: +			if (!ipv6_pdp_fill(pctx, info)) +				return ERR_PTR(-EADDRNOTAVAIL); +			break; +		}  		if (pctx->gtp_version == GTP_V0)  			netdev_dbg(dev, "GTPv0-U: update tunnel id = %llx (pdp %p)\n", @@ -1380,7 +1915,32 @@ static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,  	sock_hold(sk);  	pctx->sk = sk;  	pctx->dev = gtp->dev; -	ipv4_pdp_fill(pctx, info); +	pctx->af = family; + +	switch (pctx->af) { +	case AF_INET: +		if (!info->attrs[GTPA_MS_ADDRESS]) { +			sock_put(sk); +			kfree(pctx); +			return ERR_PTR(-EINVAL); +		} + +		ipv4_pdp_fill(pctx, info); +		break; +	case AF_INET6: +		if (!info->attrs[GTPA_MS_ADDR6]) { +			sock_put(sk); +			kfree(pctx); +			return ERR_PTR(-EINVAL); +		} + +		if (!ipv6_pdp_fill(pctx, info)) { +			sock_put(sk); +			kfree(pctx); +			return ERR_PTR(-EADDRNOTAVAIL); +		} +		break; +	}  	atomic_set(&pctx->tx_seq, 0);  	switch (pctx->gtp_version) { @@ -1403,13 +1963,13 @@ static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,  	switch (pctx->gtp_version) {  	case GTP_V0:  		netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx ssgn=%pI4 ms=%pI4 (pdp=%p)\n", -			   pctx->u.v0.tid, &pctx->peer_addr_ip4, -			   &pctx->ms_addr_ip4, pctx); +			   pctx->u.v0.tid, 
&pctx->peer.addr, +			   &pctx->ms.addr, pctx);  		break;  	case GTP_V1:  		netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x ssgn=%pI4 ms=%pI4 (pdp=%p)\n",  			   pctx->u.v1.i_tei, pctx->u.v1.o_tei, -			   &pctx->peer_addr_ip4, &pctx->ms_addr_ip4, pctx); +			   &pctx->peer.addr, &pctx->ms.addr, pctx);  		break;  	} @@ -1442,9 +2002,7 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)  	int err;  	if (!info->attrs[GTPA_VERSION] || -	    !info->attrs[GTPA_LINK] || -	    !info->attrs[GTPA_PEER_ADDRESS] || -	    !info->attrs[GTPA_MS_ADDRESS]) +	    !info->attrs[GTPA_LINK])  		return -EINVAL;  	version = nla_get_u32(info->attrs[GTPA_VERSION]); @@ -1502,6 +2060,12 @@ static struct pdp_ctx *gtp_find_pdp_by_link(struct net *net,  					    struct nlattr *nla[])  {  	struct gtp_dev *gtp; +	int family; + +	if (nla[GTPA_FAMILY]) +		family = nla_get_u8(nla[GTPA_FAMILY]); +	else +		family = AF_INET;  	gtp = gtp_find_dev(net, nla);  	if (!gtp) @@ -1510,14 +2074,31 @@ static struct pdp_ctx *gtp_find_pdp_by_link(struct net *net,  	if (nla[GTPA_MS_ADDRESS]) {  		__be32 ip = nla_get_be32(nla[GTPA_MS_ADDRESS]); +		if (family != AF_INET) +			return ERR_PTR(-EINVAL); +  		return ipv4_pdp_find(gtp, ip); +	} else if (nla[GTPA_MS_ADDR6]) { +		struct in6_addr addr = nla_get_in6_addr(nla[GTPA_MS_ADDR6]); + +		if (family != AF_INET6) +			return ERR_PTR(-EINVAL); + +		if (addr.s6_addr32[2] || +		    addr.s6_addr32[3]) +			return ERR_PTR(-EADDRNOTAVAIL); + +		return ipv6_pdp_find(gtp, &addr);  	} else if (nla[GTPA_VERSION]) {  		u32 gtp_version = nla_get_u32(nla[GTPA_VERSION]); -		if (gtp_version == GTP_V0 && nla[GTPA_TID]) -			return gtp0_pdp_find(gtp, nla_get_u64(nla[GTPA_TID])); -		else if (gtp_version == GTP_V1 && nla[GTPA_I_TEI]) -			return gtp1_pdp_find(gtp, nla_get_u32(nla[GTPA_I_TEI])); +		if (gtp_version == GTP_V0 && nla[GTPA_TID]) { +			return gtp0_pdp_find(gtp, nla_get_u64(nla[GTPA_TID]), +					     family); +		} else if (gtp_version == GTP_V1 && nla[GTPA_I_TEI]) { +			return gtp1_pdp_find(gtp, nla_get_u32(nla[GTPA_I_TEI]), +					     family); +		}  	}  	return ERR_PTR(-EINVAL); @@ -1581,10 +2162,31 @@ static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,  	if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||  	    nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) || -	    nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) || -	    nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr)) +	    nla_put_u8(skb, GTPA_FAMILY, pctx->af))  		goto nla_put_failure; +	switch (pctx->af) { +	case AF_INET: +		if (nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms.addr.s_addr)) +			goto nla_put_failure; +		break; +	case AF_INET6: +		if (nla_put_in6_addr(skb, GTPA_MS_ADDR6, &pctx->ms.addr6)) +			goto nla_put_failure; +		break; +	} + +	switch (pctx->sk->sk_family) { +	case AF_INET: +		if (nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer.addr.s_addr)) +			goto nla_put_failure; +		break; +	case AF_INET6: +		if (nla_put_in6_addr(skb, GTPA_PEER_ADDR6, &pctx->peer.addr6)) +			goto nla_put_failure; +		break; +	} +  	switch (pctx->gtp_version) {  	case GTP_V0:  		if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) || @@ -1811,6 +2413,9 @@ static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {  	[GTPA_NET_NS_FD]	= { .type = NLA_U32, },  	[GTPA_I_TEI]		= { .type = NLA_U32, },  	[GTPA_O_TEI]		= { .type = NLA_U32, }, +	[GTPA_PEER_ADDR6]	= { .len = sizeof(struct in6_addr), }, +	[GTPA_MS_ADDR6]		= { .len = sizeof(struct in6_addr), }, +	[GTPA_FAMILY]		= { 
.type = NLA_U8, },  };  static const struct genl_small_ops gtp_genl_ops[] = {
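For reference, the extension-header walk added in gtp_parse_exthdrs() follows the TS 29.060 rule quoted in the patch: each extension header is length * 4 octets long, its last octet holds the next-extension-header type, and a next type of 0 ends the chain. A minimal userspace sketch of the same walk (buffer layout, names, and error handling are illustrative assumptions, not kernel code):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Returns the offset just past the last extension header, or -1 on error,
 * walking the chain the same way gtp_parse_exthdrs() does in the patch.
 */
static int walk_gtp_exthdrs(const uint8_t *pkt, size_t pktlen, size_t offset)
{
	uint8_t next;

	do {
		if (offset >= pktlen || pkt[offset] == 0)
			return -1;		/* truncated or zero-length header */
		offset += pkt[offset] * 4;	/* length field is in 4-octet units */
		if (offset > pktlen)
			return -1;
		next = pkt[offset - 1];		/* next-type is the last octet */
	} while (next != 0);

	return (int)offset;
}

int main(void)
{
	/* One 4-octet extension header: len=1, two payload bytes, next-type=0. */
	const uint8_t ext[] = { 0x01, 0xaa, 0xbb, 0x00 };

	printf("payload starts at offset %d\n",
	       walk_gtp_exthdrs(ext, sizeof(ext), 0));
	return 0;
}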