Diffstat (limited to 'net/tipc/link.c')
-rw-r--r--	net/tipc/link.c	109
1 file changed, 60 insertions, 49 deletions
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 999eab592de8..24d4d10756d3 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -44,6 +44,7 @@
 #include "netlink.h"
 #include "monitor.h"
 #include "trace.h"
+#include "crypto.h"
 
 #include <linux/pkt_sched.h>
 
@@ -397,6 +398,15 @@ int tipc_link_mtu(struct tipc_link *l)
 	return l->mtu;
 }
 
+int tipc_link_mss(struct tipc_link *l)
+{
+#ifdef CONFIG_TIPC_CRYPTO
+	return l->mtu - INT_H_SIZE - EMSG_OVERHEAD;
+#else
+	return l->mtu - INT_H_SIZE;
+#endif
+}
+
 u16 tipc_link_rcv_nxt(struct tipc_link *l)
 {
 	return l->rcv_nxt;
@@ -540,7 +550,7 @@ bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
 
 	/* Disable replicast if even a single peer doesn't support it */
 	if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
-		tipc_bcast_disable_rcast(net);
+		tipc_bcast_toggle_rcast(net, false);
 
 	return true;
 }
@@ -940,16 +950,18 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
 		   struct sk_buff_head *xmitq)
 {
 	struct tipc_msg *hdr = buf_msg(skb_peek(list));
-	unsigned int maxwin = l->window;
-	int imp = msg_importance(hdr);
-	unsigned int mtu = l->mtu;
+	struct sk_buff_head *backlogq = &l->backlogq;
+	struct sk_buff_head *transmq = &l->transmq;
+	struct sk_buff *skb, *_skb;
+	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
 	u16 ack = l->rcv_nxt - 1;
 	u16 seqno = l->snd_nxt;
-	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
-	struct sk_buff_head *transmq = &l->transmq;
-	struct sk_buff_head *backlogq = &l->backlogq;
-	struct sk_buff *skb, *_skb, **tskb;
 	int pkt_cnt = skb_queue_len(list);
+	int imp = msg_importance(hdr);
+	unsigned int mss = tipc_link_mss(l);
+	unsigned int maxwin = l->window;
+	unsigned int mtu = l->mtu;
+	bool new_bundle;
 	int rc = 0;
 
 	if (unlikely(msg_size(hdr) > mtu)) {
@@ -975,20 +987,18 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
 	}
 
 	/* Prepare each packet for sending, and add to relevant queue: */
-	while (skb_queue_len(list)) {
-		skb = skb_peek(list);
-		hdr = buf_msg(skb);
-		msg_set_seqno(hdr, seqno);
-		msg_set_ack(hdr, ack);
-		msg_set_bcast_ack(hdr, bc_ack);
-
+	while ((skb = __skb_dequeue(list))) {
 		if (likely(skb_queue_len(transmq) < maxwin)) {
+			hdr = buf_msg(skb);
+			msg_set_seqno(hdr, seqno);
+			msg_set_ack(hdr, ack);
+			msg_set_bcast_ack(hdr, bc_ack);
 			_skb = skb_clone(skb, GFP_ATOMIC);
 			if (!_skb) {
+				kfree_skb(skb);
 				__skb_queue_purge(list);
 				return -ENOBUFS;
 			}
-			__skb_dequeue(list);
 			__skb_queue_tail(transmq, skb);
 			/* next retransmit attempt */
 			if (link_is_bc_sndlink(l))
@@ -1000,22 +1010,25 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
 			seqno++;
 			continue;
 		}
-		tskb = &l->backlog[imp].target_bskb;
-		if (tipc_msg_bundle(*tskb, hdr, mtu)) {
-			kfree_skb(__skb_dequeue(list));
-			l->stats.sent_bundled++;
-			continue;
-		}
-		if (tipc_msg_make_bundle(tskb, hdr, mtu, l->addr)) {
-			kfree_skb(__skb_dequeue(list));
-			__skb_queue_tail(backlogq, *tskb);
-			l->backlog[imp].len++;
-			l->stats.sent_bundled++;
-			l->stats.sent_bundles++;
+		if (tipc_msg_try_bundle(l->backlog[imp].target_bskb, &skb,
+					mss, l->addr, &new_bundle)) {
+			if (skb) {
+				/* Keep a ref. to the skb for next try */
+				l->backlog[imp].target_bskb = skb;
+				l->backlog[imp].len++;
+				__skb_queue_tail(backlogq, skb);
+			} else {
+				if (new_bundle) {
+					l->stats.sent_bundles++;
+					l->stats.sent_bundled++;
+				}
+				l->stats.sent_bundled++;
+			}
 			continue;
 		}
 		l->backlog[imp].target_bskb = NULL;
-		l->backlog[imp].len += skb_queue_len(list);
+		l->backlog[imp].len += (1 + skb_queue_len(list));
+		__skb_queue_tail(backlogq, skb);
 		skb_queue_splice_tail_init(list, backlogq);
 	}
 	l->snd_nxt = seqno;
@@ -1084,7 +1097,7 @@ static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r,
 		return false;
 
 	if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp +
-			msecs_to_jiffies(r->tolerance)))
+			msecs_to_jiffies(r->tolerance * 10)))
 		return false;
 
 	hdr = buf_msg(skb);
@@ -1151,7 +1164,7 @@ static int tipc_link_bc_retrans(struct tipc_link *l, struct tipc_link *r,
 		if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
 			continue;
 		TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
-		_skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE, GFP_ATOMIC);
+		_skb = pskb_copy(skb, GFP_ATOMIC);
 		if (!_skb)
 			return 0;
 		hdr = buf_msg(_skb);
@@ -1427,8 +1440,7 @@ next_gap_ack:
 			if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
 				continue;
 			TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
-			_skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE,
-					   GFP_ATOMIC);
+			_skb = pskb_copy(skb, GFP_ATOMIC);
 			if (!_skb)
 				continue;
 			hdr = buf_msg(_skb);
@@ -1728,21 +1740,6 @@ void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
 		return;
 
 	__skb_queue_head_init(&tnlq);
-	__skb_queue_head_init(&tmpxq);
-	__skb_queue_head_init(&frags);
-
-	/* At least one packet required for safe algorithm => add dummy */
-	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
-			      BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
-			      0, 0, TIPC_ERR_NO_PORT);
-	if (!skb) {
-		pr_warn("%sunable to create tunnel packet\n", link_co_err);
-		return;
-	}
-	__skb_queue_tail(&tnlq, skb);
-	tipc_link_xmit(l, &tnlq, &tmpxq);
-	__skb_queue_purge(&tmpxq);
-
 	/* Link Synching:
 	 * From now on, send only one single ("dummy") SYNCH message
 	 * to peer. The SYNCH message does not contain any data, just
@@ -1768,6 +1765,20 @@ void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
 		return;
 	}
 
+	__skb_queue_head_init(&tmpxq);
+	__skb_queue_head_init(&frags);
+	/* At least one packet required for safe algorithm => add dummy */
+	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
+			      BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
+			      0, 0, TIPC_ERR_NO_PORT);
+	if (!skb) {
+		pr_warn("%sunable to create tunnel packet\n", link_co_err);
+		return;
+	}
+	__skb_queue_tail(&tnlq, skb);
+	tipc_link_xmit(l, &tnlq, &tmpxq);
+	__skb_queue_purge(&tmpxq);
+
 	/* Initialize reusable tunnel packet header */
 	tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
 		      mtyp, INT_H_SIZE, l->addr);
@@ -1873,7 +1884,7 @@ void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl,
 
 	tipc_link_create_dummy_tnl_msg(tnl, xmitq);
 
-	/* This failover link enpoint was never established before,
+	/* This failover link endpoint was never established before,
 	 * so it has not received anything from peer.
 	 * Otherwise, it must be a normal failover situation or the
 	 * node has entered SELF_DOWN_PEER_LEAVING and both peer nodes