Diffstat (limited to 'net/tipc')
| -rw-r--r-- | net/tipc/bearer.c     | 12 |
| -rw-r--r-- | net/tipc/group.c      |  1 |
| -rw-r--r-- | net/tipc/link.c       | 69 |
| -rw-r--r-- | net/tipc/link.h       |  3 |
| -rw-r--r-- | net/tipc/name_distr.c |  4 |
| -rw-r--r-- | net/tipc/node.c       | 14 |
| -rw-r--r-- | net/tipc/socket.c     | 18 |
7 files changed, 104 insertions, 17 deletions
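A note on the largest hunk below: tipc_link_reset() in net/tipc/link.c stops splicing the wakeup queue straight into the input queue and instead drains it through a local list, taking each queue's own lock in turn, so the woken-up buffers are delivered rather than raced over. The following is a minimal userspace sketch of that "detach under one lock, append under the other" pattern, using pthread mutexes and a singly linked list as stand-ins for the kernel's locked sk_buff queues; the names queue, queue_detach_locked and queue_append_locked are illustrative only, not kernel APIs.

#include <pthread.h>
#include <stddef.h>

struct node { struct node *next; };

struct queue {
	pthread_mutex_t lock;
	struct node *head, *tail;
};

/* Detach all nodes from q while holding its lock; return them as a list. */
static struct node *queue_detach_locked(struct queue *q)
{
	struct node *list;

	pthread_mutex_lock(&q->lock);
	list = q->head;
	q->head = q->tail = NULL;
	pthread_mutex_unlock(&q->lock);
	return list;
}

/* Append a previously detached list to q under q's own lock. */
static void queue_append_locked(struct queue *q, struct node *list)
{
	struct node *last = list;

	if (!list)
		return;
	while (last->next)
		last = last->next;

	pthread_mutex_lock(&q->lock);
	if (q->tail)
		q->tail->next = list;
	else
		q->head = list;
	q->tail = last;
	pthread_mutex_unlock(&q->lock);
}

/* Mirror of the reset path: move wakeupq's contents to inputq without
 * ever holding both locks at the same time.
 */
static void move_wakeupq_to_inputq(struct queue *wakeupq, struct queue *inputq)
{
	queue_append_locked(inputq, queue_detach_locked(wakeupq));
}

Because the two locks are never held together, no lock ordering between the queues has to be defined, which is the same property the patched tipc_link_reset() relies on.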
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 418f03d0be90..645c16052052 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -609,16 +609,18 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
 
 	switch (evt) {
 	case NETDEV_CHANGE:
-		if (netif_carrier_ok(dev))
+		if (netif_carrier_ok(dev) && netif_oper_up(dev)) {
+			test_and_set_bit_lock(0, &b->up);
 			break;
-		/* else: fall through */
-	case NETDEV_UP:
-		test_and_set_bit_lock(0, &b->up);
-		break;
+		}
+		/* fall through */
 	case NETDEV_GOING_DOWN:
 		clear_bit_unlock(0, &b->up);
 		tipc_reset_bearer(net, b);
 		break;
+	case NETDEV_UP:
+		test_and_set_bit_lock(0, &b->up);
+		break;
 	case NETDEV_CHANGEMTU:
 		if (tipc_mtu_bad(dev, 0)) {
 			bearer_disable(net, b);
diff --git a/net/tipc/group.c b/net/tipc/group.c
index e82f13cb2dc5..06fee142f09f 100644
--- a/net/tipc/group.c
+++ b/net/tipc/group.c
@@ -666,6 +666,7 @@ static void tipc_group_create_event(struct tipc_group *grp,
 	struct sk_buff *skb;
 	struct tipc_msg *hdr;
 
+	memset(&evt, 0, sizeof(evt));
 	evt.event = event;
 	evt.found_lower = m->instance;
 	evt.found_upper = m->instance;
diff --git a/net/tipc/link.c b/net/tipc/link.c
index b1f0bee54eac..201c3b5bc96b 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -410,6 +410,11 @@ char *tipc_link_name(struct tipc_link *l)
 	return l->name;
 }
 
+u32 tipc_link_state(struct tipc_link *l)
+{
+	return l->state;
+}
+
 /**
  * tipc_link_create - create a new link
  * @n: pointer to associated node
@@ -472,6 +477,8 @@ bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
 	l->in_session = false;
 	l->bearer_id = bearer_id;
 	l->tolerance = tolerance;
+	if (bc_rcvlink)
+		bc_rcvlink->tolerance = tolerance;
 	l->net_plane = net_plane;
 	l->advertised_mtu = mtu;
 	l->mtu = mtu;
@@ -838,12 +845,24 @@ static void link_prepare_wakeup(struct tipc_link *l)
 
 void tipc_link_reset(struct tipc_link *l)
 {
+	struct sk_buff_head list;
+
+	__skb_queue_head_init(&list);
+
 	l->in_session = false;
 	l->session++;
 	l->mtu = l->advertised_mtu;
+
+	spin_lock_bh(&l->wakeupq.lock);
+	skb_queue_splice_init(&l->wakeupq, &list);
+	spin_unlock_bh(&l->wakeupq.lock);
+
+	spin_lock_bh(&l->inputq->lock);
+	skb_queue_splice_init(&list, l->inputq);
+	spin_unlock_bh(&l->inputq->lock);
+
 	__skb_queue_purge(&l->transmq);
 	__skb_queue_purge(&l->deferdq);
-	skb_queue_splice_init(&l->wakeupq, l->inputq);
 	__skb_queue_purge(&l->backlogq);
 	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
 	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
@@ -1021,7 +1040,8 @@ static int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r,
 	/* Detect repeated retransmit failures on same packet */
 	if (r->last_retransm != buf_seqno(skb)) {
 		r->last_retransm = buf_seqno(skb);
-		r->stale_limit = jiffies + msecs_to_jiffies(l->tolerance);
+		r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance);
+		r->stale_cnt = 0;
 	} else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) {
 		link_retransmit_failure(l, skb);
 		if (link_is_bc_sndlink(l))
@@ -1380,6 +1400,36 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
 	__skb_queue_tail(xmitq, skb);
 }
 
+void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
+				    struct sk_buff_head *xmitq)
+{
+	u32 onode = tipc_own_addr(l->net);
+	struct tipc_msg *hdr, *ihdr;
+	struct sk_buff_head tnlq;
+	struct sk_buff *skb;
+	u32 dnode = l->addr;
+
+	skb_queue_head_init(&tnlq);
+	skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
+			      INT_H_SIZE, BASIC_H_SIZE,
+			      dnode, onode, 0, 0, 0);
+	if (!skb) {
+		pr_warn("%sunable to create tunnel packet\n", link_co_err);
+		return;
+	}
+
+	hdr = buf_msg(skb);
+	msg_set_msgcnt(hdr, 1);
+	msg_set_bearer_id(hdr, l->peer_bearer_id);
+
+	ihdr = (struct tipc_msg *)msg_data(hdr);
+	tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
+		      BASIC_H_SIZE, dnode);
+	msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
+	__skb_queue_tail(&tnlq, skb);
+	tipc_link_xmit(l, &tnlq, xmitq);
+}
+
 /* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
  * with contents of the link's transmit and backlog queues.
  */
@@ -1476,6 +1526,9 @@ bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
 			return false;
 		if (session != curr_session)
 			return false;
+		/* Extra sanity check */
+		if (!link_is_up(l) && msg_ack(hdr))
+			return false;
 		if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
 			return true;
 		/* Accept only STATE with new sequence number */
@@ -1533,9 +1586,10 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
 		strncpy(if_name, data, TIPC_MAX_IF_NAME);
 
 		/* Update own tolerance if peer indicates a non-zero value */
-		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
+		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
 			l->tolerance = peers_tol;
-
+			l->bc_rcvlink->tolerance = peers_tol;
+		}
 		/* Update own priority if peer's priority is higher */
 		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
 			l->priority = peers_prio;
@@ -1561,9 +1615,10 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
 		l->rcv_nxt_state = msg_seqno(hdr) + 1;
 
 		/* Update own tolerance if peer indicates a non-zero value */
-		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
+		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
 			l->tolerance = peers_tol;
-
+			l->bc_rcvlink->tolerance = peers_tol;
+		}
 		/* Update own prio if peer indicates a different value */
 		if ((peers_prio != l->priority) &&
 		    in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
@@ -2180,6 +2235,8 @@ void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
 			     struct sk_buff_head *xmitq)
 {
 	l->tolerance = tol;
+	if (l->bc_rcvlink)
+		l->bc_rcvlink->tolerance = tol;
 	if (link_is_up(l))
 		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
 }
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 7bc494a33fdf..90488c538a4e 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -88,6 +88,8 @@ bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
 			 struct tipc_link **link);
 void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
 			   int mtyp, struct sk_buff_head *xmitq);
+void tipc_link_create_dummy_tnl_msg(struct tipc_link *tnl,
+				    struct sk_buff_head *xmitq);
 void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq);
 int tipc_link_fsm_evt(struct tipc_link *l, int evt);
 bool tipc_link_is_up(struct tipc_link *l);
@@ -107,6 +109,7 @@ u16 tipc_link_rcv_nxt(struct tipc_link *l);
 u16 tipc_link_acked(struct tipc_link *l);
 u32 tipc_link_id(struct tipc_link *l);
 char *tipc_link_name(struct tipc_link *l);
+u32 tipc_link_state(struct tipc_link *l);
 char tipc_link_plane(struct tipc_link *l);
 int tipc_link_prio(struct tipc_link *l);
 int tipc_link_window(struct tipc_link *l);
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 51b4b96f89db..3cfeb9df64b0 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -115,7 +115,7 @@ struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ)
 	struct sk_buff *buf;
 	struct distr_item *item;
 
-	list_del(&publ->binding_node);
+	list_del_rcu(&publ->binding_node);
 
 	if (publ->scope == TIPC_NODE_SCOPE)
 		return NULL;
@@ -147,7 +147,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
 			ITEM_SIZE) * ITEM_SIZE;
 	u32 msg_rem = msg_dsz;
 
-	list_for_each_entry(publ, pls, binding_node) {
+	list_for_each_entry_rcu(publ, pls, binding_node) {
 		/* Prepare next buffer: */
 		if (!skb) {
 			skb = named_prepare_buf(net, PUBLICATION, msg_rem,
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 68014f1b6976..2afc4f8c37a7 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -111,6 +111,7 @@ struct tipc_node {
 	int action_flags;
 	struct list_head list;
 	int state;
+	bool failover_sent;
 	u16 sync_point;
 	int link_cnt;
 	u16 working_links;
@@ -680,6 +681,7 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
 		*slot0 = bearer_id;
 		*slot1 = bearer_id;
 		tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
+		n->failover_sent = false;
 		n->action_flags |= TIPC_NOTIFY_NODE_UP;
 		tipc_link_set_active(nl, true);
 		tipc_bcast_add_peer(n->net, nl, xmitq);
@@ -911,6 +913,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
 	bool reset = true;
 	char *if_name;
 	unsigned long intv;
+	u16 session;
 
 	*dupl_addr = false;
 	*respond = false;
@@ -997,9 +1000,10 @@ void tipc_node_check_dest(struct net *net, u32 addr,
 			goto exit;
 
 		if_name = strchr(b->name, ':') + 1;
+		get_random_bytes(&session, sizeof(u16));
 		if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
 				      b->net_plane, b->mtu, b->priority,
-				      b->window, mod(tipc_net(net)->random),
+				      b->window, session,
 				      tipc_own_addr(net), addr, peer_id,
 				      n->capabilities,
 				      tipc_bc_sndlink(n->net), n->bc_entry.link,
@@ -1615,6 +1619,14 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
 			tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
 							tipc_link_inputq(l));
 		}
+		/* If parallel link was already down, and this happened before
+		 * the tunnel link came up, FAILOVER was never sent. Ensure that
+		 * FAILOVER is sent to get peer out of NODE_FAILINGOVER state.
+		 */
+		if (n->state != NODE_FAILINGOVER && !n->failover_sent) {
+			tipc_link_create_dummy_tnl_msg(l, xmitq);
+			n->failover_sent = true;
+		}
 		/* If pkts arrive out of order, use lowest calculated syncpt */
 		if (less(syncpt, n->sync_point))
 			n->sync_point = syncpt;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 3f03ddd0e35b..49810fdff4c5 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1196,6 +1196,7 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
  * @skb: pointer to message buffer.
  */
 static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
+				   struct sk_buff_head *inputq,
 				   struct sk_buff_head *xmitq)
 {
 	struct tipc_msg *hdr = buf_msg(skb);
@@ -1213,7 +1214,16 @@ static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
 		tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
 				      tsk_peer_port(tsk));
 		sk->sk_state_change(sk);
-		goto exit;
+
+		/* State change is ignored if socket already awake,
+		 * - convert msg to abort msg and add to inqueue
+		 */
+		msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
+		msg_set_type(hdr, TIPC_CONN_MSG);
+		msg_set_size(hdr, BASIC_H_SIZE);
+		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
+		__skb_queue_tail(inputq, skb);
+		return;
 	}
 
 	tsk->probe_unacked = false;
@@ -1419,8 +1429,10 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
 	/* Handle implicit connection setup */
 	if (unlikely(dest)) {
 		rc = __tipc_sendmsg(sock, m, dlen);
-		if (dlen && (dlen == rc))
+		if (dlen && dlen == rc) {
+			tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
 			tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
+		}
 		return rc;
 	}
 
@@ -1934,7 +1946,7 @@ static void tipc_sk_proto_rcv(struct sock *sk,
 
 	switch (msg_user(hdr)) {
 	case CONN_MANAGER:
-		tipc_sk_conn_proto_rcv(tsk, skb, xmitq);
+		tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
 		return;
 	case SOCK_WAKEUP:
 		tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
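For reference, the tipc_link_retrans() hunk above bases the failure deadline on the receiving link's own tolerance (r->tolerance) and restarts the stale counter whenever a new packet becomes the oldest unacknowledged one. Below is a minimal userspace sketch of that criterion; rcv_link_sketch and retransmit_failed are illustrative stand-ins, and jiffies/time_after are replaced by time(NULL) purely to keep the example self-contained.

/* Build: cc -o retrans_sketch retrans_sketch.c */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for the few tipc_link fields the check uses. */
struct rcv_link_sketch {
	uint16_t last_retransm;	/* seqno of the packet currently seen as stale */
	uint32_t stale_cnt;	/* retransmit attempts for that same packet */
	time_t stale_limit;	/* deadline before declaring link failure */
	uint32_t tolerance_ms;	/* tolerance of the receiving link */
};

static bool retransmit_failed(struct rcv_link_sketch *r, uint16_t seqno)
{
	time_t now = time(NULL);

	if (r->last_retransm != seqno) {
		/* A new packet is now the oldest unacked one: restart both
		 * the counter and the deadline (the counter reset is what
		 * the patch adds).
		 */
		r->last_retransm = seqno;
		r->stale_limit = now + r->tolerance_ms / 1000;
		r->stale_cnt = 0;
		return false;
	}
	/* Same packet again: fail only after many retries AND a timeout. */
	return ++r->stale_cnt > 99 && now > r->stale_limit;
}

int main(void)
{
	struct rcv_link_sketch r = { .tolerance_ms = 1500 };

	printf("first retransmit of seqno 42 failed? %d\n",
	       retransmit_failed(&r, 42));
	return 0;
}

Requiring both a retry count and an elapsed tolerance interval means a short burst of retransmits cannot by itself tear the link down, while a genuinely stuck packet still trips the failure path.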