diff options
Diffstat (limited to 'net/tipc/socket.c')
| -rw-r--r-- | net/tipc/socket.c | 419 | 
1 files changed, 312 insertions, 107 deletions
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 51bddc236a15..4731cad99d1c 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -121,6 +121,14 @@ static const struct proto_ops msg_ops;  static struct proto tipc_proto;  static struct proto tipc_proto_kern; +static const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = { +	[TIPC_NLA_SOCK_UNSPEC]		= { .type = NLA_UNSPEC }, +	[TIPC_NLA_SOCK_ADDR]		= { .type = NLA_U32 }, +	[TIPC_NLA_SOCK_REF]		= { .type = NLA_U32 }, +	[TIPC_NLA_SOCK_CON]		= { .type = NLA_NESTED }, +	[TIPC_NLA_SOCK_HAS_PUBL]	= { .type = NLA_FLAG } +}; +  /*   * Revised TIPC socket locking policy:   * @@ -236,12 +244,12 @@ static void tsk_advance_rx_queue(struct sock *sk)   */  static void tsk_rej_rx_queue(struct sock *sk)  { -	struct sk_buff *buf; +	struct sk_buff *skb;  	u32 dnode; -	while ((buf = __skb_dequeue(&sk->sk_receive_queue))) { -		if (tipc_msg_reverse(buf, &dnode, TIPC_ERR_NO_PORT)) -			tipc_link_xmit(buf, dnode, 0); +	while ((skb = __skb_dequeue(&sk->sk_receive_queue))) { +		if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT)) +			tipc_link_xmit_skb(skb, dnode, 0);  	}  } @@ -454,7 +462,7 @@ static int tipc_release(struct socket *sock)  {  	struct sock *sk = sock->sk;  	struct tipc_sock *tsk; -	struct sk_buff *buf; +	struct sk_buff *skb;  	u32 dnode;  	/* @@ -473,11 +481,11 @@ static int tipc_release(struct socket *sock)  	 */  	dnode = tsk_peer_node(tsk);  	while (sock->state != SS_DISCONNECTING) { -		buf = __skb_dequeue(&sk->sk_receive_queue); -		if (buf == NULL) +		skb = __skb_dequeue(&sk->sk_receive_queue); +		if (skb == NULL)  			break; -		if (TIPC_SKB_CB(buf)->handle != NULL) -			kfree_skb(buf); +		if (TIPC_SKB_CB(skb)->handle != NULL) +			kfree_skb(skb);  		else {  			if ((sock->state == SS_CONNECTING) ||  			    (sock->state == SS_CONNECTED)) { @@ -485,8 +493,8 @@ static int tipc_release(struct socket *sock)  				tsk->connected = 0;  				tipc_node_remove_conn(dnode, tsk->ref);  			} -			if 
(tipc_msg_reverse(buf, &dnode, TIPC_ERR_NO_PORT)) -				tipc_link_xmit(buf, dnode, 0); +			if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT)) +				tipc_link_xmit_skb(skb, dnode, 0);  		}  	} @@ -494,12 +502,12 @@ static int tipc_release(struct socket *sock)  	tipc_sk_ref_discard(tsk->ref);  	k_cancel_timer(&tsk->timer);  	if (tsk->connected) { -		buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, +		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,  				      SHORT_H_SIZE, 0, dnode, tipc_own_addr,  				      tsk_peer_port(tsk),  				      tsk->ref, TIPC_ERR_NO_PORT); -		if (buf) -			tipc_link_xmit(buf, dnode, tsk->ref); +		if (skb) +			tipc_link_xmit_skb(skb, dnode, tsk->ref);  		tipc_node_remove_conn(dnode, tsk->ref);  	}  	k_term_timer(&tsk->timer); @@ -692,7 +700,7 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,   * tipc_sendmcast - send multicast message   * @sock: socket structure   * @seq: destination address - * @iov: message data to send + * @msg: message to send   * @dsz: total length of message data   * @timeo: timeout to wait for wakeup   * @@ -700,11 +708,11 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,   * Returns the number of bytes sent on success, or errno   */  static int tipc_sendmcast(struct  socket *sock, struct tipc_name_seq *seq, -			  struct iovec *iov, size_t dsz, long timeo) +			  struct msghdr *msg, size_t dsz, long timeo)  {  	struct sock *sk = sock->sk;  	struct tipc_msg *mhdr = &tipc_sk(sk)->phdr; -	struct sk_buff *buf; +	struct sk_buff_head head;  	uint mtu;  	int rc; @@ -719,12 +727,13 @@ static int tipc_sendmcast(struct  socket *sock, struct tipc_name_seq *seq,  new_mtu:  	mtu = tipc_bclink_get_mtu(); -	rc = tipc_msg_build(mhdr, iov, 0, dsz, mtu, &buf); +	__skb_queue_head_init(&head); +	rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &head);  	if (unlikely(rc < 0))  		return rc;  	do { -		rc = tipc_bclink_xmit(buf); +		rc = tipc_bclink_xmit(&head);  		if 
(likely(rc >= 0)) {  			rc = dsz;  			break; @@ -736,7 +745,7 @@ new_mtu:  		tipc_sk(sk)->link_cong = 1;  		rc = tipc_wait_for_sndmsg(sock, &timeo);  		if (rc) -			kfree_skb_list(buf); +			__skb_queue_purge(&head);  	} while (!rc);  	return rc;  } @@ -818,39 +827,6 @@ exit:  	return TIPC_OK;  } -/** - * dest_name_check - verify user is permitted to send to specified port name - * @dest: destination address - * @m: descriptor for message to be sent - * - * Prevents restricted configuration commands from being issued by - * unauthorized users. - * - * Returns 0 if permission is granted, otherwise errno - */ -static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m) -{ -	struct tipc_cfg_msg_hdr hdr; - -	if (unlikely(dest->addrtype == TIPC_ADDR_ID)) -		return 0; -	if (likely(dest->addr.name.name.type >= TIPC_RESERVED_TYPES)) -		return 0; -	if (likely(dest->addr.name.name.type == TIPC_TOP_SRV)) -		return 0; -	if (likely(dest->addr.name.name.type != TIPC_CFG_SRV)) -		return -EACCES; - -	if (!m->msg_iovlen || (m->msg_iov[0].iov_len < sizeof(hdr))) -		return -EMSGSIZE; -	if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr))) -		return -EFAULT; -	if ((ntohs(hdr.tcm_type) & 0xC000) && (!capable(CAP_NET_ADMIN))) -		return -EACCES; - -	return 0; -} -  static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)  {  	struct sock *sk = sock->sk; @@ -897,13 +873,13 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,  	struct sock *sk = sock->sk;  	struct tipc_sock *tsk = tipc_sk(sk);  	struct tipc_msg *mhdr = &tsk->phdr; -	struct iovec *iov = m->msg_iov;  	u32 dnode, dport; -	struct sk_buff *buf; +	struct sk_buff_head head; +	struct sk_buff *skb;  	struct tipc_name_seq *seq = &dest->addr.nameseq;  	u32 mtu;  	long timeo; -	int rc = -EINVAL; +	int rc;  	if (unlikely(!dest))  		return -EDESTADDRREQ; @@ -936,14 +912,11 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,  			tsk->conn_instance = 
dest->addr.name.name.instance;  		}  	} -	rc = dest_name_check(dest, m); -	if (rc) -		goto exit;  	timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);  	if (dest->addrtype == TIPC_ADDR_MCAST) { -		rc = tipc_sendmcast(sock, seq, iov, dsz, timeo); +		rc = tipc_sendmcast(sock, seq, m, dsz, timeo);  		goto exit;  	} else if (dest->addrtype == TIPC_ADDR_NAME) {  		u32 type = dest->addr.name.name.type; @@ -974,13 +947,15 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,  new_mtu:  	mtu = tipc_node_get_mtu(dnode, tsk->ref); -	rc = tipc_msg_build(mhdr, iov, 0, dsz, mtu, &buf); +	__skb_queue_head_init(&head); +	rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &head);  	if (rc < 0)  		goto exit;  	do { -		TIPC_SKB_CB(buf)->wakeup_pending = tsk->link_cong; -		rc = tipc_link_xmit(buf, dnode, tsk->ref); +		skb = skb_peek(&head); +		TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong; +		rc = tipc_link_xmit(&head, dnode, tsk->ref);  		if (likely(rc >= 0)) {  			if (sock->state != SS_READY)  				sock->state = SS_CONNECTING; @@ -994,7 +969,7 @@ new_mtu:  		tsk->link_cong = 1;  		rc = tipc_wait_for_sndmsg(sock, &timeo);  		if (rc) -			kfree_skb_list(buf); +			__skb_queue_purge(&head);  	} while (!rc);  exit:  	if (iocb) @@ -1051,7 +1026,7 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,  	struct sock *sk = sock->sk;  	struct tipc_sock *tsk = tipc_sk(sk);  	struct tipc_msg *mhdr = &tsk->phdr; -	struct sk_buff *buf; +	struct sk_buff_head head;  	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);  	u32 ref = tsk->ref;  	int rc = -EINVAL; @@ -1086,12 +1061,13 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,  next:  	mtu = tsk->max_pkt;  	send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE); -	rc = tipc_msg_build(mhdr, m->msg_iov, sent, send, mtu, &buf); +	__skb_queue_head_init(&head); +	rc = tipc_msg_build(mhdr, m, sent, send, mtu, &head);  	if (unlikely(rc < 0))  		goto exit;  	do {  		if 
(likely(!tsk_conn_cong(tsk))) { -			rc = tipc_link_xmit(buf, dnode, ref); +			rc = tipc_link_xmit(&head, dnode, ref);  			if (likely(!rc)) {  				tsk->sent_unacked++;  				sent += send; @@ -1109,7 +1085,7 @@ next:  		}  		rc = tipc_wait_for_sndpkt(sock, &timeo);  		if (rc) -			kfree_skb_list(buf); +			__skb_queue_purge(&head);  	} while (!rc);  exit:  	if (iocb) @@ -1254,20 +1230,20 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,  static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)  { -	struct sk_buff *buf = NULL; +	struct sk_buff *skb = NULL;  	struct tipc_msg *msg;  	u32 peer_port = tsk_peer_port(tsk);  	u32 dnode = tsk_peer_node(tsk);  	if (!tsk->connected)  		return; -	buf = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, dnode, +	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, dnode,  			      tipc_own_addr, peer_port, tsk->ref, TIPC_OK); -	if (!buf) +	if (!skb)  		return; -	msg = buf_msg(buf); +	msg = buf_msg(skb);  	msg_set_msgcnt(msg, ack); -	tipc_link_xmit(buf, dnode, msg_link_selector(msg)); +	tipc_link_xmit_skb(skb, dnode, msg_link_selector(msg));  }  static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) @@ -1372,8 +1348,7 @@ restart:  			sz = buf_len;  			m->msg_flags |= MSG_TRUNC;  		} -		res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg), -					      m->msg_iov, sz); +		res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg), m, sz);  		if (res)  			goto exit;  		res = sz; @@ -1473,8 +1448,8 @@ restart:  		needed = (buf_len - sz_copied);  		sz_to_copy = (sz <= needed) ? 
sz : needed; -		res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg) + offset, -					      m->msg_iov, sz_to_copy); +		res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg) + offset, +					    m, sz_to_copy);  		if (res)  			goto exit; @@ -1556,7 +1531,7 @@ static void tipc_data_ready(struct sock *sk)   * @tsk: TIPC socket   * @msg: message   * - * Returns 0 (TIPC_OK) if everyting ok, -TIPC_ERR_NO_PORT otherwise + * Returns 0 (TIPC_OK) if everything ok, -TIPC_ERR_NO_PORT otherwise   */  static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf)  { @@ -1723,20 +1698,20 @@ static int filter_rcv(struct sock *sk, struct sk_buff *buf)  /**   * tipc_backlog_rcv - handle incoming message from backlog queue   * @sk: socket - * @buf: message + * @skb: message   *   * Caller must hold socket lock, but not port lock.   *   * Returns 0   */ -static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *buf) +static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)  {  	int rc;  	u32 onode;  	struct tipc_sock *tsk = tipc_sk(sk); -	uint truesize = buf->truesize; +	uint truesize = skb->truesize; -	rc = filter_rcv(sk, buf); +	rc = filter_rcv(sk, skb);  	if (likely(!rc)) {  		if (atomic_read(&tsk->dupl_rcvcnt) < TIPC_CONN_OVERLOAD_LIMIT) @@ -1744,25 +1719,25 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *buf)  		return 0;  	} -	if ((rc < 0) && !tipc_msg_reverse(buf, &onode, -rc)) +	if ((rc < 0) && !tipc_msg_reverse(skb, &onode, -rc))  		return 0; -	tipc_link_xmit(buf, onode, 0); +	tipc_link_xmit_skb(skb, onode, 0);  	return 0;  }  /**   * tipc_sk_rcv - handle incoming message - * @buf: buffer containing arriving message + * @skb: buffer containing arriving message   * Consumes buffer   * Returns 0 if success, or errno: -EHOSTUNREACH   */ -int tipc_sk_rcv(struct sk_buff *buf) +int tipc_sk_rcv(struct sk_buff *skb)  {  	struct tipc_sock *tsk;  	struct sock *sk; -	u32 dport = msg_destport(buf_msg(buf)); +	u32 dport = msg_destport(buf_msg(skb));  	int rc = 
TIPC_OK;  	uint limit;  	u32 dnode; @@ -1770,7 +1745,7 @@ int tipc_sk_rcv(struct sk_buff *buf)  	/* Validate destination and message */  	tsk = tipc_sk_get(dport);  	if (unlikely(!tsk)) { -		rc = tipc_msg_eval(buf, &dnode); +		rc = tipc_msg_eval(skb, &dnode);  		goto exit;  	}  	sk = &tsk->sk; @@ -1779,12 +1754,12 @@ int tipc_sk_rcv(struct sk_buff *buf)  	spin_lock_bh(&sk->sk_lock.slock);  	if (!sock_owned_by_user(sk)) { -		rc = filter_rcv(sk, buf); +		rc = filter_rcv(sk, skb);  	} else {  		if (sk->sk_backlog.len == 0)  			atomic_set(&tsk->dupl_rcvcnt, 0); -		limit = rcvbuf_limit(sk, buf) + atomic_read(&tsk->dupl_rcvcnt); -		if (sk_add_backlog(sk, buf, limit)) +		limit = rcvbuf_limit(sk, skb) + atomic_read(&tsk->dupl_rcvcnt); +		if (sk_add_backlog(sk, skb, limit))  			rc = -TIPC_ERR_OVERLOAD;  	}  	spin_unlock_bh(&sk->sk_lock.slock); @@ -1792,10 +1767,10 @@ int tipc_sk_rcv(struct sk_buff *buf)  	if (likely(!rc))  		return 0;  exit: -	if ((rc < 0) && !tipc_msg_reverse(buf, &dnode, -rc)) +	if ((rc < 0) && !tipc_msg_reverse(skb, &dnode, -rc))  		return -EHOSTUNREACH; -	tipc_link_xmit(buf, dnode, 0); +	tipc_link_xmit_skb(skb, dnode, 0);  	return (rc < 0) ? 
-EHOSTUNREACH : 0;  } @@ -2053,7 +2028,7 @@ static int tipc_shutdown(struct socket *sock, int how)  {  	struct sock *sk = sock->sk;  	struct tipc_sock *tsk = tipc_sk(sk); -	struct sk_buff *buf; +	struct sk_buff *skb;  	u32 dnode;  	int res; @@ -2068,23 +2043,23 @@ static int tipc_shutdown(struct socket *sock, int how)  restart:  		/* Disconnect and send a 'FIN+' or 'FIN-' message to peer */ -		buf = __skb_dequeue(&sk->sk_receive_queue); -		if (buf) { -			if (TIPC_SKB_CB(buf)->handle != NULL) { -				kfree_skb(buf); +		skb = __skb_dequeue(&sk->sk_receive_queue); +		if (skb) { +			if (TIPC_SKB_CB(skb)->handle != NULL) { +				kfree_skb(skb);  				goto restart;  			} -			if (tipc_msg_reverse(buf, &dnode, TIPC_CONN_SHUTDOWN)) -				tipc_link_xmit(buf, dnode, tsk->ref); +			if (tipc_msg_reverse(skb, &dnode, TIPC_CONN_SHUTDOWN)) +				tipc_link_xmit_skb(skb, dnode, tsk->ref);  			tipc_node_remove_conn(dnode, tsk->ref);  		} else {  			dnode = tsk_peer_node(tsk); -			buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, +			skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,  					      TIPC_CONN_MSG, SHORT_H_SIZE,  					      0, dnode, tipc_own_addr,  					      tsk_peer_port(tsk),  					      tsk->ref, TIPC_CONN_SHUTDOWN); -			tipc_link_xmit(buf, dnode, tsk->ref); +			tipc_link_xmit_skb(skb, dnode, tsk->ref);  		}  		tsk->connected = 0;  		sock->state = SS_DISCONNECTING; @@ -2113,7 +2088,7 @@ static void tipc_sk_timeout(unsigned long ref)  {  	struct tipc_sock *tsk;  	struct sock *sk; -	struct sk_buff *buf = NULL; +	struct sk_buff *skb = NULL;  	u32 peer_port, peer_node;  	tsk = tipc_sk_get(ref); @@ -2131,20 +2106,20 @@ static void tipc_sk_timeout(unsigned long ref)  	if (tsk->probing_state == TIPC_CONN_PROBING) {  		/* Previous probe not answered -> self abort */ -		buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, +		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,  				      SHORT_H_SIZE, 0, tipc_own_addr,  				      peer_node, ref, peer_port,  				      
TIPC_ERR_NO_PORT);  	} else { -		buf = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, +		skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE,  				      0, peer_node, tipc_own_addr,  				      peer_port, ref, TIPC_OK);  		tsk->probing_state = TIPC_CONN_PROBING;  		k_start_timer(&tsk->timer, tsk->probing_interval);  	}  	bh_unlock_sock(sk); -	if (buf) -		tipc_link_xmit(buf, peer_node, ref); +	if (skb) +		tipc_link_xmit_skb(skb, peer_node, ref);  exit:  	tipc_sk_put(tsk);  } @@ -2802,3 +2777,233 @@ void tipc_socket_stop(void)  	sock_unregister(tipc_family_ops.family);  	proto_unregister(&tipc_proto);  } + +/* Caller should hold socket lock for the passed tipc socket. */ +static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk) +{ +	u32 peer_node; +	u32 peer_port; +	struct nlattr *nest; + +	peer_node = tsk_peer_node(tsk); +	peer_port = tsk_peer_port(tsk); + +	nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON); + +	if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node)) +		goto msg_full; +	if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port)) +		goto msg_full; + +	if (tsk->conn_type != 0) { +		if (nla_put_flag(skb, TIPC_NLA_CON_FLAG)) +			goto msg_full; +		if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type)) +			goto msg_full; +		if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance)) +			goto msg_full; +	} +	nla_nest_end(skb, nest); + +	return 0; + +msg_full: +	nla_nest_cancel(skb, nest); + +	return -EMSGSIZE; +} + +/* Caller should hold socket lock for the passed tipc socket. 
*/ +static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb, +			    struct tipc_sock *tsk) +{ +	int err; +	void *hdr; +	struct nlattr *attrs; + +	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, +			  &tipc_genl_v2_family, NLM_F_MULTI, TIPC_NL_SOCK_GET); +	if (!hdr) +		goto msg_cancel; + +	attrs = nla_nest_start(skb, TIPC_NLA_SOCK); +	if (!attrs) +		goto genlmsg_cancel; +	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->ref)) +		goto attr_msg_cancel; +	if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr)) +		goto attr_msg_cancel; + +	if (tsk->connected) { +		err = __tipc_nl_add_sk_con(skb, tsk); +		if (err) +			goto attr_msg_cancel; +	} else if (!list_empty(&tsk->publications)) { +		if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL)) +			goto attr_msg_cancel; +	} +	nla_nest_end(skb, attrs); +	genlmsg_end(skb, hdr); + +	return 0; + +attr_msg_cancel: +	nla_nest_cancel(skb, attrs); +genlmsg_cancel: +	genlmsg_cancel(skb, hdr); +msg_cancel: +	return -EMSGSIZE; +} + +int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb) +{ +	int err; +	struct tipc_sock *tsk; +	u32 prev_ref = cb->args[0]; +	u32 ref = prev_ref; + +	tsk = tipc_sk_get_next(&ref); +	for (; tsk; tsk = tipc_sk_get_next(&ref)) { +		lock_sock(&tsk->sk); +		err = __tipc_nl_add_sk(skb, cb, tsk); +		release_sock(&tsk->sk); +		tipc_sk_put(tsk); +		if (err) +			break; + +		prev_ref = ref; +	} + +	cb->args[0] = prev_ref; + +	return skb->len; +} + +/* Caller should hold socket lock for the passed tipc socket. 
 */ +static int __tipc_nl_add_sk_publ(struct sk_buff *skb, +				 struct netlink_callback *cb, +				 struct publication *publ) +{ +	void *hdr; +	struct nlattr *attrs; + +	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, +			  &tipc_genl_v2_family, NLM_F_MULTI, TIPC_NL_PUBL_GET); +	if (!hdr) +		goto msg_cancel; + +	attrs = nla_nest_start(skb, TIPC_NLA_PUBL); +	if (!attrs) +		goto genlmsg_cancel; + +	if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key)) +		goto attr_msg_cancel; +	if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type)) +		goto attr_msg_cancel; +	if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower)) +		goto attr_msg_cancel; +	if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper)) +		goto attr_msg_cancel; + +	nla_nest_end(skb, attrs); +	genlmsg_end(skb, hdr); + +	return 0; + +attr_msg_cancel: +	nla_nest_cancel(skb, attrs); +genlmsg_cancel: +	genlmsg_cancel(skb, hdr); +msg_cancel: +	return -EMSGSIZE; +} + +/* Caller should hold socket lock for the passed tipc socket. */ +static int __tipc_nl_list_sk_publ(struct sk_buff *skb, +				  struct netlink_callback *cb, +				  struct tipc_sock *tsk, u32 *last_publ) +{ +	int err; +	struct publication *p; + +	if (*last_publ) { +		list_for_each_entry(p, &tsk->publications, pport_list) { +			if (p->key == *last_publ) +				break; +		} +		if (p->key != *last_publ) { +			/* We never set seq or call nl_dump_check_consistent() +			 * this means that setting prev_seq here will cause the +			 * consistency check to fail in the netlink callback +			 * handler, resulting in the last NLMSG_DONE message +			 * having the NLM_F_DUMP_INTR flag set. 
+			 */ +			cb->prev_seq = 1; +			*last_publ = 0; +			return -EPIPE; +		} +	} else { +		p = list_first_entry(&tsk->publications, struct publication, +				     pport_list); +	} + +	list_for_each_entry_from(p, &tsk->publications, pport_list) { +		err = __tipc_nl_add_sk_publ(skb, cb, p); +		if (err) { +			*last_publ = p->key; +			return err; +		} +	} +	*last_publ = 0; + +	return 0; +} + +int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb) +{ +	int err; +	u32 tsk_ref = cb->args[0]; +	u32 last_publ = cb->args[1]; +	u32 done = cb->args[2]; +	struct tipc_sock *tsk; + +	if (!tsk_ref) { +		struct nlattr **attrs; +		struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1]; + +		err = tipc_nlmsg_parse(cb->nlh, &attrs); +		if (err) +			return err; + +		err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, +				       attrs[TIPC_NLA_SOCK], +				       tipc_nl_sock_policy); +		if (err) +			return err; + +		if (!sock[TIPC_NLA_SOCK_REF]) +			return -EINVAL; + +		tsk_ref = nla_get_u32(sock[TIPC_NLA_SOCK_REF]); +	} + +	if (done) +		return 0; + +	tsk = tipc_sk_get(tsk_ref); +	if (!tsk) +		return -EINVAL; + +	lock_sock(&tsk->sk); +	err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ); +	if (!err) +		done = 1; +	release_sock(&tsk->sk); +	tipc_sk_put(tsk); + +	cb->args[0] = tsk_ref; +	cb->args[1] = last_publ; +	cb->args[2] = done; + +	return skb->len; +}  |