diff options
Diffstat (limited to 'net/unix/af_unix.c')
| -rw-r--r-- | net/unix/af_unix.c | 164 | 
1 file changed, 96 insertions(+), 68 deletions(-)
| diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index e4af6616e1df..142f56770b77 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -221,15 +221,9 @@ static inline int unix_may_send(struct sock *sk, struct sock *osk)  	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);  } -static inline int unix_recvq_full(const struct sock *sk) -{ -	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog; -} -  static inline int unix_recvq_full_lockless(const struct sock *sk)  { -	return skb_queue_len_lockless(&sk->sk_receive_queue) > -		READ_ONCE(sk->sk_max_ack_backlog); +	return skb_queue_len_lockless(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;  }  struct sock *unix_peer_get(struct sock *s) @@ -530,10 +524,10 @@ static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)  	return 0;  } -static int unix_writable(const struct sock *sk) +static int unix_writable(const struct sock *sk, unsigned char state)  { -	return sk->sk_state != TCP_LISTEN && -	       (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf; +	return state != TCP_LISTEN && +		(refcount_read(&sk->sk_wmem_alloc) << 2) <= READ_ONCE(sk->sk_sndbuf);  }  static void unix_write_space(struct sock *sk) @@ -541,7 +535,7 @@ static void unix_write_space(struct sock *sk)  	struct socket_wq *wq;  	rcu_read_lock(); -	if (unix_writable(sk)) { +	if (unix_writable(sk, READ_ONCE(sk->sk_state))) {  		wq = rcu_dereference(sk->sk_wq);  		if (skwq_has_sleeper(wq))  			wake_up_interruptible_sync_poll(&wq->wait, @@ -570,7 +564,6 @@ static void unix_dgram_disconnected(struct sock *sk, struct sock *other)  			sk_error_report(other);  		}  	} -	other->sk_state = TCP_CLOSE;  }  static void unix_sock_destructor(struct sock *sk) @@ -617,7 +610,7 @@ static void unix_release_sock(struct sock *sk, int embrion)  	u->path.dentry = NULL;  	u->path.mnt = NULL;  	state = sk->sk_state; -	sk->sk_state = TCP_CLOSE; +	WRITE_ONCE(sk->sk_state, TCP_CLOSE);  	skpair = unix_peer(sk);  	unix_peer(sk) = 
NULL; @@ -638,7 +631,7 @@ static void unix_release_sock(struct sock *sk, int embrion)  			unix_state_lock(skpair);  			/* No more writes */  			WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK); -			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion) +			if (!skb_queue_empty_lockless(&sk->sk_receive_queue) || embrion)  				WRITE_ONCE(skpair->sk_err, ECONNRESET);  			unix_state_unlock(skpair);  			skpair->sk_state_change(skpair); @@ -731,7 +724,7 @@ static int unix_listen(struct socket *sock, int backlog)  	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)  		goto out;	/* Only stream/seqpacket sockets accept */  	err = -EINVAL; -	if (!u->addr) +	if (!READ_ONCE(u->addr))  		goto out;	/* No listens on an unbound socket */  	unix_state_lock(sk);  	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN) @@ -739,7 +732,8 @@ static int unix_listen(struct socket *sock, int backlog)  	if (backlog > sk->sk_max_ack_backlog)  		wake_up_interruptible_all(&u->peer_wait);  	sk->sk_max_ack_backlog	= backlog; -	sk->sk_state		= TCP_LISTEN; +	WRITE_ONCE(sk->sk_state, TCP_LISTEN); +  	/* set credentials so connect can copy them */  	init_peercred(sk);  	err = 0; @@ -976,7 +970,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,  	sk->sk_hash		= unix_unbound_hash(sk);  	sk->sk_allocation	= GFP_KERNEL_ACCOUNT;  	sk->sk_write_space	= unix_write_space; -	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen; +	sk->sk_max_ack_backlog	= READ_ONCE(net->unx.sysctl_max_dgram_qlen);  	sk->sk_destruct		= unix_sock_destructor;  	u = unix_sk(sk);  	u->listener = NULL; @@ -1131,8 +1125,8 @@ static struct sock *unix_find_other(struct net *net,  static int unix_autobind(struct sock *sk)  { -	unsigned int new_hash, old_hash = sk->sk_hash;  	struct unix_sock *u = unix_sk(sk); +	unsigned int new_hash, old_hash;  	struct net *net = sock_net(sk);  	struct unix_address *addr;  	u32 lastnum, ordernum; @@ -1155,6 +1149,7 @@ static int 
unix_autobind(struct sock *sk)  	addr->name->sun_family = AF_UNIX;  	refcount_set(&addr->refcnt, 1); +	old_hash = sk->sk_hash;  	ordernum = get_random_u32();  	lastnum = ordernum & 0xFFFFF;  retry: @@ -1195,8 +1190,8 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,  {  	umode_t mode = S_IFSOCK |  	       (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask()); -	unsigned int new_hash, old_hash = sk->sk_hash;  	struct unix_sock *u = unix_sk(sk); +	unsigned int new_hash, old_hash;  	struct net *net = sock_net(sk);  	struct mnt_idmap *idmap;  	struct unix_address *addr; @@ -1234,6 +1229,7 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,  	if (u->addr)  		goto out_unlock; +	old_hash = sk->sk_hash;  	new_hash = unix_bsd_hash(d_backing_inode(dentry));  	unix_table_double_lock(net, old_hash, new_hash);  	u->path.mnt = mntget(parent.mnt); @@ -1261,8 +1257,8 @@ out:  static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,  			      int addr_len)  { -	unsigned int new_hash, old_hash = sk->sk_hash;  	struct unix_sock *u = unix_sk(sk); +	unsigned int new_hash, old_hash;  	struct net *net = sock_net(sk);  	struct unix_address *addr;  	int err; @@ -1280,6 +1276,7 @@ static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,  		goto out_mutex;  	} +	old_hash = sk->sk_hash;  	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);  	unix_table_double_lock(net, old_hash, new_hash); @@ -1369,7 +1366,7 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,  		if ((test_bit(SOCK_PASSCRED, &sock->flags) ||  		     test_bit(SOCK_PASSPIDFD, &sock->flags)) && -		    !unix_sk(sk)->addr) { +		    !READ_ONCE(unix_sk(sk)->addr)) {  			err = unix_autobind(sk);  			if (err)  				goto out; @@ -1399,7 +1396,8 @@ restart:  		if (err)  			goto out_unlock; -		sk->sk_state = other->sk_state = TCP_ESTABLISHED; +		WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED); +		
WRITE_ONCE(other->sk_state, TCP_ESTABLISHED);  	} else {  		/*  		 *	1003.1g breaking connected state with AF_UNSPEC @@ -1416,13 +1414,20 @@ restart:  		unix_peer(sk) = other;  		if (!other) -			sk->sk_state = TCP_CLOSE; +			WRITE_ONCE(sk->sk_state, TCP_CLOSE);  		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);  		unix_state_double_unlock(sk, other); -		if (other != old_peer) +		if (other != old_peer) {  			unix_dgram_disconnected(sk, old_peer); + +			unix_state_lock(old_peer); +			if (!unix_peer(old_peer)) +				WRITE_ONCE(old_peer->sk_state, TCP_CLOSE); +			unix_state_unlock(old_peer); +		} +  		sock_put(old_peer);  	} else {  		unix_peer(sk) = other; @@ -1470,7 +1475,6 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,  	struct sk_buff *skb = NULL;  	long timeo;  	int err; -	int st;  	err = unix_validate_addr(sunaddr, addr_len);  	if (err) @@ -1481,7 +1485,8 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,  		goto out;  	if ((test_bit(SOCK_PASSCRED, &sock->flags) || -	     test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) { +	     test_bit(SOCK_PASSPIDFD, &sock->flags)) && +	    !READ_ONCE(u->addr)) {  		err = unix_autobind(sk);  		if (err)  			goto out; @@ -1534,7 +1539,7 @@ restart:  	if (other->sk_shutdown & RCV_SHUTDOWN)  		goto out_unlock; -	if (unix_recvq_full(other)) { +	if (unix_recvq_full_lockless(other)) {  		err = -EAGAIN;  		if (!timeo)  			goto out_unlock; @@ -1559,9 +1564,7 @@ restart:  	   Well, and we have to recheck the state after socket locked.  	 */ -	st = sk->sk_state; - -	switch (st) { +	switch (READ_ONCE(sk->sk_state)) {  	case TCP_CLOSE:  		/* This is ok... 
continue with connect */  		break; @@ -1576,7 +1579,7 @@ restart:  	unix_state_lock_nested(sk, U_LOCK_SECOND); -	if (sk->sk_state != st) { +	if (sk->sk_state != TCP_CLOSE) {  		unix_state_unlock(sk);  		unix_state_unlock(other);  		sock_put(other); @@ -1629,7 +1632,7 @@ restart:  	copy_peercred(sk, other);  	sock->state	= SS_CONNECTED; -	sk->sk_state	= TCP_ESTABLISHED; +	WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);  	sock_hold(newsk);  	smp_mb__after_atomic();	/* sock_hold() does an atomic_inc() */ @@ -1701,7 +1704,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock,  		goto out;  	arg->err = -EINVAL; -	if (sk->sk_state != TCP_LISTEN) +	if (READ_ONCE(sk->sk_state) != TCP_LISTEN)  		goto out;  	/* If socket state is TCP_LISTEN it cannot change (for now...), @@ -1950,14 +1953,15 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,  	}  	if ((test_bit(SOCK_PASSCRED, &sock->flags) || -	     test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) { +	     test_bit(SOCK_PASSPIDFD, &sock->flags)) && +	    !READ_ONCE(u->addr)) {  		err = unix_autobind(sk);  		if (err)  			goto out;  	}  	err = -EMSGSIZE; -	if (len > sk->sk_sndbuf - 32) +	if (len > READ_ONCE(sk->sk_sndbuf) - 32)  		goto out;  	if (len > SKB_MAX_ALLOC) { @@ -2039,7 +2043,7 @@ restart_locked:  			unix_peer(sk) = NULL;  			unix_dgram_peer_wake_disconnect_wakeup(sk, other); -			sk->sk_state = TCP_CLOSE; +			WRITE_ONCE(sk->sk_state, TCP_CLOSE);  			unix_state_unlock(sk);  			unix_dgram_disconnected(sk, other); @@ -2216,7 +2220,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,  	}  	if (msg->msg_namelen) { -		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP; +		err = READ_ONCE(sk->sk_state) == TCP_ESTABLISHED ? 
-EISCONN : -EOPNOTSUPP;  		goto out_err;  	} else {  		err = -ENOTCONN; @@ -2237,7 +2241,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,  						   &err, 0);  		} else {  			/* Keep two messages in the pipe so it schedules better */ -			size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64); +			size = min_t(int, size, (READ_ONCE(sk->sk_sndbuf) >> 1) - 64);  			/* allow fallback to order-0 allocations */  			size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ); @@ -2330,7 +2334,7 @@ static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,  	if (err)  		return err; -	if (sk->sk_state != TCP_ESTABLISHED) +	if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)  		return -ENOTCONN;  	if (msg->msg_namelen) @@ -2344,7 +2348,7 @@ static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,  {  	struct sock *sk = sock->sk; -	if (sk->sk_state != TCP_ESTABLISHED) +	if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)  		return -ENOTCONN;  	return unix_dgram_recvmsg(sock, msg, size, flags); @@ -2609,10 +2613,24 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,  {  	struct unix_sock *u = unix_sk(sk); -	if (!unix_skb_len(skb) && !(flags & MSG_PEEK)) { -		skb_unlink(skb, &sk->sk_receive_queue); -		consume_skb(skb); -		skb = NULL; +	if (!unix_skb_len(skb)) { +		struct sk_buff *unlinked_skb = NULL; + +		spin_lock(&sk->sk_receive_queue.lock); + +		if (copied && (!u->oob_skb || skb == u->oob_skb)) { +			skb = NULL; +		} else if (flags & MSG_PEEK) { +			skb = skb_peek_next(skb, &sk->sk_receive_queue); +		} else { +			unlinked_skb = skb; +			skb = skb_peek_next(skb, &sk->sk_receive_queue); +			__skb_unlink(unlinked_skb, &sk->sk_receive_queue); +		} + +		spin_unlock(&sk->sk_receive_queue.lock); + +		consume_skb(unlinked_skb);  	} else {  		struct sk_buff *unlinked_skb = NULL; @@ -2621,18 +2639,18 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,  		if (skb == u->oob_skb) {  			if 
(copied) {  				skb = NULL; -			} else if (sock_flag(sk, SOCK_URGINLINE)) { -				if (!(flags & MSG_PEEK)) { +			} else if (!(flags & MSG_PEEK)) { +				if (sock_flag(sk, SOCK_URGINLINE)) {  					WRITE_ONCE(u->oob_skb, NULL);  					consume_skb(skb); +				} else { +					__skb_unlink(skb, &sk->sk_receive_queue); +					WRITE_ONCE(u->oob_skb, NULL); +					unlinked_skb = skb; +					skb = skb_peek(&sk->sk_receive_queue);  				} -			} else if (flags & MSG_PEEK) { -				skb = NULL; -			} else { -				__skb_unlink(skb, &sk->sk_receive_queue); -				WRITE_ONCE(u->oob_skb, NULL); -				unlinked_skb = skb; -				skb = skb_peek(&sk->sk_receive_queue); +			} else if (!sock_flag(sk, SOCK_URGINLINE)) { +				skb = skb_peek_next(skb, &sk->sk_receive_queue);  			}  		} @@ -2649,7 +2667,7 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,  static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)  { -	if (unlikely(sk->sk_state != TCP_ESTABLISHED)) +	if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED))  		return -ENOTCONN;  	return unix_read_skb(sk, recv_actor); @@ -2673,7 +2691,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state,  	size_t size = state->size;  	unsigned int last_len; -	if (unlikely(sk->sk_state != TCP_ESTABLISHED)) { +	if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)) {  		err = -EINVAL;  		goto out;  	} @@ -3004,7 +3022,7 @@ long unix_inq_len(struct sock *sk)  	struct sk_buff *skb;  	long amount = 0; -	if (sk->sk_state == TCP_LISTEN) +	if (READ_ONCE(sk->sk_state) == TCP_LISTEN)  		return -EINVAL;  	spin_lock(&sk->sk_receive_queue.lock); @@ -3089,12 +3107,23 @@ static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)  #if IS_ENABLED(CONFIG_AF_UNIX_OOB)  	case SIOCATMARK:  		{ +			struct unix_sock *u = unix_sk(sk);  			struct sk_buff *skb;  			int answ = 0; +			mutex_lock(&u->iolock); +  			skb = skb_peek(&sk->sk_receive_queue); -			if (skb && skb == 
READ_ONCE(unix_sk(sk)->oob_skb)) -				answ = 1; +			if (skb) { +				struct sk_buff *oob_skb = READ_ONCE(u->oob_skb); + +				if (skb == oob_skb || +				    (!oob_skb && !unix_skb_len(skb))) +					answ = 1; +			} + +			mutex_unlock(&u->iolock); +  			err = put_user(answ, (int __user *)arg);  		}  		break; @@ -3116,12 +3145,14 @@ static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned lon  static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)  {  	struct sock *sk = sock->sk; +	unsigned char state;  	__poll_t mask;  	u8 shutdown;  	sock_poll_wait(file, sock, wait);  	mask = 0;  	shutdown = READ_ONCE(sk->sk_shutdown); +	state = READ_ONCE(sk->sk_state);  	/* exceptional events? */  	if (READ_ONCE(sk->sk_err)) @@ -3143,14 +3174,14 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa  	/* Connection-based need to check for termination and startup */  	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) && -	    sk->sk_state == TCP_CLOSE) +	    state == TCP_CLOSE)  		mask |= EPOLLHUP;  	/*  	 * we set writable also when the other side has shut down the  	 * connection. This prevents stuck sockets.  	 */ -	if (unix_writable(sk)) +	if (unix_writable(sk, state))  		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;  	return mask; @@ -3161,12 +3192,14 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,  {  	struct sock *sk = sock->sk, *other;  	unsigned int writable; +	unsigned char state;  	__poll_t mask;  	u8 shutdown;  	sock_poll_wait(file, sock, wait);  	mask = 0;  	shutdown = READ_ONCE(sk->sk_shutdown); +	state = READ_ONCE(sk->sk_state);  	/* exceptional events? 
*/  	if (READ_ONCE(sk->sk_err) || @@ -3186,19 +3219,14 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,  		mask |= EPOLLIN | EPOLLRDNORM;  	/* Connection-based need to check for termination and startup */ -	if (sk->sk_type == SOCK_SEQPACKET) { -		if (sk->sk_state == TCP_CLOSE) -			mask |= EPOLLHUP; -		/* connection hasn't started yet? */ -		if (sk->sk_state == TCP_SYN_SENT) -			return mask; -	} +	if (sk->sk_type == SOCK_SEQPACKET && state == TCP_CLOSE) +		mask |= EPOLLHUP;  	/* No write status requested, avoid expensive OUT tests. */  	if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))  		return mask; -	writable = unix_writable(sk); +	writable = unix_writable(sk, state);  	if (writable) {  		unix_state_lock(sk); |