Diffstat (limited to 'drivers/nvme/host/tcp.c')
-rw-r--r--   drivers/nvme/host/tcp.c   79
1 file changed, 53 insertions, 26 deletions
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 7723a4989524..49c9e7bc9116 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -208,6 +208,18 @@ static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
 	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
 }
 
+static inline void *nvme_tcp_req_cmd_pdu(struct nvme_tcp_request *req)
+{
+	return req->pdu;
+}
+
+static inline void *nvme_tcp_req_data_pdu(struct nvme_tcp_request *req)
+{
+	/* use the pdu space in the back for the data pdu */
+	return req->pdu + sizeof(struct nvme_tcp_cmd_pdu) -
+		sizeof(struct nvme_tcp_data_pdu);
+}
+
 static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req)
 {
 	if (nvme_is_fabrics(req->req.cmd))
@@ -614,7 +626,7 @@ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
 
 static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req)
 {
-	struct nvme_tcp_data_pdu *data = req->pdu;
+	struct nvme_tcp_data_pdu *data = nvme_tcp_req_data_pdu(req);
 	struct nvme_tcp_queue *queue = req->queue;
 	struct request *rq = blk_mq_rq_from_pdu(req);
 	u32 h2cdata_sent = req->pdu_len;
@@ -1038,7 +1050,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
 static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
 {
 	struct nvme_tcp_queue *queue = req->queue;
-	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
+	struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
 	bool inline_data = nvme_tcp_has_inline_data(req);
 	u8 hdgst = nvme_tcp_hdgst_len(queue);
 	int len = sizeof(*pdu) + hdgst - req->offset;
@@ -1077,7 +1089,7 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
 static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
 {
 	struct nvme_tcp_queue *queue = req->queue;
-	struct nvme_tcp_data_pdu *pdu = req->pdu;
+	struct nvme_tcp_data_pdu *pdu = nvme_tcp_req_data_pdu(req);
 	u8 hdgst = nvme_tcp_hdgst_len(queue);
 	int len = sizeof(*pdu) - req->offset + hdgst;
 	int ret;
@@ -1608,22 +1620,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
 	if (ret)
 		goto err_init_connect;
 
-	queue->rd_enabled = true;
 	set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
-	nvme_tcp_init_recv_ctx(queue);
-
-	write_lock_bh(&queue->sock->sk->sk_callback_lock);
-	queue->sock->sk->sk_user_data = queue;
-	queue->state_change = queue->sock->sk->sk_state_change;
-	queue->data_ready = queue->sock->sk->sk_data_ready;
-	queue->write_space = queue->sock->sk->sk_write_space;
-	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
-	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
-	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
-#ifdef CONFIG_NET_RX_BUSY_POLL
-	queue->sock->sk->sk_ll_usec = 1;
-#endif
-	write_unlock_bh(&queue->sock->sk->sk_callback_lock);
 
 	return 0;
 
@@ -1643,7 +1640,7 @@ err_destroy_mutex:
 	return ret;
 }
 
-static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
+static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue)
 {
 	struct socket *sock = queue->sock;
 
@@ -1658,7 +1655,7 @@ static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
 {
 	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
-	nvme_tcp_restore_sock_calls(queue);
+	nvme_tcp_restore_sock_ops(queue);
 	cancel_work_sync(&queue->io_work);
 }
 
@@ -1676,21 +1673,42 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
 	mutex_unlock(&queue->queue_lock);
 }
 
+static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
+{
+	write_lock_bh(&queue->sock->sk->sk_callback_lock);
+	queue->sock->sk->sk_user_data = queue;
+	queue->state_change = queue->sock->sk->sk_state_change;
+	queue->data_ready = queue->sock->sk->sk_data_ready;
+	queue->write_space = queue->sock->sk->sk_write_space;
+	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
+	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
+	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	queue->sock->sk->sk_ll_usec = 1;
+#endif
+	write_unlock_bh(&queue->sock->sk->sk_callback_lock);
+}
+
 static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
 {
 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+	struct nvme_tcp_queue *queue = &ctrl->queues[idx];
 	int ret;
 
+	queue->rd_enabled = true;
+	nvme_tcp_init_recv_ctx(queue);
+	nvme_tcp_setup_sock_ops(queue);
+
 	if (idx)
 		ret = nvmf_connect_io_queue(nctrl, idx);
 	else
 		ret = nvmf_connect_admin_queue(nctrl);
 
 	if (!ret) {
-		set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
+		set_bit(NVME_TCP_Q_LIVE, &queue->flags);
 	} else {
-		if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
-			__nvme_tcp_stop_queue(&ctrl->queues[idx]);
+		if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
+			__nvme_tcp_stop_queue(queue);
 		dev_err(nctrl->device,
 			"failed to connect queue: %d ret=%d\n", idx, ret);
 	}
@@ -2284,7 +2302,7 @@ static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
 {
 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
 	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
-	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
+	struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
 	u8 opc = pdu->cmd.common.opcode, fctype = pdu->cmd.fabrics.fctype;
 	int qid = nvme_tcp_queue_id(req->queue);
 
@@ -2323,7 +2341,7 @@ static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
 			struct request *rq)
 {
 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
-	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
+	struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
 	struct nvme_command *c = &pdu->cmd;
 
 	c->common.flags |= NVME_CMD_SGL_METABUF;
@@ -2343,7 +2361,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
 		struct request *rq)
 {
 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
-	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
+	struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
 	struct nvme_tcp_queue *queue = req->queue;
 	u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
 	blk_status_t ret;
@@ -2682,6 +2700,15 @@ static struct nvmf_transport_ops nvme_tcp_transport = {
 
 static int __init nvme_tcp_init_module(void)
 {
+	BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8);
+	BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72);
+	BUILD_BUG_ON(sizeof(struct nvme_tcp_data_pdu) != 24);
+	BUILD_BUG_ON(sizeof(struct nvme_tcp_rsp_pdu) != 24);
+	BUILD_BUG_ON(sizeof(struct nvme_tcp_r2t_pdu) != 24);
+	BUILD_BUG_ON(sizeof(struct nvme_tcp_icreq_pdu) != 128);
+	BUILD_BUG_ON(sizeof(struct nvme_tcp_icresp_pdu) != 128);
+	BUILD_BUG_ON(sizeof(struct nvme_tcp_term_pdu) != 24);
+
 	nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
 			WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
 	if (!nvme_tcp_wq)
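
Illustrative note: the nvme_tcp_req_data_pdu() helper added above places the data PDU in the back of the per-request PDU buffer instead of aliasing it at offset 0. The short userspace sketch below only mirrors that pointer arithmetic; the buffer handling, main() and the CMD_PDU_SIZE/DATA_PDU_SIZE constants are stand-ins, with the sizes taken from the BUILD_BUG_ON() checks added in nvme_tcp_init_module().

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in sizes, taken from the BUILD_BUG_ON() checks in the diff. */
#define CMD_PDU_SIZE	72	/* sizeof(struct nvme_tcp_cmd_pdu) */
#define DATA_PDU_SIZE	24	/* sizeof(struct nvme_tcp_data_pdu) */

int main(void)
{
	/* One per-request PDU buffer, sized for the largest (command) PDU. */
	char *pdu = malloc(CMD_PDU_SIZE);

	if (!pdu)
		return 1;

	/* The command PDU lives at the front of the buffer... */
	char *cmd_pdu = pdu;
	/* ...and the data PDU is carved out of the back of the same space. */
	char *data_pdu = pdu + CMD_PDU_SIZE - DATA_PDU_SIZE;

	assert(DATA_PDU_SIZE <= CMD_PDU_SIZE);
	printf("cmd pdu at +0, data pdu at +%td\n", data_pdu - cmd_pdu);

	free(pdu);
	return 0;
}

With these sizes the data PDU occupies bytes 48..71 of the 72-byte command PDU buffer, so the leading bytes of the command PDU (the header and opcode that nvme_tcp_timeout() still reads through nvme_tcp_req_cmd_pdu()) are left untouched when an H2C data PDU is built in the same allocation, which appears to be the point of keeping the data PDU at the back.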