Diffstat (limited to 'drivers/vhost/scsi.c')
-rw-r--r--  drivers/vhost/scsi.c  399
1 file changed, 282 insertions, 117 deletions
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index b22adf03f584..6ff8a5096691 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -52,7 +52,6 @@
 #define VHOST_SCSI_VERSION  "v0.1"
 #define VHOST_SCSI_NAMELEN 256
 #define VHOST_SCSI_MAX_CDB_SIZE 32
-#define VHOST_SCSI_DEFAULT_TAGS 256
 #define VHOST_SCSI_PREALLOC_SGLS 2048
 #define VHOST_SCSI_PREALLOC_UPAGES 2048
 #define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
@@ -140,6 +139,7 @@ struct vhost_scsi_tpg {
 	struct se_portal_group se_tpg;
 	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
 	struct vhost_scsi *vhost_scsi;
+	struct list_head tmf_queue;
 };
 
 struct vhost_scsi_tport {
@@ -189,6 +189,9 @@ struct vhost_scsi_virtqueue {
 	 * Writers must also take dev mutex and flush under it.
 	 */
 	int inflight_idx;
+	struct vhost_scsi_cmd *scsi_cmds;
+	struct sbitmap scsi_tags;
+	int max_cmds;
 };
 
 struct vhost_scsi {
@@ -209,6 +212,21 @@ struct vhost_scsi {
 	int vs_events_nr; /* num of pending events, protected by vq->mutex */
 };
 
+struct vhost_scsi_tmf {
+	struct vhost_work vwork;
+	struct vhost_scsi_tpg *tpg;
+	struct vhost_scsi *vhost;
+	struct vhost_scsi_virtqueue *svq;
+	struct list_head queue_entry;
+
+	struct se_cmd se_cmd;
+	u8 scsi_resp;
+	struct vhost_scsi_inflight *inflight;
+	struct iovec resp_iov;
+	int in_iovs;
+	int vq_desc;
+};
+
 /*
  * Context for processing request and control queue operations.
  */
@@ -320,11 +338,13 @@ static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
 	return 1;
 }
 
-static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
+static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
 {
 	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
 				struct vhost_scsi_cmd, tvc_se_cmd);
-	struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
+	struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq,
+				struct vhost_scsi_virtqueue, vq);
+	struct vhost_scsi_inflight *inflight = tv_cmd->inflight;
 	int i;
 
 	if (tv_cmd->tvc_sgl_count) {
@@ -336,8 +356,36 @@ static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
 			put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
 	}
 
-	vhost_scsi_put_inflight(tv_cmd->inflight);
-	target_free_tag(se_sess, se_cmd);
+	sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag);
+	vhost_scsi_put_inflight(inflight);
+}
+
+static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
+{
+	struct vhost_scsi_tpg *tpg = tmf->tpg;
+	struct vhost_scsi_inflight *inflight = tmf->inflight;
+
+	mutex_lock(&tpg->tv_tpg_mutex);
+	list_add_tail(&tpg->tmf_queue, &tmf->queue_entry);
+	mutex_unlock(&tpg->tv_tpg_mutex);
+	vhost_scsi_put_inflight(inflight);
+}
+
+static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
+{
+	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
+		struct vhost_scsi_tmf *tmf = container_of(se_cmd,
+					struct vhost_scsi_tmf, se_cmd);
+
+		vhost_work_queue(&tmf->vhost->dev, &tmf->vwork);
+	} else {
+		struct vhost_scsi_cmd *cmd = container_of(se_cmd,
+					struct vhost_scsi_cmd, tvc_se_cmd);
+		struct vhost_scsi *vs = cmd->tvc_vhost;
+
+		llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
+		vhost_work_queue(&vs->dev, &vs->vs_completion_work);
+	}
 }
 
 static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
@@ -362,34 +410,25 @@ static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
 	return 0;
 }
 
-static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
-{
-	struct vhost_scsi *vs = cmd->tvc_vhost;
-
-	llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
-
-	vhost_work_queue(&vs->dev, &vs->vs_completion_work);
-}
-
 static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
 {
-	struct vhost_scsi_cmd *cmd = container_of(se_cmd,
-				struct vhost_scsi_cmd, tvc_se_cmd);
-	vhost_scsi_complete_cmd(cmd);
+	transport_generic_free_cmd(se_cmd, 0);
 	return 0;
 }
 
 static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
 {
-	struct vhost_scsi_cmd *cmd = container_of(se_cmd,
-				struct vhost_scsi_cmd, tvc_se_cmd);
-	vhost_scsi_complete_cmd(cmd);
+	transport_generic_free_cmd(se_cmd, 0);
 	return 0;
 }
 
 static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
 {
-	return;
+	struct vhost_scsi_tmf *tmf = container_of(se_cmd, struct vhost_scsi_tmf,
+						  se_cmd);
+
+	tmf->scsi_resp = se_cmd->se_tmr_req->response;
+	transport_generic_free_cmd(&tmf->se_cmd, 0);
 }
 
 static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
@@ -429,15 +468,6 @@ vhost_scsi_allocate_evt(struct vhost_scsi *vs,
 	return evt;
 }
 
-static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
-{
-	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
-
-	/* TODO locking against target/backend threads? */
-	transport_generic_free_cmd(se_cmd, 0);
-
-}
-
 static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
 {
 	return target_put_sess_cmd(se_cmd);
@@ -556,7 +586,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
 		} else
 			pr_err("Faulted on virtio_scsi_cmd_resp\n");
 
-		vhost_scsi_free_cmd(cmd);
+		vhost_scsi_release_cmd_res(se_cmd);
 	}
 
 	vq = -1;
@@ -566,31 +596,31 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
 }
 
 static struct vhost_scsi_cmd *
-vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
+vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
 		   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
 		   u32 exp_data_len, int data_direction)
 {
+	struct vhost_scsi_virtqueue *svq = container_of(vq,
+					struct vhost_scsi_virtqueue, vq);
 	struct vhost_scsi_cmd *cmd;
 	struct vhost_scsi_nexus *tv_nexus;
-	struct se_session *se_sess;
 	struct scatterlist *sg, *prot_sg;
 	struct page **pages;
-	int tag, cpu;
+	int tag;
 
 	tv_nexus = tpg->tpg_nexus;
 	if (!tv_nexus) {
 		pr_err("Unable to locate active struct vhost_scsi_nexus\n");
 		return ERR_PTR(-EIO);
 	}
-	se_sess = tv_nexus->tvn_se_sess;
 
-	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
+	tag = sbitmap_get(&svq->scsi_tags, 0, false);
 	if (tag < 0) {
 		pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
-	cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];
+	cmd = &svq->scsi_cmds[tag];
 	sg = cmd->tvc_sgl;
 	prot_sg = cmd->tvc_prot_sgl;
 	pages = cmd->tvc_upages;
@@ -599,7 +629,6 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
 	cmd->tvc_prot_sgl = prot_sg;
 	cmd->tvc_upages = pages;
 	cmd->tvc_se_cmd.map_tag = tag;
-	cmd->tvc_se_cmd.map_cpu = cpu;
 	cmd->tvc_tag = scsi_tag;
 	cmd->tvc_lun = lun;
 	cmd->tvc_task_attr = task_attr;
@@ -907,6 +936,11 @@ vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
 	return ret;
 }
 
+static u16 vhost_buf_to_lun(u8 *lun_buf)
+{
+	return ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF;
+}
+
 static void
 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 {
@@ -1045,12 +1079,12 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 			tag = vhost64_to_cpu(vq, v_req_pi.tag);
 			task_attr = v_req_pi.task_attr;
 			cdb = &v_req_pi.cdb[0];
-			lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
+			lun = vhost_buf_to_lun(v_req_pi.lun);
 		} else {
 			tag = vhost64_to_cpu(vq, v_req.tag);
 			task_attr = v_req.task_attr;
 			cdb = &v_req.cdb[0];
-			lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
+			lun = vhost_buf_to_lun(v_req.lun);
 		}
 		/*
 		 * Check that the received CDB size does not exceeded our
@@ -1065,11 +1099,11 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 				scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
 				goto err;
 		}
-		cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
+		cmd = vhost_scsi_get_cmd(vq, tpg, cdb, tag, lun, task_attr,
 					 exp_data_len + prot_bytes,
 					 data_direction);
 		if (IS_ERR(cmd)) {
-			vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
+			vq_err(vq, "vhost_scsi_get_cmd failed %ld\n",
 			       PTR_ERR(cmd));
 			goto err;
 		}
@@ -1088,7 +1122,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 						      &prot_iter, exp_data_len,
 						      &data_iter))) {
 				vq_err(vq, "Failed to map iov to sgl\n");
-				vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
+				vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
 				goto err;
 			}
 		}
@@ -1124,9 +1158,9 @@ out:
 }
 
 static void
-vhost_scsi_send_tmf_reject(struct vhost_scsi *vs,
-			   struct vhost_virtqueue *vq,
-			   struct vhost_scsi_ctx *vc)
+vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
+			 int in_iovs, int vq_desc, struct iovec *resp_iov,
+			 int tmf_resp_code)
 {
 	struct virtio_scsi_ctrl_tmf_resp rsp;
 	struct iov_iter iov_iter;
@@ -1134,17 +1168,87 @@ vhost_scsi_send_tmf_reject(struct vhost_scsi *vs,
 
 	pr_debug("%s\n", __func__);
 	memset(&rsp, 0, sizeof(rsp));
-	rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
+	rsp.response = tmf_resp_code;
 
-	iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
+	iov_iter_init(&iov_iter, READ, resp_iov, in_iovs, sizeof(rsp));
 
 	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
 	if (likely(ret == sizeof(rsp)))
-		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
+		vhost_add_used_and_signal(&vs->dev, vq, vq_desc, 0);
 	else
 		pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
 }
 
+static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
+{
+	struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
+						  vwork);
+	int resp_code;
+
+	if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE)
+		resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
+	else
+		resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;
+
+	vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
+				 tmf->vq_desc, &tmf->resp_iov, resp_code);
+	vhost_scsi_release_tmf_res(tmf);
+}
+
+static void
+vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
+		      struct vhost_virtqueue *vq,
+		      struct virtio_scsi_ctrl_tmf_req *vtmf,
+		      struct vhost_scsi_ctx *vc)
+{
+	struct vhost_scsi_virtqueue *svq = container_of(vq,
+					struct vhost_scsi_virtqueue, vq);
+	struct vhost_scsi_tmf *tmf;
+
+	if (vhost32_to_cpu(vq, vtmf->subtype) !=
+	    VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET)
+		goto send_reject;
+
+	if (!tpg->tpg_nexus || !tpg->tpg_nexus->tvn_se_sess) {
+		pr_err("Unable to locate active struct vhost_scsi_nexus for LUN RESET.\n");
+		goto send_reject;
+	}
+
+	mutex_lock(&tpg->tv_tpg_mutex);
+	if (list_empty(&tpg->tmf_queue)) {
+		pr_err("Missing reserve TMF. Could not handle LUN RESET.\n");
+		mutex_unlock(&tpg->tv_tpg_mutex);
+		goto send_reject;
+	}
+
+	tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
+			       queue_entry);
+	list_del_init(&tmf->queue_entry);
+	mutex_unlock(&tpg->tv_tpg_mutex);
+
+	tmf->tpg = tpg;
+	tmf->vhost = vs;
+	tmf->svq = svq;
+	tmf->resp_iov = vq->iov[vc->out];
+	tmf->vq_desc = vc->head;
+	tmf->in_iovs = vc->in;
+	tmf->inflight = vhost_scsi_get_inflight(vq);
+
+	if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL,
+			      vhost_buf_to_lun(vtmf->lun), NULL,
+			      TMR_LUN_RESET, GFP_KERNEL, 0,
+			      TARGET_SCF_ACK_KREF) < 0) {
+		vhost_scsi_release_tmf_res(tmf);
+		goto send_reject;
+	}
+
+	return;
+
+send_reject:
+	vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out],
+				 VIRTIO_SCSI_S_FUNCTION_REJECTED);
+}
+
 static void
 vhost_scsi_send_an_resp(struct vhost_scsi *vs,
 			struct vhost_virtqueue *vq,
@@ -1170,6 +1274,7 @@ vhost_scsi_send_an_resp(struct vhost_scsi *vs,
 static void
 vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 {
+	struct vhost_scsi_tpg *tpg;
 	union {
 		__virtio32 type;
 		struct virtio_scsi_ctrl_an_req an;
@@ -1251,12 +1356,12 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 		vc.req += typ_size;
 		vc.req_size -= typ_size;
 
-		ret = vhost_scsi_get_req(vq, &vc, NULL);
+		ret = vhost_scsi_get_req(vq, &vc, &tpg);
 		if (ret)
 			goto err;
 
 		if (v_req.type == VIRTIO_SCSI_T_TMF)
-			vhost_scsi_send_tmf_reject(vs, vq, &vc);
+			vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc);
 		else
 			vhost_scsi_send_an_resp(vs, vq, &vc);
 err:
@@ -1373,6 +1478,83 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
 		wait_for_completion(&old_inflight[i]->comp);
 }
 
+static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
+{
+	struct vhost_scsi_virtqueue *svq = container_of(vq,
+					struct vhost_scsi_virtqueue, vq);
+	struct vhost_scsi_cmd *tv_cmd;
+	unsigned int i;
+
+	if (!svq->scsi_cmds)
+		return;
+
+	for (i = 0; i < svq->max_cmds; i++) {
+		tv_cmd = &svq->scsi_cmds[i];
+
+		kfree(tv_cmd->tvc_sgl);
+		kfree(tv_cmd->tvc_prot_sgl);
+		kfree(tv_cmd->tvc_upages);
+	}
+
+	sbitmap_free(&svq->scsi_tags);
+	kfree(svq->scsi_cmds);
+	svq->scsi_cmds = NULL;
+}
+
+static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
+{
+	struct vhost_scsi_virtqueue *svq = container_of(vq,
+					struct vhost_scsi_virtqueue, vq);
+	struct vhost_scsi_cmd *tv_cmd;
+	unsigned int i;
+
+	if (svq->scsi_cmds)
+		return 0;
+
+	if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL,
+			      NUMA_NO_NODE))
+		return -ENOMEM;
+	svq->max_cmds = max_cmds;
+
+	svq->scsi_cmds = kcalloc(max_cmds, sizeof(*tv_cmd), GFP_KERNEL);
+	if (!svq->scsi_cmds) {
+		sbitmap_free(&svq->scsi_tags);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < max_cmds; i++) {
+		tv_cmd = &svq->scsi_cmds[i];
+
+		tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
+					  sizeof(struct scatterlist),
+					  GFP_KERNEL);
+		if (!tv_cmd->tvc_sgl) {
+			pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
+			goto out;
+		}
+
+		tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
+					     sizeof(struct page *),
+					     GFP_KERNEL);
+		if (!tv_cmd->tvc_upages) {
+			pr_err("Unable to allocate tv_cmd->tvc_upages\n");
+			goto out;
+		}
+
+		tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
+					       sizeof(struct scatterlist),
+					       GFP_KERNEL);
+		if (!tv_cmd->tvc_prot_sgl) {
+			pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
+			goto out;
+		}
+	}
+	return 0;
+out:
+	vhost_scsi_destroy_vq_cmds(vq);
+	return -ENOMEM;
+}
+
 /*
  * Called from vhost_scsi_ioctl() context to walk the list of available
  * vhost_scsi_tpg with an active struct vhost_scsi_nexus
@@ -1427,10 +1609,9 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
 
 		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
 			if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
-				kfree(vs_tpg);
 				mutex_unlock(&tpg->tv_tpg_mutex);
 				ret = -EEXIST;
-				goto out;
+				goto undepend;
 			}
 			/*
 			 * In order to ensure individual vhost-scsi configfs
@@ -1442,9 +1623,8 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
 			ret = target_depend_item(&se_tpg->tpg_group.cg_item);
 			if (ret) {
 				pr_warn("target_depend_item() failed: %d\n", ret);
-				kfree(vs_tpg);
 				mutex_unlock(&tpg->tv_tpg_mutex);
-				goto out;
+				goto undepend;
 			}
 			tpg->tv_tpg_vhost_count++;
 			tpg->vhost_scsi = vs;
@@ -1457,6 +1637,16 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
 	if (match) {
 		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
 		       sizeof(vs->vs_vhost_wwpn));
+
+		for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
+			vq = &vs->vqs[i].vq;
+			if (!vhost_vq_is_setup(vq))
+				continue;
+
+			if (vhost_scsi_setup_vq_cmds(vq, vq->num))
+				goto destroy_vq_cmds;
+		}
+
 		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
 			vq = &vs->vqs[i].vq;
 			mutex_lock(&vq->mutex);
@@ -1476,7 +1666,22 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
 	vhost_scsi_flush(vs);
 	kfree(vs->vs_tpg);
 	vs->vs_tpg = vs_tpg;
+	goto out;
+destroy_vq_cmds:
+	for (i--; i >= VHOST_SCSI_VQ_IO; i--) {
+		if (!vhost_vq_get_backend(&vs->vqs[i].vq))
+			vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq);
+	}
+undepend:
+	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
+		tpg = vs_tpg[i];
+		if (tpg) {
+			tpg->tv_tpg_vhost_count--;
+			target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
+		}
+	}
+	kfree(vs_tpg);
 out:
 	mutex_unlock(&vs->dev.mutex);
 	mutex_unlock(&vhost_scsi_mutex);
@@ -1549,6 +1754,12 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
 			mutex_lock(&vq->mutex);
 			vhost_vq_set_backend(vq, NULL);
 			mutex_unlock(&vq->mutex);
+			/*
+			 * Make sure cmds are not running before tearing them
+			 * down.
+			 */
+			vhost_scsi_flush(vs);
+			vhost_scsi_destroy_vq_cmds(vq);
 		}
 	}
 	/*
@@ -1811,11 +2022,19 @@ static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
 {
 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
 				struct vhost_scsi_tpg, se_tpg);
+	struct vhost_scsi_tmf *tmf;
+
+	tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
+	if (!tmf)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&tmf->queue_entry);
+	vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
 
 	mutex_lock(&vhost_scsi_mutex);
 
 	mutex_lock(&tpg->tv_tpg_mutex);
 	tpg->tv_tpg_port_count++;
+	list_add_tail(&tmf->queue_entry, &tpg->tmf_queue);
 	mutex_unlock(&tpg->tv_tpg_mutex);
 
 	vhost_scsi_hotplug(tpg, lun);
@@ -1830,11 +2049,16 @@ static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
 {
 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
 				struct vhost_scsi_tpg, se_tpg);
+	struct vhost_scsi_tmf *tmf;
 
 	mutex_lock(&vhost_scsi_mutex);
 
 	mutex_lock(&tpg->tv_tpg_mutex);
 	tpg->tv_tpg_port_count--;
+	tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
+			       queue_entry);
+	list_del(&tmf->queue_entry);
+	kfree(tmf);
 	mutex_unlock(&tpg->tv_tpg_mutex);
 
 	vhost_scsi_hotunplug(tpg, lun);
@@ -1842,23 +2066,6 @@ static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
 	mutex_unlock(&vhost_scsi_mutex);
 }
 
-static void vhost_scsi_free_cmd_map_res(struct se_session *se_sess)
-{
-	struct vhost_scsi_cmd *tv_cmd;
-	unsigned int i;
-
-	if (!se_sess->sess_cmd_map)
-		return;
-
-	for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
-		tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
-
-		kfree(tv_cmd->tvc_sgl);
-		kfree(tv_cmd->tvc_prot_sgl);
-		kfree(tv_cmd->tvc_upages);
-	}
-}
-
 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
 		struct config_item *item, const char *page, size_t count)
 {
@@ -1898,45 +2105,6 @@ static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
 	NULL,
 };
 
-static int vhost_scsi_nexus_cb(struct se_portal_group *se_tpg,
-			       struct se_session *se_sess, void *p)
-{
-	struct vhost_scsi_cmd *tv_cmd;
-	unsigned int i;
-
-	for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
-		tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
-
-		tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
-					  sizeof(struct scatterlist),
-					  GFP_KERNEL);
-		if (!tv_cmd->tvc_sgl) {
-			pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
-			goto out;
-		}
-
-		tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
-					     sizeof(struct page *),
-					     GFP_KERNEL);
-		if (!tv_cmd->tvc_upages) {
-			pr_err("Unable to allocate tv_cmd->tvc_upages\n");
-			goto out;
-		}
-
-		tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
-					       sizeof(struct scatterlist),
-					       GFP_KERNEL);
-		if (!tv_cmd->tvc_prot_sgl) {
-			pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
-			goto out;
-		}
-	}
-	return 0;
-out:
-	vhost_scsi_free_cmd_map_res(se_sess);
-	return -ENOMEM;
-}
-
 static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
 				const char *name)
 {
@@ -1960,12 +2128,9 @@ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
 	 * struct se_node_acl for the vhost_scsi struct se_portal_group with
 	 * the SCSI Initiator port name of the passed configfs group 'name'.
 	 */
-	tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg,
-					VHOST_SCSI_DEFAULT_TAGS,
-					sizeof(struct vhost_scsi_cmd),
+	tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, 0, 0,
 					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
-					(unsigned char *)name, tv_nexus,
-					vhost_scsi_nexus_cb);
+					(unsigned char *)name, tv_nexus, NULL);
 	if (IS_ERR(tv_nexus->tvn_se_sess)) {
 		mutex_unlock(&tpg->tv_tpg_mutex);
 		kfree(tv_nexus);
@@ -2015,7 +2180,6 @@ static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
 		" %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
 		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
 
-	vhost_scsi_free_cmd_map_res(se_sess);
 	/*
 	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
 	 */
@@ -2155,6 +2319,7 @@ vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
 	}
 	mutex_init(&tpg->tv_tpg_mutex);
 	INIT_LIST_HEAD(&tpg->tv_tpg_list);
+	INIT_LIST_HEAD(&tpg->tmf_queue);
 	tpg->tport = tport;
 	tpg->tport_tpgt = tpgt;
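For readers following the diff, the central change is that command slots and tags move out of the se_session-wide pool (VHOST_SCSI_DEFAULT_TAGS plus the target_setup_session() tag map) and into each vhost_scsi_virtqueue, tracked with a plain sbitmap. The snippet below is only an illustrative reduction of that pattern, not code from the patch: struct cmd_pool and the helper names are invented for the example, while the sbitmap calls (sbitmap_init_node(), sbitmap_get(), sbitmap_clear_bit(), sbitmap_free()) are the same ones vhost_scsi_setup_vq_cmds(), vhost_scsi_get_cmd() and vhost_scsi_release_cmd_res() use above.

/* Illustrative sketch only: a per-queue command pool in the style the
 * patch adds to struct vhost_scsi_virtqueue. "cmd_pool" and the helper
 * names are made up for this example.
 */
#include <linux/sbitmap.h>
#include <linux/slab.h>

struct cmd_pool {
	struct vhost_scsi_cmd *cmds;	/* one preallocated slot per tag */
	struct sbitmap tags;		/* bitmap of free/used slots */
	int max_cmds;
};

static int cmd_pool_init(struct cmd_pool *p, int max_cmds)
{
	/* same init call the patch uses in vhost_scsi_setup_vq_cmds() */
	if (sbitmap_init_node(&p->tags, max_cmds, -1, GFP_KERNEL,
			      NUMA_NO_NODE))
		return -ENOMEM;

	p->cmds = kcalloc(max_cmds, sizeof(*p->cmds), GFP_KERNEL);
	if (!p->cmds) {
		sbitmap_free(&p->tags);
		return -ENOMEM;
	}
	p->max_cmds = max_cmds;
	return 0;
}

static struct vhost_scsi_cmd *cmd_pool_get(struct cmd_pool *p)
{
	/* returns a free tag, or -1 when the pool is exhausted */
	int tag = sbitmap_get(&p->tags, 0, false);

	if (tag < 0)
		return NULL;
	p->cmds[tag].tvc_se_cmd.map_tag = tag;
	return &p->cmds[tag];
}

static void cmd_pool_put(struct cmd_pool *p, struct vhost_scsi_cmd *cmd)
{
	sbitmap_clear_bit(&p->tags, cmd->tvc_se_cmd.map_tag);
}

In the patch itself the pool is sized to vq->num when the endpoint is configured, and torn down in vhost_scsi_clear_endpoint() only after vhost_scsi_flush() has ensured no in-flight command still references the slots.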