Diffstat (limited to 'drivers/gpu/drm/virtio')

 drivers/gpu/drm/virtio/virtgpu_debugfs.c |   1
 drivers/gpu/drm/virtio/virtgpu_display.c |  12
 drivers/gpu/drm/virtio/virtgpu_drv.c     |   6
 drivers/gpu/drm/virtio/virtgpu_drv.h     |  36
 drivers/gpu/drm/virtio/virtgpu_gem.c     |   2
 drivers/gpu/drm/virtio/virtgpu_ioctl.c   |  90
 drivers/gpu/drm/virtio/virtgpu_kms.c     |  41
 drivers/gpu/drm/virtio/virtgpu_object.c  | 114
 drivers/gpu/drm/virtio/virtgpu_plane.c   |   7
 drivers/gpu/drm/virtio/virtgpu_vq.c      | 369
 10 files changed, 372 insertions(+), 306 deletions(-)
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index 5156e6b279db..e27120d512b0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -47,6 +47,7 @@ static int virtio_gpu_features(struct seq_file *m, void *data)
 
 	virtio_add_bool(m, "virgl", vgdev->has_virgl_3d);
 	virtio_add_bool(m, "edid", vgdev->has_edid);
+	virtio_add_bool(m, "indirect", vgdev->has_indirect);
 	virtio_add_int(m, "cap sets", vgdev->num_capsets);
 	virtio_add_int(m, "scanouts", vgdev->num_scanouts);
 	return 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 0966208ec30d..2b7e6ae65546 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -30,7 +30,6 @@
 #include <drm/drm_fourcc.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
 
 #include "virtgpu_drv.h"
 
@@ -91,6 +90,7 @@ static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc)
 	virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
 				   crtc->mode.hdisplay,
 				   crtc->mode.vdisplay, 0, 0);
+	virtio_gpu_notify(vgdev);
 }
 
 static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc,
@@ -109,6 +109,7 @@ static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
 	struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
 
 	virtio_gpu_cmd_set_scanout(vgdev, output->index, 0, 0, 0, 0, 0);
+	virtio_gpu_notify(vgdev);
 	output->enabled = false;
 }
 
@@ -121,13 +122,6 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
 static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
 					 struct drm_crtc_state *old_state)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&crtc->dev->event_lock, flags);
-	if (crtc->state->event)
-		drm_crtc_send_vblank_event(crtc, crtc->state->event);
-	crtc->state->event = NULL;
-	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 }
 
 static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
@@ -332,6 +326,7 @@ static void vgdev_atomic_commit_tail(struct drm_atomic_state *state)
 
 	drm_atomic_helper_commit_modeset_enables(dev, state);
 	drm_atomic_helper_commit_planes(dev, state, 0);
+	drm_atomic_helper_fake_vblank(state);
 	drm_atomic_helper_commit_hw_done(state);
 	drm_atomic_helper_wait_for_vblanks(dev, state);
 
@@ -375,6 +370,5 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
 	for (i = 0 ; i < vgdev->num_scanouts; ++i)
 		kfree(vgdev->outputs[i].edid);
-	drm_atomic_helper_shutdown(vgdev->ddev);
 	drm_mode_config_cleanup(vgdev->ddev);
 }
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 8cf27af3ad53..ab4bed78e656 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -31,6 +31,7 @@
 #include <linux/pci.h>
 
 #include <drm/drm.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_file.h>
 
@@ -135,7 +136,8 @@ static void virtio_gpu_remove(struct virtio_device *vdev)
 {
 	struct drm_device *dev = vdev->priv;
 
-	drm_dev_unregister(dev);
+	drm_dev_unplug(dev);
+	drm_atomic_helper_shutdown(dev);
 	virtio_gpu_deinit(dev);
 	drm_dev_put(dev);
 }
@@ -214,4 +216,6 @@ static struct drm_driver driver = {
 	.major = DRIVER_MAJOR,
 	.minor = DRIVER_MINOR,
 	.patchlevel = DRIVER_PATCHLEVEL,
+
+	.release = virtio_gpu_release,
 };
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 7e69c06e168e..c1824bdf2418 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -32,6 +32,7 @@
 #include <linux/virtio_gpu.h>
 
 #include <drm/drm_atomic.h>
+#include <drm/drm_drv.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem.h>
@@ -68,15 +69,21 @@ struct virtio_gpu_object_params {
 
 struct virtio_gpu_object {
 	struct drm_gem_shmem_object base;
 	uint32_t hw_res_handle;
-
-	struct sg_table *pages;
-	uint32_t mapped;
 	bool dumb;
 	bool created;
 };
 #define gem_to_virtio_gpu_obj(gobj) \
 	container_of((gobj), struct virtio_gpu_object, base.base)
 
+struct virtio_gpu_object_shmem {
+	struct virtio_gpu_object base;
+	struct sg_table *pages;
+	uint32_t mapped;
+};
+
+#define to_virtio_gpu_shmem(virtio_gpu_object) \
+	container_of((virtio_gpu_object), struct virtio_gpu_object_shmem, base)
+
 struct virtio_gpu_object_array {
 	struct ww_acquire_ctx ticket;
 	struct list_head next;
@@ -114,6 +121,7 @@ struct virtio_gpu_vbuffer {
 	char *resp_buf;
 	int resp_size;
 	virtio_gpu_resp_cb resp_cb;
+	void *resp_cb_data;
 
 	struct virtio_gpu_object_array *objs;
 	struct list_head list;
@@ -175,10 +183,8 @@ struct virtio_gpu_device {
 	struct virtio_gpu_queue ctrlq;
 	struct virtio_gpu_queue cursorq;
 	struct kmem_cache *vbufs;
-	bool vqs_ready;
 
-	bool disable_notify;
-	bool pending_notify;
+	atomic_t pending_commands;
 
 	struct ida	resource_ida;
 
@@ -193,6 +199,7 @@ struct virtio_gpu_device {
 
 	bool has_virgl_3d;
 	bool has_edid;
+	bool has_indirect;
 
 	struct work_struct config_changed_work;
 
@@ -207,6 +214,8 @@ struct virtio_gpu_device {
 
 struct virtio_gpu_fpriv {
 	uint32_t ctx_id;
+	bool context_created;
+	struct mutex context_lock;
 };
 
 /* virtio_ioctl.c */
@@ -216,6 +225,7 @@ extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
 
 /* virtio_kms.c */
 int virtio_gpu_init(struct drm_device *dev);
 void virtio_gpu_deinit(struct drm_device *dev);
+void virtio_gpu_release(struct drm_device *dev);
 int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file);
 void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file);
 
@@ -262,7 +272,7 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
 				    struct virtio_gpu_object_array *objs,
 				    struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
-				   uint32_t resource_id);
+				   struct virtio_gpu_object *bo);
 void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 					uint64_t offset,
 					uint32_t width, uint32_t height,
@@ -279,9 +289,8 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
 				uint32_t x, uint32_t y);
 int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 			     struct virtio_gpu_object *obj,
-			     struct virtio_gpu_fence *fence);
-void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
-			      struct virtio_gpu_object *obj);
+			     struct virtio_gpu_mem_entry *ents,
+			     unsigned int nents);
 int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
 int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
@@ -332,8 +341,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work);
 void virtio_gpu_dequeue_cursor_func(struct work_struct *work);
 void virtio_gpu_dequeue_fence_func(struct work_struct *work);
 
-void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev);
-void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev);
+void virtio_gpu_notify(struct virtio_gpu_device *vgdev);
 
 /* virtio_gpu_display.c */
 void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
@@ -355,12 +363,16 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
 				    u64 last_seq);
 
 /* virtio_gpu_object */
+void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo);
 struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
 						size_t size);
 int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 			     struct virtio_gpu_object_params *params,
 			     struct virtio_gpu_object **bo_ptr,
 			     struct virtio_gpu_fence *fence);
+
+bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo);
+
 /* virtgpu_prime.c */
 struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
 	struct drm_device *dev, struct dma_buf_attachment *attach,
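Worth calling out in the header change above: the shmem-only state (pages, mapped) moves out of struct virtio_gpu_object into a wrapper struct, and to_virtio_gpu_shmem() recovers the wrapper from the embedded base via container_of(). A minimal user-space sketch of that embedding pattern, with illustrative type names (the kernel's real container_of() lives in its own headers; it is redefined below only so the example is self-contained):

	#include <stddef.h>
	#include <stdio.h>

	/* self-contained stand-in for the kernel's container_of() */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct base_obj { unsigned int handle; };

	struct shmem_obj {
		struct base_obj base;   /* embedded, like virtio_gpu_object_shmem */
		void *pages;
	};

	int main(void)
	{
		struct shmem_obj s = { .base = { .handle = 42 }, .pages = NULL };
		struct base_obj *b = &s.base;   /* upcast: hand out the base */
		struct shmem_obj *back = container_of(b, struct shmem_obj, base);

		printf("handle=%u roundtrip=%d\n", back->base.handle, back == &s);
		return 0;
	}

The virtio_gpu_is_shmem() helper added alongside works because only shmem objects install virtio_gpu_shmem_funcs, so a pointer compare on the funcs table doubles as a type check.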
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 0a2b62279647..0d6152c99a27 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -123,6 +123,7 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
 
 	virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
 					       objs);
+	virtio_gpu_notify(vgdev);
 	return 0;
 }
 
@@ -143,6 +144,7 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
 
 	virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id,
 					       objs);
+	virtio_gpu_notify(vgdev);
 }
 
 struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 205ec4abae2b..336cc9143205 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -33,13 +33,34 @@
 
 #include "virtgpu_drv.h"
 
+static void virtio_gpu_create_context(struct drm_device *dev,
+				      struct drm_file *file)
+{
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+	char dbgname[TASK_COMM_LEN];
+
+	mutex_lock(&vfpriv->context_lock);
+	if (vfpriv->context_created)
+		goto out_unlock;
+
+	get_task_comm(dbgname, current);
+	virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
+				      strlen(dbgname), dbgname);
+	virtio_gpu_notify(vgdev);
+	vfpriv->context_created = true;
+
+out_unlock:
+	mutex_unlock(&vfpriv->context_lock);
+}
+
 static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
-				struct drm_file *file_priv)
+				struct drm_file *file)
 {
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct drm_virtgpu_map *virtio_gpu_map = data;
 
-	return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
+	return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
 					 virtio_gpu_map->handle,
 					 &virtio_gpu_map->offset);
 }
@@ -51,11 +72,11 @@ static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
  * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
  */
 static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
-				 struct drm_file *drm_file)
+				 struct drm_file *file)
 {
 	struct drm_virtgpu_execbuffer *exbuf = data;
 	struct virtio_gpu_device *vgdev = dev->dev_private;
-	struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
+	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
 	struct virtio_gpu_fence *out_fence;
 	int ret;
 	uint32_t *bo_handles = NULL;
@@ -74,6 +95,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 
 	exbuf->fence_fd = -1;
 
+	virtio_gpu_create_context(dev, file);
 	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
 		struct dma_fence *in_fence;
 
@@ -116,7 +138,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 			goto out_unused_fd;
 		}
 
-		buflist = virtio_gpu_array_from_handles(drm_file, bo_handles,
+		buflist = virtio_gpu_array_from_handles(file, bo_handles,
 							exbuf->num_bo_handles);
 		if (!buflist) {
 			ret = -ENOENT;
@@ -126,22 +148,22 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 		bo_handles = NULL;
 	}
 
-	if (buflist) {
-		ret = virtio_gpu_array_lock_resv(buflist);
-		if (ret)
-			goto out_unused_fd;
-	}
-
 	buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
 	if (IS_ERR(buf)) {
 		ret = PTR_ERR(buf);
-		goto out_unresv;
+		goto out_unused_fd;
+	}
+
+	if (buflist) {
+		ret = virtio_gpu_array_lock_resv(buflist);
+		if (ret)
+			goto out_memdup;
 	}
 
 	out_fence = virtio_gpu_fence_alloc(vgdev);
 	if(!out_fence) {
 		ret = -ENOMEM;
-		goto out_memdup;
+		goto out_unresv;
 	}
 
 	if (out_fence_fd >= 0) {
@@ -158,13 +180,14 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 
 	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
 			      vfpriv->ctx_id, buflist, out_fence);
+	virtio_gpu_notify(vgdev);
 	return 0;
 
-out_memdup:
-	kvfree(buf);
 out_unresv:
 	if (buflist)
 		virtio_gpu_array_unlock_resv(buflist);
+out_memdup:
+	kvfree(buf);
 out_unused_fd:
 	kvfree(bo_handles);
 	if (buflist)
@@ -177,7 +200,7 @@ out_unused_fd:
 }
 
 static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
-				     struct drm_file *file_priv)
+				     struct drm_file *file)
 {
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct drm_virtgpu_getparam *param = data;
@@ -200,7 +223,7 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
 }
 
 static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
-					    struct drm_file *file_priv)
+					    struct drm_file *file)
 {
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct drm_virtgpu_resource_create *rc = data;
@@ -211,7 +234,17 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 	uint32_t handle = 0;
 	struct virtio_gpu_object_params params = { 0 };
 
-	if (vgdev->has_virgl_3d == false) {
+	if (vgdev->has_virgl_3d) {
+		virtio_gpu_create_context(dev, file);
+		params.virgl = true;
+		params.target = rc->target;
+		params.bind = rc->bind;
+		params.depth = rc->depth;
+		params.array_size = rc->array_size;
+		params.last_level = rc->last_level;
+		params.nr_samples = rc->nr_samples;
+		params.flags = rc->flags;
+	} else {
 		if (rc->depth > 1)
 			return -EINVAL;
 		if (rc->nr_samples > 1)
@@ -228,16 +261,6 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 	params.width = rc->width;
 	params.height = rc->height;
 	params.size = rc->size;
-	if (vgdev->has_virgl_3d) {
-		params.virgl = true;
-		params.target = rc->target;
-		params.bind = rc->bind;
-		params.depth = rc->depth;
-		params.array_size = rc->array_size;
-		params.last_level = rc->last_level;
-		params.nr_samples = rc->nr_samples;
-		params.flags = rc->flags;
-	}
 	/* allocate a single page size object */
 	if (params.size == 0)
 		params.size = PAGE_SIZE;
@@ -251,7 +274,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 		return ret;
 	obj = &qobj->base.base;
 
-	ret = drm_gem_handle_create(file_priv, obj, &handle);
+	ret = drm_gem_handle_create(file, obj, &handle);
 	if (ret) {
 		drm_gem_object_release(obj);
 		return ret;
@@ -264,13 +287,13 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 }
 
 static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
-					  struct drm_file *file_priv)
+					  struct drm_file *file)
 {
 	struct drm_virtgpu_resource_info *ri = data;
 	struct drm_gem_object *gobj = NULL;
 	struct virtio_gpu_object *qobj = NULL;
 
-	gobj = drm_gem_object_lookup(file_priv, ri->bo_handle);
+	gobj = drm_gem_object_lookup(file, ri->bo_handle);
 	if (gobj == NULL)
 		return -ENOENT;
 
@@ -297,6 +320,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
 	if (vgdev->has_virgl_3d == false)
 		return -ENOSYS;
 
+	virtio_gpu_create_context(dev, file);
 	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
 	if (objs == NULL)
 		return -ENOENT;
@@ -314,6 +338,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
 		(vgdev, vfpriv->ctx_id, offset, args->level,
 		 &args->box, objs, fence);
 	dma_fence_put(&fence->f);
+	virtio_gpu_notify(vgdev);
 	return 0;
 
 err_unlock:
@@ -344,6 +369,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 			 args->box.w, args->box.h, args->box.x, args->box.y,
 			 objs, NULL);
 	} else {
+		virtio_gpu_create_context(dev, file);
 		ret = virtio_gpu_array_lock_resv(objs);
 		if (ret != 0)
 			goto err_put_free;
@@ -359,6 +385,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 			 args->level, &args->box, objs, fence);
 		dma_fence_put(&fence->f);
 	}
+	virtio_gpu_notify(vgdev);
 	return 0;
 
 err_unlock:
@@ -445,6 +472,7 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
 	/* not in cache - need to talk to hw */
 	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
 				  &cache_ent);
+	virtio_gpu_notify(vgdev);
 
 copy_exit:
 	ret = wait_event_timeout(vgdev->resp_wq,
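The pattern behind virtio_gpu_create_context() above: the context id is reserved at open() time (see the virtgpu_kms.c hunk further down), but the host context is created lazily on the first ioctl that needs one, with context_lock/context_created ensuring exactly-once creation even when several ioctls race on the same file. A compact user-space sketch of the same idiom, using pthreads in place of the kernel mutex API (all names here are illustrative):

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct fpriv {
		pthread_mutex_t lock;   /* plays the role of context_lock */
		bool created;           /* plays the role of context_created */
	};

	/* hypothetical stand-in for virtio_gpu_cmd_context_create() + notify */
	static void host_create_context(void)
	{
		printf("context created on host\n");
	}

	static void create_context_once(struct fpriv *f)
	{
		pthread_mutex_lock(&f->lock);
		if (!f->created) {
			host_create_context();  /* at most once per open file */
			f->created = true;
		}
		pthread_mutex_unlock(&f->lock);
	}

	int main(void)
	{
		struct fpriv f = { PTHREAD_MUTEX_INITIALIZER, false };

		create_context_once(&f);        /* first ioctl: creates */
		create_context_once(&f);        /* later ioctls: no-op */
		return 0;
	}

The payoff is that processes which open the DRM node but never issue 3D commands (e.g. mode-setting daemons probing the device) no longer cost the host a render context.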
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 2f5773e43557..023a030ca7b9 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -44,6 +44,7 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
 		if (vgdev->has_edid)
 			virtio_gpu_cmd_get_edids(vgdev);
 		virtio_gpu_cmd_get_display_info(vgdev);
+		virtio_gpu_notify(vgdev);
 		drm_helper_hpd_irq_event(vgdev->ddev);
 		events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
 	}
@@ -51,22 +52,11 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
 		      events_clear, &events_clear);
 }
 
-static int virtio_gpu_context_create(struct virtio_gpu_device *vgdev,
-				      uint32_t nlen, const char *name)
-{
-	int handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);
-
-	if (handle < 0)
-		return handle;
-	handle += 1;
-	virtio_gpu_cmd_context_create(vgdev, handle, nlen, name);
-	return handle;
-}
-
 static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev,
 				      uint32_t ctx_id)
 {
 	virtio_gpu_cmd_context_destroy(vgdev, ctx_id);
+	virtio_gpu_notify(vgdev);
 	ida_free(&vgdev->ctx_id_ida, ctx_id - 1);
 }
 
@@ -92,6 +82,7 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
 	}
 	for (i = 0; i < num_capsets; i++) {
 		virtio_gpu_cmd_get_capset_info(vgdev, i);
+		virtio_gpu_notify(vgdev);
 		ret = wait_event_timeout(vgdev->resp_wq,
 					 vgdev->capsets[i].id > 0, 5 * HZ);
 		if (ret == 0) {
@@ -159,6 +150,9 @@ int virtio_gpu_init(struct drm_device *dev)
 	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
 		vgdev->has_edid = true;
 	}
+	if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
+		vgdev->has_indirect = true;
+	}
 
 	DRM_INFO("features: %cvirgl %cedid\n",
 		 vgdev->has_virgl_3d ? '+' : '-',
@@ -196,13 +190,13 @@ int virtio_gpu_init(struct drm_device *dev)
 	virtio_gpu_modeset_init(vgdev);
 
 	virtio_device_ready(vgdev->vdev);
-	vgdev->vqs_ready = true;
 
 	if (num_capsets)
 		virtio_gpu_get_capsets(vgdev, num_capsets);
 	if (vgdev->has_edid)
 		virtio_gpu_cmd_get_edids(vgdev);
 	virtio_gpu_cmd_get_display_info(vgdev);
+	virtio_gpu_notify(vgdev);
 	wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
 			   5 * HZ);
 	return 0;
@@ -231,12 +225,16 @@ void virtio_gpu_deinit(struct drm_device *dev)
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 
 	flush_work(&vgdev->obj_free_work);
-	vgdev->vqs_ready = false;
 	flush_work(&vgdev->ctrlq.dequeue_work);
 	flush_work(&vgdev->cursorq.dequeue_work);
 	flush_work(&vgdev->config_changed_work);
 	vgdev->vdev->config->reset(vgdev->vdev);
 	vgdev->vdev->config->del_vqs(vgdev->vdev);
+}
+
+void virtio_gpu_release(struct drm_device *dev)
+{
+	struct virtio_gpu_device *vgdev = dev->dev_private;
 
 	virtio_gpu_modeset_fini(vgdev);
 	virtio_gpu_free_vbufs(vgdev);
@@ -249,8 +247,7 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
 {
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_fpriv *vfpriv;
-	int id;
-	char dbgname[TASK_COMM_LEN];
+	int handle;
 
 	/* can't create contexts without 3d renderer */
 	if (!vgdev->has_virgl_3d)
@@ -261,14 +258,15 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
 	if (!vfpriv)
 		return -ENOMEM;
 
-	get_task_comm(dbgname, current);
-	id = virtio_gpu_context_create(vgdev, strlen(dbgname), dbgname);
-	if (id < 0) {
+	mutex_init(&vfpriv->context_lock);
+
+	handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);
+	if (handle < 0) {
 		kfree(vfpriv);
-		return id;
+		return handle;
 	}
 
-	vfpriv->ctx_id = id;
+	vfpriv->ctx_id = handle + 1;
 	file->driver_priv = vfpriv;
 	return 0;
 }
@@ -284,6 +282,7 @@ void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
 
 	vfpriv = file->driver_priv;
 	virtio_gpu_context_destroy(vgdev, vfpriv->ctx_id);
+	mutex_destroy(&vfpriv->context_lock);
 	kfree(vfpriv);
 	file->driver_priv = NULL;
 }
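The deinit/release split above pairs with the drm_dev_unplug() change in virtgpu_drv.c: virtio_gpu_deinit() now only quiesces workers and resets the virtio device at remove time, while freeing the remaining driver state waits for virtio_gpu_release(), which DRM invokes once the last drm_device reference drops. Userspace that still holds the device node open therefore can never touch freed state. A toy sketch of that last-reference-release shape (illustrative names, not the DRM API):

	#include <stdio.h>

	struct device_state {
		int refcount;
		void (*release)(struct device_state *);
	};

	/* like drm_dev_put(): the release hook runs only at refcount zero */
	static void put_device(struct device_state *d)
	{
		if (--d->refcount == 0)
			d->release(d);
	}

	static void my_release(struct device_state *d)
	{
		(void)d;
		printf("last reference gone, freeing driver state\n");
	}

	int main(void)
	{
		struct device_state d = { .refcount = 2, .release = my_release };

		put_device(&d);  /* unplug: an open file handle still holds a ref */
		put_device(&d);  /* final close: release runs only now */
		return 0;
	}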
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 017a9e0fc3bb..2bfb13d1932e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -23,6 +23,7 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <linux/dma-mapping.h>
 #include <linux/moduleparam.h>
 
 #include "virtgpu_drv.h"
@@ -42,8 +43,8 @@ static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
 		 * "f91a9dd35715 Fix unlinking resources from hash
 		 * table." (Feb 2019) fixes the bug.
 		 */
-		static int handle;
-		handle++;
+		static atomic_t seqno = ATOMIC_INIT(0);
+		int handle = atomic_inc_return(&seqno);
 		*resid = handle + 1;
 	} else {
 		int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
@@ -61,21 +62,46 @@ static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t
 	}
 }
 
-static void virtio_gpu_free_object(struct drm_gem_object *obj)
+void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
 {
-	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
 	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
 
-	if (bo->pages)
-		virtio_gpu_object_detach(vgdev, bo);
-	if (bo->created)
-		virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
 	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
+	if (virtio_gpu_is_shmem(bo)) {
+		struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
+
+		if (shmem->pages) {
+			if (shmem->mapped) {
+				dma_unmap_sg(vgdev->vdev->dev.parent,
+					     shmem->pages->sgl, shmem->mapped,
+					     DMA_TO_DEVICE);
+				shmem->mapped = 0;
+			}
+
+			sg_free_table(shmem->pages);
+			shmem->pages = NULL;
+			drm_gem_shmem_unpin(&bo->base.base);
+		}
+
+		drm_gem_shmem_free_object(&bo->base.base);
+	}
+}
+
+static void virtio_gpu_free_object(struct drm_gem_object *obj)
+{
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
 
-	drm_gem_shmem_free_object(obj);
+	if (bo->created) {
+		virtio_gpu_cmd_unref_resource(vgdev, bo);
+		virtio_gpu_notify(vgdev);
+		/* completion handler calls virtio_gpu_cleanup_object() */
+		return;
+	}
+	virtio_gpu_cleanup_object(bo);
 }
 
-static const struct drm_gem_object_funcs virtio_gpu_gem_funcs = {
+static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
 	.free = virtio_gpu_free_object,
 	.open = virtio_gpu_gem_object_open,
 	.close = virtio_gpu_gem_object_close,
@@ -86,9 +112,14 @@ static const struct drm_gem_object_funcs virtio_gpu_gem_funcs = {
 	.get_sg_table = drm_gem_shmem_get_sg_table,
 	.vmap = drm_gem_shmem_vmap,
 	.vunmap = drm_gem_shmem_vunmap,
-	.mmap = &drm_gem_shmem_mmap,
+	.mmap = drm_gem_shmem_mmap,
 };
 
+bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
+{
+	return bo->base.base.funcs == &virtio_gpu_shmem_funcs;
+}
+
 struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
 						size_t size)
 {
@@ -98,10 +129,58 @@ struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
 	if (!bo)
 		return NULL;
 
-	bo->base.base.funcs = &virtio_gpu_gem_funcs;
+	bo->base.base.funcs = &virtio_gpu_shmem_funcs;
+	bo->base.map_cached = true;
 	return &bo->base.base;
 }
 
+static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
+					struct virtio_gpu_object *bo,
+					struct virtio_gpu_mem_entry **ents,
+					unsigned int *nents)
+{
+	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
+	struct scatterlist *sg;
+	int si, ret;
+
+	ret = drm_gem_shmem_pin(&bo->base.base);
+	if (ret < 0)
+		return -EINVAL;
+
+	shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
+	if (!shmem->pages) {
+		drm_gem_shmem_unpin(&bo->base.base);
+		return -EINVAL;
+	}
+
+	if (use_dma_api) {
+		shmem->mapped = dma_map_sg(vgdev->vdev->dev.parent,
+					   shmem->pages->sgl,
+					   shmem->pages->nents,
+					   DMA_TO_DEVICE);
+		*nents = shmem->mapped;
+	} else {
+		*nents = shmem->pages->nents;
+	}
+
+	*ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
+			      GFP_KERNEL);
+	if (!(*ents)) {
+		DRM_ERROR("failed to allocate ent list\n");
+		return -ENOMEM;
+	}
+
+	for_each_sg(shmem->pages->sgl, sg, *nents, si) {
+		(*ents)[si].addr = cpu_to_le64(use_dma_api
+					       ? sg_dma_address(sg)
+					       : sg_phys(sg));
+		(*ents)[si].length = cpu_to_le32(sg->length);
+		(*ents)[si].padding = 0;
+	}
+	return 0;
+}
+
 int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 			     struct virtio_gpu_object_params *params,
 			     struct virtio_gpu_object **bo_ptr,
@@ -110,6 +189,8 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 	struct virtio_gpu_object_array *objs = NULL;
 	struct drm_gem_shmem_object *shmem_obj;
 	struct virtio_gpu_object *bo;
+	struct virtio_gpu_mem_entry *ents;
+	unsigned int nents;
 	int ret;
 
 	*bo_ptr = NULL;
@@ -146,12 +227,19 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 					       objs, fence);
 	}
 
-	ret = virtio_gpu_object_attach(vgdev, bo, NULL);
+	ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
+	if (ret != 0) {
+		virtio_gpu_free_object(&shmem_obj->base);
+		return ret;
+	}
+
+	ret = virtio_gpu_object_attach(vgdev, bo, ents, nents);
 	if (ret != 0) {
 		virtio_gpu_free_object(&shmem_obj->base);
 		return ret;
 	}
 
+	virtio_gpu_notify(vgdev);
 	*bo_ptr = bo;
 	return 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index d1c3f5fbfee4..52d24179bcec 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -148,14 +148,13 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
 					   plane->state->src_w >> 16,
 					   plane->state->src_h >> 16,
 					   0, 0);
+		virtio_gpu_notify(vgdev);
 		return;
 	}
 
 	if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
 		return;
 
-	virtio_gpu_disable_notify(vgdev);
-
 	bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
 	if (bo->dumb)
 		virtio_gpu_update_dumb_bo(vgdev, plane->state, &rect);
@@ -186,8 +185,7 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
 				      rect.y1,
 				      rect.x2 - rect.x1,
 				      rect.y2 - rect.y1);
-
-	virtio_gpu_enable_notify(vgdev);
+	virtio_gpu_notify(vgdev);
 }
 
 static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
@@ -266,6 +264,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 			 plane->state->crtc_w,
 			 plane->state->crtc_h,
 			 0, 0, objs, vgfb->fence);
+		virtio_gpu_notify(vgdev);
 		dma_fence_wait(&vgfb->fence->f, true);
 		dma_fence_put(&vgfb->fence->f);
 		vgfb->fence = NULL;
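In the object.c rework above, virtio_gpu_object_shmem_init() pins the shmem pages, optionally DMA-maps them, and flattens the scatterlist into the virtio_gpu_mem_entry array that RESOURCE_ATTACH_BACKING carries to the host. A user-space flavored sketch of that flattening loop, with stand-in types for scatterlist and virtio_gpu_mem_entry (the kernel version additionally byte-swaps with cpu_to_le64()/cpu_to_le32()):

	#include <stdint.h>
	#include <stdlib.h>

	struct sg_chunk  { uint64_t addr; uint32_t len; };  /* stand-in scatterlist */
	struct mem_entry { uint64_t addr; uint32_t length; uint32_t padding; };

	static struct mem_entry *build_ents(const struct sg_chunk *sg, unsigned int n)
	{
		struct mem_entry *ents = calloc(n, sizeof(*ents));
		unsigned int i;

		if (!ents)
			return NULL;
		for (i = 0; i < n; i++) {
			ents[i].addr = sg[i].addr;      /* kernel: cpu_to_le64() */
			ents[i].length = sg[i].len;     /* kernel: cpu_to_le32() */
			ents[i].padding = 0;
		}
		return ents;
	}

	int main(void)
	{
		struct sg_chunk sg[2] = { { 0x1000, 4096 }, { 0x8000, 8192 } };
		struct mem_entry *ents = build_ents(sg, 2);

		free(ents);
		return 0;
	}

Splitting this out of virtio_gpu_object_attach() means the entry array is built once at create time, and attach becomes a thin wrapper around queueing the command.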
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 5914e79d3429..73854915ec34 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -95,7 +95,8 @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
 	if (!vbuf)
 		return ERR_PTR(-ENOMEM);
 
-	BUG_ON(size > MAX_INLINE_CMD_SIZE);
+	BUG_ON(size > MAX_INLINE_CMD_SIZE ||
+	       size < sizeof(struct virtio_gpu_ctrl_hdr));
 	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
 	vbuf->size = size;
 
@@ -109,21 +110,14 @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
 	return vbuf;
 }
 
-static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
-				  struct virtio_gpu_vbuffer **vbuffer_p,
-				  int size)
+static struct virtio_gpu_ctrl_hdr *
+virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
 {
-	struct virtio_gpu_vbuffer *vbuf;
-
-	vbuf = virtio_gpu_get_vbuf(vgdev, size,
-				   sizeof(struct virtio_gpu_ctrl_hdr),
-				   NULL, NULL);
-	if (IS_ERR(vbuf)) {
-		*vbuffer_p = NULL;
-		return ERR_CAST(vbuf);
-	}
-	*vbuffer_p = vbuf;
-	return vbuf->buf;
+	/* this assumes a vbuf contains a command that starts with a
+	 * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
+	 * virtqueues.
+	 */
+	return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
 }
 
 static struct virtio_gpu_update_cursor*
@@ -161,6 +155,25 @@ static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
 	return (struct virtio_gpu_command *)vbuf->buf;
 }
 
+static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
+				  struct virtio_gpu_vbuffer **vbuffer_p,
+				  int size)
+{
+	return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
+					 sizeof(struct virtio_gpu_ctrl_hdr),
+					 NULL);
+}
+
+static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
+				     struct virtio_gpu_vbuffer **vbuffer_p,
+				     int size,
+				     virtio_gpu_resp_cb cb)
+{
+	return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
+					 sizeof(struct virtio_gpu_ctrl_hdr),
+					 NULL);
+}
+
 static void free_vbuf(struct virtio_gpu_device *vgdev,
 		      struct virtio_gpu_vbuffer *vbuf)
 {
@@ -209,12 +222,12 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
 		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
 
 		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
-			if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
+			if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
 				struct virtio_gpu_ctrl_hdr *cmd;
-				cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf;
-				DRM_ERROR("response 0x%x (command 0x%x)\n",
-					  le32_to_cpu(resp->type),
-					  le32_to_cpu(cmd->type));
+				cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
+				DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
+						      le32_to_cpu(resp->type),
+						      le32_to_cpu(cmd->type));
 			} else
 				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
 		}
@@ -307,109 +320,107 @@ static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
 	return sgt;
 }
 
-static bool virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
-						struct virtio_gpu_vbuffer *vbuf,
-						struct scatterlist *vout)
-		__releases(&vgdev->ctrlq.qlock)
-		__acquires(&vgdev->ctrlq.qlock)
+static void virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
+				      struct virtio_gpu_vbuffer *vbuf,
+				      struct virtio_gpu_fence *fence,
+				      int elemcnt,
+				      struct scatterlist **sgs,
+				      int outcnt,
+				      int incnt)
 {
 	struct virtqueue *vq = vgdev->ctrlq.vq;
-	struct scatterlist *sgs[3], vcmd, vresp;
-	int outcnt = 0, incnt = 0;
-	bool notify = false;
-	int ret;
+	int ret, idx;
 
-	if (!vgdev->vqs_ready)
-		return notify;
+	if (!drm_dev_enter(vgdev->ddev, &idx)) {
+		if (fence && vbuf->objs)
+			virtio_gpu_array_unlock_resv(vbuf->objs);
+		free_vbuf(vgdev, vbuf);
+		return;
+	}
 
-	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
-	sgs[outcnt + incnt] = &vcmd;
-	outcnt++;
+	if (vgdev->has_indirect)
+		elemcnt = 1;
 
-	if (vout) {
-		sgs[outcnt + incnt] = vout;
-		outcnt++;
+again:
+	spin_lock(&vgdev->ctrlq.qlock);
+
+	if (vq->num_free < elemcnt) {
+		spin_unlock(&vgdev->ctrlq.qlock);
+		virtio_gpu_notify(vgdev);
+		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
+		goto again;
 	}
 
-	if (vbuf->resp_size) {
-		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
-		sgs[outcnt + incnt] = &vresp;
-		incnt++;
+	/* now that the position of the vbuf in the virtqueue is known, we can
+	 * finally set the fence id
+	 */
+	if (fence) {
+		virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
+				      fence);
+		if (vbuf->objs) {
+			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
+			virtio_gpu_array_unlock_resv(vbuf->objs);
+		}
 	}
 
-retry:
 	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
-	if (ret == -ENOSPC) {
-		spin_unlock(&vgdev->ctrlq.qlock);
-		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
-		spin_lock(&vgdev->ctrlq.qlock);
-		goto retry;
-	} else {
-		trace_virtio_gpu_cmd_queue(vq,
-			(struct virtio_gpu_ctrl_hdr *)vbuf->buf);
+	WARN_ON(ret);
 
-		notify = virtqueue_kick_prepare(vq);
-	}
-	return notify;
+	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));
+
+	atomic_inc(&vgdev->pending_commands);
+
+	spin_unlock(&vgdev->ctrlq.qlock);
+
+	drm_dev_exit(idx);
 }
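Note how virtio_gpu_queue_ctrl_sgs() replaces the old vqs_ready flag: submission now brackets its work with drm_dev_enter()/drm_dev_exit(), so once drm_dev_unplug() has run the enter fails and the buffer is discarded instead of being pushed at dead hardware. A toy rendering of that guard (dev_enter/dev_exit are stand-ins for the unplug state that drm_dev_enter() really tracks via SRCU):

	#include <stdbool.h>
	#include <stdio.h>

	struct dev { bool unplugged; };

	/* stand-ins for drm_dev_enter()/drm_dev_exit() */
	static bool dev_enter(struct dev *d) { return !d->unplugged; }
	static void dev_exit(struct dev *d) { (void)d; }

	static int submit(struct dev *d, const char *cmd)
	{
		if (!dev_enter(d)) {
			printf("dropped %s: device gone\n", cmd);
			return -1;               /* kernel code frees the vbuf here */
		}
		printf("queued %s\n", cmd);      /* safe: device cannot vanish here */
		dev_exit(d);
		return 0;
	}

	int main(void)
	{
		struct dev d = { .unplugged = false };

		submit(&d, "transfer");
		d.unplugged = true;              /* drm_dev_unplug() happened */
		submit(&d, "flush");             /* discarded cleanly */
		return 0;
	}

The other subtlety the new code preserves: the fence id is emitted under ctrlq.qlock only after ring space is guaranteed, so fence ids still reach the ring in order.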
 
 static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
 						struct virtio_gpu_vbuffer *vbuf,
-						struct virtio_gpu_ctrl_hdr *hdr,
 						struct virtio_gpu_fence *fence)
 {
-	struct virtqueue *vq = vgdev->ctrlq.vq;
-	struct scatterlist *vout = NULL, sg;
+	struct scatterlist *sgs[3], vcmd, vout, vresp;
 	struct sg_table *sgt = NULL;
-	bool notify;
-	int outcnt = 0;
+	int elemcnt = 0, outcnt = 0, incnt = 0;
 
+	/* set up vcmd */
+	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
+	elemcnt++;
+	sgs[outcnt] = &vcmd;
+	outcnt++;
+
+	/* set up vout */
 	if (vbuf->data_size) {
 		if (is_vmalloc_addr(vbuf->data_buf)) {
+			int sg_ents;
+
 			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
-					     &outcnt);
-			if (!sgt)
+					     &sg_ents);
+			if (!sgt) {
+				if (fence && vbuf->objs)
+					virtio_gpu_array_unlock_resv(vbuf->objs);
 				return;
-			vout = sgt->sgl;
+			}
+
+			elemcnt += sg_ents;
+			sgs[outcnt] = sgt->sgl;
 		} else {
-			sg_init_one(&sg, vbuf->data_buf, vbuf->data_size);
-			vout = &sg;
-			outcnt = 1;
+			sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
+			elemcnt++;
+			sgs[outcnt] = &vout;
 		}
+		outcnt++;
 	}
 
-again:
-	spin_lock(&vgdev->ctrlq.qlock);
-
-	/*
-	 * Make sure we have enouth space in the virtqueue.  If not
-	 * wait here until we have.
-	 *
-	 * Without that virtio_gpu_queue_ctrl_buffer_nolock might have
-	 * to wait for free space, which can result in fence ids being
-	 * submitted out-of-order.
-	 */
-	if (vq->num_free < 2 + outcnt) {
-		spin_unlock(&vgdev->ctrlq.qlock);
-		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
-		goto again;
+	/* set up vresp */
+	if (vbuf->resp_size) {
+		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
+		elemcnt++;
+		sgs[outcnt + incnt] = &vresp;
+		incnt++;
 	}
 
-	if (hdr && fence) {
-		virtio_gpu_fence_emit(vgdev, hdr, fence);
-		if (vbuf->objs) {
-			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
-			virtio_gpu_array_unlock_resv(vbuf->objs);
-		}
-	}
-	notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf, vout);
-	spin_unlock(&vgdev->ctrlq.qlock);
-	if (notify) {
-		if (vgdev->disable_notify)
-			vgdev->pending_notify = true;
-		else
-			virtqueue_notify(vgdev->ctrlq.vq);
-	}
+	virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
+				  incnt);
 
 	if (sgt) {
 		sg_free_table(sgt);
@@ -417,25 +428,26 @@ again:
 	}
 }
 
-void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev)
+void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
 {
-	vgdev->disable_notify = true;
-}
-
-void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev)
-{
-	vgdev->disable_notify = false;
+	bool notify;
 
-	if (!vgdev->pending_notify)
+	if (!atomic_read(&vgdev->pending_commands))
 		return;
-	vgdev->pending_notify = false;
-	virtqueue_notify(vgdev->ctrlq.vq);
+
+	spin_lock(&vgdev->ctrlq.qlock);
+	atomic_set(&vgdev->pending_commands, 0);
+	notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
+	spin_unlock(&vgdev->ctrlq.qlock);
+
+	if (notify)
+		virtqueue_notify(vgdev->ctrlq.vq);
 }
 
 static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
 					 struct virtio_gpu_vbuffer *vbuf)
 {
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL, NULL);
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
 }
 
 static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
@@ -443,12 +455,13 @@ static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
 {
 	struct virtqueue *vq = vgdev->cursorq.vq;
 	struct scatterlist *sgs[1], ccmd;
+	int idx, ret, outcnt;
 	bool notify;
-	int ret;
-	int outcnt;
 
-	if (!vgdev->vqs_ready)
+	if (!drm_dev_enter(vgdev->ddev, &idx)) {
+		free_vbuf(vgdev, vbuf);
 		return;
+	}
 
 	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
 	sgs[0] = &ccmd;
@@ -464,7 +477,7 @@ retry:
 		goto retry;
 	} else {
 		trace_virtio_gpu_cmd_queue(vq,
-			(struct virtio_gpu_ctrl_hdr *)vbuf->buf);
+			virtio_gpu_vbuf_ctrl_hdr(vbuf));
 
 		notify = virtqueue_kick_prepare(vq);
 	}
@@ -473,6 +486,8 @@ retry:
 
 	if (notify)
 		virtqueue_notify(vq);
+
+	drm_dev_exit(idx);
 }
 
 /* just create gem objects for userspace and long lived objects,
@@ -499,39 +514,36 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
 	cmd_p->width = cpu_to_le32(params->width);
 	cmd_p->height = cpu_to_le32(params->height);
 
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 	bo->created = true;
 }
 
-void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
-				   uint32_t resource_id)
+static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
+				    struct virtio_gpu_vbuffer *vbuf)
 {
-	struct virtio_gpu_resource_unref *cmd_p;
-	struct virtio_gpu_vbuffer *vbuf;
+	struct virtio_gpu_object *bo;
 
-	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
-	memset(cmd_p, 0, sizeof(*cmd_p));
+	bo = vbuf->resp_cb_data;
+	vbuf->resp_cb_data = NULL;
 
-	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
-	cmd_p->resource_id = cpu_to_le32(resource_id);
-
-	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+	virtio_gpu_cleanup_object(bo);
 }
 
-static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
-						  uint32_t resource_id,
-						  struct virtio_gpu_fence *fence)
+void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
+				   struct virtio_gpu_object *bo)
 {
-	struct virtio_gpu_resource_detach_backing *cmd_p;
+	struct virtio_gpu_resource_unref *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
 
-	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
+					virtio_gpu_cmd_unref_cb);
 	memset(cmd_p, 0, sizeof(*cmd_p));
 
-	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
-	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+	vbuf->resp_cb_data = bo;
+	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 }
 
 void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
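With the callback plumbing above, resource destruction becomes asynchronous: virtio_gpu_free_object() (virtgpu_object.c) queues RESOURCE_UNREF and returns, and virtio_gpu_cmd_unref_cb() runs from the dequeue worker once the host has consumed the command, only then calling virtio_gpu_cleanup_object() to unmap and free the pages. A condensed sketch of the callback-with-context idiom, hypothetical names throughout:

	#include <stdio.h>

	struct vbuf;
	typedef void (*resp_cb)(struct vbuf *);

	struct vbuf {
		resp_cb cb;
		void *cb_data;          /* mirrors vbuf->resp_cb_data carrying the bo */
	};

	/* runs only after the host has consumed RESOURCE_UNREF */
	static void unref_cb(struct vbuf *v)
	{
		printf("host acked unref, freeing bo %p\n", v->cb_data);
	}

	static void queue_unref(struct vbuf *v, void *bo)
	{
		v->cb = unref_cb;
		v->cb_data = bo;
		/* ...submit v to the ring; do NOT free bo here... */
	}

	int main(void)
	{
		struct vbuf v;
		int bo;

		queue_unref(&v, &bo);
		v.cb(&v);               /* what the dequeue worker does on completion */
		return 0;
	}

This is also why the old DETACH_BACKING path with its blocking dma_fence_wait() can be deleted further down: backing pages now stay alive until the host confirms it is done with the resource.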
@@ -588,10 +600,11 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 	struct virtio_gpu_transfer_to_host_2d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
 
 	if (use_dma_api)
 		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
-				       bo->pages->sgl, bo->pages->nents,
+				       shmem->pages->sgl, shmem->pages->nents,
 				       DMA_TO_DEVICE);
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
@@ -606,7 +619,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 	cmd_p->r.x = cpu_to_le32(x);
 	cmd_p->r.y = cpu_to_le32(y);
 
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
 
 static void
@@ -629,7 +642,7 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
 	vbuf->data_buf = ents;
 	vbuf->data_size = sizeof(*ents) * nents;
 
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
 
 static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
@@ -939,7 +952,6 @@ void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
 	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
-
 }
 
 void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
@@ -988,7 +1000,8 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
 	cmd_p->flags = cpu_to_le32(params->flags);
 
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
+
 	bo->created = true;
 }
 
@@ -1003,10 +1016,11 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 	struct virtio_gpu_transfer_host_3d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
 
 	if (use_dma_api)
 		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
-				       bo->pages->sgl, bo->pages->nents,
+				       shmem->pages->sgl, shmem->pages->nents,
 				       DMA_TO_DEVICE);
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
@@ -1021,7 +1035,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 	cmd_p->offset = cpu_to_le64(offset);
 	cmd_p->level = cpu_to_le32(level);
 
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
 
 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
@@ -1047,7 +1061,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 	cmd_p->offset = cpu_to_le64(offset);
 	cmd_p->level = cpu_to_le32(level);
 
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
 
 void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
@@ -1070,94 +1084,19 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
 	cmd_p->size = cpu_to_le32(data_size);
 
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
 
 int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 			     struct virtio_gpu_object *obj,
-			     struct virtio_gpu_fence *fence)
+			     struct virtio_gpu_mem_entry *ents,
+			     unsigned int nents)
 {
-	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
-	struct virtio_gpu_mem_entry *ents;
-	struct scatterlist *sg;
-	int si, nents, ret;
-
-	if (WARN_ON_ONCE(!obj->created))
-		return -EINVAL;
-	if (WARN_ON_ONCE(obj->pages))
-		return -EINVAL;
-
-	ret = drm_gem_shmem_pin(&obj->base.base);
-	if (ret < 0)
-		return -EINVAL;
-
-	obj->pages = drm_gem_shmem_get_sg_table(&obj->base.base);
-	if (obj->pages == NULL) {
-		drm_gem_shmem_unpin(&obj->base.base);
-		return -EINVAL;
-	}
-
-	if (use_dma_api) {
-		obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
-					 obj->pages->sgl, obj->pages->nents,
-					 DMA_TO_DEVICE);
-		nents = obj->mapped;
-	} else {
-		nents = obj->pages->nents;
-	}
-
-	/* gets freed when the ring has consumed it */
-	ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
-			     GFP_KERNEL);
-	if (!ents) {
-		DRM_ERROR("failed to allocate ent list\n");
-		return -ENOMEM;
-	}
-
-	for_each_sg(obj->pages->sgl, sg, nents, si) {
-		ents[si].addr = cpu_to_le64(use_dma_api
-					    ? sg_dma_address(sg)
-					    : sg_phys(sg));
-		ents[si].length = cpu_to_le32(sg->length);
-		ents[si].padding = 0;
-	}
-
 	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
-					       ents, nents,
-					       fence);
+					       ents, nents, NULL);
 	return 0;
 }
 
-void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
-			      struct virtio_gpu_object *obj)
-{
-	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
-
-	if (WARN_ON_ONCE(!obj->pages))
-		return;
-
-	if (use_dma_api && obj->mapped) {
-		struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
-		/* detach backing and wait for the host process it ... */
-		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
-		dma_fence_wait(&fence->f, true);
-		dma_fence_put(&fence->f);
-
-		/* ... then tear down iommu mappings */
-		dma_unmap_sg(vgdev->vdev->dev.parent,
-			     obj->pages->sgl, obj->mapped,
-			     DMA_TO_DEVICE);
-		obj->mapped = 0;
-	} else {
-		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
-	}
-
-	sg_free_table(obj->pages);
-	obj->pages = NULL;
-
-	drm_gem_shmem_unpin(&obj->base.base);
-}
-
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
 			    struct virtio_gpu_output *output)
 {
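Taken together, the series replaces the old disable_notify/pending_notify pair with a simpler discipline visible throughout the diff: every command is queued silently while pending_commands records that a doorbell is owed, and each call site rings it once via virtio_gpu_notify() after its whole batch. A self-contained sketch of that batching scheme (C11 atomics stand in for the kernel's atomic_t; the command names echo the patch but the functions below are illustrative):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int pending_commands;

	static void queue_cmd(const char *name)
	{
		printf("queued %s\n", name);
		atomic_fetch_add(&pending_commands, 1);  /* doorbell owed, not rung */
	}

	static void notify(void)
	{
		if (atomic_exchange(&pending_commands, 0) == 0)
			return;                          /* nothing was queued */
		printf("kick host once\n");              /* virtqueue_notify() */
	}

	int main(void)
	{
		queue_cmd("transfer_to_host_2d");        /* damage upload */
		queue_cmd("resource_flush");             /* scanout flush */
		notify();                                /* one doorbell for the batch */
		return 0;
	}

Compared with the old scheme, a caller can no longer forget to re-enable notification: a missed notify() is at worst repaired by the next caller's kick, and the ring-full path in virtio_gpu_queue_ctrl_sgs() kicks explicitly before sleeping so the host always drains.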