Diffstat (limited to 'drivers/gpu/drm/virtio/virtgpu_ioctl.c')
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ioctl.c | 195
1 file changed, 186 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 5c1ad1596889..5618a1d5879c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -38,20 +38,60 @@
 				    VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \
 				    VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)
 
+static int virtio_gpu_fence_event_create(struct drm_device *dev,
+					 struct drm_file *file,
+					 struct virtio_gpu_fence *fence,
+					 uint32_t ring_idx)
+{
+	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+	struct virtio_gpu_fence_event *e = NULL;
+	int ret;
+
+	if (!(vfpriv->ring_idx_mask & (1 << ring_idx)))
+		return 0;
+
+	e = kzalloc(sizeof(*e), GFP_KERNEL);
+	if (!e)
+		return -ENOMEM;
+
+	e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL;
+	e->event.length = sizeof(e->event);
+
+	ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
+	if (ret)
+		goto free;
+
+	fence->e = e;
+	return 0;
+free:
+	kfree(e);
+	return ret;
+}
+
+/* Must be called with &virtio_gpu_fpriv.struct_mutex held. */
+static void virtio_gpu_create_context_locked(struct virtio_gpu_device *vgdev,
+					     struct virtio_gpu_fpriv *vfpriv)
+{
+	char dbgname[TASK_COMM_LEN];
+
+	get_task_comm(dbgname, current);
+	virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
+				      vfpriv->context_init, strlen(dbgname),
+				      dbgname);
+
+	vfpriv->context_created = true;
+}
+
 void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
 {
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
-	char dbgname[TASK_COMM_LEN];
 
 	mutex_lock(&vfpriv->context_lock);
 	if (vfpriv->context_created)
 		goto out_unlock;
 
-	get_task_comm(dbgname, current);
-	virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
-				      strlen(dbgname), dbgname);
-	vfpriv->context_created = true;
+	virtio_gpu_create_context_locked(vgdev, vfpriv);
 
 out_unlock:
 	mutex_unlock(&vfpriv->context_lock);
@@ -89,6 +129,11 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 	int in_fence_fd = exbuf->fence_fd;
 	int out_fence_fd = -1;
 	void *buf;
+	uint64_t fence_ctx;
+	uint32_t ring_idx;
+
+	fence_ctx = vgdev->fence_drv.context;
+	ring_idx = 0;
 
 	if (vgdev->has_virgl_3d == false)
 		return -ENOSYS;
@@ -96,6 +141,17 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 	if ((exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS))
 		return -EINVAL;
 
+	if ((exbuf->flags & VIRTGPU_EXECBUF_RING_IDX)) {
+		if (exbuf->ring_idx >= vfpriv->num_rings)
+			return -EINVAL;
+
+		if (!vfpriv->base_fence_ctx)
+			return -EINVAL;
+
+		fence_ctx = vfpriv->base_fence_ctx;
+		ring_idx = exbuf->ring_idx;
+	}
+
 	exbuf->fence_fd = -1;
 
 	virtio_gpu_create_context(dev, file);
@@ -163,12 +219,16 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 			goto out_memdup;
 	}
 
-	out_fence = virtio_gpu_fence_alloc(vgdev);
+	out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
 	if(!out_fence) {
 		ret = -ENOMEM;
 		goto out_unresv;
 	}
 
+	ret = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
+	if (ret)
+		goto out_unresv;
+
 	if (out_fence_fd >= 0) {
 		sync_file = sync_file_create(&out_fence->f);
 		if (!sync_file) {
@@ -226,6 +286,12 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
 	case VIRTGPU_PARAM_CROSS_DEVICE:
 		value = vgdev->has_resource_assign_uuid ? 1 : 0;
 		break;
+	case VIRTGPU_PARAM_CONTEXT_INIT:
+		value = vgdev->has_context_init ? 1 : 0;
+		break;
+	case VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs:
+		value = vgdev->capset_id_mask;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -278,7 +344,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 	if (params.size == 0)
 		params.size = PAGE_SIZE;
 
-	fence = virtio_gpu_fence_alloc(vgdev);
+	fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
 	if (!fence)
 		return -ENOMEM;
 	ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
@@ -357,7 +423,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
 	if (ret != 0)
 		goto err_put_free;
 
-	fence = virtio_gpu_fence_alloc(vgdev);
+	fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
 	if (!fence) {
 		ret = -ENOMEM;
 		goto err_unlock;
@@ -417,7 +483,8 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 			goto err_put_free;
 
 		ret = -ENOMEM;
-		fence = virtio_gpu_fence_alloc(vgdev);
+		fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
+					       0);
 		if (!fence)
 			goto err_unlock;
 
@@ -662,6 +729,113 @@ static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
 	return 0;
 }
 
+static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
+					 void *data, struct drm_file *file)
+{
+	int ret = 0;
+	uint32_t num_params, i, param, value;
+	uint64_t valid_ring_mask;
+	size_t len;
+	struct drm_virtgpu_context_set_param *ctx_set_params = NULL;
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+	struct drm_virtgpu_context_init *args = data;
+
+	num_params = args->num_params;
+	len = num_params * sizeof(struct drm_virtgpu_context_set_param);
+
+	if (!vgdev->has_context_init || !vgdev->has_virgl_3d)
+		return -EINVAL;
+
+	/* Number of unique parameters supported at this time. */
+	if (num_params > 3)
+		return -EINVAL;
+
+	ctx_set_params = memdup_user(u64_to_user_ptr(args->ctx_set_params),
+				     len);
+
+	if (IS_ERR(ctx_set_params))
+		return PTR_ERR(ctx_set_params);
+
+	mutex_lock(&vfpriv->context_lock);
+	if (vfpriv->context_created) {
+		ret = -EEXIST;
+		goto out_unlock;
+	}
+
+	for (i = 0; i < num_params; i++) {
+		param = ctx_set_params[i].param;
+		value = ctx_set_params[i].value;
+
+		switch (param) {
+		case VIRTGPU_CONTEXT_PARAM_CAPSET_ID:
+			if (value > MAX_CAPSET_ID) {
+				ret = -EINVAL;
+				goto out_unlock;
+			}
+
+			if ((vgdev->capset_id_mask & (1 << value)) == 0) {
+				ret = -EINVAL;
+				goto out_unlock;
+			}
+
+			/* Context capset ID already set */
+			if (vfpriv->context_init &
+			    VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK) {
+				ret = -EINVAL;
+				goto out_unlock;
+			}
+
+			vfpriv->context_init |= value;
+			break;
+		case VIRTGPU_CONTEXT_PARAM_NUM_RINGS:
+			if (vfpriv->base_fence_ctx) {
+				ret = -EINVAL;
+				goto out_unlock;
+			}
+
+			if (value > MAX_RINGS) {
+				ret = -EINVAL;
+				goto out_unlock;
+			}
+
+			vfpriv->base_fence_ctx = dma_fence_context_alloc(value);
+			vfpriv->num_rings = value;
+			break;
+		case VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK:
+			if (vfpriv->ring_idx_mask) {
+				ret = -EINVAL;
+				goto out_unlock;
+			}
+
+			vfpriv->ring_idx_mask = value;
+			break;
+		default:
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+	}
+
+	if (vfpriv->ring_idx_mask) {
+		valid_ring_mask = 0;
+		for (i = 0; i < vfpriv->num_rings; i++)
+			valid_ring_mask |= 1 << i;
+
+		if (~valid_ring_mask & vfpriv->ring_idx_mask) {
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+	}
+
+	virtio_gpu_create_context_locked(vgdev, vfpriv);
+	virtio_gpu_notify(vgdev);
+
+out_unlock:
+	mutex_unlock(&vfpriv->context_lock);
+	kfree(ctx_set_params);
+	return ret;
+}
+
 struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
 	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
 			  DRM_RENDER_ALLOW),
@@ -698,4 +872,7 @@ struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
 	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB,
 			  virtio_gpu_resource_create_blob_ioctl,
 			  DRM_RENDER_ALLOW),
+
+	DRM_IOCTL_DEF_DRV(VIRTGPU_CONTEXT_INIT, virtio_gpu_context_init_ioctl,
+			  DRM_RENDER_ALLOW),
 };
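
The new VIRTGPU_CONTEXT_INIT path above is driven entirely from userspace, so a short sketch of the expected call sequence may help when reading the validation in virtio_gpu_context_init_ioctl(). The sketch below is not part of the patch: the header path, the render-node path, the DRM_IOCTL_VIRTGPU_CONTEXT_INIT request macro and the exact struct drm_virtgpu_context_init / drm_virtgpu_context_set_param layouts are assumed from the uapi half of this series; only the parameter names, the three-parameter limit and the getparam additions are taken from the diff itself.

/*
 * Hypothetical userspace sketch, assuming the uapi definitions that
 * accompany this series are available as <drm/virtgpu_drm.h>.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <drm/virtgpu_drm.h>

int main(void)
{
	/* Render-node path is an assumption; pick whichever node is virtio-gpu. */
	int fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
	if (fd < 0) {
		perror("open render node");
		return 1;
	}

	/* virtio-gpu getparam writes the result through the pointer in .value. */
	uint64_t has_init = 0, supported = 0;
	struct drm_virtgpu_getparam gp = {
		.param = VIRTGPU_PARAM_CONTEXT_INIT,
		.value = (uintptr_t)&has_init,
	};
	if (ioctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp) || !has_init)
		goto out;	/* kernel or host without context-init support */

	gp.param = VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs;
	gp.value = (uintptr_t)&supported;
	if (ioctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp) || !supported)
		goto out;

	/* Pick the lowest advertised capset ID from the bitmask. */
	uint32_t capset_id = __builtin_ctzll(supported);

	struct drm_virtgpu_context_set_param params[] = {
		{ .param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID,       .value = capset_id },
		{ .param = VIRTGPU_CONTEXT_PARAM_NUM_RINGS,       .value = 2 },
		/* Rings 0 and 1 also deliver completions as DRM events. */
		{ .param = VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK, .value = 0x3 },
	};
	struct drm_virtgpu_context_init init = {
		.num_params = 3,	/* kernel rejects more than 3 params */
		.ctx_set_params = (uintptr_t)params,
	};
	if (ioctl(fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init))
		perror("VIRTGPU_CONTEXT_INIT");

	/*
	 * Later submissions would set VIRTGPU_EXECBUF_RING_IDX in
	 * drm_virtgpu_execbuffer.flags with ring_idx < 2; completions on
	 * rings selected by POLL_RINGS_MASK are then read back as DRM
	 * events (poll() the fd, then read() a struct drm_event).
	 */
out:
	close(fd);
	return 0;
}

Once a context is initialised this way, an execbuffer carrying VIRTGPU_EXECBUF_RING_IDX derives its fence timeline from vfpriv->base_fence_ctx plus the ring index, and rings included in POLL_RINGS_MASK additionally signal through the DRM event reserved by virtio_gpu_fence_event_create(). Calling the ioctl a second time, or after any implicit context creation, fails with -EEXIST.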