Diffstat (limited to 'drivers/gpu/drm/virtio/virtgpu_gem.c')
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_gem.c | 210
1 file changed, 150 insertions(+), 60 deletions(-)
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index f06586393974..4c1f579edfb3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -23,56 +23,36 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
-#include "virtgpu_drv.h"
-
-void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj)
-{
- struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(gem_obj);
-
- if (obj)
- virtio_gpu_object_unref(&obj);
-}
-
-struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
- size_t size, bool kernel,
- bool pinned)
-{
- struct virtio_gpu_device *vgdev = dev->dev_private;
- struct virtio_gpu_object *obj;
- int ret;
-
- ret = virtio_gpu_object_create(vgdev, size, kernel, pinned, &obj);
- if (ret)
- return ERR_PTR(ret);
+#include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
- return obj;
-}
+#include "virtgpu_drv.h"
int virtio_gpu_gem_create(struct drm_file *file,
struct drm_device *dev,
- uint64_t size,
+ struct virtio_gpu_object_params *params,
struct drm_gem_object **obj_p,
uint32_t *handle_p)
{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_object *obj;
int ret;
u32 handle;
- obj = virtio_gpu_alloc_object(dev, size, false, false);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ ret = virtio_gpu_object_create(vgdev, params, &obj, NULL);
+ if (ret < 0)
+ return ret;
- ret = drm_gem_handle_create(file, &obj->gem_base, &handle);
+ ret = drm_gem_handle_create(file, &obj->base.base, &handle);
if (ret) {
- drm_gem_object_release(&obj->gem_base);
+ drm_gem_object_release(&obj->base.base);
return ret;
}
- *obj_p = &obj->gem_base;
+ *obj_p = &obj->base.base;
/* drop reference from allocate - handle holds it now */
- drm_gem_object_put_unlocked(&obj->gem_base);
+ drm_gem_object_put_unlocked(&obj->base.base);
*handle_p = handle;
return 0;
@@ -82,12 +62,10 @@ int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_gem_object *gobj;
- struct virtio_gpu_object *obj;
+ struct virtio_gpu_object_params params = { 0 };
int ret;
uint32_t pitch;
- uint32_t format;
if (args->bpp != 32)
return -EINVAL;
@@ -96,22 +74,16 @@ int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
args->size = pitch * args->height;
args->size = ALIGN(args->size, PAGE_SIZE);
- ret = virtio_gpu_gem_create(file_priv, dev, args->size, &gobj,
+ params.format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
+ params.width = args->width;
+ params.height = args->height;
+ params.size = args->size;
+ params.dumb = true;
+ ret = virtio_gpu_gem_create(file_priv, dev, &params, &gobj,
&args->handle);
if (ret)
goto fail;
- format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
- obj = gem_to_virtio_gpu_obj(gobj);
- virtio_gpu_cmd_create_resource(vgdev, obj, format,
- args->width, args->height);
-
- /* attach the object to the resource */
- ret = virtio_gpu_object_attach(vgdev, obj, NULL);
- if (ret)
- goto fail;
-
- obj->dumb = true;
args->pitch = pitch;
return ret;
@@ -141,19 +113,18 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
{
struct virtio_gpu_device *vgdev = obj->dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
- struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
- int r;
+ struct virtio_gpu_object_array *objs;
if (!vgdev->has_virgl_3d)
return 0;
- r = virtio_gpu_object_reserve(qobj, false);
- if (r)
- return r;
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
+ return -ENOMEM;
+ virtio_gpu_array_add_obj(objs, obj);
virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
- qobj->hw_res_handle);
- virtio_gpu_object_unreserve(qobj);
+ objs);
return 0;
}
@@ -162,17 +133,136 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
{
struct virtio_gpu_device *vgdev = obj->dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
- struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
- int r;
+ struct virtio_gpu_object_array *objs;
if (!vgdev->has_virgl_3d)
return;
- r = virtio_gpu_object_reserve(qobj, false);
- if (r)
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
return;
+ virtio_gpu_array_add_obj(objs, obj);
virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id,
- qobj->hw_res_handle);
- virtio_gpu_object_unreserve(qobj);
+ objs);
+}
+
+struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
+{
+ struct virtio_gpu_object_array *objs;
+ size_t size = sizeof(*objs) + sizeof(objs->objs[0]) * nents;
+
+ objs = kmalloc(size, GFP_KERNEL);
+ if (!objs)
+ return NULL;
+
+ objs->nents = 0;
+ objs->total = nents;
+ return objs;
+}
+
+static void virtio_gpu_array_free(struct virtio_gpu_object_array *objs)
+{
+ kfree(objs);
+}
+
+struct virtio_gpu_object_array*
+virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents)
+{
+ struct virtio_gpu_object_array *objs;
+ u32 i;
+
+ objs = virtio_gpu_array_alloc(nents);
+ if (!objs)
+ return NULL;
+
+ for (i = 0; i < nents; i++) {
+ objs->objs[i] = drm_gem_object_lookup(drm_file, handles[i]);
+ if (!objs->objs[i]) {
+ objs->nents = i;
+ virtio_gpu_array_put_free(objs);
+ return NULL;
+ }
+ }
+ objs->nents = i;
+ return objs;
+}
+
+void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
+ struct drm_gem_object *obj)
+{
+ if (WARN_ON_ONCE(objs->nents == objs->total))
+ return;
+
+ drm_gem_object_get(obj);
+ objs->objs[objs->nents] = obj;
+ objs->nents++;
+}
+
+int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
+{
+ int ret;
+
+ if (objs->nents == 1) {
+ ret = dma_resv_lock_interruptible(objs->objs[0]->resv, NULL);
+ } else {
+ ret = drm_gem_lock_reservations(objs->objs, objs->nents,
+ &objs->ticket);
+ }
+ return ret;
+}
+
+void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs)
+{
+ if (objs->nents == 1) {
+ dma_resv_unlock(objs->objs[0]->resv);
+ } else {
+ drm_gem_unlock_reservations(objs->objs, objs->nents,
+ &objs->ticket);
+ }
+}
+
+void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
+ struct dma_fence *fence)
+{
+ int i;
+
+ for (i = 0; i < objs->nents; i++)
+ dma_resv_add_excl_fence(objs->objs[i]->resv, fence);
+}
+
+void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
+{
+ u32 i;
+
+ for (i = 0; i < objs->nents; i++)
+ drm_gem_object_put_unlocked(objs->objs[i]);
+ virtio_gpu_array_free(objs);
+}
+
+void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object_array *objs)
+{
+ spin_lock(&vgdev->obj_free_lock);
+ list_add_tail(&objs->next, &vgdev->obj_free_list);
+ spin_unlock(&vgdev->obj_free_lock);
+ schedule_work(&vgdev->obj_free_work);
+}
+
+void virtio_gpu_array_put_free_work(struct work_struct *work)
+{
+ struct virtio_gpu_device *vgdev =
+ container_of(work, struct virtio_gpu_device, obj_free_work);
+ struct virtio_gpu_object_array *objs;
+
+ spin_lock(&vgdev->obj_free_lock);
+ while (!list_empty(&vgdev->obj_free_list)) {
+ objs = list_first_entry(&vgdev->obj_free_list,
+ struct virtio_gpu_object_array, next);
+ list_del(&objs->next);
+ spin_unlock(&vgdev->obj_free_lock);
+ virtio_gpu_array_put_free(objs);
+ spin_lock(&vgdev->obj_free_lock);
+ }
+ spin_unlock(&vgdev->obj_free_lock);
}
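
Below is a minimal caller sketch (not part of the patch) of how the new virtio_gpu_object_array helpers are meant to be combined. The function name example_submit and the way the fence and handle list reach it are made up for illustration; only the virtio_gpu_array_*() calls are taken from the code above, and the step that actually queues a command to the host (which would later signal the fence and release the array) is omitted.

/* Hypothetical usage sketch, assuming an in-tree caller next to virtgpu_gem.c. */
#include <linux/dma-fence.h>

#include "virtgpu_drv.h"

static int example_submit(struct virtio_gpu_device *vgdev,
			  struct drm_file *file,
			  u32 *handles, u32 nents,
			  struct dma_fence *fence)
{
	struct virtio_gpu_object_array *objs;
	int ret;

	/* Look up all handles; the array now holds one GEM reference each. */
	objs = virtio_gpu_array_from_handles(file, handles, nents);
	if (!objs)
		return -ENOENT;

	/*
	 * A single-object array takes its dma_resv lock directly; larger
	 * arrays go through drm_gem_lock_reservations() with a ww ticket.
	 */
	ret = virtio_gpu_array_lock_resv(objs);
	if (ret) {
		virtio_gpu_array_put_free(objs);
		return ret;
	}

	/* Install the fence as the exclusive fence of every object. */
	virtio_gpu_array_add_fence(objs, fence);
	virtio_gpu_array_unlock_resv(objs);

	/*
	 * Drop the references from a worker instead of here: the delayed
	 * variant only takes a spinlock and schedules obj_free_work, so it
	 * is also usable from the virtqueue completion path once the host
	 * has finished with the buffers.
	 */
	virtio_gpu_array_put_free_delayed(vgdev, objs);
	return 0;
}

The delayed free exists because dropping the last GEM reference can sleep, which is not allowed from the virtqueue completion handler; queuing the array on obj_free_list and letting obj_free_work call virtio_gpu_array_put_free() keeps that work in process context.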