Diffstat (limited to 'drivers/gpu/drm/virtio')
-rw-r--r--   drivers/gpu/drm/virtio/Kconfig                 |    3
-rw-r--r--   drivers/gpu/drm/virtio/Makefile                |    4
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_debugfs.c       |   31
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_display.c       |   26
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_drv.c           |   37
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_drv.h           |  182
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_fb.c            |  150
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_fence.c         |   35
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_gem.c           |  210
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_ioctl.c         |  367
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_kms.c           |   28
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_object.c        |  244
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_plane.c         |   67
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_prime.c         |   36
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_trace.h         |   52
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_trace_points.c  |    5
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_ttm.c           |  356
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_vq.c            |  306
18 files changed, 863 insertions, 1276 deletions
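
Before the per-file hunks, it may help to see the shape of the buffer-tracking API this series introduces: the TTM validation lists are replaced by a plain array of GEM objects (struct virtio_gpu_object_array) that is looked up from handles, reservation-locked, handed to the virtqueue code together with the command, and released once the host is done. The sketch below is illustrative only; it condenses the pattern from virtio_gpu_transfer_from_host_ioctl() in the virtgpu_ioctl.c hunks further down, with an invented function name and a trimmed argument list.

/* Illustrative sketch, condensed from virtio_gpu_transfer_from_host_ioctl()
 * in the virtgpu_ioctl.c hunks below; unrelated arguments trimmed. */
static int example_transfer(struct virtio_gpu_device *vgdev,
                            struct drm_file *file, u32 bo_handle,
                            u32 ctx_id, u64 offset, u32 level,
                            struct virtio_gpu_box *box)
{
        struct virtio_gpu_object_array *objs;
        struct virtio_gpu_fence *fence;
        int ret;

        /* take a GEM reference on each handle and collect the objects */
        objs = virtio_gpu_array_from_handles(file, &bo_handle, 1);
        if (!objs)
                return -ENOENT;

        /* lock all reservation objects (ww-acquire path for nents > 1) */
        ret = virtio_gpu_array_lock_resv(objs);
        if (ret)
                goto err_put_free;

        fence = virtio_gpu_fence_alloc(vgdev);
        if (!fence) {
                ret = -ENOMEM;
                goto err_unlock;
        }

        /* ownership of objs moves to the command: the virtqueue code
         * attaches the fence, unlocks the reservations, and finally
         * frees the array (dropping the GEM references) once the host
         * has consumed the buffers */
        virtio_gpu_cmd_transfer_from_host_3d(vgdev, ctx_id, offset, level,
                                             box, objs, fence);
        dma_fence_put(&fence->f);
        return 0;

err_unlock:
        virtio_gpu_array_unlock_resv(objs);
err_put_free:
        virtio_gpu_array_put_free(objs);
        return ret;
}
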
diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig index 0c384d9a2b75..eff3047052d4 100644 --- a/drivers/gpu/drm/virtio/Kconfig +++ b/drivers/gpu/drm/virtio/Kconfig @@ -1,8 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0-only config DRM_VIRTIO_GPU tristate "Virtio GPU driver" depends on DRM && VIRTIO && MMU select DRM_KMS_HELPER - select DRM_TTM + select DRM_GEM_SHMEM_HELPER help This is the virtual GPU driver for virtio. It can be used with QEMU based VMMs (like KVM or Xen). diff --git a/drivers/gpu/drm/virtio/Makefile b/drivers/gpu/drm/virtio/Makefile index 4e90cc8fa651..92aa2b3d349d 100644 --- a/drivers/gpu/drm/virtio/Makefile +++ b/drivers/gpu/drm/virtio/Makefile @@ -4,8 +4,8 @@ # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_gem.o \ - virtgpu_fb.o virtgpu_display.o virtgpu_vq.o virtgpu_ttm.o \ + virtgpu_display.o virtgpu_vq.o \ virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o virtgpu_plane.o \ - virtgpu_ioctl.o virtgpu_prime.o + virtgpu_ioctl.o virtgpu_prime.o virtgpu_trace_points.o obj-$(CONFIG_DRM_VIRTIO_GPU) += virtio-gpu.o diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c index 73dc99046c43..5156e6b279db 100644 --- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c +++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c @@ -23,11 +23,35 @@ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ -#include <linux/debugfs.h> -#include <drm/drmP.h> +#include <drm/drm_debugfs.h> +#include <drm/drm_file.h> #include "virtgpu_drv.h" +static void virtio_add_bool(struct seq_file *m, const char *name, + bool value) +{ + seq_printf(m, "%-16s : %s\n", name, value ? "yes" : "no"); +} + +static void virtio_add_int(struct seq_file *m, const char *name, + int value) +{ + seq_printf(m, "%-16s : %d\n", name, value); +} + +static int virtio_gpu_features(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct virtio_gpu_device *vgdev = node->minor->dev->dev_private; + + virtio_add_bool(m, "virgl", vgdev->has_virgl_3d); + virtio_add_bool(m, "edid", vgdev->has_edid); + virtio_add_int(m, "cap sets", vgdev->num_capsets); + virtio_add_int(m, "scanouts", vgdev->num_scanouts); + return 0; +} + static int virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data) { @@ -41,7 +65,8 @@ virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data) } static struct drm_info_list virtio_gpu_debugfs_list[] = { - { "irq_fence", virtio_gpu_debugfs_irq_info, 0, NULL }, + { "virtio-gpu-features", virtio_gpu_features }, + { "virtio-gpu-irq-fence", virtio_gpu_debugfs_irq_info, 0, NULL }, }; #define VIRTIO_GPU_DEBUGFS_ENTRIES ARRAY_SIZE(virtio_gpu_debugfs_list) diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index 653ec7d0bf4d..e622485ae826 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -25,10 +25,14 @@ * OTHER DEALINGS IN THE SOFTWARE. 
*/ -#include "virtgpu_drv.h" #include <drm/drm_atomic_helper.h> +#include <drm/drm_damage_helper.h> +#include <drm/drm_fourcc.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_vblank.h> + +#include "virtgpu_drv.h" #define XRES_MIN 32 #define YRES_MIN 32 @@ -49,23 +53,10 @@ static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = { .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, }; -static int -virtio_gpu_framebuffer_surface_dirty(struct drm_framebuffer *fb, - struct drm_file *file_priv, - unsigned int flags, unsigned int color, - struct drm_clip_rect *clips, - unsigned int num_clips) -{ - struct virtio_gpu_framebuffer *virtio_gpu_fb - = to_virtio_gpu_framebuffer(fb); - - return virtio_gpu_surface_dirty(virtio_gpu_fb, clips, num_clips); -} - static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = { .create_handle = drm_gem_fb_create_handle, .destroy = drm_gem_fb_destroy, - .dirty = virtio_gpu_framebuffer_surface_dirty, + .dirty = drm_atomic_helper_dirtyfb, }; int @@ -85,10 +76,6 @@ virtio_gpu_framebuffer_init(struct drm_device *dev, vgfb->base.obj[0] = NULL; return ret; } - - spin_lock_init(&vgfb->dirty_lock); - vgfb->x1 = vgfb->y1 = INT_MAX; - vgfb->x2 = vgfb->y2 = 0; return 0; } @@ -385,5 +372,6 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev) for (i = 0 ; i < vgdev->num_scanouts; ++i) kfree(vgdev->outputs[i].edid); + drm_atomic_helper_shutdown(vgdev->ddev); drm_mode_config_cleanup(vgdev->ddev); } diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index b996ac1d4fcc..8dee698c90ff 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -29,10 +29,13 @@ #include <linux/module.h> #include <linux/console.h> #include <linux/pci.h> -#include <drm/drmP.h> + #include <drm/drm.h> +#include <drm/drm_drv.h> +#include <drm/drm_file.h> #include "virtgpu_drv.h" + static struct drm_driver driver; static int virtio_gpu_modeset = -1; @@ -53,7 +56,6 @@ static int virtio_gpu_pci_quirk(struct drm_device *dev, struct virtio_device *vd dev->pdev = pdev; if (vga) drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, - 0, "virtiodrmfb"); /* @@ -182,20 +184,10 @@ MODULE_AUTHOR("Dave Airlie <[email protected]>"); MODULE_AUTHOR("Gerd Hoffmann <[email protected]>"); MODULE_AUTHOR("Alon Levy"); -static const struct file_operations virtio_gpu_driver_fops = { - .owner = THIS_MODULE, - .open = drm_open, - .mmap = virtio_gpu_mmap, - .poll = drm_poll, - .read = drm_read, - .unlocked_ioctl = drm_ioctl, - .release = drm_release, - .compat_ioctl = drm_compat_ioctl, - .llseek = noop_llseek, -}; +DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops); static struct drm_driver driver = { - .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC, + .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC, .open = virtio_gpu_driver_open, .postclose = virtio_gpu_driver_postclose, @@ -205,17 +197,12 @@ static struct drm_driver driver = { #if defined(CONFIG_DEBUG_FS) .debugfs_init = virtio_gpu_debugfs_init, #endif - .gem_prime_export = drm_gem_prime_export, - .gem_prime_import = drm_gem_prime_import, - .gem_prime_pin = virtgpu_gem_prime_pin, - .gem_prime_unpin = virtgpu_gem_prime_unpin, - .gem_prime_vmap = virtgpu_gem_prime_vmap, - .gem_prime_vunmap = virtgpu_gem_prime_vunmap, - .gem_prime_mmap = virtgpu_gem_prime_mmap, - - .gem_free_object_unlocked = virtio_gpu_gem_free_object, - .gem_open_object = 
virtio_gpu_gem_object_open, - .gem_close_object = virtio_gpu_gem_object_close, + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_mmap = drm_gem_prime_mmap, + .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table, + + .gem_create_object = virtio_gpu_create_object, .fops = &virtio_gpu_driver_fops, .ioctls = virtio_gpu_ioctls, diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index 3238fdf58eb4..0b56ba005e25 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -31,16 +31,13 @@ #include <linux/virtio_config.h> #include <linux/virtio_gpu.h> -#include <drm/drmP.h> -#include <drm/drm_gem.h> #include <drm/drm_atomic.h> #include <drm/drm_encoder.h> #include <drm/drm_fb_helper.h> +#include <drm/drm_gem.h> +#include <drm/drm_gem_shmem_helper.h> +#include <drm/drm_ioctl.h> #include <drm/drm_probe_helper.h> -#include <drm/ttm/ttm_bo_api.h> -#include <drm/ttm/ttm_bo_driver.h> -#include <drm/ttm/ttm_placement.h> -#include <drm/ttm/ttm_module.h> #define DRIVER_NAME "virtio_gpu" #define DRIVER_DESC "virtio GPU" @@ -50,22 +47,41 @@ #define DRIVER_MINOR 1 #define DRIVER_PATCHLEVEL 0 +struct virtio_gpu_object_params { + uint32_t format; + uint32_t width; + uint32_t height; + unsigned long size; + bool dumb; + /* 3d */ + bool virgl; + uint32_t target; + uint32_t bind; + uint32_t depth; + uint32_t array_size; + uint32_t last_level; + uint32_t nr_samples; + uint32_t flags; +}; + struct virtio_gpu_object { - struct drm_gem_object gem_base; + struct drm_gem_shmem_object base; uint32_t hw_res_handle; struct sg_table *pages; uint32_t mapped; - void *vmap; bool dumb; - struct ttm_place placement_code; - struct ttm_placement placement; - struct ttm_buffer_object tbo; - struct ttm_bo_kmap_obj kmap; bool created; }; #define gem_to_virtio_gpu_obj(gobj) \ - container_of((gobj), struct virtio_gpu_object, gem_base) + container_of((gobj), struct virtio_gpu_object, base.base) + +struct virtio_gpu_object_array { + struct ww_acquire_ctx ticket; + struct list_head next; + u32 nents, total; + struct drm_gem_object *objs[]; +}; struct virtio_gpu_vbuffer; struct virtio_gpu_device; @@ -85,7 +101,6 @@ struct virtio_gpu_fence { struct dma_fence f; struct virtio_gpu_fence_driver *drv; struct list_head node; - uint64_t seq; }; #define to_virtio_fence(x) \ container_of(x, struct virtio_gpu_fence, f) @@ -99,9 +114,9 @@ struct virtio_gpu_vbuffer { char *resp_buf; int resp_size; - virtio_gpu_resp_cb resp_cb; + struct virtio_gpu_object_array *objs; struct list_head list; }; @@ -126,18 +141,11 @@ struct virtio_gpu_output { struct virtio_gpu_framebuffer { struct drm_framebuffer base; - int x1, y1, x2, y2; /* dirty rect */ - spinlock_t dirty_lock; - uint32_t hw_res_handle; struct virtio_gpu_fence *fence; }; #define to_virtio_gpu_framebuffer(x) \ container_of(x, struct virtio_gpu_framebuffer, base) -struct virtio_gpu_mman { - struct ttm_bo_device bdev; -}; - struct virtio_gpu_queue { struct virtqueue *vq; spinlock_t qlock; @@ -166,8 +174,6 @@ struct virtio_gpu_device { struct virtio_device *vdev; - struct virtio_gpu_mman mman; - struct virtio_gpu_output outputs[VIRTIO_GPU_MAX_SCANOUTS]; uint32_t num_scanouts; @@ -192,6 +198,10 @@ struct virtio_gpu_device { struct work_struct config_changed_work; + struct work_struct obj_free_work; + spinlock_t obj_free_lock; + struct list_head obj_free_list; + struct virtio_gpu_drv_capset *capsets; uint32_t num_capsets; struct list_head cap_cache; @@ 
-217,16 +227,13 @@ int virtio_gpu_gem_init(struct virtio_gpu_device *vgdev); void virtio_gpu_gem_fini(struct virtio_gpu_device *vgdev); int virtio_gpu_gem_create(struct drm_file *file, struct drm_device *dev, - uint64_t size, + struct virtio_gpu_object_params *params, struct drm_gem_object **obj_p, uint32_t *handle_p); int virtio_gpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file); void virtio_gpu_gem_object_close(struct drm_gem_object *obj, struct drm_file *file); -struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev, - size_t size, bool kernel, - bool pinned); int virtio_gpu_mode_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); @@ -234,25 +241,35 @@ int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv, struct drm_device *dev, uint32_t handle, uint64_t *offset_p); -/* virtio_fb */ -int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *qfb, - struct drm_clip_rect *clips, - unsigned int num_clips); +struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents); +struct virtio_gpu_object_array* +virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents); +void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs, + struct drm_gem_object *obj); +int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs); +void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs); +void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs, + struct dma_fence *fence); +void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs); +void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object_array *objs); +void virtio_gpu_array_put_free_work(struct work_struct *work); + /* virtio vg */ int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev); void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev); void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev, struct virtio_gpu_object *bo, - uint32_t format, - uint32_t width, - uint32_t height); + struct virtio_gpu_object_params *params, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence *fence); void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev, uint32_t resource_id); void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, - struct virtio_gpu_object *bo, uint64_t offset, - __le32 width, __le32 height, - __le32 x, __le32 y, + uint32_t width, uint32_t height, + uint32_t x, uint32_t y, + struct virtio_gpu_object_array *objs, struct virtio_gpu_fence *fence); void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev, uint32_t resource_id, @@ -283,28 +300,33 @@ void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev, uint32_t id); void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev, uint32_t ctx_id, - uint32_t resource_id); + struct virtio_gpu_object_array *objs); void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev, uint32_t ctx_id, - uint32_t resource_id); + struct virtio_gpu_object_array *objs); void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev, void *data, uint32_t data_size, - uint32_t ctx_id, struct virtio_gpu_fence *fence); + uint32_t ctx_id, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence *fence); void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev, - uint32_t resource_id, uint32_t ctx_id, + uint32_t ctx_id, uint64_t offset, uint32_t level, struct virtio_gpu_box *box, + 
struct virtio_gpu_object_array *objs, struct virtio_gpu_fence *fence); void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev, - struct virtio_gpu_object *bo, uint32_t ctx_id, uint64_t offset, uint32_t level, struct virtio_gpu_box *box, + struct virtio_gpu_object_array *objs, struct virtio_gpu_fence *fence); void virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev, struct virtio_gpu_object *bo, - struct virtio_gpu_resource_create_3d *rc_3d); + struct virtio_gpu_object_params *params, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence *fence); void virtio_gpu_ctrl_ack(struct virtqueue *vq); void virtio_gpu_cursor_ack(struct virtqueue *vq); void virtio_gpu_fence_ack(struct virtqueue *vq); @@ -326,82 +348,32 @@ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev, enum drm_plane_type type, int index); -/* virtio_gpu_ttm.c */ -int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev); -void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev); -int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma); - /* virtio_gpu_fence.c */ +bool virtio_fence_signaled(struct dma_fence *f); struct virtio_gpu_fence *virtio_gpu_fence_alloc( struct virtio_gpu_device *vgdev); -int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev, +void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev, struct virtio_gpu_ctrl_hdr *cmd_hdr, struct virtio_gpu_fence *fence); void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev, u64 last_seq); /* virtio_gpu_object */ +struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev, + size_t size); int virtio_gpu_object_create(struct virtio_gpu_device *vgdev, - unsigned long size, bool kernel, bool pinned, - struct virtio_gpu_object **bo_ptr); -void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo); -int virtio_gpu_object_kmap(struct virtio_gpu_object *bo); -int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev, - struct virtio_gpu_object *bo); -void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo); -int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait); + struct virtio_gpu_object_params *params, + struct virtio_gpu_object **bo_ptr, + struct virtio_gpu_fence *fence); /* virtgpu_prime.c */ -int virtgpu_gem_prime_pin(struct drm_gem_object *obj); -void virtgpu_gem_prime_unpin(struct drm_gem_object *obj); -void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj); -void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); -int virtgpu_gem_prime_mmap(struct drm_gem_object *obj, - struct vm_area_struct *vma); - -static inline struct virtio_gpu_object* -virtio_gpu_object_ref(struct virtio_gpu_object *bo) -{ - ttm_bo_get(&bo->tbo); - return bo; -} - -static inline void virtio_gpu_object_unref(struct virtio_gpu_object **bo) -{ - struct ttm_buffer_object *tbo; - - if ((*bo) == NULL) - return; - tbo = &((*bo)->tbo); - ttm_bo_put(tbo); - *bo = NULL; -} +struct drm_gem_object *virtgpu_gem_prime_import_sg_table( + struct drm_device *dev, struct dma_buf_attachment *attach, + struct sg_table *sgt); static inline u64 virtio_gpu_object_mmap_offset(struct virtio_gpu_object *bo) { - return drm_vma_node_offset_addr(&bo->tbo.vma_node); -} - -static inline int virtio_gpu_object_reserve(struct virtio_gpu_object *bo, - bool no_wait) -{ - int r; - - r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL); - if (unlikely(r != 0)) { - if (r != -ERESTARTSYS) { - struct virtio_gpu_device *qdev = - bo->gem_base.dev->dev_private; - dev_err(qdev->dev, "%p 
reserve failed\n", bo); - } - return r; - } - return 0; -} - -static inline void virtio_gpu_object_unreserve(struct virtio_gpu_object *bo) -{ - ttm_bo_unreserve(&bo->tbo); + return drm_vma_node_offset_addr(&bo->base.base.vma_node); } /* virgl debufs */ diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c deleted file mode 100644 index b07584b1c2bf..000000000000 --- a/drivers/gpu/drm/virtio/virtgpu_fb.c +++ /dev/null @@ -1,150 +0,0 @@ -/* - * Copyright (C) 2015 Red Hat, Inc. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -#include <drm/drmP.h> -#include <drm/drm_fb_helper.h> -#include "virtgpu_drv.h" - -static int virtio_gpu_dirty_update(struct virtio_gpu_framebuffer *fb, - bool store, int x, int y, - int width, int height) -{ - struct drm_device *dev = fb->base.dev; - struct virtio_gpu_device *vgdev = dev->dev_private; - bool store_for_later = false; - int bpp = fb->base.format->cpp[0]; - int x2, y2; - unsigned long flags; - struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->base.obj[0]); - - if ((width <= 0) || - (x + width > fb->base.width) || - (y + height > fb->base.height)) { - DRM_DEBUG("values out of range %dx%d+%d+%d, fb %dx%d\n", - width, height, x, y, - fb->base.width, fb->base.height); - return -EINVAL; - } - - /* - * Can be called with pretty much any context (console output - * path). If we are in atomic just store the dirty rect info - * to send out the update later. - * - * Can't test inside spin lock. 
- */ - if (in_atomic() || store) - store_for_later = true; - - x2 = x + width - 1; - y2 = y + height - 1; - - spin_lock_irqsave(&fb->dirty_lock, flags); - - if (fb->y1 < y) - y = fb->y1; - if (fb->y2 > y2) - y2 = fb->y2; - if (fb->x1 < x) - x = fb->x1; - if (fb->x2 > x2) - x2 = fb->x2; - - if (store_for_later) { - fb->x1 = x; - fb->x2 = x2; - fb->y1 = y; - fb->y2 = y2; - spin_unlock_irqrestore(&fb->dirty_lock, flags); - return 0; - } - - fb->x1 = fb->y1 = INT_MAX; - fb->x2 = fb->y2 = 0; - - spin_unlock_irqrestore(&fb->dirty_lock, flags); - - { - uint32_t offset; - uint32_t w = x2 - x + 1; - uint32_t h = y2 - y + 1; - - offset = (y * fb->base.pitches[0]) + x * bpp; - - virtio_gpu_cmd_transfer_to_host_2d(vgdev, obj, - offset, - cpu_to_le32(w), - cpu_to_le32(h), - cpu_to_le32(x), - cpu_to_le32(y), - NULL); - - } - virtio_gpu_cmd_resource_flush(vgdev, obj->hw_res_handle, - x, y, x2 - x + 1, y2 - y + 1); - return 0; -} - -int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *vgfb, - struct drm_clip_rect *clips, - unsigned int num_clips) -{ - struct virtio_gpu_device *vgdev = vgfb->base.dev->dev_private; - struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(vgfb->base.obj[0]); - struct drm_clip_rect norect; - struct drm_clip_rect *clips_ptr; - int left, right, top, bottom; - int i; - int inc = 1; - - if (!num_clips) { - num_clips = 1; - clips = &norect; - norect.x1 = norect.y1 = 0; - norect.x2 = vgfb->base.width; - norect.y2 = vgfb->base.height; - } - left = clips->x1; - right = clips->x2; - top = clips->y1; - bottom = clips->y2; - - /* skip the first clip rect */ - for (i = 1, clips_ptr = clips + inc; - i < num_clips; i++, clips_ptr += inc) { - left = min_t(int, left, (int)clips_ptr->x1); - right = max_t(int, right, (int)clips_ptr->x2); - top = min_t(int, top, (int)clips_ptr->y1); - bottom = max_t(int, bottom, (int)clips_ptr->y2); - } - - if (obj->dumb) - return virtio_gpu_dirty_update(vgfb, false, left, top, - right - left, bottom - top); - - virtio_gpu_cmd_resource_flush(vgdev, obj->hw_res_handle, - left, top, right - left, bottom - top); - return 0; -} diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c index 21bd4c4a32d1..a4b9881ca1d3 100644 --- a/drivers/gpu/drm/virtio/virtgpu_fence.c +++ b/drivers/gpu/drm/virtio/virtgpu_fence.c @@ -23,7 +23,8 @@ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ -#include <drm/drmP.h> +#include <trace/events/dma_fence.h> + #include "virtgpu_drv.h" static const char *virtio_get_driver_name(struct dma_fence *f) @@ -36,20 +37,22 @@ static const char *virtio_get_timeline_name(struct dma_fence *f) return "controlq"; } -static bool virtio_signaled(struct dma_fence *f) +bool virtio_fence_signaled(struct dma_fence *f) { struct virtio_gpu_fence *fence = to_virtio_fence(f); - if (atomic64_read(&fence->drv->last_seq) >= fence->seq) + if (WARN_ON_ONCE(fence->f.seqno == 0)) + /* leaked fence outside driver before completing + * initialization with virtio_gpu_fence_emit */ + return false; + if (atomic64_read(&fence->drv->last_seq) >= fence->f.seqno) return true; return false; } static void virtio_fence_value_str(struct dma_fence *f, char *str, int size) { - struct virtio_gpu_fence *fence = to_virtio_fence(f); - - snprintf(str, size, "%llu", fence->seq); + snprintf(str, size, "%llu", f->seqno); } static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size) @@ -62,7 +65,7 @@ static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size) static const struct dma_fence_ops virtio_fence_ops = { .get_driver_name = virtio_get_driver_name, .get_timeline_name = virtio_get_timeline_name, - .signaled = virtio_signaled, + .signaled = virtio_fence_signaled, .fence_value_str = virtio_fence_value_str, .timeline_value_str = virtio_timeline_value_str, }; @@ -71,17 +74,22 @@ struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev) { struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv; struct virtio_gpu_fence *fence = kzalloc(sizeof(struct virtio_gpu_fence), - GFP_ATOMIC); + GFP_KERNEL); if (!fence) return fence; fence->drv = drv; + + /* This only partially initializes the fence because the seqno is + * unknown yet. The fence must not be used outside of the driver + * until virtio_gpu_fence_emit is called. + */ dma_fence_init(&fence->f, &virtio_fence_ops, &drv->lock, drv->context, 0); return fence; } -int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev, +void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev, struct virtio_gpu_ctrl_hdr *cmd_hdr, struct virtio_gpu_fence *fence) { @@ -89,14 +97,15 @@ int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev, unsigned long irq_flags; spin_lock_irqsave(&drv->lock, irq_flags); - fence->seq = ++drv->sync_seq; + fence->f.seqno = ++drv->sync_seq; dma_fence_get(&fence->f); list_add_tail(&fence->node, &drv->fences); spin_unlock_irqrestore(&drv->lock, irq_flags); + trace_dma_fence_emit(&fence->f); + cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE); - cmd_hdr->fence_id = cpu_to_le64(fence->seq); - return 0; + cmd_hdr->fence_id = cpu_to_le64(fence->f.seqno); } void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev, @@ -109,7 +118,7 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev, spin_lock_irqsave(&drv->lock, irq_flags); atomic64_set(&vgdev->fence_drv.last_seq, last_seq); list_for_each_entry_safe(fence, tmp, &drv->fences, node) { - if (last_seq < fence->seq) + if (last_seq < fence->f.seqno) continue; dma_fence_signal_locked(&fence->f); list_del(&fence->node); diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c index f06586393974..4c1f579edfb3 100644 --- a/drivers/gpu/drm/virtio/virtgpu_gem.c +++ b/drivers/gpu/drm/virtio/virtgpu_gem.c @@ -23,56 +23,36 @@ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ -#include <drm/drmP.h> -#include "virtgpu_drv.h" - -void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj) -{ - struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(gem_obj); - - if (obj) - virtio_gpu_object_unref(&obj); -} - -struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev, - size_t size, bool kernel, - bool pinned) -{ - struct virtio_gpu_device *vgdev = dev->dev_private; - struct virtio_gpu_object *obj; - int ret; - - ret = virtio_gpu_object_create(vgdev, size, kernel, pinned, &obj); - if (ret) - return ERR_PTR(ret); +#include <drm/drm_file.h> +#include <drm/drm_fourcc.h> - return obj; -} +#include "virtgpu_drv.h" int virtio_gpu_gem_create(struct drm_file *file, struct drm_device *dev, - uint64_t size, + struct virtio_gpu_object_params *params, struct drm_gem_object **obj_p, uint32_t *handle_p) { + struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_object *obj; int ret; u32 handle; - obj = virtio_gpu_alloc_object(dev, size, false, false); - if (IS_ERR(obj)) - return PTR_ERR(obj); + ret = virtio_gpu_object_create(vgdev, params, &obj, NULL); + if (ret < 0) + return ret; - ret = drm_gem_handle_create(file, &obj->gem_base, &handle); + ret = drm_gem_handle_create(file, &obj->base.base, &handle); if (ret) { - drm_gem_object_release(&obj->gem_base); + drm_gem_object_release(&obj->base.base); return ret; } - *obj_p = &obj->gem_base; + *obj_p = &obj->base.base; /* drop reference from allocate - handle holds it now */ - drm_gem_object_put_unlocked(&obj->gem_base); + drm_gem_object_put_unlocked(&obj->base.base); *handle_p = handle; return 0; @@ -82,12 +62,10 @@ int virtio_gpu_mode_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args) { - struct virtio_gpu_device *vgdev = dev->dev_private; struct drm_gem_object *gobj; - struct virtio_gpu_object *obj; + struct virtio_gpu_object_params params = { 0 }; int ret; uint32_t pitch; - uint32_t format; if (args->bpp != 32) return -EINVAL; @@ -96,22 +74,16 @@ int virtio_gpu_mode_dumb_create(struct drm_file *file_priv, args->size = pitch * args->height; args->size = ALIGN(args->size, PAGE_SIZE); - ret = virtio_gpu_gem_create(file_priv, dev, args->size, &gobj, + params.format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888); + params.width = args->width; + params.height = args->height; + params.size = args->size; + params.dumb = true; + ret = virtio_gpu_gem_create(file_priv, dev, ¶ms, &gobj, &args->handle); if (ret) goto fail; - format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888); - obj = gem_to_virtio_gpu_obj(gobj); - virtio_gpu_cmd_create_resource(vgdev, obj, format, - args->width, args->height); - - /* attach the object to the resource */ - ret = virtio_gpu_object_attach(vgdev, obj, NULL); - if (ret) - goto fail; - - obj->dumb = true; args->pitch = pitch; return ret; @@ -141,19 +113,18 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj, { struct virtio_gpu_device *vgdev = obj->dev->dev_private; struct virtio_gpu_fpriv *vfpriv = file->driver_priv; - struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj); - int r; + struct virtio_gpu_object_array *objs; if (!vgdev->has_virgl_3d) return 0; - r = virtio_gpu_object_reserve(qobj, false); - if (r) - return r; + objs = virtio_gpu_array_alloc(1); + if (!objs) + return -ENOMEM; + virtio_gpu_array_add_obj(objs, obj); virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id, - qobj->hw_res_handle); - virtio_gpu_object_unreserve(qobj); + objs); return 0; } @@ -162,17 +133,136 @@ void 
virtio_gpu_gem_object_close(struct drm_gem_object *obj, { struct virtio_gpu_device *vgdev = obj->dev->dev_private; struct virtio_gpu_fpriv *vfpriv = file->driver_priv; - struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj); - int r; + struct virtio_gpu_object_array *objs; if (!vgdev->has_virgl_3d) return; - r = virtio_gpu_object_reserve(qobj, false); - if (r) + objs = virtio_gpu_array_alloc(1); + if (!objs) return; + virtio_gpu_array_add_obj(objs, obj); virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id, - qobj->hw_res_handle); - virtio_gpu_object_unreserve(qobj); + objs); +} + +struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents) +{ + struct virtio_gpu_object_array *objs; + size_t size = sizeof(*objs) + sizeof(objs->objs[0]) * nents; + + objs = kmalloc(size, GFP_KERNEL); + if (!objs) + return NULL; + + objs->nents = 0; + objs->total = nents; + return objs; +} + +static void virtio_gpu_array_free(struct virtio_gpu_object_array *objs) +{ + kfree(objs); +} + +struct virtio_gpu_object_array* +virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents) +{ + struct virtio_gpu_object_array *objs; + u32 i; + + objs = virtio_gpu_array_alloc(nents); + if (!objs) + return NULL; + + for (i = 0; i < nents; i++) { + objs->objs[i] = drm_gem_object_lookup(drm_file, handles[i]); + if (!objs->objs[i]) { + objs->nents = i; + virtio_gpu_array_put_free(objs); + return NULL; + } + } + objs->nents = i; + return objs; +} + +void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs, + struct drm_gem_object *obj) +{ + if (WARN_ON_ONCE(objs->nents == objs->total)) + return; + + drm_gem_object_get(obj); + objs->objs[objs->nents] = obj; + objs->nents++; +} + +int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs) +{ + int ret; + + if (objs->nents == 1) { + ret = dma_resv_lock_interruptible(objs->objs[0]->resv, NULL); + } else { + ret = drm_gem_lock_reservations(objs->objs, objs->nents, + &objs->ticket); + } + return ret; +} + +void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs) +{ + if (objs->nents == 1) { + dma_resv_unlock(objs->objs[0]->resv); + } else { + drm_gem_unlock_reservations(objs->objs, objs->nents, + &objs->ticket); + } +} + +void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs, + struct dma_fence *fence) +{ + int i; + + for (i = 0; i < objs->nents; i++) + dma_resv_add_excl_fence(objs->objs[i]->resv, fence); +} + +void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs) +{ + u32 i; + + for (i = 0; i < objs->nents; i++) + drm_gem_object_put_unlocked(objs->objs[i]); + virtio_gpu_array_free(objs); +} + +void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object_array *objs) +{ + spin_lock(&vgdev->obj_free_lock); + list_add_tail(&objs->next, &vgdev->obj_free_list); + spin_unlock(&vgdev->obj_free_lock); + schedule_work(&vgdev->obj_free_work); +} + +void virtio_gpu_array_put_free_work(struct work_struct *work) +{ + struct virtio_gpu_device *vgdev = + container_of(work, struct virtio_gpu_device, obj_free_work); + struct virtio_gpu_object_array *objs; + + spin_lock(&vgdev->obj_free_lock); + while (!list_empty(&vgdev->obj_free_list)) { + objs = list_first_entry(&vgdev->obj_free_list, + struct virtio_gpu_object_array, next); + list_del(&objs->next); + spin_unlock(&vgdev->obj_free_lock); + virtio_gpu_array_put_free(objs); + spin_lock(&vgdev->obj_free_lock); + } + spin_unlock(&vgdev->obj_free_lock); } diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c 
b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 14ce8188c052..9af1ec62434f 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -25,11 +25,12 @@ * OTHER DEALINGS IN THE SOFTWARE. */ -#include <drm/drmP.h> -#include <drm/virtgpu_drm.h> -#include <drm/ttm/ttm_execbuf_util.h> +#include <linux/file.h> #include <linux/sync_file.h> +#include <drm/drm_file.h> +#include <drm/virtgpu_drm.h> + #include "virtgpu_drv.h" static void convert_to_hw_box(struct virtio_gpu_box *dst, @@ -54,45 +55,6 @@ static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data, &virtio_gpu_map->offset); } -static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket, - struct list_head *head) -{ - struct ttm_operation_ctx ctx = { false, false }; - struct ttm_validate_buffer *buf; - struct ttm_buffer_object *bo; - struct virtio_gpu_object *qobj; - int ret; - - ret = ttm_eu_reserve_buffers(ticket, head, true, NULL); - if (ret != 0) - return ret; - - list_for_each_entry(buf, head, head) { - bo = buf->bo; - qobj = container_of(bo, struct virtio_gpu_object, tbo); - ret = ttm_bo_validate(bo, &qobj->placement, &ctx); - if (ret) { - ttm_eu_backoff_reservation(ticket, head); - return ret; - } - } - return 0; -} - -static void virtio_gpu_unref_list(struct list_head *head) -{ - struct ttm_validate_buffer *buf; - struct ttm_buffer_object *bo; - struct virtio_gpu_object *qobj; - - list_for_each_entry(buf, head, head) { - bo = buf->bo; - qobj = container_of(bo, struct virtio_gpu_object, tbo); - - drm_gem_object_put_unlocked(&qobj->gem_base); - } -} - /* * Usage of execbuffer: * Relocations need to take into account the full VIRTIO_GPUDrawable size. @@ -105,16 +67,11 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data, struct drm_virtgpu_execbuffer *exbuf = data; struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv; - struct drm_gem_object *gobj; struct virtio_gpu_fence *out_fence; - struct virtio_gpu_object *qobj; int ret; uint32_t *bo_handles = NULL; void __user *user_bo_handles = NULL; - struct list_head validate_list; - struct ttm_validate_buffer *buflist = NULL; - int i; - struct ww_acquire_ctx ticket; + struct virtio_gpu_object_array *buflist = NULL; struct sync_file *sync_file; int in_fence_fd = exbuf->fence_fd; int out_fence_fd = -1; @@ -155,48 +112,38 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data, return out_fence_fd; } - INIT_LIST_HEAD(&validate_list); if (exbuf->num_bo_handles) { - bo_handles = kvmalloc_array(exbuf->num_bo_handles, - sizeof(uint32_t), GFP_KERNEL); - buflist = kvmalloc_array(exbuf->num_bo_handles, - sizeof(struct ttm_validate_buffer), - GFP_KERNEL | __GFP_ZERO); - if (!bo_handles || !buflist) { + sizeof(uint32_t), GFP_KERNEL); + if (!bo_handles) { ret = -ENOMEM; goto out_unused_fd; } - user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles; + user_bo_handles = u64_to_user_ptr(exbuf->bo_handles); if (copy_from_user(bo_handles, user_bo_handles, exbuf->num_bo_handles * sizeof(uint32_t))) { ret = -EFAULT; goto out_unused_fd; } - for (i = 0; i < exbuf->num_bo_handles; i++) { - gobj = drm_gem_object_lookup(drm_file, bo_handles[i]); - if (!gobj) { - ret = -ENOENT; - goto out_unused_fd; - } - - qobj = gem_to_virtio_gpu_obj(gobj); - buflist[i].bo = &qobj->tbo; - - list_add(&buflist[i].head, &validate_list); + buflist = virtio_gpu_array_from_handles(drm_file, bo_handles, + exbuf->num_bo_handles); + if (!buflist) { + ret = -ENOENT; + goto 
out_unused_fd; } kvfree(bo_handles); bo_handles = NULL; } - ret = virtio_gpu_object_list_validate(&ticket, &validate_list); - if (ret) - goto out_free; + if (buflist) { + ret = virtio_gpu_array_lock_resv(buflist); + if (ret) + goto out_unused_fd; + } - buf = memdup_user((void __user *)(uintptr_t)exbuf->command, - exbuf->size); + buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size); if (IS_ERR(buf)) { ret = PTR_ERR(buf); goto out_unresv; @@ -221,24 +168,18 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data, } virtio_gpu_cmd_submit(vgdev, buf, exbuf->size, - vfpriv->ctx_id, out_fence); - - ttm_eu_fence_buffer_objects(&ticket, &validate_list, &out_fence->f); - - /* fence the command bo */ - virtio_gpu_unref_list(&validate_list); - kvfree(buflist); + vfpriv->ctx_id, buflist, out_fence); return 0; out_memdup: - kfree(buf); + kvfree(buf); out_unresv: - ttm_eu_backoff_reservation(&ticket, &validate_list); -out_free: - virtio_gpu_unref_list(&validate_list); + if (buflist) + virtio_gpu_array_unlock_resv(buflist); out_unused_fd: kvfree(bo_handles); - kvfree(buflist); + if (buflist) + virtio_gpu_array_put_free(buflist); if (out_fence_fd >= 0) put_unused_fd(out_fence_fd); @@ -263,10 +204,9 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data, default: return -EINVAL; } - if (copy_to_user((void __user *)(unsigned long)param->value, - &value, sizeof(int))) { + if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int))) return -EFAULT; - } + return 0; } @@ -275,16 +215,12 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data, { struct virtio_gpu_device *vgdev = dev->dev_private; struct drm_virtgpu_resource_create *rc = data; + struct virtio_gpu_fence *fence; int ret; struct virtio_gpu_object *qobj; struct drm_gem_object *obj; uint32_t handle = 0; - uint32_t size; - struct list_head validate_list; - struct ttm_validate_buffer mainbuf; - struct virtio_gpu_fence *fence = NULL; - struct ww_acquire_ctx ticket; - struct virtio_gpu_resource_create_3d rc_3d; + struct virtio_gpu_object_params params = { 0 }; if (vgdev->has_virgl_3d == false) { if (rc->depth > 1) @@ -299,94 +235,43 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data, return -EINVAL; } - INIT_LIST_HEAD(&validate_list); - memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer)); - - size = rc->size; - + params.format = rc->format; + params.width = rc->width; + params.height = rc->height; + params.size = rc->size; + if (vgdev->has_virgl_3d) { + params.virgl = true; + params.target = rc->target; + params.bind = rc->bind; + params.depth = rc->depth; + params.array_size = rc->array_size; + params.last_level = rc->last_level; + params.nr_samples = rc->nr_samples; + params.flags = rc->flags; + } /* allocate a single page size object */ - if (size == 0) - size = PAGE_SIZE; - - qobj = virtio_gpu_alloc_object(dev, size, false, false); - if (IS_ERR(qobj)) - return PTR_ERR(qobj); - obj = &qobj->gem_base; - - if (!vgdev->has_virgl_3d) { - virtio_gpu_cmd_create_resource(vgdev, qobj, rc->format, - rc->width, rc->height); - - ret = virtio_gpu_object_attach(vgdev, qobj, NULL); - } else { - /* use a gem reference since unref list undoes them */ - drm_gem_object_get(&qobj->gem_base); - mainbuf.bo = &qobj->tbo; - list_add(&mainbuf.head, &validate_list); - - ret = virtio_gpu_object_list_validate(&ticket, &validate_list); - if (ret) { - DRM_DEBUG("failed to validate\n"); - goto fail_unref; - } + if (params.size == 0) + params.size = PAGE_SIZE; - 
rc_3d.resource_id = cpu_to_le32(qobj->hw_res_handle); - rc_3d.target = cpu_to_le32(rc->target); - rc_3d.format = cpu_to_le32(rc->format); - rc_3d.bind = cpu_to_le32(rc->bind); - rc_3d.width = cpu_to_le32(rc->width); - rc_3d.height = cpu_to_le32(rc->height); - rc_3d.depth = cpu_to_le32(rc->depth); - rc_3d.array_size = cpu_to_le32(rc->array_size); - rc_3d.last_level = cpu_to_le32(rc->last_level); - rc_3d.nr_samples = cpu_to_le32(rc->nr_samples); - rc_3d.flags = cpu_to_le32(rc->flags); - - fence = virtio_gpu_fence_alloc(vgdev); - if (!fence) { - ret = -ENOMEM; - goto fail_backoff; - } - - virtio_gpu_cmd_resource_create_3d(vgdev, qobj, &rc_3d); - ret = virtio_gpu_object_attach(vgdev, qobj, fence); - if (ret) { - dma_fence_put(&fence->f); - goto fail_backoff; - } - ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f); - } + fence = virtio_gpu_fence_alloc(vgdev); + if (!fence) + return -ENOMEM; + ret = virtio_gpu_object_create(vgdev, ¶ms, &qobj, fence); + dma_fence_put(&fence->f); + if (ret < 0) + return ret; + obj = &qobj->base.base; ret = drm_gem_handle_create(file_priv, obj, &handle); if (ret) { - drm_gem_object_release(obj); - if (vgdev->has_virgl_3d) { - virtio_gpu_unref_list(&validate_list); - dma_fence_put(&fence->f); - } return ret; } drm_gem_object_put_unlocked(obj); rc->res_handle = qobj->hw_res_handle; /* similiar to a VM address */ rc->bo_handle = handle; - - if (vgdev->has_virgl_3d) { - virtio_gpu_unref_list(&validate_list); - dma_fence_put(&fence->f); - } return 0; -fail_backoff: - ttm_eu_backoff_reservation(&ticket, &validate_list); -fail_unref: - if (vgdev->has_virgl_3d) { - virtio_gpu_unref_list(&validate_list); - dma_fence_put(&fence->f); - } -//fail_obj: -// drm_gem_object_handle_unreference_unlocked(obj); - return ret; } static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data, @@ -402,7 +287,7 @@ static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data, qobj = gem_to_virtio_gpu_obj(gobj); - ri->size = qobj->gem_base.size; + ri->size = qobj->base.base.size; ri->res_handle = qobj->hw_res_handle; drm_gem_object_put_unlocked(gobj); return 0; @@ -415,9 +300,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev, struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_fpriv *vfpriv = file->driver_priv; struct drm_virtgpu_3d_transfer_from_host *args = data; - struct ttm_operation_ctx ctx = { true, false }; - struct drm_gem_object *gobj = NULL; - struct virtio_gpu_object *qobj = NULL; + struct virtio_gpu_object_array *objs; struct virtio_gpu_fence *fence; int ret; u32 offset = args->offset; @@ -426,39 +309,31 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev, if (vgdev->has_virgl_3d == false) return -ENOSYS; - gobj = drm_gem_object_lookup(file, args->bo_handle); - if (gobj == NULL) + objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1); + if (objs == NULL) return -ENOENT; - qobj = gem_to_virtio_gpu_obj(gobj); - - ret = virtio_gpu_object_reserve(qobj, false); - if (ret) - goto out; - - ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx); - if (unlikely(ret)) - goto out_unres; + ret = virtio_gpu_array_lock_resv(objs); + if (ret != 0) + goto err_put_free; convert_to_hw_box(&box, &args->box); fence = virtio_gpu_fence_alloc(vgdev); if (!fence) { ret = -ENOMEM; - goto out_unres; + goto err_unlock; } virtio_gpu_cmd_transfer_from_host_3d - (vgdev, qobj->hw_res_handle, - vfpriv->ctx_id, offset, args->level, - &box, fence); - 
reservation_object_add_excl_fence(qobj->tbo.resv, - &fence->f); - + (vgdev, vfpriv->ctx_id, offset, args->level, + &box, objs, fence); dma_fence_put(&fence->f); -out_unres: - virtio_gpu_object_unreserve(qobj); -out: - drm_gem_object_put_unlocked(gobj); + return 0; + +err_unlock: + virtio_gpu_array_unlock_resv(objs); +err_put_free: + virtio_gpu_array_put_free(objs); return ret; } @@ -468,75 +343,71 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data, struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_fpriv *vfpriv = file->driver_priv; struct drm_virtgpu_3d_transfer_to_host *args = data; - struct ttm_operation_ctx ctx = { true, false }; - struct drm_gem_object *gobj = NULL; - struct virtio_gpu_object *qobj = NULL; + struct virtio_gpu_object_array *objs; struct virtio_gpu_fence *fence; struct virtio_gpu_box box; int ret; u32 offset = args->offset; - gobj = drm_gem_object_lookup(file, args->bo_handle); - if (gobj == NULL) + objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1); + if (objs == NULL) return -ENOENT; - qobj = gem_to_virtio_gpu_obj(gobj); - - ret = virtio_gpu_object_reserve(qobj, false); - if (ret) - goto out; - - ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx); - if (unlikely(ret)) - goto out_unres; - convert_to_hw_box(&box, &args->box); if (!vgdev->has_virgl_3d) { virtio_gpu_cmd_transfer_to_host_2d - (vgdev, qobj, offset, - box.w, box.h, box.x, box.y, NULL); + (vgdev, offset, + box.w, box.h, box.x, box.y, + objs, NULL); } else { + ret = virtio_gpu_array_lock_resv(objs); + if (ret != 0) + goto err_put_free; + + ret = -ENOMEM; fence = virtio_gpu_fence_alloc(vgdev); - if (!fence) { - ret = -ENOMEM; - goto out_unres; - } + if (!fence) + goto err_unlock; + virtio_gpu_cmd_transfer_to_host_3d - (vgdev, qobj, + (vgdev, vfpriv ? 
vfpriv->ctx_id : 0, offset, - args->level, &box, fence); - reservation_object_add_excl_fence(qobj->tbo.resv, - &fence->f); + args->level, &box, objs, fence); dma_fence_put(&fence->f); } + return 0; -out_unres: - virtio_gpu_object_unreserve(qobj); -out: - drm_gem_object_put_unlocked(gobj); +err_unlock: + virtio_gpu_array_unlock_resv(objs); +err_put_free: + virtio_gpu_array_put_free(objs); return ret; } static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) + struct drm_file *file) { struct drm_virtgpu_3d_wait *args = data; - struct drm_gem_object *gobj = NULL; - struct virtio_gpu_object *qobj = NULL; + struct drm_gem_object *obj; + long timeout = 15 * HZ; int ret; - bool nowait = false; - gobj = drm_gem_object_lookup(file, args->handle); - if (gobj == NULL) + obj = drm_gem_object_lookup(file, args->handle); + if (obj == NULL) return -ENOENT; - qobj = gem_to_virtio_gpu_obj(gobj); - - if (args->flags & VIRTGPU_WAIT_NOWAIT) - nowait = true; - ret = virtio_gpu_object_wait(qobj, nowait); + if (args->flags & VIRTGPU_WAIT_NOWAIT) { + ret = dma_resv_test_signaled_rcu(obj->resv, true); + } else { + ret = dma_resv_wait_timeout_rcu(obj->resv, true, true, + timeout); + } + if (ret == 0) + ret = -EBUSY; + else if (ret > 0) + ret = 0; - drm_gem_object_put_unlocked(gobj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -581,7 +452,6 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev, list_for_each_entry(cache_ent, &vgdev->cap_cache, head) { if (cache_ent->id == args->cap_set_id && cache_ent->version == args->cap_set_ver) { - ptr = cache_ent->caps_cache; spin_unlock(&vgdev->display_info_lock); goto copy_exit; } @@ -592,15 +462,18 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev, virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver, &cache_ent); +copy_exit: ret = wait_event_timeout(vgdev->resp_wq, atomic_read(&cache_ent->is_valid), 5 * HZ); if (!ret) return -EBUSY; + /* is_valid check must proceed before copy of the cache entry. */ + smp_rmb(); + ptr = cache_ent->caps_cache; -copy_exit: - if (copy_to_user((void __user *)(unsigned long)args->addr, ptr, size)) + if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size)) return -EFAULT; return 0; @@ -608,34 +481,34 @@ copy_exit: struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = { DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl, - DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), + DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl, - DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), + DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl, - DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), + DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE, virtio_gpu_resource_create_ioctl, - DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), + DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl, - DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), + DRM_RENDER_ALLOW), /* make transfer async to the main ring? 
- no sure, can we * thread these in the underlying GL */ DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST, virtio_gpu_transfer_from_host_ioctl, - DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), + DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST, virtio_gpu_transfer_to_host_ioctl, - DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), + DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl, - DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), + DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl, - DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), + DRM_RENDER_ALLOW), }; diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c index 84b6a6bf00c6..2f5773e43557 100644 --- a/drivers/gpu/drm/virtio/virtgpu_kms.c +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c @@ -25,7 +25,9 @@ #include <linux/virtio.h> #include <linux/virtio_config.h> -#include <drm/drmP.h> + +#include <drm/drm_file.h> + #include "virtgpu_drv.h" static void virtio_gpu_config_changed_work_func(struct work_struct *work) @@ -145,19 +147,23 @@ int virtio_gpu_init(struct drm_device *dev) INIT_WORK(&vgdev->config_changed_work, virtio_gpu_config_changed_work_func); + INIT_WORK(&vgdev->obj_free_work, + virtio_gpu_array_put_free_work); + INIT_LIST_HEAD(&vgdev->obj_free_list); + spin_lock_init(&vgdev->obj_free_lock); + #ifdef __LITTLE_ENDIAN if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL)) vgdev->has_virgl_3d = true; - DRM_INFO("virgl 3d acceleration %s\n", - vgdev->has_virgl_3d ? "enabled" : "not supported by host"); -#else - DRM_INFO("virgl 3d acceleration not supported by guest\n"); #endif if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) { vgdev->has_edid = true; - DRM_INFO("EDID support available.\n"); } + DRM_INFO("features: %cvirgl %cedid\n", + vgdev->has_virgl_3d ? '+' : '-', + vgdev->has_edid ? '+' : '-'); + ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL); if (ret) { DRM_ERROR("failed to find virt queues\n"); @@ -171,12 +177,6 @@ int virtio_gpu_init(struct drm_device *dev) goto err_vbufs; } - ret = virtio_gpu_ttm_init(vgdev); - if (ret) { - DRM_ERROR("failed to init ttm %d\n", ret); - goto err_ttm; - } - /* get display info */ virtio_cread(vgdev->vdev, struct virtio_gpu_config, num_scanouts, &num_scanouts); @@ -208,8 +208,6 @@ int virtio_gpu_init(struct drm_device *dev) return 0; err_scanouts: - virtio_gpu_ttm_fini(vgdev); -err_ttm: virtio_gpu_free_vbufs(vgdev); err_vbufs: vgdev->vdev->config->del_vqs(vgdev->vdev); @@ -232,6 +230,7 @@ void virtio_gpu_deinit(struct drm_device *dev) { struct virtio_gpu_device *vgdev = dev->dev_private; + flush_work(&vgdev->obj_free_work); vgdev->vqs_ready = false; flush_work(&vgdev->ctrlq.dequeue_work); flush_work(&vgdev->cursorq.dequeue_work); @@ -240,7 +239,6 @@ void virtio_gpu_deinit(struct drm_device *dev) vgdev->vdev->config->del_vqs(vgdev->vdev); virtio_gpu_modeset_fini(vgdev); - virtio_gpu_ttm_fini(vgdev); virtio_gpu_free_vbufs(vgdev); virtio_gpu_cleanup_cap_cache(vgdev); kfree(vgdev->capsets); diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index e7e946035027..017a9e0fc3bb 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -23,189 +23,143 @@ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ +#include <linux/moduleparam.h> + #include "virtgpu_drv.h" +static int virtio_gpu_virglrenderer_workaround = 1; +module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400); + static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid) { -#if 0 - int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL); - - if (handle < 0) - return handle; -#else - static int handle; - - /* - * FIXME: dirty hack to avoid re-using IDs, virglrenderer - * can't deal with that. Needs fixing in virglrenderer, also - * should figure a better way to handle that in the guest. - */ - handle++; -#endif - - *resid = handle + 1; + if (virtio_gpu_virglrenderer_workaround) { + /* + * Hack to avoid re-using resource IDs. + * + * virglrenderer versions up to (and including) 0.7.0 + * can't deal with that. virglrenderer commit + * "f91a9dd35715 Fix unlinking resources from hash + * table." (Feb 2019) fixes the bug. + */ + static int handle; + handle++; + *resid = handle + 1; + } else { + int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL); + if (handle < 0) + return handle; + *resid = handle + 1; + } return 0; } static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id) { -#if 0 - ida_free(&vgdev->resource_ida, id - 1); -#endif + if (!virtio_gpu_virglrenderer_workaround) { + ida_free(&vgdev->resource_ida, id - 1); + } } -static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) +static void virtio_gpu_free_object(struct drm_gem_object *obj) { - struct virtio_gpu_object *bo; - struct virtio_gpu_device *vgdev; - - bo = container_of(tbo, struct virtio_gpu_object, tbo); - vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private; + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); + struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private; + if (bo->pages) + virtio_gpu_object_detach(vgdev, bo); if (bo->created) virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle); - if (bo->pages) - virtio_gpu_object_free_sg_table(bo); - if (bo->vmap) - virtio_gpu_object_kunmap(bo); - drm_gem_object_release(&bo->gem_base); virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle); - kfree(bo); + + drm_gem_shmem_free_object(obj); } -static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo, - bool pinned) +static const struct drm_gem_object_funcs virtio_gpu_gem_funcs = { + .free = virtio_gpu_free_object, + .open = virtio_gpu_gem_object_open, + .close = virtio_gpu_gem_object_close, + + .print_info = drm_gem_shmem_print_info, + .pin = drm_gem_shmem_pin, + .unpin = drm_gem_shmem_unpin, + .get_sg_table = drm_gem_shmem_get_sg_table, + .vmap = drm_gem_shmem_vmap, + .vunmap = drm_gem_shmem_vunmap, + .mmap = &drm_gem_shmem_mmap, +}; + +struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev, + size_t size) { - u32 c = 1; - u32 pflag = pinned ? 
TTM_PL_FLAG_NO_EVICT : 0; - - vgbo->placement.placement = &vgbo->placement_code; - vgbo->placement.busy_placement = &vgbo->placement_code; - vgbo->placement_code.fpfn = 0; - vgbo->placement_code.lpfn = 0; - vgbo->placement_code.flags = - TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT | pflag; - vgbo->placement.num_placement = c; - vgbo->placement.num_busy_placement = c; + struct virtio_gpu_object *bo; + + bo = kzalloc(sizeof(*bo), GFP_KERNEL); + if (!bo) + return NULL; + bo->base.base.funcs = &virtio_gpu_gem_funcs; + return &bo->base.base; } int virtio_gpu_object_create(struct virtio_gpu_device *vgdev, - unsigned long size, bool kernel, bool pinned, - struct virtio_gpu_object **bo_ptr) + struct virtio_gpu_object_params *params, + struct virtio_gpu_object **bo_ptr, + struct virtio_gpu_fence *fence) { + struct virtio_gpu_object_array *objs = NULL; + struct drm_gem_shmem_object *shmem_obj; struct virtio_gpu_object *bo; - enum ttm_bo_type type; - size_t acc_size; int ret; - if (kernel) - type = ttm_bo_type_kernel; - else - type = ttm_bo_type_device; *bo_ptr = NULL; - acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, size, - sizeof(struct virtio_gpu_object)); + params->size = roundup(params->size, PAGE_SIZE); + shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size); + if (IS_ERR(shmem_obj)) + return PTR_ERR(shmem_obj); + bo = gem_to_virtio_gpu_obj(&shmem_obj->base); - bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL); - if (bo == NULL) - return -ENOMEM; ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle); - if (ret < 0) { - kfree(bo); - return ret; - } - size = roundup(size, PAGE_SIZE); - ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size); - if (ret != 0) { - virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle); - kfree(bo); - return ret; - } - bo->dumb = false; - virtio_gpu_init_ttm_placement(bo, pinned); - - ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, size, type, - &bo->placement, 0, !kernel, acc_size, - NULL, NULL, &virtio_gpu_ttm_bo_destroy); - /* ttm_bo_init failure will call the destroy */ - if (ret != 0) - return ret; + if (ret < 0) + goto err_free_gem; - *bo_ptr = bo; - return 0; -} + bo->dumb = params->dumb; -void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo) -{ - bo->vmap = NULL; - ttm_bo_kunmap(&bo->kmap); -} + if (fence) { + ret = -ENOMEM; + objs = virtio_gpu_array_alloc(1); + if (!objs) + goto err_put_id; + virtio_gpu_array_add_obj(objs, &bo->base.base); -int virtio_gpu_object_kmap(struct virtio_gpu_object *bo) -{ - bool is_iomem; - int r; + ret = virtio_gpu_array_lock_resv(objs); + if (ret != 0) + goto err_put_objs; + } - WARN_ON(bo->vmap); + if (params->virgl) { + virtio_gpu_cmd_resource_create_3d(vgdev, bo, params, + objs, fence); + } else { + virtio_gpu_cmd_create_resource(vgdev, bo, params, + objs, fence); + } - r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); - if (r) - return r; - bo->vmap = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); - return 0; -} + ret = virtio_gpu_object_attach(vgdev, bo, NULL); + if (ret != 0) { + virtio_gpu_free_object(&shmem_obj->base); + return ret; + } -int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev, - struct virtio_gpu_object *bo) -{ - int ret; - struct page **pages = bo->tbo.ttm->pages; - int nr_pages = bo->tbo.num_pages; - struct ttm_operation_ctx ctx = { - .interruptible = false, - .no_wait_gpu = false - }; - - /* wtf swapping */ - if (bo->pages) - return 0; - - if (bo->tbo.ttm->state == tt_unpopulated) - bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm, &ctx); - bo->pages = 
kmalloc(sizeof(struct sg_table), GFP_KERNEL); - if (!bo->pages) - goto out; - - ret = sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0, - nr_pages << PAGE_SHIFT, GFP_KERNEL); - if (ret) - goto out; + *bo_ptr = bo; return 0; -out: - kfree(bo->pages); - bo->pages = NULL; - return -ENOMEM; -} - -void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo) -{ - sg_free_table(bo->pages); - kfree(bo->pages); - bo->pages = NULL; -} -int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait) -{ - int r; - - r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL); - if (unlikely(r != 0)) - return r; - r = ttm_bo_wait(&bo->tbo, true, no_wait); - ttm_bo_unreserve(&bo->tbo); - return r; +err_put_objs: + virtio_gpu_array_put_free(objs); +err_put_id: + virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle); +err_free_gem: + drm_gem_shmem_free_object(&shmem_obj->base); + return ret; } - diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index 024c2aa0c929..390524143139 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -23,9 +23,11 @@ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ -#include "virtgpu_drv.h" -#include <drm/drm_plane_helper.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_plane_helper.h> + +#include "virtgpu_drv.h" static const uint32_t virtio_gpu_formats[] = { DRM_FORMAT_HOST_XRGB8888, @@ -82,7 +84,22 @@ static const struct drm_plane_funcs virtio_gpu_plane_funcs = { static int virtio_gpu_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) { - return 0; + bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR; + struct drm_crtc_state *crtc_state; + int ret; + + if (!state->fb || !state->crtc) + return 0; + + crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + ret = drm_atomic_helper_check_plane_state(state, crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + is_cursor, true); + return ret; } static void virtio_gpu_primary_plane_update(struct drm_plane *plane, @@ -107,12 +124,19 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane, bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]); handle = bo->hw_res_handle; if (bo->dumb) { + struct virtio_gpu_object_array *objs; + + objs = virtio_gpu_array_alloc(1); + if (!objs) + return; + virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]); virtio_gpu_cmd_transfer_to_host_2d - (vgdev, bo, 0, - cpu_to_le32(plane->state->src_w >> 16), - cpu_to_le32(plane->state->src_h >> 16), - cpu_to_le32(plane->state->src_x >> 16), - cpu_to_le32(plane->state->src_y >> 16), NULL); + (vgdev, 0, + plane->state->src_w >> 16, + plane->state->src_h >> 16, + plane->state->src_x >> 16, + plane->state->src_y >> 16, + objs, NULL); } } else { handle = 0; @@ -184,7 +208,6 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, struct virtio_gpu_framebuffer *vgfb; struct virtio_gpu_object *bo = NULL; uint32_t handle; - int ret = 0; if (plane->state->crtc) output = drm_crtc_to_virtio_gpu_output(plane->state->crtc); @@ -203,20 +226,20 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, if (bo && bo->dumb && (plane->state->fb != old_state->fb)) { /* new cursor -- update & wait */ + struct virtio_gpu_object_array *objs; + + objs = virtio_gpu_array_alloc(1); + if (!objs) + return; + virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]); 
virtio_gpu_cmd_transfer_to_host_2d - (vgdev, bo, 0, - cpu_to_le32(plane->state->crtc_w), - cpu_to_le32(plane->state->crtc_h), - 0, 0, vgfb->fence); - ret = virtio_gpu_object_reserve(bo, false); - if (!ret) { - reservation_object_add_excl_fence(bo->tbo.resv, - &vgfb->fence->f); - dma_fence_put(&vgfb->fence->f); - vgfb->fence = NULL; - virtio_gpu_object_unreserve(bo); - virtio_gpu_object_wait(bo, false); - } + (vgdev, 0, + plane->state->crtc_w, + plane->state->crtc_h, + 0, 0, objs, vgfb->fence); + dma_fence_wait(&vgfb->fence->f, true); + dma_fence_put(&vgfb->fence->f); + vgfb->fence = NULL; } if (plane->state->fb != old_state->fb) { diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c index c59ec34c80a5..050d24c39a8f 100644 --- a/drivers/gpu/drm/virtio/virtgpu_prime.c +++ b/drivers/gpu/drm/virtio/virtgpu_prime.c @@ -22,41 +22,17 @@ * Authors: Andreas Pokorny */ +#include <drm/drm_prime.h> + #include "virtgpu_drv.h" /* Empty Implementations as there should not be any other driver for a virtual * device that might share buffers with virtgpu */ -int virtgpu_gem_prime_pin(struct drm_gem_object *obj) -{ - WARN_ONCE(1, "not implemented"); - return -ENODEV; -} - -void virtgpu_gem_prime_unpin(struct drm_gem_object *obj) -{ - WARN_ONCE(1, "not implemented"); -} - -void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj) -{ - struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); - int ret; - - ret = virtio_gpu_object_kmap(bo); - if (ret) - return NULL; - return bo->vmap; -} - -void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) -{ - virtio_gpu_object_kunmap(gem_to_virtio_gpu_obj(obj)); -} - -int virtgpu_gem_prime_mmap(struct drm_gem_object *obj, - struct vm_area_struct *area) +struct drm_gem_object *virtgpu_gem_prime_import_sg_table( + struct drm_device *dev, struct dma_buf_attachment *attach, + struct sg_table *table) { - return -ENODEV; + return ERR_PTR(-ENODEV); } diff --git a/drivers/gpu/drm/virtio/virtgpu_trace.h b/drivers/gpu/drm/virtio/virtgpu_trace.h new file mode 100644 index 000000000000..711ecc2bd241 --- /dev/null +++ b/drivers/gpu/drm/virtio/virtgpu_trace.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#if !defined(_VIRTGPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _VIRTGPU_TRACE_H_ + +#include <linux/tracepoint.h> + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM virtio_gpu +#define TRACE_INCLUDE_FILE virtgpu_trace + +DECLARE_EVENT_CLASS(virtio_gpu_cmd, + TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr), + TP_ARGS(vq, hdr), + TP_STRUCT__entry( + __field(int, dev) + __field(unsigned int, vq) + __field(const char *, name) + __field(u32, type) + __field(u32, flags) + __field(u64, fence_id) + __field(u32, ctx_id) + ), + TP_fast_assign( + __entry->dev = vq->vdev->index; + __entry->vq = vq->index; + __entry->name = vq->name; + __entry->type = le32_to_cpu(hdr->type); + __entry->flags = le32_to_cpu(hdr->flags); + __entry->fence_id = le64_to_cpu(hdr->fence_id); + __entry->ctx_id = le32_to_cpu(hdr->ctx_id); + ), + TP_printk("vdev=%d vq=%u name=%s type=0x%x flags=0x%x fence_id=%llu ctx_id=%u", + __entry->dev, __entry->vq, __entry->name, + __entry->type, __entry->flags, __entry->fence_id, + __entry->ctx_id) +); + +DEFINE_EVENT(virtio_gpu_cmd, virtio_gpu_cmd_queue, + TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr), + TP_ARGS(vq, hdr) +); + +DEFINE_EVENT(virtio_gpu_cmd, virtio_gpu_cmd_response, + TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr), + TP_ARGS(vq, hdr) +); + 
+#endif + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/virtio +#include <trace/define_trace.h> diff --git a/drivers/gpu/drm/virtio/virtgpu_trace_points.c b/drivers/gpu/drm/virtio/virtgpu_trace_points.c new file mode 100644 index 000000000000..1970cb6f24ef --- /dev/null +++ b/drivers/gpu/drm/virtio/virtgpu_trace_points.c @@ -0,0 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "virtgpu_drv.h" + +#define CREATE_TRACE_POINTS +#include "virtgpu_trace.h" diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c deleted file mode 100644 index 4bfbf25fabff..000000000000 --- a/drivers/gpu/drm/virtio/virtgpu_ttm.c +++ /dev/null @@ -1,356 +0,0 @@ -/* - * Copyright (C) 2015 Red Hat, Inc. - * All Rights Reserved. - * - * Authors: - * Dave Airlie - * Alon Levy - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ - -#include <drm/ttm/ttm_bo_api.h> -#include <drm/ttm/ttm_bo_driver.h> -#include <drm/ttm/ttm_placement.h> -#include <drm/ttm/ttm_page_alloc.h> -#include <drm/ttm/ttm_module.h> -#include <drm/drmP.h> -#include <drm/drm.h> -#include <drm/virtgpu_drm.h> -#include "virtgpu_drv.h" - -#include <linux/delay.h> - -#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) - -static struct -virtio_gpu_device *virtio_gpu_get_vgdev(struct ttm_bo_device *bdev) -{ - struct virtio_gpu_mman *mman; - struct virtio_gpu_device *vgdev; - - mman = container_of(bdev, struct virtio_gpu_mman, bdev); - vgdev = container_of(mman, struct virtio_gpu_device, mman); - return vgdev; -} - -int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma) -{ - struct drm_file *file_priv; - struct virtio_gpu_device *vgdev; - int r; - - file_priv = filp->private_data; - vgdev = file_priv->minor->dev->dev_private; - if (vgdev == NULL) { - DRM_ERROR( - "filp->private_data->minor->dev->dev_private == NULL\n"); - return -EINVAL; - } - r = ttm_bo_mmap(filp, vma, &vgdev->mman.bdev); - - return r; -} - -static int virtio_gpu_invalidate_caches(struct ttm_bo_device *bdev, - uint32_t flags) -{ - return 0; -} - -static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, - struct ttm_buffer_object *bo, - const struct ttm_place *place, - struct ttm_mem_reg *mem) -{ - mem->mm_node = (void *)1; - return 0; -} - -static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man, - struct ttm_mem_reg *mem) -{ - mem->mm_node = (void *)NULL; -} - -static int ttm_bo_man_init(struct ttm_mem_type_manager *man, - unsigned long p_size) -{ - return 0; -} - -static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man) -{ - return 0; -} - -static void ttm_bo_man_debug(struct ttm_mem_type_manager *man, - struct drm_printer *printer) -{ -} - -static const struct ttm_mem_type_manager_func virtio_gpu_bo_manager_func = { - .init = ttm_bo_man_init, - .takedown = ttm_bo_man_takedown, - .get_node = ttm_bo_man_get_node, - .put_node = ttm_bo_man_put_node, - .debug = ttm_bo_man_debug -}; - -static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, - struct ttm_mem_type_manager *man) -{ - struct virtio_gpu_device *vgdev; - - vgdev = virtio_gpu_get_vgdev(bdev); - - switch (type) { - case TTM_PL_SYSTEM: - /* System memory */ - man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; - man->available_caching = TTM_PL_MASK_CACHING; - man->default_caching = TTM_PL_FLAG_CACHED; - break; - case TTM_PL_TT: - man->func = &virtio_gpu_bo_manager_func; - man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; - man->available_caching = TTM_PL_MASK_CACHING; - man->default_caching = TTM_PL_FLAG_CACHED; - break; - default: - DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type); - return -EINVAL; - } - return 0; -} - -static void virtio_gpu_evict_flags(struct ttm_buffer_object *bo, - struct ttm_placement *placement) -{ - static const struct ttm_place placements = { - .fpfn = 0, - .lpfn = 0, - .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM, - }; - - placement->placement = &placements; - placement->busy_placement = &placements; - placement->num_placement = 1; - placement->num_busy_placement = 1; -} - -static int virtio_gpu_verify_access(struct ttm_buffer_object *bo, - struct file *filp) -{ - return 0; -} - -static int virtio_gpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem) -{ - struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; - - mem->bus.addr = NULL; - mem->bus.offset = 0; - mem->bus.size = mem->num_pages << PAGE_SHIFT; 
- mem->bus.base = 0;
- mem->bus.is_iomem = false;
- if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
- return -EINVAL;
- switch (mem->mem_type) {
- case TTM_PL_SYSTEM:
- case TTM_PL_TT:
- /* system memory */
- return 0;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static void virtio_gpu_ttm_io_mem_free(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
-{
-}
-
-/*
- * TTM backend functions.
- */
-struct virtio_gpu_ttm_tt {
- struct ttm_dma_tt ttm;
- struct virtio_gpu_device *vgdev;
- u64 offset;
-};
-
-static int virtio_gpu_ttm_backend_bind(struct ttm_tt *ttm,
- struct ttm_mem_reg *bo_mem)
-{
- struct virtio_gpu_ttm_tt *gtt = (void *)ttm;
-
- gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
- if (!ttm->num_pages)
- WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
- ttm->num_pages, bo_mem, ttm);
-
- /* Not implemented */
- return 0;
-}
-
-static int virtio_gpu_ttm_backend_unbind(struct ttm_tt *ttm)
-{
- /* Not implemented */
- return 0;
-}
-
-static void virtio_gpu_ttm_backend_destroy(struct ttm_tt *ttm)
-{
- struct virtio_gpu_ttm_tt *gtt = (void *)ttm;
-
- ttm_dma_tt_fini(&gtt->ttm);
- kfree(gtt);
-}
-
-static struct ttm_backend_func virtio_gpu_backend_func = {
- .bind = &virtio_gpu_ttm_backend_bind,
- .unbind = &virtio_gpu_ttm_backend_unbind,
- .destroy = &virtio_gpu_ttm_backend_destroy,
-};
-
-static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo,
- uint32_t page_flags)
-{
- struct virtio_gpu_device *vgdev;
- struct virtio_gpu_ttm_tt *gtt;
-
- vgdev = virtio_gpu_get_vgdev(bo->bdev);
- gtt = kzalloc(sizeof(struct virtio_gpu_ttm_tt), GFP_KERNEL);
- if (gtt == NULL)
- return NULL;
- gtt->ttm.ttm.func = &virtio_gpu_backend_func;
- gtt->vgdev = vgdev;
- if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
- kfree(gtt);
- return NULL;
- }
- return &gtt->ttm.ttm;
-}
-
-static void virtio_gpu_move_null(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *new_mem)
-{
- struct ttm_mem_reg *old_mem = &bo->mem;
-
- BUG_ON(old_mem->mm_node != NULL);
- *old_mem = *new_mem;
- new_mem->mm_node = NULL;
-}
-
-static int virtio_gpu_bo_move(struct ttm_buffer_object *bo, bool evict,
- struct ttm_operation_ctx *ctx,
- struct ttm_mem_reg *new_mem)
-{
- int ret;
-
- ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
- if (ret)
- return ret;
-
- virtio_gpu_move_null(bo, new_mem);
- return 0;
-}
-
-static void virtio_gpu_bo_move_notify(struct ttm_buffer_object *tbo,
- bool evict,
- struct ttm_mem_reg *new_mem)
-{
- struct virtio_gpu_object *bo;
- struct virtio_gpu_device *vgdev;
-
- bo = container_of(tbo, struct virtio_gpu_object, tbo);
- vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
-
- if (!new_mem || (new_mem->placement & TTM_PL_FLAG_SYSTEM)) {
- if (bo->hw_res_handle)
- virtio_gpu_object_detach(vgdev, bo);
-
- } else if (new_mem->placement & TTM_PL_FLAG_TT) {
- if (bo->hw_res_handle) {
- virtio_gpu_object_attach(vgdev, bo, NULL);
- }
- }
-}
-
-static void virtio_gpu_bo_swap_notify(struct ttm_buffer_object *tbo)
-{
- struct virtio_gpu_object *bo;
- struct virtio_gpu_device *vgdev;
-
- bo = container_of(tbo, struct virtio_gpu_object, tbo);
- vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
-
- if (bo->pages)
- virtio_gpu_object_free_sg_table(bo);
-}
-
-static struct ttm_bo_driver virtio_gpu_bo_driver = {
- .ttm_tt_create = &virtio_gpu_ttm_tt_create,
- .invalidate_caches = &virtio_gpu_invalidate_caches,
- .init_mem_type = &virtio_gpu_init_mem_type,
- .eviction_valuable = ttm_bo_eviction_valuable,
- .evict_flags = 
&virtio_gpu_evict_flags, - .move = &virtio_gpu_bo_move, - .verify_access = &virtio_gpu_verify_access, - .io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve, - .io_mem_free = &virtio_gpu_ttm_io_mem_free, - .move_notify = &virtio_gpu_bo_move_notify, - .swap_notify = &virtio_gpu_bo_swap_notify, -}; - -int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev) -{ - int r; - - /* No others user of address space so set it to 0 */ - r = ttm_bo_device_init(&vgdev->mman.bdev, - &virtio_gpu_bo_driver, - vgdev->ddev->anon_inode->i_mapping, - DRM_FILE_PAGE_OFFSET, 0); - if (r) { - DRM_ERROR("failed initializing buffer object driver(%d).\n", r); - goto err_dev_init; - } - - r = ttm_bo_init_mm(&vgdev->mman.bdev, TTM_PL_TT, 0); - if (r) { - DRM_ERROR("Failed initializing GTT heap.\n"); - goto err_mm_init; - } - return 0; - -err_mm_init: - ttm_bo_device_release(&vgdev->mman.bdev); -err_dev_init: - return r; -} - -void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev) -{ - ttm_bo_device_release(&vgdev->mman.bdev); - DRM_INFO("virtio_gpu: ttm finalized\n"); -} diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index 6bc2008b0d0d..74ad3bc3ebe8 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -26,12 +26,14 @@ * OTHER DEALINGS IN THE SOFTWARE. */ -#include <drm/drmP.h> -#include "virtgpu_drv.h" +#include <linux/dma-mapping.h> #include <linux/virtio.h> #include <linux/virtio_config.h> #include <linux/virtio_ring.h> +#include "virtgpu_drv.h" +#include "virtgpu_trace.h" + #define MAX_INLINE_CMD_SIZE 96 #define MAX_INLINE_RESP_SIZE 24 #define VBUFFER_SIZE (sizeof(struct virtio_gpu_vbuffer) \ @@ -153,7 +155,7 @@ static void free_vbuf(struct virtio_gpu_device *vgdev, { if (vbuf->resp_size > MAX_INLINE_RESP_SIZE) kfree(vbuf->resp_buf); - kfree(vbuf->data_buf); + kvfree(vbuf->data_buf); kmem_cache_free(vgdev->vbufs, vbuf); } @@ -190,8 +192,11 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work) } while (!virtqueue_enable_cb(vgdev->ctrlq.vq)); spin_unlock(&vgdev->ctrlq.qlock); - list_for_each_entry_safe(entry, tmp, &reclaim_list, list) { + list_for_each_entry(entry, &reclaim_list, list) { resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf; + + trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp); + if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) { if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) { struct virtio_gpu_ctrl_hdr *cmd; @@ -214,14 +219,18 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work) } if (entry->resp_cb) entry->resp_cb(vgdev, entry); - - list_del(&entry->list); - free_vbuf(vgdev, entry); } wake_up(&vgdev->ctrlq.ack_queue); if (fence_id) virtio_gpu_fence_event_process(vgdev, fence_id); + + list_for_each_entry_safe(entry, tmp, &reclaim_list, list) { + if (entry->objs) + virtio_gpu_array_put_free_delayed(vgdev, entry->objs); + list_del(&entry->list); + free_vbuf(vgdev, entry); + } } void virtio_gpu_dequeue_cursor_func(struct work_struct *work) @@ -247,26 +256,67 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work) wake_up(&vgdev->cursorq.ack_queue); } -static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev, - struct virtio_gpu_vbuffer *vbuf) +/* Create sg_table from a vmalloc'd buffer. 
*/ +static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents) +{ + int ret, s, i; + struct sg_table *sgt; + struct scatterlist *sg; + struct page *pg; + + if (WARN_ON(!PAGE_ALIGNED(data))) + return NULL; + + sgt = kmalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) + return NULL; + + *sg_ents = DIV_ROUND_UP(size, PAGE_SIZE); + ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL); + if (ret) { + kfree(sgt); + return NULL; + } + + for_each_sg(sgt->sgl, sg, *sg_ents, i) { + pg = vmalloc_to_page(data); + if (!pg) { + sg_free_table(sgt); + kfree(sgt); + return NULL; + } + + s = min_t(int, PAGE_SIZE, size); + sg_set_page(sg, pg, s, 0); + + size -= s; + data += s; + } + + return sgt; +} + +static bool virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf, + struct scatterlist *vout) __releases(&vgdev->ctrlq.qlock) __acquires(&vgdev->ctrlq.qlock) { struct virtqueue *vq = vgdev->ctrlq.vq; - struct scatterlist *sgs[3], vcmd, vout, vresp; + struct scatterlist *sgs[3], vcmd, vresp; int outcnt = 0, incnt = 0; + bool notify = false; int ret; if (!vgdev->vqs_ready) - return -ENODEV; + return notify; sg_init_one(&vcmd, vbuf->buf, vbuf->size); sgs[outcnt + incnt] = &vcmd; outcnt++; - if (vbuf->data_size) { - sg_init_one(&vout, vbuf->data_buf, vbuf->data_size); - sgs[outcnt + incnt] = &vout; + if (vout) { + sgs[outcnt + incnt] = vout; outcnt++; } @@ -284,32 +334,38 @@ retry: spin_lock(&vgdev->ctrlq.qlock); goto retry; } else { - virtqueue_kick(vq); - } - - if (!ret) - ret = vq->num_free; - return ret; -} + trace_virtio_gpu_cmd_queue(vq, + (struct virtio_gpu_ctrl_hdr *)vbuf->buf); -static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev, - struct virtio_gpu_vbuffer *vbuf) -{ - int rc; - - spin_lock(&vgdev->ctrlq.qlock); - rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf); - spin_unlock(&vgdev->ctrlq.qlock); - return rc; + notify = virtqueue_kick_prepare(vq); + } + return notify; } -static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev, - struct virtio_gpu_vbuffer *vbuf, - struct virtio_gpu_ctrl_hdr *hdr, - struct virtio_gpu_fence *fence) +static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf, + struct virtio_gpu_ctrl_hdr *hdr, + struct virtio_gpu_fence *fence) { struct virtqueue *vq = vgdev->ctrlq.vq; - int rc; + struct scatterlist *vout = NULL, sg; + struct sg_table *sgt = NULL; + bool notify; + int outcnt = 0; + + if (vbuf->data_size) { + if (is_vmalloc_addr(vbuf->data_buf)) { + sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size, + &outcnt); + if (!sgt) + return; + vout = sgt->sgl; + } else { + sg_init_one(&sg, vbuf->data_buf, vbuf->data_size); + vout = &sg; + outcnt = 1; + } + } again: spin_lock(&vgdev->ctrlq.qlock); @@ -322,29 +378,47 @@ again: * to wait for free space, which can result in fence ids being * submitted out-of-order. 
*/ - if (vq->num_free < 3) { + if (vq->num_free < 2 + outcnt) { spin_unlock(&vgdev->ctrlq.qlock); wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3); goto again; } - if (fence) + if (hdr && fence) { virtio_gpu_fence_emit(vgdev, hdr, fence); - rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf); + if (vbuf->objs) { + virtio_gpu_array_add_fence(vbuf->objs, &fence->f); + virtio_gpu_array_unlock_resv(vbuf->objs); + } + } + notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf, vout); spin_unlock(&vgdev->ctrlq.qlock); - return rc; + if (notify) + virtqueue_notify(vgdev->ctrlq.vq); + + if (sgt) { + sg_free_table(sgt); + kfree(sgt); + } +} + +static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf) +{ + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL, NULL); } -static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev, - struct virtio_gpu_vbuffer *vbuf) +static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf) { struct virtqueue *vq = vgdev->cursorq.vq; struct scatterlist *sgs[1], ccmd; + bool notify; int ret; int outcnt; if (!vgdev->vqs_ready) - return -ENODEV; + return; sg_init_one(&ccmd, vbuf->buf, vbuf->size); sgs[0] = &ccmd; @@ -359,14 +433,16 @@ retry: spin_lock(&vgdev->cursorq.qlock); goto retry; } else { - virtqueue_kick(vq); + trace_virtio_gpu_cmd_queue(vq, + (struct virtio_gpu_ctrl_hdr *)vbuf->buf); + + notify = virtqueue_kick_prepare(vq); } spin_unlock(&vgdev->cursorq.qlock); - if (!ret) - ret = vq->num_free; - return ret; + if (notify) + virtqueue_notify(vq); } /* just create gem objects for userspace and long lived objects, @@ -376,23 +452,24 @@ retry: /* create a basic resource */ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev, struct virtio_gpu_object *bo, - uint32_t format, - uint32_t width, - uint32_t height) + struct virtio_gpu_object_params *params, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence *fence) { struct virtio_gpu_resource_create_2d *cmd_p; struct virtio_gpu_vbuffer *vbuf; cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); memset(cmd_p, 0, sizeof(*cmd_p)); + vbuf->objs = objs; cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D); cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); - cmd_p->format = cpu_to_le32(format); - cmd_p->width = cpu_to_le32(width); - cmd_p->height = cpu_to_le32(height); + cmd_p->format = cpu_to_le32(params->format); + cmd_p->width = cpu_to_le32(params->width); + cmd_p->height = cpu_to_le32(params->height); - virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence); bo->created = true; } @@ -471,12 +548,13 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev, } void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, - struct virtio_gpu_object *bo, uint64_t offset, - __le32 width, __le32 height, - __le32 x, __le32 y, + uint32_t width, uint32_t height, + uint32_t x, uint32_t y, + struct virtio_gpu_object_array *objs, struct virtio_gpu_fence *fence) { + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); struct virtio_gpu_transfer_to_host_2d *cmd_p; struct virtio_gpu_vbuffer *vbuf; bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev); @@ -488,14 +566,15 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); memset(cmd_p, 0, sizeof(*cmd_p)); + vbuf->objs = objs; cmd_p->hdr.type = 
cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D); cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); cmd_p->offset = cpu_to_le64(offset); - cmd_p->r.width = width; - cmd_p->r.height = height; - cmd_p->r.x = x; - cmd_p->r.y = y; + cmd_p->r.width = cpu_to_le32(width); + cmd_p->r.height = cpu_to_le32(height); + cmd_p->r.x = cpu_to_le32(x); + cmd_p->r.y = cpu_to_le32(y); virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence); } @@ -584,12 +663,14 @@ static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev, cache_ent->id == le32_to_cpu(cmd->capset_id)) { memcpy(cache_ent->caps_cache, resp->capset_data, cache_ent->size); + /* Copy must occur before is_valid is signalled. */ + smp_wmb(); atomic_set(&cache_ent->is_valid, 1); break; } } spin_unlock(&vgdev->display_info_lock); - wake_up(&vgdev->resp_wq); + wake_up_all(&vgdev->resp_wq); } static int virtio_get_edid_block(void *data, u8 *buf, @@ -620,11 +701,11 @@ static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev, output = vgdev->outputs + scanout; new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp); + drm_connector_update_edid_property(&output->conn, new_edid); spin_lock(&vgdev->display_info_lock); old_edid = output->edid; output->edid = new_edid; - drm_connector_update_edid_property(&output->conn, output->edid); spin_unlock(&vgdev->display_info_lock); kfree(old_edid); @@ -685,8 +766,11 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev, struct virtio_gpu_vbuffer *vbuf; int max_size; struct virtio_gpu_drv_cap_cache *cache_ent; + struct virtio_gpu_drv_cap_cache *search_ent; void *resp_buf; + *cache_p = NULL; + if (idx >= vgdev->num_capsets) return -EINVAL; @@ -717,9 +801,26 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev, atomic_set(&cache_ent->is_valid, 0); cache_ent->size = max_size; spin_lock(&vgdev->display_info_lock); - list_add_tail(&cache_ent->head, &vgdev->cap_cache); + /* Search while under lock in case it was added by another task. */ + list_for_each_entry(search_ent, &vgdev->cap_cache, head) { + if (search_ent->id == vgdev->capsets[idx].id && + search_ent->version == version) { + *cache_p = search_ent; + break; + } + } + if (!*cache_p) + list_add_tail(&cache_ent->head, &vgdev->cap_cache); spin_unlock(&vgdev->display_info_lock); + if (*cache_p) { + /* Entry was found, so free everything that was just created. 
*/ + kfree(resp_buf); + kfree(cache_ent->caps_cache); + kfree(cache_ent); + return 0; + } + cmd_p = virtio_gpu_alloc_cmd_resp (vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset) + max_size, @@ -794,63 +895,81 @@ void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev, void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev, uint32_t ctx_id, - uint32_t resource_id) + struct virtio_gpu_object_array *objs) { + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); struct virtio_gpu_ctx_resource *cmd_p; struct virtio_gpu_vbuffer *vbuf; cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); memset(cmd_p, 0, sizeof(*cmd_p)); + vbuf->objs = objs; cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE); cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id); - cmd_p->resource_id = cpu_to_le32(resource_id); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); } void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev, uint32_t ctx_id, - uint32_t resource_id) + struct virtio_gpu_object_array *objs) { + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); struct virtio_gpu_ctx_resource *cmd_p; struct virtio_gpu_vbuffer *vbuf; cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); memset(cmd_p, 0, sizeof(*cmd_p)); + vbuf->objs = objs; cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE); cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id); - cmd_p->resource_id = cpu_to_le32(resource_id); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); } void virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev, struct virtio_gpu_object *bo, - struct virtio_gpu_resource_create_3d *rc_3d) + struct virtio_gpu_object_params *params, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence *fence) { struct virtio_gpu_resource_create_3d *cmd_p; struct virtio_gpu_vbuffer *vbuf; cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); memset(cmd_p, 0, sizeof(*cmd_p)); + vbuf->objs = objs; - *cmd_p = *rc_3d; cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D); - cmd_p->hdr.flags = 0; + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); + cmd_p->format = cpu_to_le32(params->format); + cmd_p->width = cpu_to_le32(params->width); + cmd_p->height = cpu_to_le32(params->height); + + cmd_p->target = cpu_to_le32(params->target); + cmd_p->bind = cpu_to_le32(params->bind); + cmd_p->depth = cpu_to_le32(params->depth); + cmd_p->array_size = cpu_to_le32(params->array_size); + cmd_p->last_level = cpu_to_le32(params->last_level); + cmd_p->nr_samples = cpu_to_le32(params->nr_samples); + cmd_p->flags = cpu_to_le32(params->flags); - virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence); bo->created = true; } void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev, - struct virtio_gpu_object *bo, uint32_t ctx_id, uint64_t offset, uint32_t level, struct virtio_gpu_box *box, + struct virtio_gpu_object_array *objs, struct virtio_gpu_fence *fence) { + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); struct virtio_gpu_transfer_host_3d *cmd_p; struct virtio_gpu_vbuffer *vbuf; bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev); @@ -863,6 +982,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev, cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); memset(cmd_p, 
0, sizeof(*cmd_p));
+ vbuf->objs = objs;
+
 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
 cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
 cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
@@ -874,20 +995,24 @@
 }
 
 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
- uint32_t resource_id, uint32_t ctx_id,
+ uint32_t ctx_id,
 uint64_t offset, uint32_t level,
 struct virtio_gpu_box *box,
+ struct virtio_gpu_object_array *objs,
 struct virtio_gpu_fence *fence)
 {
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
 struct virtio_gpu_transfer_host_3d *cmd_p;
 struct virtio_gpu_vbuffer *vbuf;
 
 cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
+
 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
 cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
- cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 cmd_p->box = *box;
 cmd_p->offset = cpu_to_le64(offset);
 cmd_p->level = cpu_to_le32(level);
@@ -897,7 +1022,9 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 void *data, uint32_t data_size,
- uint32_t ctx_id, struct virtio_gpu_fence *fence)
+ uint32_t ctx_id,
+ struct virtio_gpu_object_array *objs,
+ struct virtio_gpu_fence *fence)
 {
 struct virtio_gpu_cmd_submit *cmd_p;
 struct virtio_gpu_vbuffer *vbuf;
@@ -907,6 +1034,7 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 
 vbuf->data_buf = data;
 vbuf->data_size = data_size;
+ vbuf->objs = objs;
 
 cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
 cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
@@ -922,17 +1050,21 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
 struct virtio_gpu_mem_entry *ents;
 struct scatterlist *sg;
- int si, nents;
+ int si, nents, ret;
 
- if (!obj->created)
- return 0;
+ if (WARN_ON_ONCE(!obj->created))
+ return -EINVAL;
+ if (WARN_ON_ONCE(obj->pages))
+ return -EINVAL;
 
- if (!obj->pages) {
- int ret;
+ ret = drm_gem_shmem_pin(&obj->base.base);
+ if (ret < 0)
+ return -EINVAL;
 
- ret = virtio_gpu_object_get_sg_table(vgdev, obj);
- if (ret)
- return ret;
+ obj->pages = drm_gem_shmem_get_sg_table(&obj->base.base);
+ if (obj->pages == NULL) {
+ drm_gem_shmem_unpin(&obj->base.base);
+ return -EINVAL;
 }
 
 if (use_dma_api) {
@@ -971,6 +1103,9 @@ void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
 {
 bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
 
+ if (WARN_ON_ONCE(!obj->pages))
+ return;
+
 if (use_dma_api && obj->mapped) {
 struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
 /* detach backing and wait for the host process it ... */
@@ -986,6 +1121,11 @@ void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
 } else {
 virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
 }
+
+ sg_free_table(obj->pages);
+ obj->pages = NULL;
+
+ drm_gem_shmem_unpin(&obj->base.base);
 }
 
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
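
The vmalloc_to_sgt() helper added to virtgpu_vq.c above follows a standard kernel pattern: a vmalloc'd buffer (here used for large execbuffer command streams, kvfree'd in free_vbuf) is only virtually contiguous, so it cannot be described with a single sg_init_one() entry; instead each backing page is looked up with vmalloc_to_page() and recorded in a freshly allocated sg_table. A minimal sketch of the same pattern, separate from the patch and with illustrative example_* naming:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Sketch: build an sg_table describing a page-aligned vmalloc'd buffer. */
static struct sg_table *example_vmalloc_to_sgt(char *data, size_t size)
{
	unsigned int nents = DIV_ROUND_UP(size, PAGE_SIZE);
	struct sg_table *sgt;
	struct scatterlist *sg;
	int i;

	if (WARN_ON(!PAGE_ALIGNED(data)))
		return NULL;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (sg_alloc_table(sgt, nents, GFP_KERNEL)) {
		kfree(sgt);
		return NULL;
	}

	for_each_sg(sgt->sgl, sg, nents, i) {
		/* vmalloc memory is physically scattered: resolve each page. */
		struct page *pg = vmalloc_to_page(data);
		size_t len = min_t(size_t, PAGE_SIZE, size);

		if (!pg) {
			sg_free_table(sgt);
			kfree(sgt);
			return NULL;
		}

		sg_set_page(sg, pg, len, 0);
		data += len;
		size -= len;
	}

	return sgt;
}

In the driver, the resulting table is handed to virtqueue_add_sgs() and can be freed as soon as the buffer is queued, because the virtqueue copies the descriptors into the vring; the data buffer itself stays alive until the host response is dequeued. The same queueing path also replaces virtqueue_kick() under the queue lock with virtqueue_kick_prepare() under the lock plus virtqueue_notify() after dropping it, so the potentially expensive host notification no longer serializes other submitters.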