From 67b7836d4458790f1261e31fe0ce3250989784f0 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Mon, 21 Mar 2022 03:35:41 +0300 Subject: drm/shmem-helper: Switch to reservation lock Replace all drm-shmem locks with a GEM reservation lock. This makes locks consistent with dma-buf locking convention where importers are responsible for holding reservation lock for all operations performed over dma-bufs, preventing deadlock between dma-buf importers and exporters. Suggested-by: Daniel Vetter Acked-by: Thomas Zimmermann Signed-off-by: Dmitry Osipenko Link: https://lore.kernel.org/all/20230108210445.3948344-8-dmitry.osipenko@collabora.com/ --- drivers/gpu/drm/lima/lima_gem.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/lima') diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c index 0f1ca0b0db49..5008f0c2428f 100644 --- a/drivers/gpu/drm/lima/lima_gem.c +++ b/drivers/gpu/drm/lima/lima_gem.c @@ -34,7 +34,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm) new_size = min(new_size, bo->base.base.size); - mutex_lock(&bo->base.pages_lock); + dma_resv_lock(bo->base.base.resv, NULL); if (bo->base.pages) { pages = bo->base.pages; @@ -42,7 +42,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm) pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT, sizeof(*pages), GFP_KERNEL | __GFP_ZERO); if (!pages) { - mutex_unlock(&bo->base.pages_lock); + dma_resv_unlock(bo->base.base.resv); return -ENOMEM; } @@ -56,13 +56,13 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm) struct page *page = shmem_read_mapping_page(mapping, i); if (IS_ERR(page)) { - mutex_unlock(&bo->base.pages_lock); + dma_resv_unlock(bo->base.base.resv); return PTR_ERR(page); } pages[i] = page; } - mutex_unlock(&bo->base.pages_lock); + dma_resv_unlock(bo->base.base.resv); ret = sg_alloc_table_from_pages(&sgt, pages, i, 0, new_size, GFP_KERNEL); -- cgit From e0106ac97886b6bc36c480de72562d3e70b3f8b1 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Tue, 28 Feb 2023 16:26:12 +0100 Subject: Revert "drm/shmem-helper: Switch to reservation lock" This reverts commit 67b7836d4458790f1261e31fe0ce3250989784f0. The locking appears incomplete. A caller of SHMEM helper's pin function never acquires the dma-buf reservation lock. 
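For reference, the convention introduced by the patch above expects every caller (importer) to take the GEM object's reservation lock itself around the shmem helper calls. A minimal, hypothetical caller sketch — the function name is made up and it is not taken from any in-tree driver — looks like this:

	static int example_pin_bo(struct drm_gem_shmem_object *shmem)
	{
		int ret;

		/* Importers take the GEM object's reservation lock themselves... */
		ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
		if (ret)
			return ret;

		/* ...so drm_gem_shmem_pin() can assert that the lock is held. */
		ret = drm_gem_shmem_pin(shmem);

		dma_resv_unlock(shmem->base.resv);
		return ret;
	}

Since no caller of the pin path actually took the lock this way, the dma_resv_assert_held() check that the patch added to drm_gem_shmem_pin() trips at runtime.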
So we get WARNING: CPU: 3 PID: 967 at drivers/gpu/drm/drm_gem_shmem_helper.c:243 drm_gem_shmem_pin+0x42/0x90 [drm_shmem_helper] Signed-off-by: Thomas Zimmermann Acked-by: Dmitry Osipenko Link: https://patchwork.freedesktop.org/patch/msgid/20230228152612.19971-1-tzimmermann@suse.de --- drivers/gpu/drm/drm_gem_shmem_helper.c | 185 +++++++++++++++-------- drivers/gpu/drm/lima/lima_gem.c | 8 +- drivers/gpu/drm/panfrost/panfrost_drv.c | 7 +- drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c | 6 +- drivers/gpu/drm/panfrost/panfrost_mmu.c | 19 ++- include/drm/drm_gem_shmem_helper.h | 14 +- 6 files changed, 148 insertions(+), 91 deletions(-) (limited to 'drivers/gpu/drm/lima') diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 3d43e5961573..f75e50273d7a 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -88,6 +88,8 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private) if (ret) goto err_release; + mutex_init(&shmem->pages_lock); + mutex_init(&shmem->vmap_lock); INIT_LIST_HEAD(&shmem->madv_list); if (!private) { @@ -139,13 +141,11 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; + drm_WARN_ON(obj->dev, shmem->vmap_use_count); + if (obj->import_attach) { drm_prime_gem_destroy(obj, shmem->sgt); } else { - dma_resv_lock(shmem->base.resv, NULL); - - drm_WARN_ON(obj->dev, shmem->vmap_use_count); - if (shmem->sgt) { dma_unmap_sgtable(obj->dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0); @@ -154,18 +154,18 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) } if (shmem->pages) drm_gem_shmem_put_pages(shmem); - - drm_WARN_ON(obj->dev, shmem->pages_use_count); - - dma_resv_unlock(shmem->base.resv); } + drm_WARN_ON(obj->dev, shmem->pages_use_count); + drm_gem_object_release(obj); + mutex_destroy(&shmem->pages_lock); + mutex_destroy(&shmem->vmap_lock); kfree(shmem); } EXPORT_SYMBOL_GPL(drm_gem_shmem_free); -static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem) +static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; struct page **pages; @@ -197,16 +197,35 @@ static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem) } /* - * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object + * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object * @shmem: shmem GEM object * - * This function decreases the use count and puts the backing pages when use drops to zero. + * This function makes sure that backing pages exists for the shmem GEM object + * and increases the use count. + * + * Returns: + * 0 on success or a negative error code on failure. 
*/ -void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem) +int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; + int ret; - dma_resv_assert_held(shmem->base.resv); + drm_WARN_ON(obj->dev, obj->import_attach); + + ret = mutex_lock_interruptible(&shmem->pages_lock); + if (ret) + return ret; + ret = drm_gem_shmem_get_pages_locked(shmem); + mutex_unlock(&shmem->pages_lock); + + return ret; +} +EXPORT_SYMBOL(drm_gem_shmem_get_pages); + +static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem) +{ + struct drm_gem_object *obj = &shmem->base; if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count)) return; @@ -224,6 +243,19 @@ void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem) shmem->pages_mark_accessed_on_put); shmem->pages = NULL; } + +/* + * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object + * @shmem: shmem GEM object + * + * This function decreases the use count and puts the backing pages when use drops to zero. + */ +void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem) +{ + mutex_lock(&shmem->pages_lock); + drm_gem_shmem_put_pages_locked(shmem); + mutex_unlock(&shmem->pages_lock); +} EXPORT_SYMBOL(drm_gem_shmem_put_pages); /** @@ -240,8 +272,6 @@ int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; - dma_resv_assert_held(shmem->base.resv); - drm_WARN_ON(obj->dev, obj->import_attach); return drm_gem_shmem_get_pages(shmem); @@ -259,31 +289,14 @@ void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; - dma_resv_assert_held(shmem->base.resv); - drm_WARN_ON(obj->dev, obj->import_attach); drm_gem_shmem_put_pages(shmem); } EXPORT_SYMBOL(drm_gem_shmem_unpin); -/* - * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object - * @shmem: shmem GEM object - * @map: Returns the kernel virtual address of the SHMEM GEM object's backing - * store. - * - * This function makes sure that a contiguous kernel virtual address mapping - * exists for the buffer backing the shmem GEM object. It hides the differences - * between dma-buf imported and natively allocated objects. - * - * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap(). - * - * Returns: - * 0 on success or a negative error code on failure. - */ -int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, - struct iosys_map *map) +static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, + struct iosys_map *map) { struct drm_gem_object *obj = &shmem->base; int ret = 0; @@ -299,8 +312,6 @@ int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, } else { pgprot_t prot = PAGE_KERNEL; - dma_resv_assert_held(shmem->base.resv); - if (shmem->vmap_use_count++ > 0) { iosys_map_set_vaddr(map, shmem->vaddr); return 0; @@ -335,30 +346,45 @@ err_zero_use: return ret; } -EXPORT_SYMBOL(drm_gem_shmem_vmap); /* - * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object + * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object * @shmem: shmem GEM object - * @map: Kernel virtual address where the SHMEM GEM object was mapped + * @map: Returns the kernel virtual address of the SHMEM GEM object's backing + * store. * - * This function cleans up a kernel virtual address mapping acquired by - * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to - * zero. 
+ * This function makes sure that a contiguous kernel virtual address mapping + * exists for the buffer backing the shmem GEM object. It hides the differences + * between dma-buf imported and natively allocated objects. * - * This function hides the differences between dma-buf imported and natively - * allocated objects. + * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap(). + * + * Returns: + * 0 on success or a negative error code on failure. */ -void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, - struct iosys_map *map) +int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, + struct iosys_map *map) +{ + int ret; + + ret = mutex_lock_interruptible(&shmem->vmap_lock); + if (ret) + return ret; + ret = drm_gem_shmem_vmap_locked(shmem, map); + mutex_unlock(&shmem->vmap_lock); + + return ret; +} +EXPORT_SYMBOL(drm_gem_shmem_vmap); + +static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem, + struct iosys_map *map) { struct drm_gem_object *obj = &shmem->base; if (obj->import_attach) { dma_buf_vunmap(obj->import_attach->dmabuf, map); } else { - dma_resv_assert_held(shmem->base.resv); - if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count)) return; @@ -371,6 +397,26 @@ void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, shmem->vaddr = NULL; } + +/* + * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object + * @shmem: shmem GEM object + * @map: Kernel virtual address where the SHMEM GEM object was mapped + * + * This function cleans up a kernel virtual address mapping acquired by + * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to + * zero. + * + * This function hides the differences between dma-buf imported and natively + * allocated objects. 
+ */ +void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, + struct iosys_map *map) +{ + mutex_lock(&shmem->vmap_lock); + drm_gem_shmem_vunmap_locked(shmem, map); + mutex_unlock(&shmem->vmap_lock); +} EXPORT_SYMBOL(drm_gem_shmem_vunmap); static int @@ -401,24 +447,24 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv, */ int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv) { - dma_resv_assert_held(shmem->base.resv); + mutex_lock(&shmem->pages_lock); if (shmem->madv >= 0) shmem->madv = madv; madv = shmem->madv; + mutex_unlock(&shmem->pages_lock); + return (madv >= 0); } EXPORT_SYMBOL(drm_gem_shmem_madvise); -void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem) +void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; struct drm_device *dev = obj->dev; - dma_resv_assert_held(shmem->base.resv); - drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem)); dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0); @@ -426,7 +472,7 @@ void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem) kfree(shmem->sgt); shmem->sgt = NULL; - drm_gem_shmem_put_pages(shmem); + drm_gem_shmem_put_pages_locked(shmem); shmem->madv = -1; @@ -442,6 +488,17 @@ void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem) invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1); } +EXPORT_SYMBOL(drm_gem_shmem_purge_locked); + +bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem) +{ + if (!mutex_trylock(&shmem->pages_lock)) + return false; + drm_gem_shmem_purge_locked(shmem); + mutex_unlock(&shmem->pages_lock); + + return true; +} EXPORT_SYMBOL(drm_gem_shmem_purge); /** @@ -494,7 +551,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf) /* We don't use vmf->pgoff since that has the fake offset */ page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; - dma_resv_lock(shmem->base.resv, NULL); + mutex_lock(&shmem->pages_lock); if (page_offset >= num_pages || drm_WARN_ON_ONCE(obj->dev, !shmem->pages) || @@ -506,7 +563,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf) ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page)); } - dma_resv_unlock(shmem->base.resv); + mutex_unlock(&shmem->pages_lock); return ret; } @@ -518,7 +575,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma) drm_WARN_ON(obj->dev, obj->import_attach); - dma_resv_lock(shmem->base.resv, NULL); + mutex_lock(&shmem->pages_lock); /* * We should have already pinned the pages when the buffer was first @@ -528,7 +585,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma) if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count)) shmem->pages_use_count++; - dma_resv_unlock(shmem->base.resv); + mutex_unlock(&shmem->pages_lock); drm_gem_vm_open(vma); } @@ -538,10 +595,7 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma) struct drm_gem_object *obj = vma->vm_private_data; struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - dma_resv_lock(shmem->base.resv, NULL); drm_gem_shmem_put_pages(shmem); - dma_resv_unlock(shmem->base.resv); - drm_gem_vm_close(vma); } @@ -576,10 +630,7 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct return dma_buf_mmap(obj->dma_buf, vma, 0); } - dma_resv_lock(shmem->base.resv, NULL); ret = drm_gem_shmem_get_pages(shmem); - dma_resv_unlock(shmem->base.resv); - if (ret) return ret; @@ -645,7 +696,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_ 
drm_WARN_ON(obj->dev, obj->import_attach); - ret = drm_gem_shmem_get_pages(shmem); + ret = drm_gem_shmem_get_pages_locked(shmem); if (ret) return ERR_PTR(ret); @@ -667,7 +718,7 @@ err_free_sgt: sg_free_table(sgt); kfree(sgt); err_put_pages: - drm_gem_shmem_put_pages(shmem); + drm_gem_shmem_put_pages_locked(shmem); return ERR_PTR(ret); } @@ -692,11 +743,11 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem) int ret; struct sg_table *sgt; - ret = dma_resv_lock_interruptible(shmem->base.resv, NULL); + ret = mutex_lock_interruptible(&shmem->pages_lock); if (ret) return ERR_PTR(ret); sgt = drm_gem_shmem_get_pages_sgt_locked(shmem); - dma_resv_unlock(shmem->base.resv); + mutex_unlock(&shmem->pages_lock); return sgt; } diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c index 5008f0c2428f..0f1ca0b0db49 100644 --- a/drivers/gpu/drm/lima/lima_gem.c +++ b/drivers/gpu/drm/lima/lima_gem.c @@ -34,7 +34,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm) new_size = min(new_size, bo->base.base.size); - dma_resv_lock(bo->base.base.resv, NULL); + mutex_lock(&bo->base.pages_lock); if (bo->base.pages) { pages = bo->base.pages; @@ -42,7 +42,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm) pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT, sizeof(*pages), GFP_KERNEL | __GFP_ZERO); if (!pages) { - dma_resv_unlock(bo->base.base.resv); + mutex_unlock(&bo->base.pages_lock); return -ENOMEM; } @@ -56,13 +56,13 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm) struct page *page = shmem_read_mapping_page(mapping, i); if (IS_ERR(page)) { - dma_resv_unlock(bo->base.base.resv); + mutex_unlock(&bo->base.pages_lock); return PTR_ERR(page); } pages[i] = page; } - dma_resv_unlock(bo->base.base.resv); + mutex_unlock(&bo->base.pages_lock); ret = sg_alloc_table_from_pages(&sgt, pages, i, 0, new_size, GFP_KERNEL); diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index aa292e4a86eb..f49096f53141 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -407,10 +407,6 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data, bo = to_panfrost_bo(gem_obj); - ret = dma_resv_lock_interruptible(bo->base.base.resv, NULL); - if (ret) - goto out_put_object; - mutex_lock(&pfdev->shrinker_lock); mutex_lock(&bo->mappings.lock); if (args->madv == PANFROST_MADV_DONTNEED) { @@ -448,8 +444,7 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data, out_unlock_mappings: mutex_unlock(&bo->mappings.lock); mutex_unlock(&pfdev->shrinker_lock); - dma_resv_unlock(bo->base.base.resv); -out_put_object: + drm_gem_object_put(gem_obj); return ret; } diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c index 6a71a2555f85..bf0170782f25 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c @@ -48,14 +48,14 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj) if (!mutex_trylock(&bo->mappings.lock)) return false; - if (!dma_resv_trylock(shmem->base.resv)) + if (!mutex_trylock(&shmem->pages_lock)) goto unlock_mappings; panfrost_gem_teardown_mappings_locked(bo); - drm_gem_shmem_purge(&bo->base); + drm_gem_shmem_purge_locked(&bo->base); ret = true; - dma_resv_unlock(shmem->base.resv); + mutex_unlock(&shmem->pages_lock); unlock_mappings: mutex_unlock(&bo->mappings.lock); diff --git 
a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index 94421fa255d2..4e83a1891f3e 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -443,7 +443,6 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, struct panfrost_gem_mapping *bomapping; struct panfrost_gem_object *bo; struct address_space *mapping; - struct drm_gem_object *obj; pgoff_t page_offset; struct sg_table *sgt; struct page **pages; @@ -466,16 +465,15 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, page_offset = addr >> PAGE_SHIFT; page_offset -= bomapping->mmnode.start; - obj = &bo->base.base; - - dma_resv_lock(obj->resv, NULL); + mutex_lock(&bo->base.pages_lock); if (!bo->base.pages) { bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M, sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO); if (!bo->sgts) { + mutex_unlock(&bo->base.pages_lock); ret = -ENOMEM; - goto err_unlock; + goto err_bo; } pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT, @@ -483,8 +481,9 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, if (!pages) { kvfree(bo->sgts); bo->sgts = NULL; + mutex_unlock(&bo->base.pages_lock); ret = -ENOMEM; - goto err_unlock; + goto err_bo; } bo->base.pages = pages; bo->base.pages_use_count = 1; @@ -492,6 +491,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, pages = bo->base.pages; if (pages[page_offset]) { /* Pages are already mapped, bail out. */ + mutex_unlock(&bo->base.pages_lock); goto out; } } @@ -502,11 +502,14 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) { pages[i] = shmem_read_mapping_page(mapping, i); if (IS_ERR(pages[i])) { + mutex_unlock(&bo->base.pages_lock); ret = PTR_ERR(pages[i]); goto err_pages; } } + mutex_unlock(&bo->base.pages_lock); + sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)]; ret = sg_alloc_table_from_pages(sgt, pages + page_offset, NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL); @@ -525,8 +528,6 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr); out: - dma_resv_unlock(obj->resv); - panfrost_gem_mapping_put(bomapping); return 0; @@ -535,8 +536,6 @@ err_map: sg_free_table(sgt); err_pages: drm_gem_shmem_put_pages(&bo->base); -err_unlock: - dma_resv_unlock(obj->resv); err_bo: panfrost_gem_mapping_put(bomapping); return ret; diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h index 20ddcd799df9..5994fed5e327 100644 --- a/include/drm/drm_gem_shmem_helper.h +++ b/include/drm/drm_gem_shmem_helper.h @@ -26,6 +26,11 @@ struct drm_gem_shmem_object { */ struct drm_gem_object base; + /** + * @pages_lock: Protects the page table and use count + */ + struct mutex pages_lock; + /** * @pages: Page table */ @@ -60,6 +65,11 @@ struct drm_gem_shmem_object { */ struct sg_table *sgt; + /** + * @vmap_lock: Protects the vmap address and use count + */ + struct mutex vmap_lock; + /** * @vaddr: Kernel virtual address of the backing memory */ @@ -99,6 +109,7 @@ struct drm_gem_shmem_object { struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size); void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem); +int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem); void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem); int drm_gem_shmem_pin(struct 
drm_gem_shmem_object *shmem); void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem); @@ -117,7 +128,8 @@ static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem !shmem->base.dma_buf && !shmem->base.import_attach; } -void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem); +void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem); +bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem); struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem); struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem); -- cgit From e0dfefa08d94bf548fbbbbae22e4720a652a3102 Mon Sep 17 00:00:00 2001 From: Maíra Canal Date: Fri, 24 Feb 2023 18:41:32 -0300 Subject: drm/lima: Use drm_sched_job_add_syncobj_dependency() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As lima_gem_add_deps() performs the same steps as drm_sched_job_add_syncobj_dependency(), replace the open-coded implementation in Lima in order to simply use the DRM function. Signed-off-by: Maíra Canal Reviewed-by: Qiang Yu Signed-off-by: Maíra Canal Link: https://patchwork.freedesktop.org/patch/msgid/20230224214133.411966-1-mcanal@igalia.com --- drivers/gpu/drm/lima/lima_gem.c | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm/lima') diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c index 0f1ca0b0db49..10252dc11a22 100644 --- a/drivers/gpu/drm/lima/lima_gem.c +++ b/drivers/gpu/drm/lima/lima_gem.c @@ -277,21 +277,13 @@ static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit) int i, err; for (i = 0; i < ARRAY_SIZE(submit->in_sync); i++) { - struct dma_fence *fence = NULL; - if (!submit->in_sync[i]) continue; - err = drm_syncobj_find_fence(file, submit->in_sync[i], - 0, 0, &fence); + err = drm_sched_job_add_syncobj_dependency(&submit->task->base, file, + submit->in_sync[i], 0); if (err) return err; - - err = drm_sched_job_add_dependency(&submit->task->base, fence); - if (err) { - dma_fence_put(fence); - return err; - } } return 0; -- cgit From c5647cae2704e58d1c4e5fedbf63f11bca6376c9 Mon Sep 17 00:00:00 2001 From: Harshit Mogalapalli Date: Mon, 13 Mar 2023 22:27:11 -0700 Subject: drm/lima/lima_drv: Add missing unwind goto in lima_pdev_probe() Smatch reports: drivers/gpu/drm/lima/lima_drv.c:396 lima_pdev_probe() warn: missing unwind goto? Store return value in err and goto 'err_out0' which has lima_sched_slab_fini() before returning. Fixes: a1d2a6339961 ("drm/lima: driver for ARM Mali4xx GPUs") Signed-off-by: Harshit Mogalapalli Signed-off-by: Qiang Yu Link: https://patchwork.freedesktop.org/patch/msgid/20230314052711.4061652-1-harshit.m.mogalapalli@oracle.com --- drivers/gpu/drm/lima/lima_drv.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/lima') diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c index 7b8d7178d09a..39cab4a55f57 100644 --- a/drivers/gpu/drm/lima/lima_drv.c +++ b/drivers/gpu/drm/lima/lima_drv.c @@ -392,8 +392,10 @@ static int lima_pdev_probe(struct platform_device *pdev) /* Allocate and initialize the DRM device. 
*/ ddev = drm_dev_alloc(&lima_drm_driver, &pdev->dev); - if (IS_ERR(ddev)) - return PTR_ERR(ddev); + if (IS_ERR(ddev)) { + err = PTR_ERR(ddev); + goto err_out0; + } ddev->dev_private = ldev; ldev->ddev = ddev; -- cgit From bccafec957a5c4b22ac29e53a39e82d0a0008348 Mon Sep 17 00:00:00 2001 From: Erico Nunes Date: Mon, 13 Mar 2023 00:30:50 +0100 Subject: drm/lima: add usage counting method to ctx_mgr lima maintains a context manager per drm_file, similar to amdgpu. In order to account for the complete usage per drm_file, all of the associated contexts need to be considered. Previously released contexts also need to be accounted for but their drm_sched_entity info is gone once they get released, so account for it in the ctx_mgr. Signed-off-by: Erico Nunes Signed-off-by: Qiang Yu Link: https://patchwork.freedesktop.org/patch/msgid/20230312233052.21095-2-nunes.erico@gmail.com --- drivers/gpu/drm/lima/lima_ctx.c | 30 +++++++++++++++++++++++++++++- drivers/gpu/drm/lima/lima_ctx.h | 3 +++ 2 files changed, 32 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/lima') diff --git a/drivers/gpu/drm/lima/lima_ctx.c b/drivers/gpu/drm/lima/lima_ctx.c index 891d5cd5019a..e008e586fad0 100644 --- a/drivers/gpu/drm/lima/lima_ctx.c +++ b/drivers/gpu/drm/lima/lima_ctx.c @@ -15,6 +15,7 @@ int lima_ctx_create(struct lima_device *dev, struct lima_ctx_mgr *mgr, u32 *id) if (!ctx) return -ENOMEM; ctx->dev = dev; + ctx->mgr = mgr; kref_init(&ctx->refcnt); for (i = 0; i < lima_pipe_num; i++) { @@ -42,10 +43,17 @@ err_out0: static void lima_ctx_do_release(struct kref *ref) { struct lima_ctx *ctx = container_of(ref, struct lima_ctx, refcnt); + struct lima_ctx_mgr *mgr = ctx->mgr; int i; - for (i = 0; i < lima_pipe_num; i++) + for (i = 0; i < lima_pipe_num; i++) { + struct lima_sched_context *context = &ctx->context[i]; + struct drm_sched_entity *entity = &context->base; + + mgr->elapsed_ns[i] += entity->elapsed_ns; + lima_sched_context_fini(ctx->dev->pipe + i, ctx->context + i); + } kfree(ctx); } @@ -99,3 +107,23 @@ void lima_ctx_mgr_fini(struct lima_ctx_mgr *mgr) xa_destroy(&mgr->handles); mutex_destroy(&mgr->lock); } + +void lima_ctx_mgr_usage(struct lima_ctx_mgr *mgr, u64 usage[lima_pipe_num]) +{ + struct lima_ctx *ctx; + unsigned long id; + + for (int i = 0; i < lima_pipe_num; i++) + usage[i] = mgr->elapsed_ns[i]; + + mutex_lock(&mgr->lock); + xa_for_each(&mgr->handles, id, ctx) { + for (int i = 0; i < lima_pipe_num; i++) { + struct lima_sched_context *context = &ctx->context[i]; + struct drm_sched_entity *entity = &context->base; + + usage[i] += entity->elapsed_ns; + } + } + mutex_unlock(&mgr->lock); +} diff --git a/drivers/gpu/drm/lima/lima_ctx.h b/drivers/gpu/drm/lima/lima_ctx.h index 74e2be09090f..6068863880eb 100644 --- a/drivers/gpu/drm/lima/lima_ctx.h +++ b/drivers/gpu/drm/lima/lima_ctx.h @@ -12,6 +12,7 @@ struct lima_ctx { struct kref refcnt; struct lima_device *dev; + struct lima_ctx_mgr *mgr; struct lima_sched_context context[lima_pipe_num]; atomic_t guilty; @@ -23,6 +24,7 @@ struct lima_ctx { struct lima_ctx_mgr { struct mutex lock; struct xarray handles; + u64 elapsed_ns[lima_pipe_num]; }; int lima_ctx_create(struct lima_device *dev, struct lima_ctx_mgr *mgr, u32 *id); @@ -31,5 +33,6 @@ struct lima_ctx *lima_ctx_get(struct lima_ctx_mgr *mgr, u32 id); void lima_ctx_put(struct lima_ctx *ctx); void lima_ctx_mgr_init(struct lima_ctx_mgr *mgr); void lima_ctx_mgr_fini(struct lima_ctx_mgr *mgr); +void lima_ctx_mgr_usage(struct lima_ctx_mgr *mgr, u64 usage[lima_pipe_num]); #endif -- cgit From 
87767de835edf527b879a363d518c33da68adb81 Mon Sep 17 00:00:00 2001 From: Erico Nunes Date: Mon, 13 Mar 2023 00:30:51 +0100 Subject: drm/lima: allocate unique id per drm_file To track if fds are pointing to the same execution context and export the expected information to fdinfo, similar to what is done in other drivers. Signed-off-by: Erico Nunes Signed-off-by: Qiang Yu Link: https://patchwork.freedesktop.org/patch/msgid/20230312233052.21095-3-nunes.erico@gmail.com --- drivers/gpu/drm/lima/lima_device.h | 3 +++ drivers/gpu/drm/lima/lima_drv.c | 12 ++++++++++++ drivers/gpu/drm/lima/lima_drv.h | 1 + 3 files changed, 16 insertions(+) (limited to 'drivers/gpu/drm/lima') diff --git a/drivers/gpu/drm/lima/lima_device.h b/drivers/gpu/drm/lima/lima_device.h index 41b9d7b4bcc7..71b2db60d161 100644 --- a/drivers/gpu/drm/lima/lima_device.h +++ b/drivers/gpu/drm/lima/lima_device.h @@ -106,6 +106,9 @@ struct lima_device { struct lima_dump_head dump; struct list_head error_task_list; struct mutex error_task_list_lock; + + struct xarray active_contexts; + u32 next_context_id; }; static inline struct lima_device * diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c index 39cab4a55f57..f456a471216b 100644 --- a/drivers/gpu/drm/lima/lima_drv.c +++ b/drivers/gpu/drm/lima/lima_drv.c @@ -218,6 +218,11 @@ static int lima_drm_driver_open(struct drm_device *dev, struct drm_file *file) if (!priv) return -ENOMEM; + err = xa_alloc_cyclic(&ldev->active_contexts, &priv->id, priv, + xa_limit_32b, &ldev->next_context_id, GFP_KERNEL); + if (err < 0) + goto err_out0; + priv->vm = lima_vm_create(ldev); if (!priv->vm) { err = -ENOMEM; @@ -237,6 +242,9 @@ err_out0: static void lima_drm_driver_postclose(struct drm_device *dev, struct drm_file *file) { struct lima_drm_priv *priv = file->driver_priv; + struct lima_device *ldev = to_lima_dev(dev); + + xa_erase(&ldev->active_contexts, priv->id); lima_ctx_mgr_fini(&priv->ctx_mgr); lima_vm_put(priv->vm); @@ -388,6 +396,8 @@ static int lima_pdev_probe(struct platform_device *pdev) ldev->dev = &pdev->dev; ldev->id = (enum lima_gpu_id)of_device_get_match_data(&pdev->dev); + xa_init_flags(&ldev->active_contexts, XA_FLAGS_ALLOC); + platform_set_drvdata(pdev, ldev); /* Allocate and initialize the DRM device. */ @@ -446,6 +456,8 @@ static int lima_pdev_remove(struct platform_device *pdev) struct lima_device *ldev = platform_get_drvdata(pdev); struct drm_device *ddev = ldev->ddev; + xa_destroy(&ldev->active_contexts); + sysfs_remove_bin_file(&ldev->dev->kobj, &lima_error_state_attr); drm_dev_unregister(ddev); diff --git a/drivers/gpu/drm/lima/lima_drv.h b/drivers/gpu/drm/lima/lima_drv.h index c738d288547b..e49b7ab651d0 100644 --- a/drivers/gpu/drm/lima/lima_drv.h +++ b/drivers/gpu/drm/lima/lima_drv.h @@ -20,6 +20,7 @@ struct lima_sched_task; struct drm_lima_gem_submit_bo; struct lima_drm_priv { + int id; struct lima_vm *vm; struct lima_ctx_mgr ctx_mgr; }; -- cgit From 4a66f3da99dcb4dcbd28544110636b50adfb0f0d Mon Sep 17 00:00:00 2001 From: Erico Nunes Date: Mon, 13 Mar 2023 00:30:52 +0100 Subject: drm/lima: add show_fdinfo for drm usage stats This exposes an accumulated active time per client via the fdinfo infrastructure per execution engine, following Documentation/gpu/drm-usage-stats.rst. In lima, the exposed execution engines are gp and pp. 
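With this hooked up, reading /proc/<pid>/fdinfo/<fd> on a lima DRM file descriptor would show, alongside the usual fdinfo fields, output along these lines (illustrative only — the client id and nanosecond values are made up, but the field names follow the seq_printf() calls in the patch below):

	drm-driver:	lima
	drm-client-id:	3
	drm-engine-gp:	12345678 ns
	drm-engine-pp:	987654321 ns
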
Signed-off-by: Erico Nunes Signed-off-by: Qiang Yu Link: https://patchwork.freedesktop.org/patch/msgid/20230312233052.21095-4-nunes.erico@gmail.com --- drivers/gpu/drm/lima/lima_drv.c | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/lima') diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c index f456a471216b..3420875d6fc6 100644 --- a/drivers/gpu/drm/lima/lima_drv.c +++ b/drivers/gpu/drm/lima/lima_drv.c @@ -261,7 +261,36 @@ static const struct drm_ioctl_desc lima_drm_driver_ioctls[] = { DRM_IOCTL_DEF_DRV(LIMA_CTX_FREE, lima_ioctl_ctx_free, DRM_RENDER_ALLOW), }; -DEFINE_DRM_GEM_FOPS(lima_drm_driver_fops); +static void lima_drm_driver_show_fdinfo(struct seq_file *m, struct file *filp) +{ + struct drm_file *file = filp->private_data; + struct drm_device *dev = file->minor->dev; + struct lima_device *ldev = to_lima_dev(dev); + struct lima_drm_priv *priv = file->driver_priv; + struct lima_ctx_mgr *ctx_mgr = &priv->ctx_mgr; + u64 usage[lima_pipe_num]; + + lima_ctx_mgr_usage(ctx_mgr, usage); + + /* + * For a description of the text output format used here, see + * Documentation/gpu/drm-usage-stats.rst. + */ + seq_printf(m, "drm-driver:\t%s\n", dev->driver->name); + seq_printf(m, "drm-client-id:\t%u\n", priv->id); + for (int i = 0; i < lima_pipe_num; i++) { + struct lima_sched_pipe *pipe = &ldev->pipe[i]; + struct drm_gpu_scheduler *sched = &pipe->base; + + seq_printf(m, "drm-engine-%s:\t%llu ns\n", sched->name, usage[i]); + } +} + +static const struct file_operations lima_drm_driver_fops = { + .owner = THIS_MODULE, + DRM_GEM_FOPS, + .show_fdinfo = lima_drm_driver_show_fdinfo, +}; /* * Changelog: -- cgit From 7e4d0b09a9f268dad595c3ea92d692a14389131a Mon Sep 17 00:00:00 2001 From: Qiang Yu Date: Tue, 4 Apr 2023 08:25:59 +0800 Subject: Revert "drm/lima: add show_fdinfo for drm usage stats" This reverts commit 4a66f3da99dcb4dcbd28544110636b50adfb0f0d. This is due to the depend commit has been reverted on upstream: commit baad10973fdb ("Revert "drm/scheduler: track GPU active time per entity"") Acked-by: Emil Velikov Signed-off-by: Qiang Yu Link: https://patchwork.freedesktop.org/patch/msgid/20230404002601.24136-2-yq882255@163.com --- drivers/gpu/drm/lima/lima_drv.c | 31 +------------------------------ 1 file changed, 1 insertion(+), 30 deletions(-) (limited to 'drivers/gpu/drm/lima') diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c index 3420875d6fc6..f456a471216b 100644 --- a/drivers/gpu/drm/lima/lima_drv.c +++ b/drivers/gpu/drm/lima/lima_drv.c @@ -261,36 +261,7 @@ static const struct drm_ioctl_desc lima_drm_driver_ioctls[] = { DRM_IOCTL_DEF_DRV(LIMA_CTX_FREE, lima_ioctl_ctx_free, DRM_RENDER_ALLOW), }; -static void lima_drm_driver_show_fdinfo(struct seq_file *m, struct file *filp) -{ - struct drm_file *file = filp->private_data; - struct drm_device *dev = file->minor->dev; - struct lima_device *ldev = to_lima_dev(dev); - struct lima_drm_priv *priv = file->driver_priv; - struct lima_ctx_mgr *ctx_mgr = &priv->ctx_mgr; - u64 usage[lima_pipe_num]; - - lima_ctx_mgr_usage(ctx_mgr, usage); - - /* - * For a description of the text output format used here, see - * Documentation/gpu/drm-usage-stats.rst. 
- */ - seq_printf(m, "drm-driver:\t%s\n", dev->driver->name); - seq_printf(m, "drm-client-id:\t%u\n", priv->id); - for (int i = 0; i < lima_pipe_num; i++) { - struct lima_sched_pipe *pipe = &ldev->pipe[i]; - struct drm_gpu_scheduler *sched = &pipe->base; - - seq_printf(m, "drm-engine-%s:\t%llu ns\n", sched->name, usage[i]); - } -} - -static const struct file_operations lima_drm_driver_fops = { - .owner = THIS_MODULE, - DRM_GEM_FOPS, - .show_fdinfo = lima_drm_driver_show_fdinfo, -}; +DEFINE_DRM_GEM_FOPS(lima_drm_driver_fops); /* * Changelog: -- cgit From 4ad17bf571730475bf62290399d52b26ece8228c Mon Sep 17 00:00:00 2001 From: Qiang Yu Date: Tue, 4 Apr 2023 08:26:00 +0800 Subject: Revert "drm/lima: allocate unique id per drm_file" This reverts commit 87767de835edf527b879a363d518c33da68adb81. This is due to the depend commit has been reverted on upstream: commit baad10973fdb ("Revert "drm/scheduler: track GPU active time per entity"") Acked-by: Emil Velikov Signed-off-by: Qiang Yu Link: https://patchwork.freedesktop.org/patch/msgid/20230404002601.24136-3-yq882255@163.com --- drivers/gpu/drm/lima/lima_device.h | 3 --- drivers/gpu/drm/lima/lima_drv.c | 12 ------------ drivers/gpu/drm/lima/lima_drv.h | 1 - 3 files changed, 16 deletions(-) (limited to 'drivers/gpu/drm/lima') diff --git a/drivers/gpu/drm/lima/lima_device.h b/drivers/gpu/drm/lima/lima_device.h index 71b2db60d161..41b9d7b4bcc7 100644 --- a/drivers/gpu/drm/lima/lima_device.h +++ b/drivers/gpu/drm/lima/lima_device.h @@ -106,9 +106,6 @@ struct lima_device { struct lima_dump_head dump; struct list_head error_task_list; struct mutex error_task_list_lock; - - struct xarray active_contexts; - u32 next_context_id; }; static inline struct lima_device * diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c index f456a471216b..39cab4a55f57 100644 --- a/drivers/gpu/drm/lima/lima_drv.c +++ b/drivers/gpu/drm/lima/lima_drv.c @@ -218,11 +218,6 @@ static int lima_drm_driver_open(struct drm_device *dev, struct drm_file *file) if (!priv) return -ENOMEM; - err = xa_alloc_cyclic(&ldev->active_contexts, &priv->id, priv, - xa_limit_32b, &ldev->next_context_id, GFP_KERNEL); - if (err < 0) - goto err_out0; - priv->vm = lima_vm_create(ldev); if (!priv->vm) { err = -ENOMEM; @@ -242,9 +237,6 @@ err_out0: static void lima_drm_driver_postclose(struct drm_device *dev, struct drm_file *file) { struct lima_drm_priv *priv = file->driver_priv; - struct lima_device *ldev = to_lima_dev(dev); - - xa_erase(&ldev->active_contexts, priv->id); lima_ctx_mgr_fini(&priv->ctx_mgr); lima_vm_put(priv->vm); @@ -396,8 +388,6 @@ static int lima_pdev_probe(struct platform_device *pdev) ldev->dev = &pdev->dev; ldev->id = (enum lima_gpu_id)of_device_get_match_data(&pdev->dev); - xa_init_flags(&ldev->active_contexts, XA_FLAGS_ALLOC); - platform_set_drvdata(pdev, ldev); /* Allocate and initialize the DRM device. 
*/ @@ -456,8 +446,6 @@ static int lima_pdev_remove(struct platform_device *pdev) struct lima_device *ldev = platform_get_drvdata(pdev); struct drm_device *ddev = ldev->ddev; - xa_destroy(&ldev->active_contexts); - sysfs_remove_bin_file(&ldev->dev->kobj, &lima_error_state_attr); drm_dev_unregister(ddev); diff --git a/drivers/gpu/drm/lima/lima_drv.h b/drivers/gpu/drm/lima/lima_drv.h index e49b7ab651d0..c738d288547b 100644 --- a/drivers/gpu/drm/lima/lima_drv.h +++ b/drivers/gpu/drm/lima/lima_drv.h @@ -20,7 +20,6 @@ struct lima_sched_task; struct drm_lima_gem_submit_bo; struct lima_drm_priv { - int id; struct lima_vm *vm; struct lima_ctx_mgr ctx_mgr; }; -- cgit From 8678c8b305bb0de99eb05d6dae74e04ca46827a4 Mon Sep 17 00:00:00 2001 From: Qiang Yu Date: Tue, 4 Apr 2023 08:26:01 +0800 Subject: Revert "drm/lima: add usage counting method to ctx_mgr" This reverts commit bccafec957a5c4b22ac29e53a39e82d0a0008348. This is due to the depend commit has been reverted on upstream: commit baad10973fdb ("Revert "drm/scheduler: track GPU active time per entity"") Acked-by: Emil Velikov Signed-off-by: Qiang Yu Link: https://patchwork.freedesktop.org/patch/msgid/20230404002601.24136-4-yq882255@163.com --- drivers/gpu/drm/lima/lima_ctx.c | 30 +----------------------------- drivers/gpu/drm/lima/lima_ctx.h | 3 --- 2 files changed, 1 insertion(+), 32 deletions(-) (limited to 'drivers/gpu/drm/lima') diff --git a/drivers/gpu/drm/lima/lima_ctx.c b/drivers/gpu/drm/lima/lima_ctx.c index e008e586fad0..891d5cd5019a 100644 --- a/drivers/gpu/drm/lima/lima_ctx.c +++ b/drivers/gpu/drm/lima/lima_ctx.c @@ -15,7 +15,6 @@ int lima_ctx_create(struct lima_device *dev, struct lima_ctx_mgr *mgr, u32 *id) if (!ctx) return -ENOMEM; ctx->dev = dev; - ctx->mgr = mgr; kref_init(&ctx->refcnt); for (i = 0; i < lima_pipe_num; i++) { @@ -43,17 +42,10 @@ err_out0: static void lima_ctx_do_release(struct kref *ref) { struct lima_ctx *ctx = container_of(ref, struct lima_ctx, refcnt); - struct lima_ctx_mgr *mgr = ctx->mgr; int i; - for (i = 0; i < lima_pipe_num; i++) { - struct lima_sched_context *context = &ctx->context[i]; - struct drm_sched_entity *entity = &context->base; - - mgr->elapsed_ns[i] += entity->elapsed_ns; - + for (i = 0; i < lima_pipe_num; i++) lima_sched_context_fini(ctx->dev->pipe + i, ctx->context + i); - } kfree(ctx); } @@ -107,23 +99,3 @@ void lima_ctx_mgr_fini(struct lima_ctx_mgr *mgr) xa_destroy(&mgr->handles); mutex_destroy(&mgr->lock); } - -void lima_ctx_mgr_usage(struct lima_ctx_mgr *mgr, u64 usage[lima_pipe_num]) -{ - struct lima_ctx *ctx; - unsigned long id; - - for (int i = 0; i < lima_pipe_num; i++) - usage[i] = mgr->elapsed_ns[i]; - - mutex_lock(&mgr->lock); - xa_for_each(&mgr->handles, id, ctx) { - for (int i = 0; i < lima_pipe_num; i++) { - struct lima_sched_context *context = &ctx->context[i]; - struct drm_sched_entity *entity = &context->base; - - usage[i] += entity->elapsed_ns; - } - } - mutex_unlock(&mgr->lock); -} diff --git a/drivers/gpu/drm/lima/lima_ctx.h b/drivers/gpu/drm/lima/lima_ctx.h index 6068863880eb..74e2be09090f 100644 --- a/drivers/gpu/drm/lima/lima_ctx.h +++ b/drivers/gpu/drm/lima/lima_ctx.h @@ -12,7 +12,6 @@ struct lima_ctx { struct kref refcnt; struct lima_device *dev; - struct lima_ctx_mgr *mgr; struct lima_sched_context context[lima_pipe_num]; atomic_t guilty; @@ -24,7 +23,6 @@ struct lima_ctx { struct lima_ctx_mgr { struct mutex lock; struct xarray handles; - u64 elapsed_ns[lima_pipe_num]; }; int lima_ctx_create(struct lima_device *dev, struct lima_ctx_mgr *mgr, u32 *id); @@ -33,6 +31,5 @@ 
struct lima_ctx *lima_ctx_get(struct lima_ctx_mgr *mgr, u32 id); void lima_ctx_put(struct lima_ctx *ctx); void lima_ctx_mgr_init(struct lima_ctx_mgr *mgr); void lima_ctx_mgr_fini(struct lima_ctx_mgr *mgr); -void lima_ctx_mgr_usage(struct lima_ctx_mgr *mgr, u64 usage[lima_pipe_num]); #endif -- cgit From 6eea63c7090b20ee41032d3e478e617b219d69aa Mon Sep 17 00:00:00 2001 From: Erico Nunes Date: Tue, 6 Jun 2023 16:32:47 +0200 Subject: drm/lima: fix sched context destroy The drm sched entity must be flushed before finishing, to account for jobs potentially still in flight at that time. Lima did not do this flush until now, so switch the destroy call to the drm_sched_entity_destroy() wrapper which will take care of that. This fixes a regression on lima which started since the rework in commit 2fdb8a8f07c2 ("drm/scheduler: rework entity flush, kill and fini") where some specific types of applications may hang indefinitely. Fixes: 2fdb8a8f07c2 ("drm/scheduler: rework entity flush, kill and fini") Reviewed-by: Vasily Khoruzhick Signed-off-by: Erico Nunes Signed-off-by: Qiang Yu Link: https://patchwork.freedesktop.org/patch/msgid/20230606143247.433018-1-nunes.erico@gmail.com --- drivers/gpu/drm/lima/lima_sched.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/lima') diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c index ff003403fbbc..ffd91a5ee299 100644 --- a/drivers/gpu/drm/lima/lima_sched.c +++ b/drivers/gpu/drm/lima/lima_sched.c @@ -165,7 +165,7 @@ int lima_sched_context_init(struct lima_sched_pipe *pipe, void lima_sched_context_fini(struct lima_sched_pipe *pipe, struct lima_sched_context *context) { - drm_sched_entity_fini(&context->base); + drm_sched_entity_destroy(&context->base); } struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task) -- cgit
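For context on that last fix: drm_sched_entity_destroy() differs from drm_sched_entity_fini() only in that it flushes the entity first. A paraphrased sketch of the wrapper, based on the upstream scheduler code of this era (see drivers/gpu/drm/scheduler/sched_entity.c for the authoritative version):

	void drm_sched_entity_destroy(struct drm_sched_entity *entity)
	{
		/* Wait, up to a timeout, for the entity's queued jobs to drain... */
		drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
		/* ...then tear the entity down as before. */
		drm_sched_entity_fini(entity);
	}

which is why switching lima's context teardown to the wrapper is enough to add the missing flush.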