Diffstat (limited to 'drivers/gpu/drm/lima/lima_gem.c')
-rw-r--r--   drivers/gpu/drm/lima/lima_gem.c   160
1 file changed, 142 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index d0059d8c97d8..640acc060467 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -4,6 +4,8 @@
 #include <linux/mm.h>
 #include <linux/sync_file.h>
 #include <linux/pagemap.h>
+#include <linux/shmem_fs.h>
+#include <linux/dma-mapping.h>
 
 #include <drm/drm_file.h>
 #include <drm/drm_syncobj.h>
@@ -15,6 +17,88 @@
 #include "lima_gem.h"
 #include "lima_vm.h"
 
+int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
+{
+        struct page **pages;
+        struct address_space *mapping = bo->base.base.filp->f_mapping;
+        struct device *dev = bo->base.base.dev->dev;
+        size_t old_size = bo->heap_size;
+        size_t new_size = bo->heap_size ? bo->heap_size * 2 :
+                (lima_heap_init_nr_pages << PAGE_SHIFT);
+        struct sg_table sgt;
+        int i, ret;
+
+        if (bo->heap_size >= bo->base.base.size)
+                return -ENOSPC;
+
+        new_size = min(new_size, bo->base.base.size);
+
+        mutex_lock(&bo->base.pages_lock);
+
+        if (bo->base.pages) {
+                pages = bo->base.pages;
+        } else {
+                pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
+                                       sizeof(*pages), GFP_KERNEL | __GFP_ZERO);
+                if (!pages) {
+                        mutex_unlock(&bo->base.pages_lock);
+                        return -ENOMEM;
+                }
+
+                bo->base.pages = pages;
+                bo->base.pages_use_count = 1;
+
+                mapping_set_unevictable(mapping);
+        }
+
+        for (i = old_size >> PAGE_SHIFT; i < new_size >> PAGE_SHIFT; i++) {
+                struct page *page = shmem_read_mapping_page(mapping, i);
+
+                if (IS_ERR(page)) {
+                        mutex_unlock(&bo->base.pages_lock);
+                        return PTR_ERR(page);
+                }
+                pages[i] = page;
+        }
+
+        mutex_unlock(&bo->base.pages_lock);
+
+        ret = sg_alloc_table_from_pages(&sgt, pages, i, 0,
+                                        new_size, GFP_KERNEL);
+        if (ret)
+                return ret;
+
+        if (bo->base.sgt) {
+                dma_unmap_sgtable(dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
+                sg_free_table(bo->base.sgt);
+        } else {
+                bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL);
+                if (!bo->base.sgt) {
+                        sg_free_table(&sgt);
+                        return -ENOMEM;
+                }
+        }
+
+        ret = dma_map_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0);
+        if (ret) {
+                sg_free_table(&sgt);
+                kfree(bo->base.sgt);
+                bo->base.sgt = NULL;
+                return ret;
+        }
+
+        *bo->base.sgt = sgt;
+
+        if (vm) {
+                ret = lima_vm_map_bo(vm, bo, old_size >> PAGE_SHIFT);
+                if (ret)
+                        return ret;
+        }
+
+        bo->heap_size = new_size;
+        return 0;
+}
+
 int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
                            u32 size, u32 flags, u32 *handle)
 {
@@ -22,7 +106,8 @@ int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
         gfp_t mask;
         struct drm_gem_shmem_object *shmem;
         struct drm_gem_object *obj;
-        struct sg_table *sgt;
+        struct lima_bo *bo;
+        bool is_heap = flags & LIMA_BO_FLAG_HEAP;
 
         shmem = drm_gem_shmem_create(dev, size);
         if (IS_ERR(shmem))
@@ -36,17 +121,25 @@ int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
                 mask |= __GFP_DMA32;
         mapping_set_gfp_mask(obj->filp->f_mapping, mask);
 
-        sgt = drm_gem_shmem_get_pages_sgt(obj);
-        if (IS_ERR(sgt)) {
-                err = PTR_ERR(sgt);
-                goto out;
+        if (is_heap) {
+                bo = to_lima_bo(obj);
+                err = lima_heap_alloc(bo, NULL);
+                if (err)
+                        goto out;
+        } else {
+                struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(obj);
+
+                if (IS_ERR(sgt)) {
+                        err = PTR_ERR(sgt);
+                        goto out;
+                }
         }
 
         err = drm_gem_handle_create(file, obj, handle);
 
 out:
         /* drop reference from allocate - handle holds it now */
-        drm_gem_object_put_unlocked(obj);
+        drm_gem_object_put(obj);
 
         return err;
 }
@@ -79,17 +172,47 @@ static void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *f
         lima_vm_bo_del(vm, bo);
 }
 
+static int lima_gem_pin(struct drm_gem_object *obj)
+{
+        struct lima_bo *bo = to_lima_bo(obj);
+
+        if (bo->heap_size)
+                return -EINVAL;
+
+        return drm_gem_shmem_pin(obj);
+}
+
+static int lima_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
+{
+        struct lima_bo *bo = to_lima_bo(obj);
+
+        if (bo->heap_size)
+                return -EINVAL;
+
+        return drm_gem_shmem_vmap(obj, map);
+}
+
+static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+        struct lima_bo *bo = to_lima_bo(obj);
+
+        if (bo->heap_size)
+                return -EINVAL;
+
+        return drm_gem_shmem_mmap(obj, vma);
+}
+
 static const struct drm_gem_object_funcs lima_gem_funcs = {
         .free = lima_gem_free_object,
         .open = lima_gem_object_open,
         .close = lima_gem_object_close,
         .print_info = drm_gem_shmem_print_info,
-        .pin = drm_gem_shmem_pin,
+        .pin = lima_gem_pin,
         .unpin = drm_gem_shmem_unpin,
         .get_sg_table = drm_gem_shmem_get_sg_table,
-        .vmap = drm_gem_shmem_vmap,
+        .vmap = lima_gem_vmap,
         .vunmap = drm_gem_shmem_vunmap,
-        .mmap = drm_gem_shmem_mmap,
+        .mmap = lima_gem_mmap,
 };
 
 struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size)
@@ -102,7 +225,7 @@ struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t siz
 
         mutex_init(&bo->lock);
         INIT_LIST_HEAD(&bo->va);
-
+        bo->base.map_wc = true;
         bo->base.base.funcs = &lima_gem_funcs;
 
         return &bo->base.base;
@@ -125,7 +248,7 @@ int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
 
         *offset = drm_vma_node_offset_addr(&obj->vma_node);
 
-        drm_gem_object_put_unlocked(obj);
+        drm_gem_object_put(obj);
 
         return 0;
 }
@@ -144,7 +267,9 @@ static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
         if (explicit)
                 return 0;
 
-        return drm_gem_fence_array_add_implicit(&task->deps, &bo->base.base, write);
+        return drm_sched_job_add_implicit_dependencies(&task->base,
+                                                       &bo->base.base,
+                                                       write);
 }
 
 static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
@@ -162,7 +287,7 @@ static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
                 if (err)
                         return err;
 
-                err = drm_gem_fence_array_add(&submit->task->deps, fence);
+                err = drm_sched_job_add_dependency(&submit->task->base, fence);
                 if (err) {
                         dma_fence_put(fence);
                         return err;
@@ -205,7 +330,7 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
                  */
                 err = lima_vm_bo_add(vm, bo, false);
                 if (err) {
-                        drm_gem_object_put_unlocked(obj);
+                        drm_gem_object_put(obj);
                         goto err_out0;
                 }
 
@@ -236,8 +361,7 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
                 goto err_out2;
         }
 
-        fence = lima_sched_context_queue_task(
-                submit->ctx->context + submit->pipe, submit->task);
+        fence = lima_sched_context_queue_task(submit->task);
 
         for (i = 0; i < submit->nr_bos; i++) {
                 if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
@@ -250,7 +374,7 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
                                     submit->nr_bos, &ctx);
 
         for (i = 0; i < submit->nr_bos; i++)
-                drm_gem_object_put_unlocked(&bos[i]->base.base);
+                drm_gem_object_put(&bos[i]->base.base);
 
         if (out_sync) {
                 drm_syncobj_replace_fence(out_sync, fence);
@@ -271,7 +395,7 @@ err_out0:
                 if (!bos[i])
                         break;
                 lima_vm_bo_del(vm, bos[i]);
-                drm_gem_object_put_unlocked(&bos[i]->base.base);
+                drm_gem_object_put(&bos[i]->base.base);
         }
         if (out_sync)
                 drm_syncobj_put(out_sync);
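The new is_heap path in lima_gem_create_handle() is driven from userspace: passing LIMA_BO_FLAG_HEAP in the GEM create ioctl makes the kernel call lima_heap_alloc() instead of pinning the whole buffer up front, and the backing store then grows on demand. The following is a minimal illustrative sketch, not part of the patch; it assumes the lima uapi definitions from include/uapi/drm/lima_drm.h (struct drm_lima_gem_create, DRM_IOCTL_LIMA_GEM_CREATE, LIMA_BO_FLAG_HEAP), and the header include path and device node are placeholders that depend on the system.

/*
 * Illustrative userspace sketch (not part of the patch above): create a heap
 * BO so the is_heap branch of lima_gem_create_handle() runs.  The uapi names
 * come from the lima uapi header; the include path and device node below are
 * assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <drm/lima_drm.h>       /* assumed install location of the lima uapi header */

int main(void)
{
        struct drm_lima_gem_create create;
        int fd, ret;

        fd = open("/dev/dri/renderD128", O_RDWR);       /* assumed render node */
        if (fd < 0) {
                perror("open");
                return 1;
        }

        memset(&create, 0, sizeof(create));
        /*
         * For a heap BO, size is only an upper bound: lima_heap_alloc() starts
         * at lima_heap_init_nr_pages pages and doubles the backing store on
         * demand, returning -ENOSPC once this bound is reached.
         */
        create.size = 16 << 20;
        create.flags = LIMA_BO_FLAG_HEAP;

        ret = ioctl(fd, DRM_IOCTL_LIMA_GEM_CREATE, &create);
        if (ret)
                perror("DRM_IOCTL_LIMA_GEM_CREATE");
        else
                printf("heap bo handle %u\n", create.handle);

        close(fd);
        return ret ? 1 : 0;
}

The sketch stops at handle creation on purpose: the patch also makes pin, vmap and mmap of a heap BO fail with -EINVAL (see lima_gem_pin/vmap/mmap above), so a heap buffer is only ever written by the GPU.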