Diffstat (limited to 'drivers/gpu/drm/drm_gem_shmem_helper.c')
-rw-r--r--  drivers/gpu/drm/drm_gem_shmem_helper.c | 93
1 file changed, 42 insertions(+), 51 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index e00616d94f26..9825c378dfa6 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -51,13 +51,17 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
 	if (!obj)
 		return ERR_PTR(-ENOMEM);
 
+	shmem = to_drm_gem_shmem_obj(obj);
+
 	if (!obj->funcs)
 		obj->funcs = &drm_gem_shmem_funcs;
 
-	if (private)
+	if (private) {
 		drm_gem_private_object_init(dev, obj, size);
-	else
+		shmem->map_wc = false; /* dma-buf mappings use always writecombine */
+	} else {
 		ret = drm_gem_object_init(dev, obj, size);
+	}
 	if (ret)
 		goto err_free;
 
@@ -65,7 +69,6 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
 	if (ret)
 		goto err_release;
 
-	shmem = to_drm_gem_shmem_obj(obj);
 	mutex_init(&shmem->pages_lock);
 	mutex_init(&shmem->vmap_lock);
 	INIT_LIST_HEAD(&shmem->madv_list);
@@ -258,16 +261,25 @@ void drm_gem_shmem_unpin(struct drm_gem_object *obj)
 }
 EXPORT_SYMBOL(drm_gem_shmem_unpin);
 
-static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
+static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map)
 {
 	struct drm_gem_object *obj = &shmem->base;
-	int ret;
+	int ret = 0;
 
-	if (shmem->vmap_use_count++ > 0)
-		return shmem->vaddr;
+	if (shmem->vmap_use_count++ > 0) {
+		dma_buf_map_set_vaddr(map, shmem->vaddr);
+		return 0;
+	}
 
 	if (obj->import_attach) {
-		shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
+		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
+		if (!ret) {
+			if (WARN_ON(map->is_iomem)) {
+				ret = -EIO;
+				goto err_put_pages;
+			}
+			shmem->vaddr = map->vaddr;
+		}
 	} else {
 		pgprot_t prot = PAGE_KERNEL;
 
@@ -275,19 +287,22 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
 		if (ret)
 			goto err_zero_use;
 
-		if (!shmem->map_cached)
+		if (shmem->map_wc)
 			prot = pgprot_writecombine(prot);
 		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
 				    VM_MAP, prot);
+		if (!shmem->vaddr)
+			ret = -ENOMEM;
+		else
+			dma_buf_map_set_vaddr(map, shmem->vaddr);
 	}
 
-	if (!shmem->vaddr) {
-		DRM_DEBUG_KMS("Failed to vmap pages\n");
-		ret = -ENOMEM;
+	if (ret) {
+		DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
 		goto err_put_pages;
 	}
 
-	return shmem->vaddr;
+	return 0;
 
 err_put_pages:
 	if (!obj->import_attach)
@@ -295,12 +310,14 @@ err_put_pages:
 err_zero_use:
 	shmem->vmap_use_count = 0;
 
-	return ERR_PTR(ret);
+	return ret;
 }
 
 /*
  * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
  * @shmem: shmem GEM object
+ * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
+ *       store.
  *
  * This function makes sure that a contiguous kernel virtual address mapping
  * exists for the buffer backing the shmem GEM object.
@@ -314,23 +331,23 @@ err_zero_use:
  * Returns:
  * 0 on success or a negative error code on failure.
  */
-void *drm_gem_shmem_vmap(struct drm_gem_object *obj)
+int drm_gem_shmem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
 {
 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
-	void *vaddr;
 	int ret;
 
 	ret = mutex_lock_interruptible(&shmem->vmap_lock);
 	if (ret)
-		return ERR_PTR(ret);
-	vaddr = drm_gem_shmem_vmap_locked(shmem);
+		return ret;
+	ret = drm_gem_shmem_vmap_locked(shmem, map);
 	mutex_unlock(&shmem->vmap_lock);
 
-	return vaddr;
+	return ret;
 }
 EXPORT_SYMBOL(drm_gem_shmem_vmap);
 
-static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
+static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
+					struct dma_buf_map *map)
 {
 	struct drm_gem_object *obj = &shmem->base;
 
@@ -341,7 +358,7 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
 		return;
 
 	if (obj->import_attach)
-		dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr);
+		dma_buf_vunmap(obj->import_attach->dmabuf, map);
 	else
 		vunmap(shmem->vaddr);
 
@@ -352,6 +369,7 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
 /*
  * drm_gem_shmem_vunmap - Unmap a virtual mapping fo a shmem GEM object
  * @shmem: shmem GEM object
+ * @map: Kernel virtual address where the SHMEM GEM object was mapped
  *
  * This function cleans up a kernel virtual address mapping acquired by
  * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
@@ -361,12 +379,12 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
  * also be called by drivers directly, in which case it will hide the
  * differences between dma-buf imported and natively allocated objects.
  */
-void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr)
+void drm_gem_shmem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
 {
 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 
 	mutex_lock(&shmem->vmap_lock);
-	drm_gem_shmem_vunmap_locked(shmem);
+	drm_gem_shmem_vunmap_locked(shmem, map);
 	mutex_unlock(&shmem->vmap_lock);
 }
 EXPORT_SYMBOL(drm_gem_shmem_vunmap);
@@ -462,33 +480,6 @@ bool drm_gem_shmem_purge(struct drm_gem_object *obj)
 EXPORT_SYMBOL(drm_gem_shmem_purge);
 
 /**
- * drm_gem_shmem_create_object_cached - Create a shmem buffer object with
- *                                      cached mappings
- * @dev: DRM device
- * @size: Size of the object to allocate
- *
- * By default, shmem buffer objects use writecombine mappings. This
- * function implements struct drm_driver.gem_create_object for shmem
- * buffer objects with cached mappings.
- *
- * Returns:
- * A struct drm_gem_shmem_object * on success or NULL negative on failure.
- */
-struct drm_gem_object *
-drm_gem_shmem_create_object_cached(struct drm_device *dev, size_t size)
-{
-	struct drm_gem_shmem_object *shmem;
-
-	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
-	if (!shmem)
-		return NULL;
-	shmem->map_cached = true;
-
-	return &shmem->base;
-}
-EXPORT_SYMBOL(drm_gem_shmem_create_object_cached);
-
-/**
  * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
  * @file: DRM file structure to create the dumb buffer for
  * @dev: DRM device
@@ -611,7 +602,7 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 
 	vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-	if (!shmem->map_cached)
+	if (shmem->map_wc)
 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 	vma->vm_ops = &drm_gem_shmem_vm_ops;
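
For context, a minimal driver-side sketch of the reworked interface (not part of this patch; example_fill_bo, src and len are hypothetical names): after this change drm_gem_shmem_vmap() returns 0 or a negative errno and hands the mapping back through a struct dma_buf_map, and drm_gem_shmem_vunmap() takes that same map instead of a raw pointer.

/* Illustrative usage sketch, assuming the post-patch API. */
#include <linux/dma-buf-map.h>
#include <linux/string.h>

#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>

static int example_fill_bo(struct drm_gem_object *obj, const void *src, size_t len)
{
	struct dma_buf_map map;
	int ret;

	ret = drm_gem_shmem_vmap(obj, &map);	/* 0 on success, negative errno on failure */
	if (ret)
		return ret;

	/*
	 * The shmem helper only hands out system-memory mappings (the vmap
	 * path fails with -EIO on I/O-memory imports), so map.vaddr can be
	 * dereferenced directly.
	 */
	memcpy(map.vaddr, src, len);

	/* Pass the same map back; the mapping goes away once the use count drops to 0. */
	drm_gem_shmem_vunmap(obj, &map);

	return 0;
}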