| author | Dmitry Torokhov <[email protected]> | 2023-08-30 16:06:38 -0700 | 
|---|---|---|
| committer | Dmitry Torokhov <[email protected]> | 2023-08-30 16:06:38 -0700 | 
| commit | 1ac731c529cd4d6adbce134754b51ff7d822b145 (patch) | |
| tree | 143ab3f35ca5f3b69f583c84e6964b17139c2ec1 /drivers/gpu/drm/drm_gem_shmem_helper.c | |
| parent | 07b4c950f27bef0362dc6ad7ee713aab61d58149 (diff) | |
| parent | 54116d442e001e1b6bd482122043b1870998a1f3 (diff) | |
Merge branch 'next' into for-linus
Prepare input updates for 6.6 merge window.
Diffstat (limited to 'drivers/gpu/drm/drm_gem_shmem_helper.c')
| -rw-r--r-- | drivers/gpu/drm/drm_gem_shmem_helper.c | 74 | 
1 file changed, 42 insertions, 32 deletions
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 75185a960fc4..4ea6507a77e5 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -141,7 +141,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
 {
 	struct drm_gem_object *obj = &shmem->base;
 
-	WARN_ON(shmem->vmap_use_count);
+	drm_WARN_ON(obj->dev, shmem->vmap_use_count);
 
 	if (obj->import_attach) {
 		drm_prime_gem_destroy(obj, shmem->sgt);
@@ -156,7 +156,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
 			drm_gem_shmem_put_pages(shmem);
 	}
 
-	WARN_ON(shmem->pages_use_count);
+	drm_WARN_ON(obj->dev, shmem->pages_use_count);
 
 	drm_gem_object_release(obj);
 	mutex_destroy(&shmem->pages_lock);
@@ -175,7 +175,8 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
 
 	pages = drm_gem_get_pages(obj);
 	if (IS_ERR(pages)) {
-		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
+		drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
+			    PTR_ERR(pages));
 		shmem->pages_use_count = 0;
 		return PTR_ERR(pages);
 	}
@@ -207,9 +208,10 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
  */
 int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
 {
+	struct drm_gem_object *obj = &shmem->base;
 	int ret;
 
-	WARN_ON(shmem->base.import_attach);
+	drm_WARN_ON(obj->dev, obj->import_attach);
 
 	ret = mutex_lock_interruptible(&shmem->pages_lock);
 	if (ret)
@@ -225,7 +227,7 @@ static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
 {
 	struct drm_gem_object *obj = &shmem->base;
 
-	if (WARN_ON_ONCE(!shmem->pages_use_count))
+	if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
 		return;
 
 	if (--shmem->pages_use_count > 0)
@@ -268,7 +270,9 @@ EXPORT_SYMBOL(drm_gem_shmem_put_pages);
  */
 int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
 {
-	WARN_ON(shmem->base.import_attach);
+	struct drm_gem_object *obj = &shmem->base;
+
+	drm_WARN_ON(obj->dev, obj->import_attach);
 
 	return drm_gem_shmem_get_pages(shmem);
 }
@@ -283,7 +287,9 @@ EXPORT_SYMBOL(drm_gem_shmem_pin);
  */
 void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
 {
-	WARN_ON(shmem->base.import_attach);
+	struct drm_gem_object *obj = &shmem->base;
+
+	drm_WARN_ON(obj->dev, obj->import_attach);
 
 	drm_gem_shmem_put_pages(shmem);
 }
@@ -295,24 +301,22 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
 	struct drm_gem_object *obj = &shmem->base;
 	int ret = 0;
 
-	if (shmem->vmap_use_count++ > 0) {
-		iosys_map_set_vaddr(map, shmem->vaddr);
-		return 0;
-	}
-
 	if (obj->import_attach) {
 		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
 		if (!ret) {
-			if (WARN_ON(map->is_iomem)) {
+			if (drm_WARN_ON(obj->dev, map->is_iomem)) {
 				dma_buf_vunmap(obj->import_attach->dmabuf, map);
-				ret = -EIO;
-				goto err_put_pages;
+				return -EIO;
 			}
-			shmem->vaddr = map->vaddr;
 		}
 	} else {
 		pgprot_t prot = PAGE_KERNEL;
 
+		if (shmem->vmap_use_count++ > 0) {
+			iosys_map_set_vaddr(map, shmem->vaddr);
+			return 0;
+		}
+
 		ret = drm_gem_shmem_get_pages(shmem);
 		if (ret)
 			goto err_zero_use;
@@ -328,7 +332,7 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
 	}
 
 	if (ret) {
-		DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
+		drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
 		goto err_put_pages;
 	}
 
@@ -378,15 +382,15 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
 {
 	struct drm_gem_object *obj = &shmem->base;
 
-	if (WARN_ON_ONCE(!shmem->vmap_use_count))
-		return;
-
-	if (--shmem->vmap_use_count > 0)
-		return;
-
 	if (obj->import_attach) {
 		dma_buf_vunmap(obj->import_attach->dmabuf, map);
 	} else {
+		if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
+			return;
+
+		if (--shmem->vmap_use_count > 0)
+			return;
+
 		vunmap(shmem->vaddr);
 		drm_gem_shmem_put_pages(shmem);
 	}
@@ -461,7 +465,7 @@ void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
 	struct drm_gem_object *obj = &shmem->base;
 	struct drm_device *dev = obj->dev;
 
-	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));
+	drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));
 
 	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
 	sg_free_table(shmem->sgt);
@@ -550,7 +554,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
 	mutex_lock(&shmem->pages_lock);
 
 	if (page_offset >= num_pages ||
-	    WARN_ON_ONCE(!shmem->pages) ||
+	    drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
 	    shmem->madv < 0) {
 		ret = VM_FAULT_SIGBUS;
 	} else {
@@ -569,7 +573,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 
-	WARN_ON(shmem->base.import_attach);
+	drm_WARN_ON(obj->dev, obj->import_attach);
 
 	mutex_lock(&shmem->pages_lock);
 
@@ -578,7 +582,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
 	 * mmap'd, vm_open() just grabs an additional reference for the new
 	 * mm the vma is getting copied into (ie. on fork()).
 	 */
-	if (!WARN_ON_ONCE(!shmem->pages_use_count))
+	if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
 		shmem->pages_use_count++;
 
 	mutex_unlock(&shmem->pages_lock);
@@ -619,11 +623,14 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
 	int ret;
 
 	if (obj->import_attach) {
-		/* Drop the reference drm_gem_mmap_obj() acquired.*/
-		drm_gem_object_put(obj);
 		vma->vm_private_data = NULL;
+		ret = dma_buf_mmap(obj->dma_buf, vma, 0);
 
-		return dma_buf_mmap(obj->dma_buf, vma, 0);
+		/* Drop the reference drm_gem_mmap_obj() acquired.*/
+		if (!ret)
+			drm_gem_object_put(obj);
+
+		return ret;
 	}
 
 	ret = drm_gem_shmem_get_pages(shmem);
@@ -648,6 +655,9 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
 void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
 			      struct drm_printer *p, unsigned int indent)
 {
+	if (shmem->base.import_attach)
+		return;
+
 	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
 	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
 	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
@@ -672,7 +682,7 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
 {
 	struct drm_gem_object *obj = &shmem->base;
 
-	WARN_ON(shmem->base.import_attach);
+	drm_WARN_ON(obj->dev, obj->import_attach);
 
 	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
 }
@@ -687,7 +697,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
 	if (shmem->sgt)
 		return shmem->sgt;
 
-	WARN_ON(obj->import_attach);
+	drm_WARN_ON(obj->dev, obj->import_attach);
 
 	ret = drm_gem_shmem_get_pages_locked(shmem);
 	if (ret)
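The bulk of this diff is one mechanical substitution: the generic WARN_ON()/WARN_ON_ONCE() and DRM_DEBUG_KMS() calls in the shmem helper become the device-aware drm_WARN_ON()/drm_WARN_ON_ONCE() and drm_dbg_kms() variants, which prefix each message with the owning drm_device so a warning from this shared helper can be attributed to a specific device. A minimal sketch of the pattern, assuming a hypothetical helper name (the macros and struct fields are the ones used in the patch):

```c
#include <drm/drm_print.h>
#include <drm/drm_gem_shmem_helper.h>

/*
 * Hypothetical example following the patch's convention: derive the
 * drm_device from the embedded GEM object, then use the device-aware
 * warning/debug macros rather than bare WARN_ON()/DRM_DEBUG_KMS().
 */
static void example_warn_if_imported(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	/* The splat now names obj->dev, unlike a plain WARN_ON(). */
	drm_WARN_ON(obj->dev, obj->import_attach);

	/* Likewise, the debug line is tagged with the device. */
	drm_dbg_kms(obj->dev, "pages_use_count=%u\n",
		    shmem->pages_use_count);
}
```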
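Two of the hunks change behavior rather than logging. In drm_gem_shmem_vmap_locked()/vunmap_locked(), the vmap_use_count bookkeeping now applies only to native shmem buffers; imported dma-bufs defer entirely to dma_buf_vmap()/dma_buf_vunmap(), which keep their own reference count (and drm_gem_shmem_print_info() now returns early for imports, since those counters are only meaningful for native buffers). In drm_gem_shmem_mmap(), the GEM reference taken by drm_gem_mmap_obj() is now dropped only after dma_buf_mmap() succeeds, so a failed mmap of an imported buffer no longer unbalances the reference count. A sketch of that corrected ownership rule, assuming (as the patch's comment implies) that the caller's error path releases the reference when the mmap fails:

```c
#include <linux/dma-buf.h>
#include <linux/mm.h>
#include <drm/drm_gem.h>

/*
 * Hypothetical standalone version of the import branch of
 * drm_gem_shmem_mmap() after this patch: a reference acquired by the
 * caller is released only once dma_buf_mmap() has actually succeeded.
 */
static int example_mmap_imported(struct drm_gem_object *obj,
				 struct vm_area_struct *vma)
{
	int ret;

	vma->vm_private_data = NULL;
	ret = dma_buf_mmap(obj->dma_buf, vma, 0);

	/*
	 * Drop the reference drm_gem_mmap_obj() acquired, but only on
	 * success; on failure the object must stay alive for the
	 * caller's error handling.
	 */
	if (!ret)
		drm_gem_object_put(obj);

	return ret;
}
```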