Diffstat (limited to 'drivers/gpu/drm/exynos/exynos_drm_gem.c')
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_gem.c | 445
1 file changed, 208 insertions(+), 237 deletions(-)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 0d5b9698d384..f12fbc36b120 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -13,98 +13,112 @@
 #include <drm/drm_vma_manager.h>
 
 #include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
 #include <drm/exynos_drm.h>
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_gem.h"
-#include "exynos_drm_buf.h"
 #include "exynos_drm_iommu.h"
 
-static unsigned int convert_to_vm_err_msg(int msg)
+static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj)
 {
-	unsigned int out_msg;
+	struct drm_device *dev = obj->base.dev;
+	enum dma_attr attr;
+	unsigned int nr_pages;
 
-	switch (msg) {
-	case 0:
-	case -ERESTARTSYS:
-	case -EINTR:
-		out_msg = VM_FAULT_NOPAGE;
-		break;
+	if (obj->dma_addr) {
+		DRM_DEBUG_KMS("already allocated.\n");
+		return 0;
+	}
 
-	case -ENOMEM:
-		out_msg = VM_FAULT_OOM;
-		break;
+	init_dma_attrs(&obj->dma_attrs);
 
-	default:
-		out_msg = VM_FAULT_SIGBUS;
-		break;
-	}
+	/*
+	 * if EXYNOS_BO_CONTIG, fully physically contiguous memory
+	 * region will be allocated else physically contiguous
+	 * as possible.
+	 */
+	if (!(obj->flags & EXYNOS_BO_NONCONTIG))
+		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &obj->dma_attrs);
 
-	return out_msg;
-}
+	/*
+	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
+	 * else cachable mapping.
+	 */
+	if (obj->flags & EXYNOS_BO_WC || !(obj->flags & EXYNOS_BO_CACHABLE))
+		attr = DMA_ATTR_WRITE_COMBINE;
+	else
+		attr = DMA_ATTR_NON_CONSISTENT;
 
-static int check_gem_flags(unsigned int flags)
-{
-	if (flags & ~(EXYNOS_BO_MASK)) {
-		DRM_ERROR("invalid flags.\n");
-		return -EINVAL;
-	}
+	dma_set_attr(attr, &obj->dma_attrs);
+	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &obj->dma_attrs);
 
-	return 0;
-}
+	nr_pages = obj->size >> PAGE_SHIFT;
 
-static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
-					struct vm_area_struct *vma)
-{
-	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
+	if (!is_drm_iommu_supported(dev)) {
+		dma_addr_t start_addr;
+		unsigned int i = 0;
 
-	/* non-cachable as default. */
-	if (obj->flags & EXYNOS_BO_CACHABLE)
-		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-	else if (obj->flags & EXYNOS_BO_WC)
-		vma->vm_page_prot =
-			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-	else
-		vma->vm_page_prot =
-			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
-}
+		obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
+		if (!obj->pages) {
+			DRM_ERROR("failed to allocate pages.\n");
+			return -ENOMEM;
+		}
 
-static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
-{
-	/* TODO */
+		obj->cookie = dma_alloc_attrs(dev->dev,
+					obj->size,
+					&obj->dma_addr, GFP_KERNEL,
+					&obj->dma_attrs);
+		if (!obj->cookie) {
+			DRM_ERROR("failed to allocate buffer.\n");
+			drm_free_large(obj->pages);
+			return -ENOMEM;
+		}
 
-	return roundup(size, PAGE_SIZE);
+		start_addr = obj->dma_addr;
+		while (i < nr_pages) {
+			obj->pages[i] = phys_to_page(start_addr);
+			start_addr += PAGE_SIZE;
+			i++;
+		}
+	} else {
+		obj->pages = dma_alloc_attrs(dev->dev, obj->size,
+					&obj->dma_addr, GFP_KERNEL,
+					&obj->dma_attrs);
+		if (!obj->pages) {
+			DRM_ERROR("failed to allocate buffer.\n");
+			return -ENOMEM;
+		}
+	}
+
+	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
+			(unsigned long)obj->dma_addr,
+			obj->size);
+
+	return 0;
 }
 
-static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
-					struct vm_area_struct *vma,
-					unsigned long f_vaddr,
-					pgoff_t page_offset)
+static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj)
 {
-	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
-	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
-	struct scatterlist *sgl;
-	unsigned long pfn;
-	int i;
+	struct drm_device *dev = obj->base.dev;
 
-	if (!buf->sgt)
-		return -EINTR;
-
-	if (page_offset >= (buf->size >> PAGE_SHIFT)) {
-		DRM_ERROR("invalid page offset\n");
-		return -EINVAL;
+	if (!obj->dma_addr) {
+		DRM_DEBUG_KMS("dma_addr is invalid.\n");
+		return;
 	}
 
-	sgl = buf->sgt->sgl;
-	for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
-		if (page_offset < (sgl->length >> PAGE_SHIFT))
-			break;
-		page_offset -=	(sgl->length >> PAGE_SHIFT);
-	}
+	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
+			(unsigned long)obj->dma_addr, obj->size);
 
-	pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
+	if (!is_drm_iommu_supported(dev)) {
+		dma_free_attrs(dev->dev, obj->size, obj->cookie,
+				(dma_addr_t)obj->dma_addr, &obj->dma_attrs);
+		drm_free_large(obj->pages);
+	} else
+		dma_free_attrs(dev->dev, obj->size, obj->pages,
+				(dma_addr_t)obj->dma_addr, &obj->dma_attrs);
 
-	return vm_insert_mixed(vma, f_vaddr, pfn);
+	obj->dma_addr = (dma_addr_t)NULL;
 }
 
 static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -131,11 +145,7 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
 
 void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
 {
-	struct drm_gem_object *obj;
-	struct exynos_drm_gem_buf *buf;
-
-	obj = &exynos_gem_obj->base;
-	buf = exynos_gem_obj->buffer;
+	struct drm_gem_object *obj = &exynos_gem_obj->base;
 
 	DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);
 
@@ -148,12 +158,9 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
 	if (obj->import_attach)
 		goto out;
 
-	exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
+	exynos_drm_free_buf(exynos_gem_obj);
 
 out:
-	exynos_drm_fini_buf(obj->dev, buf);
-	exynos_gem_obj->buffer = NULL;
-
 	drm_gem_free_mmap_offset(obj);
 
 	/* release file pointer to gem object. */
@@ -180,7 +187,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
 
 	drm_gem_object_unreference_unlocked(obj);
 
-	return exynos_gem_obj->buffer->size;
+	return exynos_gem_obj->size;
 }
 
@@ -193,7 +200,7 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
 
 	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
 	if (!exynos_gem_obj)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	exynos_gem_obj->size = size;
 	obj = &exynos_gem_obj->base;
@@ -202,7 +209,7 @@
 	if (ret < 0) {
 		DRM_ERROR("failed to initialize gem object\n");
 		kfree(exynos_gem_obj);
-		return NULL;
+		return ERR_PTR(ret);
 	}
 
 	DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
@@ -215,47 +222,35 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
 						unsigned long size)
 {
 	struct exynos_drm_gem_obj *exynos_gem_obj;
-	struct exynos_drm_gem_buf *buf;
 	int ret;
 
+	if (flags & ~(EXYNOS_BO_MASK)) {
+		DRM_ERROR("invalid flags.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	if (!size) {
 		DRM_ERROR("invalid size.\n");
 		return ERR_PTR(-EINVAL);
 	}
 
-	size = roundup_gem_size(size, flags);
-
-	ret = check_gem_flags(flags);
-	if (ret)
-		return ERR_PTR(ret);
-
-	buf = exynos_drm_init_buf(dev, size);
-	if (!buf)
-		return ERR_PTR(-ENOMEM);
+	size = roundup(size, PAGE_SIZE);
 
 	exynos_gem_obj = exynos_drm_gem_init(dev, size);
-	if (!exynos_gem_obj) {
-		ret = -ENOMEM;
-		goto err_fini_buf;
-	}
-
-	exynos_gem_obj->buffer = buf;
+	if (IS_ERR(exynos_gem_obj))
+		return exynos_gem_obj;
 
 	/* set memory type and cache attribute from user side. */
 	exynos_gem_obj->flags = flags;
 
-	ret = exynos_drm_alloc_buf(dev, buf, flags);
-	if (ret < 0)
-		goto err_gem_fini;
+	ret = exynos_drm_alloc_buf(exynos_gem_obj);
+	if (ret < 0) {
+		drm_gem_object_release(&exynos_gem_obj->base);
+		kfree(exynos_gem_obj);
+		return ERR_PTR(ret);
+	}
 
 	return exynos_gem_obj;
-
-err_gem_fini:
-	drm_gem_object_release(&exynos_gem_obj->base);
-	kfree(exynos_gem_obj);
-err_fini_buf:
-	exynos_drm_fini_buf(dev, buf);
-	return ERR_PTR(ret);
 }
 
 int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -294,7 +289,7 @@ dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
 
 	exynos_gem_obj = to_exynos_gem_obj(obj);
 
-	return &exynos_gem_obj->buffer->dma_addr;
+	return &exynos_gem_obj->dma_addr;
 }
 
 void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
@@ -322,7 +317,6 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
 				      struct vm_area_struct *vma)
 {
 	struct drm_device *drm_dev = exynos_gem_obj->base.dev;
-	struct exynos_drm_gem_buf *buffer;
 	unsigned long vm_size;
 	int ret;
 
@@ -331,19 +325,13 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
 
 	vm_size = vma->vm_end - vma->vm_start;
 
-	/*
-	 * a buffer contains information to physically continuous memory
-	 * allocated by user request or at framebuffer creation.
-	 */
-	buffer = exynos_gem_obj->buffer;
-
 	/* check if user-requested size is valid. */
-	if (vm_size > buffer->size)
+	if (vm_size > exynos_gem_obj->size)
 		return -EINVAL;
 
-	ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
-				buffer->dma_addr, buffer->size,
-				&buffer->dma_attrs);
+	ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem_obj->pages,
+				exynos_gem_obj->dma_addr, exynos_gem_obj->size,
+				&exynos_gem_obj->dma_attrs);
 	if (ret < 0) {
 		DRM_ERROR("failed to mmap.\n");
 		return ret;
@@ -378,103 +366,6 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
 
 	return 0;
 }
 
-struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
-{
-	struct vm_area_struct *vma_copy;
-
-	vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
-	if (!vma_copy)
-		return NULL;
-
-	if (vma->vm_ops && vma->vm_ops->open)
-		vma->vm_ops->open(vma);
-
-	if (vma->vm_file)
-		get_file(vma->vm_file);
-
-	memcpy(vma_copy, vma, sizeof(*vma));
-
-	vma_copy->vm_mm = NULL;
-	vma_copy->vm_next = NULL;
-	vma_copy->vm_prev = NULL;
-
-	return vma_copy;
-}
-
-void exynos_gem_put_vma(struct vm_area_struct *vma)
-{
-	if (!vma)
-		return;
-
-	if (vma->vm_ops && vma->vm_ops->close)
-		vma->vm_ops->close(vma);
-
-	if (vma->vm_file)
-		fput(vma->vm_file);
-
-	kfree(vma);
-}
-
-int exynos_gem_get_pages_from_userptr(unsigned long start,
-						unsigned int npages,
-						struct page **pages,
-						struct vm_area_struct *vma)
-{
-	int get_npages;
-
-	/* the memory region mmaped with VM_PFNMAP. */
-	if (vma_is_io(vma)) {
-		unsigned int i;
-
-		for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
-			unsigned long pfn;
-			int ret = follow_pfn(vma, start, &pfn);
-			if (ret)
-				return ret;
-
-			pages[i] = pfn_to_page(pfn);
-		}
-
-		if (i != npages) {
-			DRM_ERROR("failed to get user_pages.\n");
-			return -EINVAL;
-		}
-
-		return 0;
-	}
-
-	get_npages = get_user_pages(current, current->mm, start,
-					npages, 1, 1, pages, NULL);
-	get_npages = max(get_npages, 0);
-	if (get_npages != npages) {
-		DRM_ERROR("failed to get user_pages.\n");
-		while (get_npages)
-			put_page(pages[--get_npages]);
-		return -EFAULT;
-	}
-
-	return 0;
-}
-
-void exynos_gem_put_pages_to_userptr(struct page **pages,
-					unsigned int npages,
-					struct vm_area_struct *vma)
-{
-	if (!vma_is_io(vma)) {
-		unsigned int i;
-
-		for (i = 0; i < npages; i++) {
-			set_page_dirty_lock(pages[i]);
-
-			/*
-			 * undo the reference we took when populating
-			 * the table.
-			 */
-			put_page(pages[i]);
-		}
-	}
-}
-
 int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
 				struct sg_table *sgt,
 				enum dma_data_direction dir)
@@ -503,15 +394,6 @@ void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
 
 void exynos_drm_gem_free_object(struct drm_gem_object *obj)
 {
-	struct exynos_drm_gem_obj *exynos_gem_obj;
-	struct exynos_drm_gem_buf *buf;
-
-	exynos_gem_obj = to_exynos_gem_obj(obj);
-	buf = exynos_gem_obj->buffer;
-
-	if (obj->import_attach)
-		drm_prime_gem_destroy(obj, buf->sgt);
-
 	exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
 }
 
@@ -595,24 +477,34 @@ unlock:
 
 int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
-	struct drm_device *dev = obj->dev;
-	unsigned long f_vaddr;
+	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+	unsigned long pfn;
 	pgoff_t page_offset;
 	int ret;
 
 	page_offset = ((unsigned long)vmf->virtual_address -
 			vma->vm_start) >> PAGE_SHIFT;
-	f_vaddr = (unsigned long)vmf->virtual_address;
-
-	mutex_lock(&dev->struct_mutex);
-	ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
-	if (ret < 0)
-		DRM_ERROR("failed to map a buffer with user.\n");
+	if (page_offset >= (exynos_gem_obj->size >> PAGE_SHIFT)) {
+		DRM_ERROR("invalid page offset\n");
+		ret = -EINVAL;
+		goto out;
+	}
 
-	mutex_unlock(&dev->struct_mutex);
+	pfn = page_to_pfn(exynos_gem_obj->pages[page_offset]);
+	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
 
-	return convert_to_vm_err_msg(ret);
+out:
+	switch (ret) {
+	case 0:
+	case -ERESTARTSYS:
+	case -EINTR:
+		return VM_FAULT_NOPAGE;
+	case -ENOMEM:
+		return VM_FAULT_OOM;
+	default:
+		return VM_FAULT_SIGBUS;
+	}
 }
 
 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
@@ -631,11 +523,17 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 
 	obj = vma->vm_private_data;
 	exynos_gem_obj = to_exynos_gem_obj(obj);
 
-	ret = check_gem_flags(exynos_gem_obj->flags);
-	if (ret)
-		goto err_close_vm;
+	DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem_obj->flags);
 
-	update_vm_cache_attr(exynos_gem_obj, vma);
+	/* non-cachable as default. */
+	if (exynos_gem_obj->flags & EXYNOS_BO_CACHABLE)
+		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+	else if (exynos_gem_obj->flags & EXYNOS_BO_WC)
+		vma->vm_page_prot =
+			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+	else
+		vma->vm_page_prot =
+			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
 
 	ret = exynos_drm_gem_mmap_buffer(exynos_gem_obj, vma);
 	if (ret)
@@ -649,3 +547,76 @@ err_close_vm:
 
 	return ret;
 }
+
+/* low-level interface prime helpers */
+struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+	int npages;
+
+	npages = exynos_gem_obj->size >> PAGE_SHIFT;
+
+	return drm_prime_pages_to_sg(exynos_gem_obj->pages, npages);
+}
+
+struct drm_gem_object *
+exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
+				     struct dma_buf_attachment *attach,
+				     struct sg_table *sgt)
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+	int npages;
+	int ret;
+
+	exynos_gem_obj = exynos_drm_gem_init(dev, attach->dmabuf->size);
+	if (IS_ERR(exynos_gem_obj)) {
+		ret = PTR_ERR(exynos_gem_obj);
+		return ERR_PTR(ret);
+	}
+
+	exynos_gem_obj->dma_addr = sg_dma_address(sgt->sgl);
+
+	npages = exynos_gem_obj->size >> PAGE_SHIFT;
+	exynos_gem_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+	if (!exynos_gem_obj->pages) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem_obj->pages, NULL,
+			npages);
+	if (ret < 0)
+		goto err_free_large;
+
+	if (sgt->nents == 1) {
+		/* always physically continuous memory if sgt->nents is 1. */
+		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
+	} else {
+		/*
+		 * this case could be CONTIG or NONCONTIG type but for now
+		 * sets NONCONTIG.
+		 * TODO. we have to find a way that exporter can notify
+		 * the type of its own buffer to importer.
+		 */
+		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
+	}
+
+	return &exynos_gem_obj->base;
+
+err_free_large:
+	drm_free_large(exynos_gem_obj->pages);
+err:
+	drm_gem_object_release(&exynos_gem_obj->base);
+	kfree(exynos_gem_obj);
+	return ERR_PTR(ret);
+}
+
+void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
+{
+	return NULL;
+}
+
+void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+	/* Nothing to do */
+}
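
The sketch below is illustrative only and is not part of the patch. It shows how a hypothetical in-driver caller (example_alloc_scanout_buf() is an invented name) would use the reworked allocation path: exynos_drm_gem_create() now validates the EXYNOS_BO_* flags itself, rounds the size up to PAGE_SIZE, maps the CONTIG/WC flags onto DMA_ATTR_FORCE_CONTIGUOUS and DMA_ATTR_WRITE_COMBINE inside exynos_drm_alloc_buf(), and reports failure with ERR_PTR() instead of NULL.

/* Hypothetical caller, for illustration only -- not in this patch. */
static struct exynos_drm_gem_obj *
example_alloc_scanout_buf(struct drm_device *dev, unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;

	/*
	 * Request physically contiguous, write-combined backing storage;
	 * the size is rounded up to PAGE_SIZE internally.
	 */
	exynos_gem_obj = exynos_drm_gem_create(dev,
				EXYNOS_BO_CONTIG | EXYNOS_BO_WC, size);
	if (IS_ERR(exynos_gem_obj))
		/* -EINVAL for bad flags/size, -ENOMEM on allocation failure */
		DRM_ERROR("gem create failed: %ld\n",
				PTR_ERR(exynos_gem_obj));

	return exynos_gem_obj;
}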