Diffstat (limited to 'drivers/dma-buf/dma-buf.c')
-rw-r--r--	drivers/dma-buf/dma-buf.c	321
1 file changed, 237 insertions, 84 deletions
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index e6f36c014c4c..757c0fb77a6c 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -95,10 +95,11 @@ static int dma_buf_file_release(struct inode *inode, struct file *file)
 		return -EINVAL;
 
 	dmabuf = file->private_data;
-
-	mutex_lock(&db_list.lock);
-	list_del(&dmabuf->list_node);
-	mutex_unlock(&db_list.lock);
+	if (dmabuf) {
+		mutex_lock(&db_list.lock);
+		list_del(&dmabuf->list_node);
+		mutex_unlock(&db_list.lock);
+	}
 
 	return 0;
 }
@@ -130,6 +131,7 @@ static struct file_system_type dma_buf_fs_type = {
 static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
 {
 	struct dma_buf *dmabuf;
+	int ret;
 
 	if (!is_dma_buf_file(file))
 		return -EINVAL;
@@ -145,7 +147,11 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
 	    dmabuf->size >> PAGE_SHIFT)
 		return -EINVAL;
 
-	return dmabuf->ops->mmap(dmabuf, vma);
+	dma_resv_lock(dmabuf->resv, NULL);
+	ret = dmabuf->ops->mmap(dmabuf, vma);
+	dma_resv_unlock(dmabuf->resv);
+
+	return ret;
 }
 
 static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
@@ -523,17 +529,17 @@ static inline int is_dma_buf_file(struct file *file)
 	return file->f_op == &dma_buf_fops;
 }
 
-static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
+static struct file *dma_buf_getfile(size_t size, int flags)
 {
 	static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
-	struct file *file;
 	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
+	struct file *file;
 
 	if (IS_ERR(inode))
 		return ERR_CAST(inode);
 
-	inode->i_size = dmabuf->size;
-	inode_set_bytes(inode, dmabuf->size);
+	inode->i_size = size;
+	inode_set_bytes(inode, size);
 
 	/*
 	 * The ->i_ino acquired from get_next_ino() is not unique thus
@@ -547,8 +553,6 @@ static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
 				 flags, &dma_buf_fops);
 	if (IS_ERR(file))
 		goto err_alloc_file;
-	file->private_data = dmabuf;
-	file->f_path.dentry->d_fsdata = dmabuf;
 
 	return file;
 
@@ -614,19 +618,11 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	size_t alloc_size = sizeof(struct dma_buf);
 	int ret;
 
-	if (!exp_info->resv)
-		alloc_size += sizeof(struct dma_resv);
-	else
-		/* prevent &dma_buf[1] == dma_buf->resv */
-		alloc_size += 1;
-
-	if (WARN_ON(!exp_info->priv
-			  || !exp_info->ops
-			  || !exp_info->ops->map_dma_buf
-			  || !exp_info->ops->unmap_dma_buf
-			  || !exp_info->ops->release)) {
+	if (WARN_ON(!exp_info->priv || !exp_info->ops
+		    || !exp_info->ops->map_dma_buf
+		    || !exp_info->ops->unmap_dma_buf
+		    || !exp_info->ops->release))
 		return ERR_PTR(-EINVAL);
-	}
 
 	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
 		    (exp_info->ops->pin || exp_info->ops->unpin)))
@@ -638,10 +634,21 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	if (!try_module_get(exp_info->owner))
 		return ERR_PTR(-ENOENT);
 
+	file = dma_buf_getfile(exp_info->size, exp_info->flags);
+	if (IS_ERR(file)) {
+		ret = PTR_ERR(file);
+		goto err_module;
+	}
+
+	if (!exp_info->resv)
+		alloc_size += sizeof(struct dma_resv);
+	else
+		/* prevent &dma_buf[1] == dma_buf->resv */
+		alloc_size += 1;
 	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
 	if (!dmabuf) {
 		ret = -ENOMEM;
-		goto err_file;
+		goto err_file;
 	}
 
 	dmabuf->priv = exp_info->priv;
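The mmap hunks above move the reservation lock into the core: dma_buf_mmap_internal() (and, further down, dma_buf_mmap()) now wrap the exporter's &dma_buf_ops.mmap callback in dma_resv_lock()/dma_resv_unlock(). A minimal sketch of what that implies for an exporter; my_buffer and its pfn field are hypothetical, only the locking behaviour is taken from this patch:

    /* Hypothetical exporter: .mmap is now entered with dmabuf->resv held. */
    static int my_exporter_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
    {
            struct my_buffer *buf = dmabuf->priv;   /* hypothetical private data */

            /* The core took the lock for us; taking it again would deadlock. */
            dma_resv_assert_held(dmabuf->resv);

            return remap_pfn_range(vma, vma->vm_start, buf->pfn,
                                   vma->vm_end - vma->vm_start,
                                   vma->vm_page_prot);
    }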
@@ -653,44 +660,35 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	init_waitqueue_head(&dmabuf->poll);
 	dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
 	dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
+	INIT_LIST_HEAD(&dmabuf->attachments);
 
 	if (!resv) {
-		resv = (struct dma_resv *)&dmabuf[1];
-		dma_resv_init(resv);
+		dmabuf->resv = (struct dma_resv *)&dmabuf[1];
+		dma_resv_init(dmabuf->resv);
+	} else {
+		dmabuf->resv = resv;
 	}
-	dmabuf->resv = resv;
 
-	file = dma_buf_getfile(dmabuf, exp_info->flags);
-	if (IS_ERR(file)) {
-		ret = PTR_ERR(file);
+	ret = dma_buf_stats_setup(dmabuf, file);
+	if (ret)
 		goto err_dmabuf;
-	}
 
+	file->private_data = dmabuf;
+	file->f_path.dentry->d_fsdata = dmabuf;
 	dmabuf->file = file;
 
-	mutex_init(&dmabuf->lock);
-	INIT_LIST_HEAD(&dmabuf->attachments);
-
 	mutex_lock(&db_list.lock);
 	list_add(&dmabuf->list_node, &db_list.head);
 	mutex_unlock(&db_list.lock);
 
-	ret = dma_buf_stats_setup(dmabuf);
-	if (ret)
-		goto err_sysfs;
-
 	return dmabuf;
 
-err_sysfs:
-	/*
-	 * Set file->f_path.dentry->d_fsdata to NULL so that when
-	 * dma_buf_release() gets invoked by dentry_ops, it exits
-	 * early before calling the release() dma_buf op.
-	 */
-	file->f_path.dentry->d_fsdata = NULL;
-	fput(file);
 err_dmabuf:
+	if (!resv)
+		dma_resv_fini(dmabuf->resv);
 	kfree(dmabuf);
+err_file:
+	fput(file);
 err_module:
 	module_put(exp_info->owner);
 	return ERR_PTR(ret);
@@ -807,6 +805,70 @@ static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
 }
 
 /**
+ * DOC: locking convention
+ *
+ * In order to avoid deadlock situations between dma-buf exports and importers,
+ * all dma-buf API users must follow the common dma-buf locking convention.
+ *
+ * Convention for importers
+ *
+ * 1. Importers must hold the dma-buf reservation lock when calling these
+ *    functions:
+ *
+ *     - dma_buf_pin()
+ *     - dma_buf_unpin()
+ *     - dma_buf_map_attachment()
+ *     - dma_buf_unmap_attachment()
+ *     - dma_buf_vmap()
+ *     - dma_buf_vunmap()
+ *
+ * 2. Importers must not hold the dma-buf reservation lock when calling these
+ *    functions:
+ *
+ *     - dma_buf_attach()
+ *     - dma_buf_dynamic_attach()
+ *     - dma_buf_detach()
+ *     - dma_buf_export()
+ *     - dma_buf_fd()
+ *     - dma_buf_get()
+ *     - dma_buf_put()
+ *     - dma_buf_mmap()
+ *     - dma_buf_begin_cpu_access()
+ *     - dma_buf_end_cpu_access()
+ *     - dma_buf_map_attachment_unlocked()
+ *     - dma_buf_unmap_attachment_unlocked()
+ *     - dma_buf_vmap_unlocked()
+ *     - dma_buf_vunmap_unlocked()
+ *
+ * Convention for exporters
+ *
+ * 1. These &dma_buf_ops callbacks are invoked with unlocked dma-buf
+ *    reservation and exporter can take the lock:
+ *
+ *     - &dma_buf_ops.attach()
+ *     - &dma_buf_ops.detach()
+ *     - &dma_buf_ops.release()
+ *     - &dma_buf_ops.begin_cpu_access()
+ *     - &dma_buf_ops.end_cpu_access()
+ *
+ * 2. These &dma_buf_ops callbacks are invoked with locked dma-buf
+ *    reservation and exporter can't take the lock:
+ *
+ *     - &dma_buf_ops.pin()
+ *     - &dma_buf_ops.unpin()
+ *     - &dma_buf_ops.map_dma_buf()
+ *     - &dma_buf_ops.unmap_dma_buf()
+ *     - &dma_buf_ops.mmap()
+ *     - &dma_buf_ops.vmap()
+ *     - &dma_buf_ops.vunmap()
+ *
+ * 3. Exporters must hold the dma-buf reservation lock when calling these
+ *    functions:
+ *
+ *     - dma_buf_move_notify()
+ */
+
+/**
  * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
  * @dmabuf:		[in]	buffer to attach device to.
  * @dev:		[in]	device to be attached.
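The locking-convention DOC block added above is the heart of this patch. For illustration, an importer-side sketch of those rules; dmabuf, dev and the error-handling shape are placeholders, only the lock placement follows the documented convention:

    struct dma_buf_attachment *attach;
    struct sg_table *sgt;

    attach = dma_buf_attach(dmabuf, dev);           /* rule 2: called unlocked */
    if (IS_ERR(attach))
            return PTR_ERR(attach);

    dma_resv_lock(dmabuf->resv, NULL);              /* rule 1: importer takes the lock */
    sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
    dma_resv_unlock(dmabuf->resv);

    if (IS_ERR(sgt)) {
            dma_buf_detach(dmabuf, attach);         /* rule 2: called unlocked */
            return PTR_ERR(sgt);
    }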
@@ -870,8 +932,8 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
 	    dma_buf_is_dynamic(dmabuf)) {
 		struct sg_table *sgt;
 
+		dma_resv_lock(attach->dmabuf->resv, NULL);
 		if (dma_buf_is_dynamic(attach->dmabuf)) {
-			dma_resv_lock(attach->dmabuf->resv, NULL);
 			ret = dmabuf->ops->pin(attach);
 			if (ret)
 				goto err_unlock;
@@ -884,8 +946,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
 			ret = PTR_ERR(sgt);
 			goto err_unpin;
 		}
-		if (dma_buf_is_dynamic(attach->dmabuf))
-			dma_resv_unlock(attach->dmabuf->resv);
+		dma_resv_unlock(attach->dmabuf->resv);
 		attach->sgt = sgt;
 		attach->dir = DMA_BIDIRECTIONAL;
 	}
@@ -901,8 +962,7 @@ err_unpin:
 		dmabuf->ops->unpin(attach);
 
 err_unlock:
-	if (dma_buf_is_dynamic(attach->dmabuf))
-		dma_resv_unlock(attach->dmabuf->resv);
+	dma_resv_unlock(attach->dmabuf->resv);
 
 	dma_buf_detach(dmabuf, attach);
 	return ERR_PTR(ret);
@@ -945,24 +1005,22 @@ static void __unmap_dma_buf(struct dma_buf_attachment *attach,
  */
 void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
 {
-	if (WARN_ON(!dmabuf || !attach))
+	if (WARN_ON(!dmabuf || !attach || dmabuf != attach->dmabuf))
 		return;
 
+	dma_resv_lock(dmabuf->resv, NULL);
+
 	if (attach->sgt) {
-		if (dma_buf_is_dynamic(attach->dmabuf))
-			dma_resv_lock(attach->dmabuf->resv, NULL);
-
 		__unmap_dma_buf(attach, attach->sgt, attach->dir);
 
-		if (dma_buf_is_dynamic(attach->dmabuf)) {
+		if (dma_buf_is_dynamic(attach->dmabuf))
 			dmabuf->ops->unpin(attach);
-			dma_resv_unlock(attach->dmabuf->resv);
-		}
 	}
-
-	dma_resv_lock(dmabuf->resv, NULL);
 	list_del(&attach->node);
+
 	dma_resv_unlock(dmabuf->resv);
+
 	if (dmabuf->ops->detach)
 		dmabuf->ops->detach(dmabuf, attach);
@@ -1053,8 +1111,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 	if (WARN_ON(!attach || !attach->dmabuf))
 		return ERR_PTR(-EINVAL);
 
-	if (dma_buf_attachment_is_dynamic(attach))
-		dma_resv_assert_held(attach->dmabuf->resv);
+	dma_resv_assert_held(attach->dmabuf->resv);
 
 	if (attach->sgt) {
 		/*
@@ -1069,7 +1126,6 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 	}
 
 	if (dma_buf_is_dynamic(attach->dmabuf)) {
-		dma_resv_assert_held(attach->dmabuf->resv);
-
 		if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
 			r = attach->dmabuf->ops->pin(attach);
 			if (r)
@@ -1112,6 +1168,34 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);
 
 /**
+ * dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment;
+ * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
+ * dma_buf_ops.
+ * @attach:	[in]	attachment whose scatterlist is to be returned
+ * @direction:	[in]	direction of DMA transfer
+ *
+ * Unlocked variant of dma_buf_map_attachment().
+ */
+struct sg_table *
+dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
+				enum dma_data_direction direction)
+{
+	struct sg_table *sg_table;
+
+	might_sleep();
+
+	if (WARN_ON(!attach || !attach->dmabuf))
+		return ERR_PTR(-EINVAL);
+
+	dma_resv_lock(attach->dmabuf->resv, NULL);
+	sg_table = dma_buf_map_attachment(attach, direction);
+	dma_resv_unlock(attach->dmabuf->resv);
+
+	return sg_table;
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, DMA_BUF);
+
+/**
  * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;might
  * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
  * dma_buf_ops.
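As the kernel-doc above says, the new *_unlocked variants are thin wrappers that take dmabuf->resv around the locked calls. A short usage sketch of the map/unmap wrapper pair added in this patch (assumed context: an importer holding no dma-buf locks, attach an existing attachment; the unmap counterpart appears just below):

    struct sg_table *sgt;

    sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
    if (IS_ERR(sgt))
            return PTR_ERR(sgt);

    /* ... hand the sg_table to the device ... */

    dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_TO_DEVICE);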
@@ -1130,15 +1214,11 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
 	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
 		return;
 
-	if (dma_buf_attachment_is_dynamic(attach))
-		dma_resv_assert_held(attach->dmabuf->resv);
+	dma_resv_assert_held(attach->dmabuf->resv);
 
 	if (attach->sgt == sg_table)
 		return;
 
-	if (dma_buf_is_dynamic(attach->dmabuf))
-		dma_resv_assert_held(attach->dmabuf->resv);
-
 	__unmap_dma_buf(attach, sg_table, direction);
 
 	if (dma_buf_is_dynamic(attach->dmabuf) &&
@@ -1148,11 +1228,36 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
 EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);
 
 /**
+ * dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the buffer;might
+ * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
+ * dma_buf_ops.
+ * @attach:	[in]	attachment to unmap buffer from
+ * @sg_table:	[in]	scatterlist info of the buffer to unmap
+ * @direction:	[in]	direction of DMA transfer
+ *
+ * Unlocked variant of dma_buf_unmap_attachment().
+ */
+void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
+				       struct sg_table *sg_table,
+				       enum dma_data_direction direction)
+{
+	might_sleep();
+
+	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
+		return;
+
+	dma_resv_lock(attach->dmabuf->resv, NULL);
+	dma_buf_unmap_attachment(attach, sg_table, direction);
+	dma_resv_unlock(attach->dmabuf->resv);
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, DMA_BUF);
+
+/**
  * dma_buf_move_notify - notify attachments that DMA-buf is moving
  *
  * @dmabuf:	[in]	buffer which is moving
  *
- * Informs all attachmenst that they need to destroy and recreated all their
+ * Informs all attachments that they need to destroy and recreate all their
  * mappings.
  */
 void dma_buf_move_notify(struct dma_buf *dmabuf)
@@ -1170,11 +1275,11 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
 /**
  * DOC: cpu access
  *
- * There are mutliple reasons for supporting CPU access to a dma buffer object:
+ * There are multiple reasons for supporting CPU access to a dma buffer object:
  *
  * - Fallback operations in the kernel, for example when a device is connected
  *   over USB and the kernel needs to shuffle the data around first before
- *   sending it away. Cache coherency is handled by braketing any transactions
+ *   sending it away. Cache coherency is handled by bracketing any transactions
  *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access()
  *   access.
 *
@@ -1201,7 +1306,7 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
 *   replace ION buffers mmap support was needed.
 *
 *   There is no special interfaces, userspace simply calls mmap on the dma-buf
- *   fd. But like for CPU access there's a need to braket the actual access,
+ *   fd. But like for CPU access there's a need to bracket the actual access,
 *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
 *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
 *   be restarted.
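The DOC text above notes that userspace must bracket CPU access to an mmap()ed dma-buf with DMA_BUF_IOCTL_SYNC, and restart the ioctl on -EAGAIN or -EINTR. A minimal userspace sketch of that retry loop, using only the uapi from <linux/dma-buf.h>:

    #include <linux/dma-buf.h>
    #include <sys/ioctl.h>
    #include <errno.h>

    static int dmabuf_sync(int fd, __u64 flags)
    {
            struct dma_buf_sync sync = { .flags = flags };
            int ret;

            /* DMA_BUF_IOCTL_SYNC must be restarted on EAGAIN/EINTR */
            do {
                    ret = ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
            } while (ret == -1 && (errno == EAGAIN || errno == EINTR));

            return ret;
    }

    /* bracket a CPU read of the mapping: */
    dmabuf_sync(fd, DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ);
    /* ... access the mmap()ed memory ... */
    dmabuf_sync(fd, DMA_BUF_SYNC_END | DMA_BUF_SYNC_READ);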
@@ -1275,10 +1380,10 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 * preparations. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
- * @direction:	[in]	length of range for cpu access.
+ * @direction:	[in]	direction of access.
 *
 * After the cpu access is complete the caller should call
- * dma_buf_end_cpu_access(). Only when cpu access is braketed by both calls is
+ * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
 * it guaranteed to be coherent with other DMA access.
 *
 * This function will also wait for any DMA transactions tracked through
@@ -1318,7 +1423,7 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF);
 * actions. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
- * @direction:	[in]	length of range for cpu access.
+ * @direction:	[in]	direction of access.
 *
 * This terminates CPU access started with dma_buf_begin_cpu_access().
 *
@@ -1358,6 +1463,8 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
 int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
 		 unsigned long pgoff)
 {
+	int ret;
+
 	if (WARN_ON(!dmabuf || !vma))
 		return -EINVAL;
@@ -1378,7 +1485,11 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
 	vma_set_file(vma, dmabuf->file);
 	vma->vm_pgoff = pgoff;
 
-	return dmabuf->ops->mmap(dmabuf, vma);
+	dma_resv_lock(dmabuf->resv, NULL);
+	ret = dmabuf->ops->mmap(dmabuf, vma);
+	dma_resv_unlock(dmabuf->resv);
+
+	return ret;
 }
 EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
 
@@ -1401,42 +1512,68 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
 int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
 {
 	struct iosys_map ptr;
-	int ret = 0;
+	int ret;
 
 	iosys_map_clear(map);
 
 	if (WARN_ON(!dmabuf))
 		return -EINVAL;
 
+	dma_resv_assert_held(dmabuf->resv);
+
 	if (!dmabuf->ops->vmap)
 		return -EINVAL;
 
-	mutex_lock(&dmabuf->lock);
 	if (dmabuf->vmapping_counter) {
 		dmabuf->vmapping_counter++;
 		BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
 		*map = dmabuf->vmap_ptr;
-		goto out_unlock;
+		return 0;
 	}
 
 	BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));
 
 	ret = dmabuf->ops->vmap(dmabuf, &ptr);
 	if (WARN_ON_ONCE(ret))
-		goto out_unlock;
+		return ret;
 
 	dmabuf->vmap_ptr = ptr;
 	dmabuf->vmapping_counter = 1;
 
 	*map = dmabuf->vmap_ptr;
 
-out_unlock:
-	mutex_unlock(&dmabuf->lock);
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);
 
 /**
+ * dma_buf_vmap_unlocked - Create virtual mapping for the buffer object into kernel
+ * address space. Same restrictions as for vmap and friends apply.
+ * @dmabuf:	[in]	buffer to vmap
+ * @map:	[out]	returns the vmap pointer
+ *
+ * Unlocked version of dma_buf_vmap()
+ *
+ * Returns 0 on success, or a negative errno code otherwise.
+ */
+int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
+{
+	int ret;
+
+	iosys_map_clear(map);
+
+	if (WARN_ON(!dmabuf))
+		return -EINVAL;
+
+	dma_resv_lock(dmabuf->resv, NULL);
+	ret = dma_buf_vmap(dmabuf, map);
+	dma_resv_unlock(dmabuf->resv);
+
+	return ret;
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, DMA_BUF);
+
+/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @map:	[in]	vmap pointer to vunmap
@@ -1446,20 +1583,36 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
 	if (WARN_ON(!dmabuf))
 		return;
 
+	dma_resv_assert_held(dmabuf->resv);
+
 	BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
 	BUG_ON(dmabuf->vmapping_counter == 0);
 	BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));
 
-	mutex_lock(&dmabuf->lock);
 	if (--dmabuf->vmapping_counter == 0) {
 		if (dmabuf->ops->vunmap)
 			dmabuf->ops->vunmap(dmabuf, map);
 		iosys_map_clear(&dmabuf->vmap_ptr);
 	}
-	mutex_unlock(&dmabuf->lock);
 }
 EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
 
+/**
+ * dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap.
+ * @dmabuf:	[in]	buffer to vunmap
+ * @map:	[in]	vmap pointer to vunmap
+ */
+void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
+{
+	if (WARN_ON(!dmabuf))
+		return;
+
+	dma_resv_lock(dmabuf->resv, NULL);
+	dma_buf_vunmap(dmabuf, map);
+	dma_resv_unlock(dmabuf->resv);
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, DMA_BUF);
+
 #ifdef CONFIG_DEBUG_FS
 static int dma_buf_debug_show(struct seq_file *s, void *unused)
 {
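Taken together, the vmap changes leave a kernel importer two correct patterns for CPU access: call dma_buf_vmap()/dma_buf_vunmap() with dmabuf->resv already held, or use the unlocked pair added above. A sketch of the latter; dmabuf, data and len are placeholder variables, the helper calls are the real <linux/iosys-map.h> API:

    struct iosys_map map;
    int ret;

    ret = dma_buf_vmap_unlocked(dmabuf, &map);  /* takes and drops dmabuf->resv */
    if (ret)
            return ret;

    /* CPU write through the mapping, valid for both vaddr and I/O memory */
    iosys_map_memcpy_to(&map, 0, data, len);

    dma_buf_vunmap_unlocked(dmabuf, &map);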