Diffstat (limited to 'include/linux/dma-mapping.h')
-rw-r--r--   include/linux/dma-mapping.h   105
1 file changed, 76 insertions, 29 deletions
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 4a658de44ee9..f693aafe221f 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -117,14 +117,6 @@ dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
 		size_t size, enum dma_data_direction dir, unsigned long attrs);
 void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
 		enum dma_data_direction dir, unsigned long attrs);
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
-		enum dma_data_direction dir);
-void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
-		size_t size, enum dma_data_direction dir);
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-		    int nelems, enum dma_data_direction dir);
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-		       int nelems, enum dma_data_direction dir);
 void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		gfp_t flag, unsigned long attrs);
 void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
@@ -147,7 +139,6 @@ u64 dma_get_required_mask(struct device *dev);
 bool dma_addressing_limited(struct device *dev);
 size_t dma_max_mapping_size(struct device *dev);
 size_t dma_opt_mapping_size(struct device *dev);
-bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
 unsigned long dma_get_merge_boundary(struct device *dev);
 struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
 		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
@@ -195,22 +186,6 @@ static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 }
-static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
-		size_t size, enum dma_data_direction dir)
-{
-}
-static inline void dma_sync_single_for_device(struct device *dev,
-		dma_addr_t addr, size_t size, enum dma_data_direction dir)
-{
-}
-static inline void dma_sync_sg_for_cpu(struct device *dev,
-		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
-{
-}
-static inline void dma_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
-{
-}
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	return -ENOMEM;
@@ -277,10 +252,6 @@ static inline size_t dma_opt_mapping_size(struct device *dev)
 {
 	return 0;
 }
-static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
-{
-	return false;
-}
 static inline unsigned long dma_get_merge_boundary(struct device *dev)
 {
 	return 0;
@@ -310,6 +281,82 @@ static inline int dma_mmap_noncontiguous(struct device *dev,
 }
 #endif /* CONFIG_HAS_DMA */
 
+#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
+void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
+		enum dma_data_direction dir);
+void __dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir);
+void __dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+		int nelems, enum dma_data_direction dir);
+void __dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+		int nelems, enum dma_data_direction dir);
+bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr);
+
+static inline bool dma_dev_need_sync(const struct device *dev)
+{
+	/* Always call DMA sync operations when debugging is enabled */
+	return !dev->dma_skip_sync || IS_ENABLED(CONFIG_DMA_API_DEBUG);
+}
+
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir)
+{
+	if (dma_dev_need_sync(dev))
+		__dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+	if (dma_dev_need_sync(dev))
+		__dma_sync_single_for_device(dev, addr, size, dir);
+}
+
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
+{
+	if (dma_dev_need_sync(dev))
+		__dma_sync_sg_for_cpu(dev, sg, nelems, dir);
+}
+
+static inline void dma_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
+{
+	if (dma_dev_need_sync(dev))
+		__dma_sync_sg_for_device(dev, sg, nelems, dir);
+}
+
+static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+	return dma_dev_need_sync(dev) ? __dma_need_sync(dev, dma_addr) : false;
+}
+#else /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
+static inline bool dma_dev_need_sync(const struct device *dev)
+{
+	return false;
+}
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_sync_single_for_device(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
+{
+}
+static inline void dma_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
+{
+}
+static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+	return false;
+}
+#endif /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
+
 struct page *dma_alloc_pages(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
 void dma_free_pages(struct device *dev, size_t size, struct page *page,
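
For context, a minimal usage sketch of the callers this change affects. It is not part of this commit: example_rx() and its buf/len parameters are hypothetical, and only documented DMA API calls are used. With CONFIG_DMA_NEED_SYNC enabled, each dma_sync_*() wrapper above first checks dma_dev_need_sync(), so a device whose dma_skip_sync flag is set (and with CONFIG_DMA_API_DEBUG off) never reaches the out-of-line __dma_sync_*() implementation.

/*
 * Hypothetical driver sketch, not part of this commit: a streaming-DMA
 * receive path that goes through the inline wrappers added above.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>

static int example_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/*
	 * The device writes into the buffer.  Before the CPU reads the data,
	 * ownership is transferred back; dma_sync_single_for_cpu() now only
	 * calls __dma_sync_single_for_cpu() when dma_dev_need_sync() is true.
	 */
	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);

	/* ... CPU processes the received data ... */

	/* Hand the buffer back to the device for the next transfer. */
	dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);

	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
	return 0;
}

The effect of the restructuring is that, for devices that do not need syncs, each of these hot-path calls collapses to a single flag test, while CONFIG_DMA_API_DEBUG keeps forcing the full path so debugging still observes every sync.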