Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/c-r4k.c            | 17
-rw-r--r--  arch/mips/mm/dma-noncoherent.c  | 79
2 files changed, 24 insertions, 72 deletions
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index a9ef057c79fe..05bd77727fb9 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1955,22 +1955,21 @@ void r4k_cache_init(void)
 	__flush_icache_user_range	= r4k_flush_icache_user_range;
 	__local_flush_icache_user_range	= local_r4k_flush_icache_user_range;
 
-#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
-# if defined(CONFIG_DMA_PERDEV_COHERENT)
-	if (0) {
-# else
-	if ((coherentio == IO_COHERENCE_ENABLED) ||
-	    ((coherentio == IO_COHERENCE_DEFAULT) && hw_coherentio)) {
-# endif
+#ifdef CONFIG_DMA_NONCOHERENT
+#ifdef CONFIG_DMA_MAYBE_COHERENT
+	if (coherentio == IO_COHERENCE_ENABLED ||
+	    (coherentio == IO_COHERENCE_DEFAULT && hw_coherentio)) {
 		_dma_cache_wback_inv	= (void *)cache_noop;
 		_dma_cache_wback	= (void *)cache_noop;
 		_dma_cache_inv		= (void *)cache_noop;
-	} else {
+	} else
+#endif /* CONFIG_DMA_MAYBE_COHERENT */
+	{
 		_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
 		_dma_cache_wback	= r4k_dma_cache_wback_inv;
 		_dma_cache_inv		= r4k_dma_cache_inv;
 	}
-#endif
+#endif /* CONFIG_DMA_NONCOHERENT */
 
 	build_clear_page();
 	build_copy_page();
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index 2aca1236af36..e6c9485cadcf 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -14,26 +14,6 @@
 #include <asm/dma-coherence.h>
 #include <asm/io.h>
 
-#ifdef CONFIG_DMA_PERDEV_COHERENT
-static inline int dev_is_coherent(struct device *dev)
-{
-	return dev->archdata.dma_coherent;
-}
-#else
-static inline int dev_is_coherent(struct device *dev)
-{
-	switch (coherentio) {
-	default:
-	case IO_COHERENCE_DEFAULT:
-		return hw_coherentio;
-	case IO_COHERENCE_ENABLED:
-		return 1;
-	case IO_COHERENCE_DISABLED:
-		return 0;
-	}
-}
-#endif /* CONFIG_DMA_PERDEV_COHERENT */
-
 /*
  * The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively
  * fill random cachelines with stale data at any time, requiring an extra
@@ -49,9 +29,6 @@ static inline int dev_is_coherent(struct device *dev)
  */
 static inline bool cpu_needs_post_dma_flush(struct device *dev)
 {
-	if (dev_is_coherent(dev))
-		return false;
-
 	switch (boot_cpu_type()) {
 	case CPU_R10000:
 	case CPU_R12000:
@@ -72,11 +49,8 @@ void *arch_dma_alloc(struct device *dev, size_t size,
 {
 	void *ret;
 
-	ret = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
-	if (!ret)
-		return NULL;
-
-	if (!dev_is_coherent(dev) && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
+	ret = dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
+	if (!ret && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
 		dma_cache_wback_inv((unsigned long) ret, size);
 		ret = (void *)UNCAC_ADDR(ret);
 	}
@@ -87,43 +61,24 @@ void *arch_dma_alloc(struct device *dev, size_t size,
 void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t dma_addr, unsigned long attrs)
 {
-	if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !dev_is_coherent(dev))
+	if (!(attrs & DMA_ATTR_NON_CONSISTENT))
 		cpu_addr = (void *)CAC_ADDR((unsigned long)cpu_addr);
-	dma_direct_free(dev, size, cpu_addr, dma_addr, attrs);
+	dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
 }
 
-int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
-		void *cpu_addr, dma_addr_t dma_addr, size_t size,
-		unsigned long attrs)
+long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
+		dma_addr_t dma_addr)
 {
-	unsigned long user_count = vma_pages(vma);
-	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	unsigned long addr = (unsigned long)cpu_addr;
-	unsigned long off = vma->vm_pgoff;
-	unsigned long pfn;
-	int ret = -ENXIO;
-
-	if (!dev_is_coherent(dev))
-		addr = CAC_ADDR(addr);
-
-	pfn = page_to_pfn(virt_to_page((void *)addr));
+	unsigned long addr = CAC_ADDR((unsigned long)cpu_addr);
+	return page_to_pfn(virt_to_page((void *)addr));
+}
 
+pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
+		unsigned long attrs)
+{
 	if (attrs & DMA_ATTR_WRITE_COMBINE)
-		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-	else
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
-		return ret;
-
-	if (off < count && user_count <= (count - off)) {
-		ret = remap_pfn_range(vma, vma->vm_start,
-				      pfn + off,
-				      user_count << PAGE_SHIFT,
-				      vma->vm_page_prot);
-	}
-
-	return ret;
+		return pgprot_writecombine(prot);
+	return pgprot_noncached(prot);
 }
 
 static inline void dma_sync_virt(void *addr, size_t size,
@@ -187,8 +142,7 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
 void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
 		size_t size, enum dma_data_direction dir)
 {
-	if (!dev_is_coherent(dev))
-		dma_sync_phys(paddr, size, dir);
+	dma_sync_phys(paddr, size, dir);
 }
 
 void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
@@ -203,6 +157,5 @@ void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 {
 	BUG_ON(direction == DMA_NONE);
 
-	if (!dev_is_coherent(dev))
-		dma_sync_virt(vaddr, size, direction);
+	dma_sync_virt(vaddr, size, direction);
 }