Diffstat (limited to 'arch/tile/kernel/pci-dma.c')
-rw-r--r--  arch/tile/kernel/pci-dma.c | 36 ++++++++++++++++++++++--------------
1 file changed, 22 insertions(+), 14 deletions(-)
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index 09bb774b39cd..569bb6dd154a 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -213,10 +213,12 @@ static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 
 	for_each_sg(sglist, sg, nents, i) {
 		sg->dma_address = sg_phys(sg);
-		__dma_prep_pa_range(sg->dma_address, sg->length, direction);
 #ifdef CONFIG_NEED_SG_DMA_LENGTH
 		sg->dma_length = sg->length;
 #endif
+		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+			continue;
+		__dma_prep_pa_range(sg->dma_address, sg->length, direction);
 	}
 
 	return nents;
@@ -232,6 +234,8 @@ static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	BUG_ON(!valid_dma_direction(direction));
 	for_each_sg(sglist, sg, nents, i) {
 		sg->dma_address = sg_phys(sg);
+		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+			continue;
 		__dma_complete_pa_range(sg->dma_address, sg->length,
 					direction);
 	}
@@ -245,7 +249,8 @@ static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page,
 	BUG_ON(!valid_dma_direction(direction));
 
 	BUG_ON(offset + size > PAGE_SIZE);
-	__dma_prep_page(page, offset, size, direction);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		__dma_prep_page(page, offset, size, direction);
 
 	return page_to_pa(page) + offset;
 }
@@ -256,6 +261,9 @@ static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
 {
 	BUG_ON(!valid_dma_direction(direction));
 
+	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+		return;
+
 	__dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
 			    dma_address & (PAGE_SIZE - 1), size, direction);
 }
@@ -321,7 +329,7 @@ tile_dma_supported(struct device *dev, u64 mask)
 	return 1;
 }
 
-static struct dma_map_ops tile_default_dma_map_ops = {
+static const struct dma_map_ops tile_default_dma_map_ops = {
 	.alloc = tile_dma_alloc_coherent,
 	.free = tile_dma_free_coherent,
 	.map_page = tile_dma_map_page,
@@ -336,7 +344,7 @@ static struct dma_map_ops tile_default_dma_map_ops = {
 	.dma_supported = tile_dma_supported
 };
 
-struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops;
+const struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops;
 EXPORT_SYMBOL(tile_dma_map_ops);
 
 /* Generic PCI DMA mapping functions */
@@ -508,7 +516,7 @@ tile_pci_dma_supported(struct device *dev, u64 mask)
 	return 1;
 }
 
-static struct dma_map_ops tile_pci_default_dma_map_ops = {
+static const struct dma_map_ops tile_pci_default_dma_map_ops = {
 	.alloc = tile_pci_dma_alloc_coherent,
 	.free = tile_pci_dma_free_coherent,
 	.map_page = tile_pci_dma_map_page,
@@ -523,7 +531,7 @@ static struct dma_map_ops tile_pci_default_dma_map_ops = {
 	.dma_supported = tile_pci_dma_supported
 };
 
-struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops;
+const struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops;
 EXPORT_SYMBOL(gx_pci_dma_map_ops);
 
 /* PCI DMA mapping functions for legacy PCI devices */
@@ -544,7 +552,7 @@ static void tile_swiotlb_free_coherent(struct device *dev, size_t size,
 	swiotlb_free_coherent(dev, size, vaddr, dma_addr);
 }
 
-static struct dma_map_ops pci_swiotlb_dma_ops = {
+static const struct dma_map_ops pci_swiotlb_dma_ops = {
 	.alloc = tile_swiotlb_alloc_coherent,
 	.free = tile_swiotlb_free_coherent,
 	.map_page = swiotlb_map_page,
@@ -559,7 +567,7 @@ static struct dma_map_ops pci_swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
 };
 
-static struct dma_map_ops pci_hybrid_dma_ops = {
+static const struct dma_map_ops pci_hybrid_dma_ops = {
 	.alloc = tile_swiotlb_alloc_coherent,
 	.free = tile_swiotlb_free_coherent,
 	.map_page = tile_pci_dma_map_page,
@@ -574,18 +582,18 @@ static struct dma_map_ops pci_hybrid_dma_ops = {
 	.dma_supported = tile_pci_dma_supported
 };
 
-struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;
-struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops;
+const struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;
+const struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops;
 #else
-struct dma_map_ops *gx_legacy_pci_dma_map_ops;
-struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
+const struct dma_map_ops *gx_legacy_pci_dma_map_ops;
+const struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
 #endif
 EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops);
 EXPORT_SYMBOL(gx_hybrid_pci_dma_map_ops);
 
 int dma_set_mask(struct device *dev, u64 mask)
 {
-	struct dma_map_ops *dma_ops = get_dma_ops(dev);
+	const struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
 	/*
 	 * For PCI devices with 64-bit DMA addressing capability, promote
@@ -615,7 +623,7 @@ EXPORT_SYMBOL(dma_set_mask);
 #ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
 int dma_set_coherent_mask(struct device *dev, u64 mask)
 {
-	struct dma_map_ops *dma_ops = get_dma_ops(dev);
+	const struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
 	/*
 	 * For PCI devices with 64-bit DMA addressing capability, promote
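For context, the following is a minimal sketch, not part of this commit, of how a driver could use the DMA_ATTR_SKIP_CPU_SYNC attribute that the map/unmap paths above now honor. The attribute suppresses the implicit CPU cache maintenance at map/unmap time so the caller can perform explicit syncs itself, for example over only the portion of the buffer the device actually touches. The function and variable names (example_map, buf, len) are invented for illustration; the dma_*_attrs and dma_sync_* calls are the standard DMA-mapping API.

/*
 * Hypothetical caller: map a streaming buffer without the implicit
 * CPU cache sync, then sync explicitly before handing it to the device.
 */
#include <linux/dma-mapping.h>

static int example_map(struct device *dev, void *buf, size_t len)
{
	/* Map without the implicit CPU cache maintenance. */
	dma_addr_t handle = dma_map_single_attrs(dev, buf, len,
						 DMA_TO_DEVICE,
						 DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* Sync explicitly, only for the range the device will read. */
	dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);

	/* Unmap, again skipping the (already performed) CPU sync. */
	dma_unmap_single_attrs(dev, handle, len, DMA_TO_DEVICE,
			       DMA_ATTR_SKIP_CPU_SYNC);
	return 0;
}

On tile, with this patch applied, both attrs checks above cause __dma_prep_*/__dma_complete_* to be skipped for such a mapping, leaving cache maintenance entirely to the explicit dma_sync_* calls.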