Diffstat (limited to 'arch/arc/mm')
-rw-r--r--  arch/arc/mm/dma.c      3
-rw-r--r--  arch/arc/mm/fault.c    2
-rw-r--r--  arch/arc/mm/ioremap.c  2
3 files changed, 4 insertions, 3 deletions
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 73d7e4c75b7d..ab74b5d9186c 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -92,7 +92,8 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, struct dma_attrs *attrs)
{
- struct page *page = virt_to_page(dma_handle);
+ phys_addr_t paddr = plat_dma_to_phys(dev, dma_handle);
+ struct page *page = virt_to_page(paddr);
int is_non_coh = 1;

is_non_coh = dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) ||
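
The dma.c hunk fixes an address-space mixup: dma_handle is a bus/DMA address, not a kernel virtual address, so passing it straight to virt_to_page() resolved the wrong page whenever the platform's DMA and physical address spaces differ. The fix first undoes the phys-to-dma translation via the platform hook. A minimal sketch of the corrected lookup chain follows; dma_handle_to_page() is a hypothetical helper name for illustration only, and passing the physical address to virt_to_page() mirrors what the diff itself does on ARC:

#include <linux/dma-mapping.h>	/* dma_addr_t, struct device */
#include <linux/mm.h>		/* struct page, virt_to_page() */

/* Hypothetical helper showing the corrected translation chain:
 * bus address (dma_addr_t) -> physical (phys_addr_t) -> struct page.
 * plat_dma_to_phys() is the platform hook that reverses the
 * phys->dma translation applied at allocation time.
 */
static struct page *dma_handle_to_page(struct device *dev, dma_addr_t handle)
{
	phys_addr_t paddr = plat_dma_to_phys(dev, handle);

	/* mirrors the diff: page lookup from paddr on ARC */
	return virt_to_page(paddr);
}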
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index af63f4a13e60..e94e5aa33985 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -137,7 +137,7 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags);

/* If Pagefault was interrupted by SIGKILL, exit page fault "early" */
if (unlikely(fatal_signal_pending(current))) {
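
The fault.c hunk tracks a tree-wide API change: handle_mm_fault() no longer takes an explicit mm_struct, since it can derive it from vma->vm_mm. A minimal sketch of the updated calling convention, assuming the caller already holds mmap_sem for read (fault_sketch() is a hypothetical name; error handling is pared down for brevity):

#include <linux/mm.h>
#include <linux/sched.h>

/* Hypothetical caller sketch; mmap_sem is assumed held for read. */
static int fault_sketch(unsigned long address)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	struct vm_area_struct *vma = find_vma(current->mm, address);

	if (!vma || vma->vm_start > address)
		return VM_FAULT_SIGSEGV;

	/* old form: handle_mm_fault(mm, vma, address, flags);
	 * new form drops mm, which is recovered from vma->vm_mm */
	return handle_mm_fault(vma, address, flags);
}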
diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
index 49b8abd1115c..f52b7db67fd3 100644
--- a/arch/arc/mm/ioremap.c
+++ b/arch/arc/mm/ioremap.c
@@ -49,7 +49,7 @@ EXPORT_SYMBOL(ioremap);
/*
* ioremap with access flags
* Cache semantics wise it is same as ioremap - "forced" uncached.
- * However unline vanilla ioremap which bypasses ARC MMU for addresses in
+ * However unlike vanilla ioremap which bypasses ARC MMU for addresses in
* ARC hardware uncached region, this one still goes thru the MMU as caller
* might need finer access control (R/W/X)
*/
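
Beyond the spelling fix, this comment documents why ioremap_prot() exists alongside ioremap(): both map uncached, but ioremap_prot() still routes the mapping through the MMU so the caller can restrict permissions (R/W/X). A hedged usage sketch, assuming the generic ioremap_prot() signature; the pgprot composition is illustrative and its exact macros (e.g. PAGE_KERNEL_RO) vary by architecture and kernel version:

#include <linux/io.h>

/* Sketch: map a device region uncached *and* read-only. Plain
 * ioremap() on ARC bypasses the MMU for hardware-uncached addresses,
 * so it cannot enforce access control; ioremap_prot() can.
 */
static void __iomem *map_regs_ro(phys_addr_t paddr, unsigned long size)
{
	return ioremap_prot(paddr, size,
			    pgprot_val(pgprot_noncached(PAGE_KERNEL_RO)));
}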