Diffstat (limited to 'arch/arc')
-rw-r--r--  arch/arc/include/asm/pgtable.h | 2 +-
-rw-r--r--  arch/arc/kernel/setup.c        | 1 +
-rw-r--r--  arch/arc/mm/dma.c              | 3 ++-
-rw-r--r--  arch/arc/mm/ioremap.c          | 2 +-
4 files changed, 5 insertions(+), 3 deletions(-)
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 858f98ef7f1b..0f92d97432a2 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -110,7 +110,7 @@
 #define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
 
 /* Set of bits not changed in pte_modify */
-#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
 
 /* More Abbrevaited helpers */
 #define PAGE_U_NONE __pgprot(___DEF)
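
The mask patched above is consumed by pte_modify(), which must preserve the pfn plus the accessed/dirty (and now special) bits while swapping in new protection bits; leaving _PAGE_SPECIAL out meant a protection change silently cleared pte_special(). A minimal sketch of that consumer, assuming the usual pte_val()/__pte() accessors (illustrative, not quoted from the ARC header):

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	/* keep the bits in _PAGE_CHG_MASK, replace only the protection bits */
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
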
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 2ee7a4d758a8..5f8c0b47045a 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/cpu.h>
 #include <linux/of_fdt.h>
+#include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/cache.h>
 #include <asm/sections.h>
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 73d7e4c75b7d..ab74b5d9186c 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -92,7 +92,8 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
 static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
 			 dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
-	struct page *page = virt_to_page(dma_handle);
+	phys_addr_t paddr = plat_dma_to_phys(dev, dma_handle);
+	struct page *page = virt_to_page(paddr);
 	int is_non_coh = 1;
 
 	is_non_coh = dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) ||
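
The bug fixed above was treating dma_handle, a bus address, as if it were a kernel virtual address. The replacement first translates it back to a physical address with plat_dma_to_phys() and only then derives the struct page. A rough sketch of what this change relies on; it assumes the allocation side produced the handle via an inverse hook (plat_phys_to_dma(), not shown in this hunk) and that the buffer lives in the kernel's linear map so virt_to_page() accepts the paddr, the same assumption the hunk makes:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/*
 * Illustrative helper, not a kernel API: undo the bus->phys translation
 * before deriving a struct page from a coherent-buffer handle.
 */
static struct page *dma_handle_to_page(struct device *dev, dma_addr_t handle)
{
	phys_addr_t paddr = plat_dma_to_phys(dev, handle);	/* bus -> phys */

	return virt_to_page(paddr);	/* ok only for linear-mapped RAM */
}
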
diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
index 49b8abd1115c..f52b7db67fd3 100644
--- a/arch/arc/mm/ioremap.c
+++ b/arch/arc/mm/ioremap.c
@@ -49,7 +49,7 @@ EXPORT_SYMBOL(ioremap);
 /*
  * ioremap with access flags
  * Cache semantics wise it is same as ioremap - "forced" uncached.
- * However unline vanilla ioremap which bypasses ARC MMU for addresses in
+ * However unlike vanilla ioremap which bypasses ARC MMU for addresses in
  * ARC hardware uncached region, this one still goes thru the MMU as caller
  * might need finer access control (R/W/X)
  */
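
Per the comment, ioremap_prot() differs from plain ioremap() in that the mapping always goes through the MMU, so the caller's protection bits (R/W/X) are honoured while caching is still forced off. A hypothetical caller sketch; the MMIO base, size, and the choice of PAGE_KERNEL are made up for illustration:

#include <linux/io.h>
#include <linux/mm.h>

static void __iomem *map_device_window(void)
{
	/* caller-chosen pgprot decides R/W/X; ARC still forces it uncached */
	return ioremap_prot(0xf0001000, 0x1000, pgprot_val(PAGE_KERNEL));
}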