blob: b7db86a76c98d921df7129a172344e2dddac264a [file] [log] [blame]
From e5bf75ca33946d81c82014168042a64db7c81551 Mon Sep 17 00:00:00 2001
From: Pan Jiafei <Jiafei.Pan@nxp.com>
Date: Thu, 17 Mar 2016 02:01:03 +0000
Subject: [PATCH] arm: add new non-shareable ioremap

Signed-off-by: Pan Jiafei <Jiafei.Pan@nxp.com>
Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
---
 arch/arm/include/asm/io.h       | 3 +++
 arch/arm/include/asm/mach/map.h | 4 ++--
 arch/arm/mm/ioremap.c           | 7 +++++++
 arch/arm/mm/mmu.c               | 9 +++++++++
 4 files changed, 21 insertions(+), 2 deletions(-)

--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -123,6 +123,7 @@ static inline u32 __raw_readl(const vola
 #define MT_DEVICE_NONSHARED 1
 #define MT_DEVICE_CACHED 2
 #define MT_DEVICE_WC 3
+#define MT_MEMORY_RW_NS 4
 /*
 * types 4 onwards can be found in asm/mach/map.h and are undefined
 * for ioremap
@@ -438,6 +439,8 @@ void __iomem *ioremap_wc(resource_size_t
 #define ioremap_wc ioremap_wc
 #define ioremap_wt ioremap_wc
 
+void __iomem *ioremap_cache_ns(resource_size_t res_cookie, size_t size);
+
 void iounmap(volatile void __iomem *iomem_cookie);
 #define iounmap iounmap
 
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -18,9 +18,9 @@ struct map_desc {
 unsigned int type;
 };
 
-/* types 0-3 are defined in asm/io.h */
+/* types 0-4 are defined in asm/io.h */
 enum {
- MT_UNCACHED = 4,
+ MT_UNCACHED = 5,
 MT_CACHECLEAN,
 MT_MINICLEAN,
 MT_LOW_VECTORS,
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -401,6 +401,13 @@ void __iomem *ioremap_wc(resource_size_t
 }
 EXPORT_SYMBOL(ioremap_wc);
 
+void __iomem *ioremap_cache_ns(resource_size_t res_cookie, size_t size)
+{
+ return arch_ioremap_caller(res_cookie, size, MT_MEMORY_RW_NS,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache_ns);
+
 /*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -327,6 +327,13 @@ static struct mem_type mem_types[] __ro_
 #endif
 .domain = DOMAIN_KERNEL,
 },
+ [MT_MEMORY_RW_NS] = {
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+ L_PTE_XN,
+ .prot_l1 = PMD_TYPE_TABLE,
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_XN,
+ .domain = DOMAIN_KERNEL,
+ },
 [MT_ROM] = {
 .prot_sect = PMD_TYPE_SECT,
 .domain = DOMAIN_KERNEL,
@@ -667,6 +674,7 @@ static void __init build_mem_type_table(
 }
 kern_pgprot |= PTE_EXT_AF;
 vecs_pgprot |= PTE_EXT_AF;
+ mem_types[MT_MEMORY_RW_NS].prot_pte |= PTE_EXT_AF | cp->pte;
 
 /*
 * Set PXN for user mappings
@@ -695,6 +703,7 @@
 mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
+ mem_types[MT_MEMORY_RW_NS].prot_sect |= ecc_mask | cp->pmd;
 mem_types[MT_MEMORY_RO].prot_sect |= ecc_mask | cp->pmd;
 mem_types[MT_MEMORY_RO].prot_pte |= kern_pgprot;
 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;