// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cachetype.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
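
/*
 * Worked example (assuming SHMLBA = 4 * PAGE_SIZE = 16 KiB and 4 KiB
 * pages): COLOUR_ALIGN(0x00401234, 5) rounds the address up to a 16 KiB
 * boundary (0x00404000) and adds the colour of pgoff 5, i.e.
 * (5 << 12) & 0x3fff = 0x1000, giving 0x00405000.  Subtracting
 * pgoff << PAGE_SHIFT from the result lands back on an SHMLBA boundary,
 * which is the property the allocators below rely on.
 */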

/* gap between mmap and stack */
#define MIN_GAP		(128*1024*1024UL)
#define MAX_GAP		((STACK_TOP)/6*5)
#define STACK_RND_MASK	(0x7ff >> (PAGE_SHIFT - 12))
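
/*
 * With 4 KiB pages these work out to: a gap clamped between 128 MiB
 * and 5/6 of STACK_TOP, and up to 0x7ff pages (just under 8 MiB) of
 * stack randomization folded into the pad in mmap_base() below.
 */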

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/*
	 * Values close to RLIM_INFINITY can overflow: if the unsigned
	 * sum wraps, gap + pad is no longer greater than gap and we
	 * skip the padding entirely.
	 */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}
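
/*
 * Worked example (a sketch, assuming STACK_TOP = 0xc0000000 and an
 * 8 MiB stack rlimit): gap starts at 8 MiB plus the guard gap and any
 * randomization pad, which is still below MIN_GAP, so it is raised to
 * 128 MiB and the mmap base lands at about 0xb8000000 minus the
 * random offset rnd.
 */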

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches: a specific page of an
 * object must always be mapped at the same offset modulo SHMLBA,
 * so that every mapping of it selects the same cache colour.
 *
 * We provide this function unconditionally; in the VIVT case the
 * alignment rules are optimised away.
 */
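
/*
 * Concretely (assuming four 4 KiB colours, i.e. a 16 KiB cache way):
 * two virtual mappings of the same physical page alias in a VIPT
 * D-cache unless they agree in address bits [13:12], which is exactly
 * what the SHMLBA rule above guarantees for shared mappings.
 */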
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/*
	 * We enforce the MAP_FIXED case: the address is taken as-is,
	 * but a shared mapping whose fixed address has the wrong cache
	 * colour is rejected.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && (flags & MAP_SHARED) &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}
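
/*
 * Note on the vm_unmapped_area() parameters above (and in the topdown
 * variant below): assuming 4 KiB pages and SHMLBA = 16 KiB, align_mask
 * is PAGE_MASK & 0x3fff = 0x3000, so the allocator only returns
 * addresses whose bits [13:12] match those of align_offset, i.e.
 * addresses with the cache colour that pgoff dictates.
 */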

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			const unsigned long len, const unsigned long pgoff,
			const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (aliasing && (flags & MAP_SHARED) &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	/* requesting a specific address */
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = FIRST_USER_ADDRESS;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
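	/*
	 * vm_unmapped_area() returns either a page-aligned address or a
	 * negative errno, so any low-order bits set here mean failure.
	 */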
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = mm->mmap_base;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

	rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}
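
/*
 * A sketch of the resulting entropy: mmap_rnd_bits defaults to 8 on
 * ARM (configurable up to 16), so with 4 KiB pages the mmap base is
 * randomized over a window of up to 1 MiB by default.
 */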

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
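
/*
 * In the legacy layout, mappings start at TASK_UNMAPPED_BASE and grow
 * upwards towards the stack; in the default layout, they start just
 * below the (guarded, optionally randomized) stack gap computed by
 * mmap_base() and grow downwards towards the heap.
 */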

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory - 1) + 1)
		return 0;

	return 1;
}
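
/*
 * In other words, read()/write() on /dev/mem is limited to the range
 * [PHYS_OFFSET, __pa(high_memory)), i.e. to lowmem RAM that the
 * kernel has a linear mapping for.
 */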

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}
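
/*
 * PHYS_MASK covers the physical address bits the page tables can
 * actually express, so the check above caps mmap() of /dev/mem at the
 * last addressable physical page rather than at the end of RAM.
 */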

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions. This effectively disables read()/write()
 * on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}

#endif