#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>

#include <asm/sections.h>
#include <linux/uaccess.h>

#include "internal.h"

static inline int is_kernel_rodata(unsigned long addr)
{
	return addr >= (unsigned long)__start_rodata &&
		addr < (unsigned long)__end_rodata;
}

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);
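
/*
 * Example usage (illustrative sketch, not part of this file): a hypothetical
 * caller duplicating a configuration string.  GFP_KERNEL is assumed here;
 * pick a mask appropriate to the calling context.
 *
 *	char *name = kstrdup(src_name, GFP_KERNEL);
 *
 *	if (!name)
 *		return -ENOMEM;
 *	...
 *	kfree(name);
 */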

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Function returns the source string if it is in the .rodata section;
 * otherwise it falls back to kstrdup().
 * Strings allocated by kstrdup_const should be freed by kfree_const.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
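
/*
 * Example (illustrative, hypothetical caller): kstrdup_const() pairs with
 * kfree_const(), so the caller does not need to know whether the string was
 * actually copied or points into .rodata.
 *
 *	obj->name = kstrdup_const(name, GFP_KERNEL);
 *	...
 *	kfree_const(obj->name);
 */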

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);
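
/*
 * Example (illustrative): bounding the copy of a possibly overlong source
 * string.  The 16-character cap is an arbitrary assumption; the result is
 * always NUL-terminated.
 *
 *	char *tag = kstrndup(src, 16, GFP_KERNEL);
 */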

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);
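
/*
 * Example (illustrative): duplicating a caller-provided template structure.
 * 'struct foo' and 'tmpl' are hypothetical names.
 *
 *	struct foo *copy = kmemdup(tmpl, sizeof(*tmpl), GFP_KERNEL);
 *
 *	if (!copy)
 *		return -ENOMEM;
 */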

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.  Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
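
/*
 * Example (illustrative): an ioctl-style handler copying a fixed-size blob
 * from userspace.  'arg' and 'struct foo_req' are hypothetical.
 *
 *	struct foo_req *req = memdup_user(arg, sizeof(*req));
 *
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	...
 *	kfree(req);
 */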

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.  Result may not be
 * physically contiguous.  Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
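
/*
 * Example (illustrative): duplicating a user-supplied path with an assumed
 * PATH_MAX bound; the result is always NUL-terminated.
 *
 *	char *path = strndup_user(upath, PATH_MAX);
 *
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	...
 *	kfree(path);
 */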

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);
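
/*
 * Example (illustrative): a procfs/sysfs-style write handler turning the
 * user buffer into a NUL-terminated kernel string before parsing it.
 * 'ubuf' and 'count' stand for the usual ->write() arguments.
 *
 *	char *kbuf = memdup_user_nul(ubuf, count);
 *
 *	if (IS_ERR(kbuf))
 *		return PTR_ERR(kbuf);
 *	ret = kstrtoul(kbuf, 0, &val);
 *	kfree(kbuf);
 */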

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * Note a difference with get_user_pages_fast: this always returns the
 * number of pages pinned, 0 if no pages were pinned.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __weak get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	return get_user_pages_unlocked(start, nr_pages, pages,
				       write ? FOLL_WRITE : 0);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
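
/*
 * Example (illustrative): pinning a single page for writing and releasing it
 * afterwards.  Error handling beyond the return-value check is omitted.
 *
 *	struct page *page;
 *	int ret = get_user_pages_fast(uaddr, 1, 1, &page);
 *
 *	if (ret != 1)
 *		return ret < 0 ? ret : -EFAULT;
 *	...
 *	put_page(page);
 */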

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate, &uf);
		up_write(&mm->mmap_sem);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Please note that for any gfp mask that is not GFP_KERNEL compatible, no
 * vmalloc fallback is attempted.
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
	 * so the given set of flags has to be compatible.
	 */
	if ((flags & GFP_KERNEL) != GFP_KERNEL)
		return kmalloc_node(size, flags, node);

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contributes less to long-term fragmentation than the vmalloc fallback
	 * would.  However make sure that larger requests are not too disruptive -
	 * no OOM killer and no allocation failure warnings as we have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fall back to vmalloc for sub page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	return __vmalloc_node_flags_caller(size, node, flags,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);
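
/*
 * Example (illustrative): allocating a table whose size scales with a
 * caller-controlled count and does not require physical contiguity.
 * 'nr_entries' and 'struct foo' are hypothetical.
 *
 *	struct foo *tbl = kvmalloc(array_size(nr_entries, sizeof(*tbl)),
 *				   GFP_KERNEL);
 *
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	kvfree(tbl);
 */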

/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Any context except NMI.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For compound page it returns true if any subpage of compound page is mapped.
 */
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < (1 << compound_order(page)); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
struct address_space *page_mapping_file(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return NULL;
	return page_mapping(page);
}

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP page->_mapcount contains the total number of mappings
	 * of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}
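
/*
 * Worked example (illustrative numbers): with 4 GiB of RAM (1048576 4K
 * pages), no hugetlb pages, the default overcommit_ratio of 50 and 2 GiB of
 * swap (524288 pages), the limit in pages is roughly
 *
 *	1048576 * 50 / 100 + 524288 = 1048576 pages  (~4 GiB)
 *
 * When overcommit_kbytes is set, it overrides the ratio and is simply
 * converted from KiB to pages (the ">> (PAGE_SHIFT - 10)" above).
 */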

/*
 * Make sure vm_committed_as is in one cacheline and does not share a
 * cacheline with other variables. It can be updated by several CPUs
 * frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long free, allowed, reserve;

	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
			-(s64)vm_committed_as_batch * num_online_cpus(),
			"memory commitment underflow");

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_zone_page_state(NR_FREE_PAGES);
		free += global_node_page_state(NR_FILE_PAGES);

		/*
		 * shmem pages shouldn't be counted as free in this
		 * case, they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */
		free -= global_node_page_state(NR_SHMEM);

		free += get_nr_swap_pages();

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this
		 * category.
		 */
		free += global_node_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Part of the kernel memory, which can be released
		 * under memory pressure.
		 */
		free += global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);

		/*
		 * Leave reserved pages. The pages are not for anonymous pages.
		 */
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;

		/*
		 * Reserve some for root
		 */
		if (!cap_sys_admin)
			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee a trailing NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;

	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	down_read(&mm->mmap_sem);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	up_read(&mm->mmap_sem);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}
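
/*
 * Example (illustrative): reading a task's command line into a local buffer,
 * remembering that the result is not guaranteed to be NUL-terminated.
 *
 *	char buf[256];
 *	int n = get_cmdline(task, buf, sizeof(buf) - 1);
 *
 *	buf[n] = '\0';
 */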