/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/hugetlb.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/file.h>

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_NODUMP;
		break;
	case MADV_DODUMP:
		new_flags &= ~VM_NODUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error)
			goto out;
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error)
			goto out;
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
				vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto out;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto out;
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;

out:
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}

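/*
 * Illustration (not from the original source): when vma_merge() above
 * fails, madvising the middle of a vma splits it into up to three vmas,
 * and only the middle one gets the new flags:
 *
 *	before:  [=============== vma ===============]
 *	                  start^          ^end
 *	after:   [ head ][ new_flags     ][ tail     ]
 */
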
/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;

	if (!file)
		return -EBADF;

	if (file->f_mapping->a_ops->get_xip_mem) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	*prev = vma;
	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	force_page_cache_readahead(file->f_mapping, file, start, end - start);
	return 0;
}

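/*
 * Example (illustrative userspace usage, not part of this file): because
 * MADV_WILLNEED only schedules read-ahead, an application can overlap the
 * I/O with other work; do_other_work() and scan() are hypothetical:
 *
 *	madvise(addr, len, MADV_WILLNEED);	// returns without waiting
 *	do_other_work();
 *	scan(addr, len);			// pages likely resident now
 */
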
/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	*prev = vma;
	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
		return -EINVAL;

	if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
		struct zap_details details = {
			.nonlinear_vma = vma,
			.last_index = ULONG_MAX,
		};
		zap_page_range(vma, start, end - start, &details);
	} else
		zap_page_range(vma, start, end - start, NULL);
	return 0;
}

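/*
 * Example (illustrative, not part of this file): user-level allocators
 * commonly use MADV_DONTNEED to return freed anonymous memory to the
 * kernel while keeping the range mapped; the next touch faults in fresh
 * zero pages.  chunk and chunk_size are hypothetical names:
 *
 *	madvise(chunk, chunk_size, MADV_DONTNEED);
 */
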
/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 *
 * NOTE: Currently, only shmfs/tmpfs is supported for this operation.
 * Other filesystems return -ENOSYS.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	struct address_space *mapping;
	loff_t offset, endoff;
	int error;
	struct file *f;

	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */

	if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host)
		return -EINVAL;

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	mapping = vma->vm_file->f_mapping;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	endoff = (loff_t)(end - vma->vm_start - 1)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * vmtruncate_range may need to take i_mutex.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_sem.
	 */
	get_file(f);
	up_read(&current->mm->mmap_sem);
	error = vmtruncate_range(mapping->host, offset, endoff);
	fput(f);
	down_read(&current->mm->mmap_sem);
	return error;
}

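/*
 * Example (illustrative, not part of this file): punching a hole in a
 * shared, writable tmpfs mapping releases both the pages and the shmem
 * backing store.  tmpfs_fd, hole_off and hole_len are hypothetical:
 *
 *	char *p = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED,
 *		       tmpfs_fd, 0);
 *	madvise(p + hole_off, hole_len, MADV_REMOVE);
 */
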
#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	for (; start < end; start += PAGE_SIZE) {
		struct page *p;
		int ret = get_user_pages_fast(start, 1, 0, &p);

		if (ret != 1)
			return ret;
		if (bhv == MADV_SOFT_OFFLINE) {
			printk(KERN_INFO "Soft offlining page %lx at %lx\n",
				page_to_pfn(p), start);
			ret = soft_offline_page(p, MF_COUNT_INCREASED);
			if (ret)
				return ret;
			continue;
		}
		printk(KERN_INFO "Injecting memory failure for page %lx at %lx\n",
		       page_to_pfn(p), start);
		/* Ignore return value for now */
		memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED);
	}
	return 0;
}
#endif

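/*
 * Example (illustrative, not part of this file): with CONFIG_MEMORY_FAILURE
 * enabled, a privileged test program can inject a fault into one of its own
 * pages to exercise the memory-failure paths; page and pagesize are
 * hypothetical names:
 *
 *	madvise(page, pagesize, MADV_HWPOISON);	// needs CAP_SYS_ADMIN
 */
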
static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_DONTNEED:
		return madvise_dontneed(vma, prev, start, end);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}

static int
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
		return 1;

	default:
		return 0;
	}
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the appli-
 *		cation will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by
 *		transparent huge pages in the future.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: include the range in core dumps again.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
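/*
 * Example (illustrative userspace usage, not part of this file): marking
 * a large private file mapping as randomly accessed to suppress
 * read-ahead; buf, len and fd are hypothetical:
 *
 *	#include <sys/mman.h>
 *
 *	void *buf = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *	if (buf != MAP_FAILED && madvise(buf, len, MADV_RANDOM) != 0)
 *		perror("madvise");
 */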
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_hwpoison(behavior, start, start+len_in);
#endif
	if (!madvise_behavior_valid(behavior))
		return error;

	write = madvise_need_mmap_write(behavior);
	if (write)
		down_write(&current->mm->mmap_sem);
	else
		down_read(&current->mm->mmap_sem);

	if (start & ~PAGE_MASK)
		goto out;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		goto out;

	end = start + len;
	if (end < start)
		goto out;

	error = 0;
	if (end == start)
		goto out;

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out:
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}