/*
 * linux/mm/swap.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <linux/locallock.h>

#include "internal.h"

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);

static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
static DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs. But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		unsigned long flags;
		struct zone *zone = page_zone(page);

		spin_lock_irqsave(&zone->lru_lock, flags);
		VM_BUG_ON(!PageLRU(page));
		__ClearPageLRU(page);
		del_page_from_lru_list(zone, page, page_off_lru(page));
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
}

static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	free_hot_cold_page(page, 0);
}

static void __put_compound_page(struct page *page)
{
	compound_page_dtor *dtor;

	if (!PageHuge(page))
		__page_cache_release(page);
	dtor = get_compound_page_dtor(page);
	(*dtor)(page);
}

static void put_compound_page(struct page *page)
{
	if (unlikely(PageTail(page))) {
		/* __split_huge_page_refcount can run under us */
		struct page *page_head = compound_trans_head(page);

		if (likely(page != page_head &&
			   get_page_unless_zero(page_head))) {
			unsigned long flags;

			if (PageHeadHuge(page_head)) {
				if (likely(PageTail(page))) {
					/*
					 * __split_huge_page_refcount
					 * cannot race here.
					 */
					VM_BUG_ON(!PageHead(page_head));
					atomic_dec(&page->_mapcount);
					if (put_page_testzero(page_head))
						VM_BUG_ON(1);
					if (put_page_testzero(page_head))
						__put_compound_page(page_head);
					return;
				} else {
					/*
					 * __split_huge_page_refcount ran
					 * before us; "page" was a THP tail.
					 * The split page_head has been freed
					 * and reallocated as a slab or
					 * hugetlbfs page of smaller order
					 * (only possible if reallocated as
					 * slab on x86).
					 */
					goto skip_lock;
				}
			}
			/*
			 * page_head wasn't a dangling pointer but it
			 * may not be a head page anymore by the time
			 * we obtain the lock. That is ok as long as it
			 * can't be freed from under us.
			 */
			flags = compound_lock_irqsave(page_head);
			if (unlikely(!PageTail(page))) {
				/* __split_huge_page_refcount ran before us */
				compound_unlock_irqrestore(page_head, flags);
				VM_BUG_ON(PageHead(page_head));
skip_lock:
				if (put_page_testzero(page_head)) {
					/*
					 * The head page may have been
					 * freed and reallocated as a
					 * compound page of smaller
					 * order and then freed again.
					 * All we know is that it
					 * cannot have become: a THP
					 * page, a compound page of
					 * higher order, a tail page.
					 * That is because we still
					 * hold the refcount of the
					 * split THP tail and
					 * page_head was the THP head
					 * before the split.
					 */
					if (PageHead(page_head))
						__put_compound_page(page_head);
					else
						__put_single_page(page_head);
				}
out_put_single:
				if (put_page_testzero(page))
					__put_single_page(page);
				return;
			}
			VM_BUG_ON(page_head != page->first_page);
			/*
			 * We can release the refcount taken by
			 * get_page_unless_zero() now that
			 * __split_huge_page_refcount() is blocked on
			 * the compound_lock.
			 */
			if (put_page_testzero(page_head))
				VM_BUG_ON(1);
			/* __split_huge_page_refcount will wait now */
			VM_BUG_ON(page_mapcount(page) <= 0);
			atomic_dec(&page->_mapcount);
			VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
			VM_BUG_ON(atomic_read(&page->_count) != 0);
			compound_unlock_irqrestore(page_head, flags);
			if (put_page_testzero(page_head)) {
				if (PageHead(page_head))
					__put_compound_page(page_head);
				else
					__put_single_page(page_head);
			}
		} else {
			/* page_head is a dangling pointer */
			VM_BUG_ON(PageTail(page));
			goto out_put_single;
		}
	} else if (put_page_testzero(page)) {
		if (PageHead(page))
			__put_compound_page(page);
		else
			__put_single_page(page);
	}
}

void put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		put_compound_page(page);
	else if (put_page_testzero(page))
		__put_single_page(page);
}
EXPORT_SYMBOL(put_page);
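
/*
 * Illustrative sketch (not part of the kernel): the usual pairing of a
 * page reference with put_page(). The helper name example_hold_page()
 * is hypothetical; real callers are get_user_pages(), the page cache,
 * drivers and so on. get_page() pins the page while it is being used,
 * put_page() drops the pin and, via the paths above, frees the page
 * once its count reaches zero.
 */
static inline void example_hold_page(struct page *page)
{
	get_page(page);		/* take a reference while we touch the page */
	/* ... access the page contents ... */
	put_page(page);		/* drop the reference; may free the page */
}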

/*
 * This function is exported but must not be called by anything other
 * than get_page(). It implements the slow path of get_page().
 */
bool __get_page_tail(struct page *page)
{
	/*
	 * This takes care of get_page() if run on a tail page
	 * returned by one of the get_user_pages/follow_page variants.
	 * get_user_pages/follow_page itself doesn't need the compound
	 * lock because it runs __get_page_tail_foll() under the
	 * proper PT lock that already serializes against
	 * split_huge_page().
	 */
	unsigned long flags;
	bool got = false;
	struct page *page_head = compound_trans_head(page);

	if (likely(page != page_head && get_page_unless_zero(page_head))) {
		/* See the comment in put_compound_page(). */
		if (PageHeadHuge(page_head)) {
			if (likely(PageTail(page))) {
				/*
				 * This is a hugetlbfs page.
				 * __split_huge_page_refcount
				 * cannot race here.
				 */
				VM_BUG_ON(!PageHead(page_head));
				__get_page_tail_foll(page, false);
				return true;
			} else {
				/*
				 * __split_huge_page_refcount ran
				 * before us; "page" was a THP
				 * tail. The split page_head has been
				 * freed and reallocated as a slab or
				 * hugetlbfs page of smaller order
				 * (only possible if reallocated as
				 * slab on x86).
				 */
				put_page(page_head);
				return false;
			}
		}
		/*
		 * page_head wasn't a dangling pointer but it
		 * may not be a head page anymore by the time
		 * we obtain the lock. That is ok as long as it
		 * can't be freed from under us.
		 */
		flags = compound_lock_irqsave(page_head);
		/* here __split_huge_page_refcount won't run anymore */
		if (likely(PageTail(page))) {
			__get_page_tail_foll(page, false);
			got = true;
		}
		compound_unlock_irqrestore(page_head, flags);
		if (unlikely(!got))
			put_page(page_head);
	}
	return got;
}
EXPORT_SYMBOL(__get_page_tail);
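
/*
 * For reference, a rough sketch of the only intended caller: get_page()
 * in <linux/mm.h> special-cases tail pages and falls back to the slow
 * path above. This is a paraphrase for illustration only (the helper
 * name is hypothetical), not a copy of the header; see
 * include/linux/mm.h for the real fast path.
 */
static inline void example_get_page_fast_path(struct page *page)
{
	if (unlikely(PageTail(page)) && __get_page_tail(page))
		return;			/* tail page: slow path took the reference */
	atomic_inc(&page->_count);	/* ordinary or head page: plain increment */
}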

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru. Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		page_cache_release(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);
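
/*
 * Minimal usage sketch for put_pages_list(): thread pages that are no
 * longer needed onto a private list via their ->lru field and hand the
 * whole list over in one call. The pages must not be on an LRU list,
 * since ->lru is reused here. example_drop_pages() is hypothetical and
 * purely illustrative.
 */
static inline void example_drop_pages(struct page **pages, int nr)
{
	LIST_HEAD(to_drop);
	int i;

	for (i = 0; i < nr; i++)
		list_add(&pages[i]->lru, &to_drop);
	put_pages_list(&to_drop);	/* drops one reference per page */
}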

static void pagevec_lru_move_fn(struct pagevec *pvec,
				void (*move_fn)(struct page *page, void *arg),
				void *arg)
{
	int i;
	struct zone *zone = NULL;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irqrestore(&zone->lru_lock, flags);
			zone = pagezone;
			spin_lock_irqsave(&zone->lru_lock, flags);
		}

		(*move_fn)(page, arg);
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

static void pagevec_move_tail_fn(struct page *page, void *arg)
{
	int *pgmoved = arg;

	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		enum lru_list lru = page_lru_base_type(page);
		struct lruvec *lruvec;

		lruvec = mem_cgroup_lru_move_lists(page_zone(page),
						   page, lru, lru);
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		(*pgmoved)++;
	}
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int pgmoved = 0;

	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
	__count_vm_events(PGROTATED, pgmoved);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim. If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		page_cache_get(page);
		local_lock_irqsave(rotate_lock, flags);
		pvec = &__get_cpu_var(lru_rotate_pvecs);
		if (!pagevec_add(pvec, page))
			pagevec_move_tail(pvec);
		local_unlock_irqrestore(rotate_lock, flags);
	}
}
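
/*
 * Sketch of the typical call site, paraphrasing end_page_writeback() in
 * mm/filemap.c: when writeback completes on a page that reclaim marked
 * with PG_reclaim, the page is rotated to the tail of the inactive list
 * so that it becomes the next reclaim candidate. The helper below is
 * illustrative only.
 */
static inline void example_writeback_done(struct page *page)
{
	if (TestClearPageReclaim(page))
		rotate_reclaimable_page(page);
	/* ... the real caller then clears PG_writeback and wakes waiters ... */
}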

static void update_page_reclaim_stat(struct zone *zone, struct page *page,
				     int file, int rotated)
{
	struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
	struct zone_reclaim_stat *memcg_reclaim_stat;

	memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);

	reclaim_stat->recent_scanned[file]++;
	if (rotated)
		reclaim_stat->recent_rotated[file]++;

	if (!memcg_reclaim_stat)
		return;

	memcg_reclaim_stat->recent_scanned[file]++;
	if (rotated)
		memcg_reclaim_stat->recent_rotated[file]++;
}

static void __activate_page(struct page *page, void *arg)
{
	struct zone *zone = page_zone(page);

	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);
		del_page_from_lru_list(zone, page, lru);

		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(zone, page, lru);
		__count_vm_event(PGACTIVATE);

		update_page_reclaim_stat(zone, page, file, 1);
	}
}

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);

static void activate_page_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);

	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, __activate_page, NULL);
}

void activate_page(struct page *page)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_locked_var(swapvec_lock,
						       activate_page_pvecs);

		page_cache_get(page);
		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, __activate_page, NULL);
		put_locked_var(swapvec_lock, activate_page_pvecs);
	}
}

#else
static inline void activate_page_drain(int cpu)
{
}

void activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	__activate_page(page, NULL);
	spin_unlock_irq(&zone->lru_lock);
}
#endif

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced -> inactive,referenced
 * inactive,referenced -> active,unreferenced
 * active,unreferenced -> active,referenced
 */
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && !PageUnevictable(page) &&
	    PageReferenced(page) && PageLRU(page)) {
		activate_page(page);
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
}
EXPORT_SYMBOL(mark_page_accessed);
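
/*
 * Illustrative sketch of the two-touch promotion documented above: for a
 * page sitting on an inactive LRU list, the first mark_page_accessed()
 * only sets PG_referenced, the second one activates the page. The helper
 * name is hypothetical; it exists only to spell out the state transitions.
 */
static inline void example_touch_twice(struct page *page)
{
	mark_page_accessed(page);	/* inactive,unreferenced -> inactive,referenced */
	mark_page_accessed(page);	/* inactive,referenced -> active,unreferenced */
}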

void __lru_cache_add(struct page *page, enum lru_list lru)
{
	struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvecs)[lru];

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		__pagevec_lru_add(pvec, lru);
	put_locked_var(swapvec_lock, lru_add_pvecs);
}
EXPORT_SYMBOL(__lru_cache_add);

/**
 * lru_cache_add_lru - add a page to a page list
 * @page: the page to be added to the LRU.
 * @lru: the LRU list to which the page is added.
 */
void lru_cache_add_lru(struct page *page, enum lru_list lru)
{
	if (PageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		ClearPageActive(page);
	} else if (PageUnevictable(page)) {
		VM_BUG_ON(PageActive(page));
		ClearPageUnevictable(page);
	}

	VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
	__lru_cache_add(page, lru);
}
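
/*
 * Usage sketch: new pages normally reach the LRU through the wrappers in
 * <linux/swap.h> (lru_cache_add_anon()/lru_cache_add_file()), which pick
 * the base LRU list and funnel into __lru_cache_add() above. The helper
 * below is hypothetical and only illustrates a direct call for a freshly
 * allocated, not-yet-LRU file page.
 */
static inline void example_add_file_page(struct page *page)
{
	lru_cache_add_lru(page, LRU_INACTIVE_FILE);	/* batched via the pagevec */
}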

/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page: the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list. To avoid races with
 * tasks that might be making the page evictable, through eg. munlock,
 * munmap or exit, while it's not on the lru, we want to add the page
 * while it's locked or otherwise "invisible" to other tasks. This is
 * difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	SetPageUnevictable(page);
	SetPageLRU(page);
	add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
	spin_unlock_irq(&zone->lru_lock);
}
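
/*
 * Sketch of the pattern the comment above describes: keep the page locked
 * (and therefore "invisible" to munlock/munmap/exit) while it is placed
 * straight onto the unevictable list. example_make_unevictable() is a
 * hypothetical helper for illustration only.
 */
static inline void example_make_unevictable(struct page *page)
{
	lock_page(page);		/* keep racers away while it is off the LRU */
	if (!PageLRU(page))
		add_page_to_unevictable_list(page);
	unlock_page(page);
}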

/*
 * If the page cannot be invalidated, it is moved to the
 * inactive list to speed up its reclaim. It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't page_mapped and dirty/writeback, the page
 * could be reclaimed asap using PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In case 4, the page is moved to the head of the inactive list because
 * the VM expects the flusher threads to write it out soon, which is much
 * more effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_fn(struct page *page, void *arg)
{
	int lru, file;
	bool active;
	struct zone *zone = page_zone(page);

	if (!PageLRU(page))
		return;

	if (PageUnevictable(page))
		return;

	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	active = PageActive(page);

	file = page_is_file_cache(page);
	lru = page_lru_base_type(page);
	del_page_from_lru_list(zone, page, lru + active);
	ClearPageActive(page);
	ClearPageReferenced(page);
	add_page_to_lru_list(zone, page, lru);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * PG_reclaim can race with end_page_writeback().
		 * That can confuse readahead, but the race window
		 * is _really_ small and it is not a critical problem.
		 */
		SetPageReclaim(page);
	} else {
		struct lruvec *lruvec;
		/*
		 * The page's writeback has already ended while it sat
		 * in the pagevec, so move the page to the tail of the
		 * inactive list.
		 */
		lruvec = mem_cgroup_lru_move_lists(zone, page, lru, lru);
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		__count_vm_event(PGROTATED);
	}

	if (active)
		__count_vm_event(PGDEACTIVATE);
	update_page_reclaim_stat(zone, page, file, 0);
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
	struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
	struct pagevec *pvec;
	int lru;

	for_each_lru(lru) {
		pvec = &pvecs[lru - LRU_BASE];
		if (pagevec_count(pvec))
			__pagevec_lru_add(pvec, lru);
	}

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_lock_irqsave(rotate_lock, flags);
		pagevec_move_tail(pvec);
		local_unlock_irqrestore(rotate_lock, flags);
	}

	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);

	activate_page_drain(cpu);
}

/**
 * deactivate_page - forcefully deactivate a page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_page(struct page *page)
{
	/*
	 * In a workload with many unevictable pages (e.g. one making heavy
	 * use of mlock/mprotect), deactivating unevictable pages to
	 * accelerate reclaim is pointless.
	 */
	if (PageUnevictable(page))
		return;

	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec = &get_locked_var(swapvec_lock,
						       lru_deactivate_pvecs);

		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
		put_locked_var(swapvec_lock, lru_deactivate_pvecs);
	}
}
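
/*
 * Sketch of the intended use, modelled on the cache-invalidation path in
 * mm/truncate.c: when an invalidation attempt fails (for example because
 * the page is dirty or under writeback), hint to the VM that the page is
 * still a good reclaim candidate. The helper and its "invalidated"
 * argument are hypothetical, illustrative only.
 */
static inline void example_invalidate_hint(struct page *page, bool invalidated)
{
	if (!invalidated)
		deactivate_page(page);	/* queue it toward the inactive tail */
}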

void lru_add_drain(void)
{
	lru_add_drain_cpu(local_lock_cpu(swapvec_lock));
	local_unlock_cpu(swapvec_lock);
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
	return schedule_on_each_cpu(lru_add_drain_per_cpu);
}

/*
 * Batched page_cache_release(). Decrement the reference count on all the
 * passed pages. If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_inactive_list(): we recheck
 * the page count inside the lock to see whether shrink_inactive_list()
 * grabbed the page via the LRU. If it did, give up: shrink_inactive_list()
 * will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
	int i;
	LIST_HEAD(pages_to_free);
	struct zone *zone = NULL;
	unsigned long uninitialized_var(flags);

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		if (unlikely(PageCompound(page))) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			put_compound_page(page);
			continue;
		}

		if (!put_page_testzero(page))
			continue;

		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irqrestore(&zone->lru_lock,
							       flags);
				zone = pagezone;
				spin_lock_irqsave(&zone->lru_lock, flags);
			}
			VM_BUG_ON(!PageLRU(page));
			__ClearPageLRU(page);
			del_page_from_lru_list(zone, page, page_off_lru(page));
		}

		list_add(&page->lru, &pages_to_free);
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	free_hot_cold_page_list(&pages_to_free, cold);
}
EXPORT_SYMBOL(release_pages);
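
/*
 * Minimal usage sketch for release_pages(): drop the references on a batch
 * of pages with a single call instead of looping over put_page(). The
 * helper name is hypothetical.
 */
static inline void example_put_page_batch(struct page **pages, int nr)
{
	release_pages(pages, nr, 0);	/* cold == 0: treat the pages as cache-hot */
}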

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues. That would prevent them from really being freed right now. That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here. __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);
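
/*
 * Sketch of the usual pagevec pattern built on the helpers above: gather
 * page references into a pagevec and let pagevec_release() (the inline
 * wrapper around __pagevec_release() in <linux/pagevec.h>) drop them in
 * batches. Illustrative only; example_pagevec_put() is not a real helper.
 */
static inline void example_pagevec_put(struct page **pages, int nr)
{
	struct pagevec pvec;
	int i;

	pagevec_init(&pvec, 0);
	for (i = 0; i < nr; i++) {
		if (!pagevec_add(&pvec, pages[i]))
			pagevec_release(&pvec);	/* pagevec filled up: flush it */
	}
	pagevec_release(&pvec);			/* flush the remainder */
}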

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct zone *zone,
		       struct page *page, struct page *page_tail)
{
	int uninitialized_var(active);
	enum lru_list lru;
	const int file = 0;

	VM_BUG_ON(!PageHead(page));
	VM_BUG_ON(PageCompound(page_tail));
	VM_BUG_ON(PageLRU(page_tail));
	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&zone->lru_lock));

	SetPageLRU(page_tail);

	if (page_evictable(page_tail, NULL)) {
		if (PageActive(page)) {
			SetPageActive(page_tail);
			active = 1;
			lru = LRU_ACTIVE_ANON;
		} else {
			active = 0;
			lru = LRU_INACTIVE_ANON;
		}
	} else {
		SetPageUnevictable(page_tail);
		lru = LRU_UNEVICTABLE;
	}

	if (likely(PageLRU(page)))
		list_add_tail(&page_tail->lru, &page->lru);
	else {
		struct list_head *list_head;
		/*
		 * Head page has not yet been counted, as an hpage,
		 * so we must account for each subpage individually.
		 *
		 * Use the standard add function to put page_tail on the list,
		 * but then correct its position so they all end up in order.
		 */
		add_page_to_lru_list(zone, page_tail, lru);
		list_head = page_tail->lru.prev;
		list_move_tail(&page_tail->lru, list_head);
	}

	if (!PageUnevictable(page))
		update_page_reclaim_stat(zone, page_tail, file, active);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __pagevec_lru_add_fn(struct page *page, void *arg)
{
	enum lru_list lru = (enum lru_list)arg;
	struct zone *zone = page_zone(page);
	int file = is_file_lru(lru);
	int active = is_active_lru(lru);

	VM_BUG_ON(PageActive(page));
	VM_BUG_ON(PageUnevictable(page));
	VM_BUG_ON(PageLRU(page));

	SetPageLRU(page);
	if (active)
		SetPageActive(page);
	add_page_to_lru_list(zone, page, lru);
	update_page_reclaim_stat(zone, page, file, active);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them. Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
{
	VM_BUG_ON(is_unevictable_lru(lru));

	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, (void *)lru);
}
EXPORT_SYMBOL(__pagevec_lru_add);

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec: Where the resulting pages are placed
 * @mapping: The address_space to search
 * @start: The starting page index
 * @nr_pages: The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping. The pages are placed in @pvec. pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes. There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
			pgoff_t start, unsigned nr_pages)
{
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup);
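
/*
 * Typical gang-lookup loop (the same shape used by truncate and writeback
 * code): walk a mapping in PAGEVEC_SIZE batches, do something with each
 * page, then drop the references taken by pagevec_lookup(). The loop body
 * is a placeholder; only the structure is the point.
 */
static inline void example_walk_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t index = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* ... inspect or touch the page here ... */
			index = page->index + 1;	/* resume after this page */
		}
		pagevec_release(&pvec);
	}
}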

unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
			    pgoff_t *index, int tag, unsigned nr_pages)
{
	pvec->nr = find_get_pages_tag(mapping, index, tag,
				      nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_tag);

/* Early setup for the local locks */
static int __init swap_init_locks(void)
{
	local_irq_lock_init(rotate_lock);
	local_irq_lock_init(swapvec_lock);
	return 1;
}
early_initcall(swap_init_locks);

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);

#ifdef CONFIG_SWAP
	bdi_init(swapper_space.backing_dev_info);
#endif

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
}
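
/*
 * Illustration of what page_cluster controls (see the "page-cluster" entry
 * in Documentation/sysctl/vm.txt): swap read-ahead brings in up to
 * 2^page_cluster pages around a fault. The helper below just computes that
 * window size; the actual read-ahead logic lives in mm/swap_state.c. It is
 * a hypothetical sketch, not used anywhere.
 */
static inline unsigned long example_swap_readahead_window(void)
{
	return 1UL << page_cluster;	/* e.g. 8 pages when page_cluster == 3 */
}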