#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <linux/sched/signal.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>

#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "internal.h"

static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}

static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, unsigned int flags)
{
	/* No page to get reference */
	if (flags & FOLL_GET)
		return -EFAULT;

	if (flags & FOLL_TOUCH) {
		pte_t entry = *pte;

		if (flags & FOLL_WRITE)
			entry = pte_mkdirty(entry);
		entry = pte_mkyoung(entry);

		if (!pte_same(*pte, entry)) {
			set_pte_at(vma->vm_mm, address, pte, entry);
			update_mmu_cache(vma, address, pte);
		}
	}

	/* Proper page table entry exists, but no corresponding struct page */
	return -EEXIST;
}

/*
 * FOLL_FORCE can write to even unwritable pte's, but only
 * after we've gone through a COW cycle and they are dirty.
 */
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
	return pte_write(pte) ||
		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}
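
/*
 * Note: FOLL_COW is not set by callers directly; it is added to the lookup
 * flags by faultin_page() further down once handle_mm_fault() has reported
 * VM_FAULT_WRITE (i.e. the COW break has happened) for a !VM_WRITE vma, so
 * that a subsequent FOLL_FORCE lookup through follow_page_pte() can succeed
 * on the now-dirty but still read-only pte instead of faulting forever.
 */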

static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags)
{
	struct mm_struct *mm = vma->vm_mm;
	struct dev_pagemap *pgmap = NULL;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;

retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto retry;
	}
	if ((flags & FOLL_NUMA) && pte_protnone(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
		pte_unmap_unlock(ptep, ptl);
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	if (!page && pte_devmap(pte) && (flags & FOLL_GET)) {
		/*
		 * Only return device mapping pages in the FOLL_GET case since
		 * they are only valid while holding the pgmap reference.
		 */
		pgmap = get_dev_pagemap(pte_pfn(pte), NULL);
		if (pgmap)
			page = pte_page(pte);
		else
			goto no_page;
	} else if (unlikely(!page)) {
		if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
			page = ERR_PTR(-EFAULT);
			goto out;
		}

		if (is_zero_pfn(pte_pfn(pte))) {
			page = pte_page(pte);
		} else {
			int ret;

			ret = follow_pfn_pte(vma, address, ptep, flags);
			page = ERR_PTR(ret);
			goto out;
		}
	}

	if (flags & FOLL_SPLIT && PageTransCompound(page)) {
		int ret;
		get_page(page);
		pte_unmap_unlock(ptep, ptl);
		lock_page(page);
		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);
		if (ret)
			return ERR_PTR(ret);
		goto retry;
	}

	if (flags & FOLL_GET) {
		if (unlikely(!try_get_page(page))) {
			page = ERR_PTR(-ENOMEM);
			goto out;
		}

		/* drop the pgmap reference now that we hold the page */
		if (pgmap) {
			put_dev_pagemap(pgmap);
			pgmap = NULL;
		}
	}
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/* Do not mlock pte-mapped THP */
		if (PageTransCompound(page))
			goto out;

		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();  /* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
out:
	pte_unmap_unlock(ptep, ptl);
	return page;
no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}

static struct page *follow_pmd_mask(struct vm_area_struct *vma,
				    unsigned long address, pud_t *pudp,
				    unsigned int flags, unsigned int *page_mask)
{
	pmd_t *pmd, pmdval;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pmd = pmd_offset(pudp, address);
	/*
	 * The READ_ONCE() will stabilize the pmdval in a register or
	 * on the stack so that it will stop changing under the code.
	 */
	pmdval = READ_ONCE(*pmd);
	if (pmd_none(pmdval))
		return no_page_table(vma, flags);
	if (pmd_huge(pmdval) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pmd(mm, address, pmd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pmd_val(pmdval)), flags,
				      PMD_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
retry:
	if (!pmd_present(pmdval)) {
		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);
		VM_BUG_ON(thp_migration_supported() &&
			  !is_pmd_migration_entry(pmdval));
		if (is_pmd_migration_entry(pmdval))
			pmd_migration_entry_wait(mm, pmd);
		pmdval = READ_ONCE(*pmd);
		/*
		 * MADV_DONTNEED may convert the pmd to null because
		 * mmap_sem is held in read mode
		 */
		if (pmd_none(pmdval))
			return no_page_table(vma, flags);
		goto retry;
	}
	if (pmd_devmap(pmdval)) {
		ptl = pmd_lock(mm, pmd);
		page = follow_devmap_pmd(vma, address, pmd, flags);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (likely(!pmd_trans_huge(pmdval)))
		return follow_page_pte(vma, address, pmd, flags);

	if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
		return no_page_table(vma, flags);

retry_locked:
	ptl = pmd_lock(mm, pmd);
	if (unlikely(pmd_none(*pmd))) {
		spin_unlock(ptl);
		return no_page_table(vma, flags);
	}
	if (unlikely(!pmd_present(*pmd))) {
		spin_unlock(ptl);
		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);
		pmd_migration_entry_wait(mm, pmd);
		goto retry_locked;
	}
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(ptl);
		return follow_page_pte(vma, address, pmd, flags);
	}
	if (flags & FOLL_SPLIT) {
		int ret;
		page = pmd_page(*pmd);
		if (is_huge_zero_page(page)) {
			spin_unlock(ptl);
			ret = 0;
			split_huge_pmd(vma, pmd, address);
			if (pmd_trans_unstable(pmd))
				ret = -EBUSY;
		} else {
			if (unlikely(!try_get_page(page))) {
				spin_unlock(ptl);
				return ERR_PTR(-ENOMEM);
			}
			spin_unlock(ptl);
			lock_page(page);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (pmd_none(*pmd))
				return no_page_table(vma, flags);
		}

		return ret ? ERR_PTR(ret) :
			follow_page_pte(vma, address, pmd, flags);
	}
	page = follow_trans_huge_pmd(vma, address, pmd, flags);
	spin_unlock(ptl);
	*page_mask = HPAGE_PMD_NR - 1;
	return page;
}


static struct page *follow_pud_mask(struct vm_area_struct *vma,
				    unsigned long address, p4d_t *p4dp,
				    unsigned int flags, unsigned int *page_mask)
{
	pud_t *pud;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pud = pud_offset(p4dp, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pud(mm, address, pud, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pud_val(*pud)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pud_val(*pud)), flags,
				      PUD_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (pud_devmap(*pud)) {
		ptl = pud_lock(mm, pud);
		page = follow_devmap_pud(vma, address, pud, flags);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	return follow_pmd_mask(vma, address, pud, flags, page_mask);
}


static struct page *follow_p4d_mask(struct vm_area_struct *vma,
				    unsigned long address, pgd_t *pgdp,
				    unsigned int flags, unsigned int *page_mask)
{
	p4d_t *p4d;
	struct page *page;

	p4d = p4d_offset(pgdp, address);
	if (p4d_none(*p4d))
		return no_page_table(vma, flags);
	BUILD_BUG_ON(p4d_huge(*p4d));
	if (unlikely(p4d_bad(*p4d)))
		return no_page_table(vma, flags);

	if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(p4d_val(*p4d)), flags,
				      P4D_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	return follow_pud_mask(vma, address, p4d, flags, page_mask);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @page_mask: on output, *page_mask is set according to the size of the page
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * Returns the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      unsigned int *page_mask)
{
	pgd_t *pgd;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	*page_mask = 0;

	/* make this handle hugepd */
	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		return page;
	}

	pgd = pgd_offset(mm, address);

	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	if (pgd_huge(*pgd)) {
		page = follow_huge_pgd(mm, address, pgd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pgd_val(*pgd)), flags,
				      PGDIR_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}

	return follow_p4d_mask(vma, address, pgd, flags, page_mask);
}

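/*
 * Most callers do not care about the page-size information and go through
 * the follow_page() wrapper in <linux/mm.h>, which in this kernel simply
 * passes a throwaway page_mask.  Illustrative sketch of that wrapper (not a
 * definition belonging to this file):
 *
 *	static inline struct page *follow_page(struct vm_area_struct *vma,
 *			unsigned long address, unsigned int foll_flags)
 *	{
 *		unsigned int unused_page_mask;
 *
 *		return follow_page_mask(vma, address, foll_flags,
 *					&unused_page_mask);
 *	}
 */
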
static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	if (pgd_none(*pgd))
		return -EFAULT;
	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d))
		return -EFAULT;
	pud = pud_offset(p4d, address);
	if (pud_none(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);

		/*
		 * This should never happen (a device public page in the gate
		 * area).
		 */
		if (is_device_public_page(*page))
			goto unmap;
	}
	if (unlikely(!try_get_page(*page))) {
		ret = -ENOMEM;
		goto unmap;
	}
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}

/*
 * mmap_sem must be held on entry.  If @nonblocking != NULL and
 * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
 * If it is, *@nonblocking will be set to 0 and -EBUSY returned.
 */
static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
		unsigned long address, unsigned int *flags, int *nonblocking)
{
	unsigned int fault_flags = 0;
	vm_fault_t ret;

	/* mlock all present pages, but do not fault in new pages */
	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
		return -ENOENT;
	if (*flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (*flags & FOLL_REMOTE)
		fault_flags |= FAULT_FLAG_REMOTE;
	if (nonblocking)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
	if (*flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	if (*flags & FOLL_TRIED) {
		VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
		fault_flags |= FAULT_FLAG_TRIED;
	}

	ret = handle_mm_fault(vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, *flags);

		if (err)
			return err;
		BUG();
	}

	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}

	if (ret & VM_FAULT_RETRY) {
		if (nonblocking && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
			*nonblocking = 0;
		return -EBUSY;
	}

	/*
	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
	 * can thus safely do subsequent page lookups as if they were reads.
	 * But only do so when looping for pte_write is futile: in some cases
	 * userspace may also be wanting to write to the gotten user page,
	 * which a read fault here might prevent (a readonly page might get
	 * reCOWed by userspace write).
	 */
	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
		*flags |= FOLL_COW;
	return 0;
}

static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
	vm_flags_t vm_flags = vma->vm_flags;
	int write = (gup_flags & FOLL_WRITE);
	int foreign = (gup_flags & FOLL_REMOTE);

	if (vm_flags & (VM_IO | VM_PFNMAP))
		return -EFAULT;

	if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
		return -EFAULT;

	if (write) {
		if (!(vm_flags & VM_WRITE)) {
			if (!(gup_flags & FOLL_FORCE))
				return -EFAULT;
			/*
			 * We used to let the write,force case do COW in a
			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
			 * set a breakpoint in a read-only mapping of an
			 * executable, without corrupting the file (yet only
			 * when that file had been opened for writing!).
			 * Anon pages in shared mappings are surprising: now
			 * just reject it.
			 */
			if (!is_cow_mapping(vm_flags))
				return -EFAULT;
		}
	} else if (!(vm_flags & VM_READ)) {
		if (!(gup_flags & FOLL_FORCE))
			return -EFAULT;
		/*
		 * Is there actually any vma we can reach here which does not
		 * have VM_MAYREAD set?
		 */
		if (!(vm_flags & VM_MAYREAD))
			return -EFAULT;
	}
	/*
	 * gups are always data accesses, not instruction
	 * fetches, so execute=false here
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return -EFAULT;
	return 0;
}

/**
 * __get_user_pages() - pin user pages in memory
 * @tsk:	task_struct of target task
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @nonblocking: whether waiting for disk IO or mmap_sem contention
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held.  It may be released.  See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
 * or mmap_sem contention, and if waiting is needed to pin all pages,
 * *@nonblocking will be set to 0.  Further, if @gup_flags does not
 * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
 * this case.
 *
 * A caller using such a combination of @nonblocking and @gup_flags
 * must therefore hold the mmap_sem for reading only, and recognize
 * when it's been released.  Otherwise, it must be held for either
 * reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *nonblocking)
{
	long i = 0;
	unsigned int page_mask;
	struct vm_area_struct *vma = NULL;

	if (!nr_pages)
		return 0;

	start = untagged_addr(start);

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * If FOLL_FORCE is set then do not force a full fault as the hinting
	 * fault information is unrelated to the reference behaviour of a task
	 * using the address space
	 */
	if (!(gup_flags & FOLL_FORCE))
		gup_flags |= FOLL_NUMA;

	do {
		struct page *page;
		unsigned int foll_flags = gup_flags;
		unsigned int page_increm;

		/* first iteration or cross vma bound */
		if (!vma || start >= vma->vm_end) {
			vma = find_extend_vma(mm, start);
			if (!vma && in_gate_area(mm, start)) {
				int ret;
				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &pages[i] : NULL);
				if (ret)
					return i ? : ret;
				page_mask = 0;
				goto next_page;
			}

			if (!vma || check_vma_flags(vma, gup_flags))
				return i ? : -EFAULT;
			if (is_vm_hugetlb_page(vma)) {
				i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
						gup_flags, nonblocking);
				continue;
			}
		}
retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (unlikely(fatal_signal_pending(current)))
			return i ? i : -ERESTARTSYS;
		cond_resched();
		page = follow_page_mask(vma, start, foll_flags, &page_mask);
		if (!page) {
			int ret;
			ret = faultin_page(tsk, vma, start, &foll_flags,
					nonblocking);
			switch (ret) {
			case 0:
				goto retry;
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				return i ? i : ret;
			case -EBUSY:
				return i;
			case -ENOENT:
				goto next_page;
			}
			BUG();
		} else if (PTR_ERR(page) == -EEXIST) {
			/*
			 * Proper page table entry exists, but no corresponding
			 * struct page.
			 */
			goto next_page;
		} else if (IS_ERR(page)) {
			return i ? i : PTR_ERR(page);
		}
		if (pages) {
			pages[i] = page;
			flush_anon_page(vma, page, start);
			flush_dcache_page(page);
			page_mask = 0;
		}
next_page:
		if (vmas) {
			vmas[i] = vma;
			page_mask = 0;
		}
		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;
		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
	return i;
}

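/*
 * Illustrative sketch (not a definition in this file) of how a caller with
 * special @gup_flags uses __get_user_pages(): pin a single page while
 * holding mmap_sem for read, use it, then drop the reference.
 * get_dump_page() further down makes a similar FOLL_FORCE | FOLL_DUMP |
 * FOLL_GET call.
 *
 *	struct page *page;
 *	long nr;
 *
 *	down_read(&mm->mmap_sem);
 *	nr = __get_user_pages(current, mm, addr, 1,
 *			      FOLL_FORCE | FOLL_GET, &page, NULL, NULL);
 *	up_read(&mm->mmap_sem);
 *	if (nr == 1) {
 *		use the page (e.g. via kmap()), then:
 *		put_page(page);
 *	}
 */
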
static bool vma_permits_fault(struct vm_area_struct *vma,
			      unsigned int fault_flags)
{
	bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;

	if (!(vm_flags & vma->vm_flags))
		return false;

	/*
	 * The architecture might have a hardware protection
	 * mechanism other than read/write that can deny access.
	 *
	 * gup always represents data access, not instruction
	 * fetches, so execute=false here:
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return false;

	return true;
}

/*
 * fixup_user_fault() - manually resolve a user page fault
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags: flags to pass down to handle_mm_fault()
 * @unlocked:	did we unlock the mmap_sem while retrying, maybe NULL if caller
 *		does not allow retry
 *
 * This is meant to be called in the specific scenario where for locking reasons
 * we try to access user memory in atomic context (within a pagefault_disable()
 * section), this returns -EFAULT, and we want to resolve the user fault before
 * trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software.  On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This function will not return with an unlocked mmap_sem. So it does not
 * have the same semantics with respect to @mm->mmap_sem as filemap_fault()
 * does.
 */
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags,
		     bool *unlocked)
{
	struct vm_area_struct *vma;
	vm_fault_t ret, major = 0;

	address = untagged_addr(address);

	if (unlocked)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;

retry:
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	if (!vma_permits_fault(vma, fault_flags))
		return -EFAULT;

	ret = handle_mm_fault(vma, address, fault_flags);
	major |= ret & VM_FAULT_MAJOR;
	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, 0);

		if (err)
			return err;
		BUG();
	}

	if (ret & VM_FAULT_RETRY) {
		down_read(&mm->mmap_sem);
		if (!(fault_flags & FAULT_FLAG_TRIED)) {
			*unlocked = true;
			fault_flags &= ~FAULT_FLAG_ALLOW_RETRY;
			fault_flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	if (tsk) {
		if (major)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fixup_user_fault);

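/*
 * Illustrative sketch of the futex-style pattern described above; the helper
 * name and exact flags are made up for illustration and are not part of this
 * file.  A fast path reads user memory with page faults disabled and falls
 * back to fixup_user_fault() when that fails:
 *
 *	int get_user_value(u32 *dest, u32 __user *uaddr)
 *	{
 *		int ret;
 *
 *		pagefault_disable();
 *		ret = __get_user(*dest, uaddr);
 *		pagefault_enable();
 *		if (!ret)
 *			return 0;
 *
 *		down_read(&current->mm->mmap_sem);
 *		ret = fixup_user_fault(current, current->mm,
 *				       (unsigned long)uaddr,
 *				       FAULT_FLAG_WRITE, NULL);
 *		up_read(&current->mm->mmap_sem);
 *		return ret;
 *	}
 */
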
static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
						struct mm_struct *mm,
						unsigned long start,
						unsigned long nr_pages,
						struct page **pages,
						struct vm_area_struct **vmas,
						int *locked,
						unsigned int flags)
{
	long ret, pages_done;
	bool lock_dropped;

	if (locked) {
		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
		BUG_ON(vmas);
		/* check caller initialized locked */
		BUG_ON(*locked != 1);
	}

	if (pages)
		flags |= FOLL_GET;

	pages_done = 0;
	lock_dropped = false;
	for (;;) {
		ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
				       vmas, locked);
		if (!locked)
			/* VM_FAULT_RETRY couldn't trigger, bypass */
			return ret;

		/* VM_FAULT_RETRY cannot return errors */
		if (!*locked) {
			BUG_ON(ret < 0);
			BUG_ON(ret >= nr_pages);
		}

		if (!pages)
			/* If it's a prefault don't insist harder */
			return ret;

		if (ret > 0) {
			nr_pages -= ret;
			pages_done += ret;
			if (!nr_pages)
				break;
		}
		if (*locked) {
			/*
			 * VM_FAULT_RETRY didn't trigger or it was a
			 * FOLL_NOWAIT.
			 */
			if (!pages_done)
				pages_done = ret;
			break;
		}
		/* VM_FAULT_RETRY triggered, so seek to the faulting offset */
		pages += ret;
		start += ret << PAGE_SHIFT;

		/*
		 * Repeat on the address that fired VM_FAULT_RETRY
		 * without FAULT_FLAG_ALLOW_RETRY but with
		 * FAULT_FLAG_TRIED.
		 */
		*locked = 1;
		lock_dropped = true;
		down_read(&mm->mmap_sem);
		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
				       pages, NULL, NULL);
		if (ret != 1) {
			BUG_ON(ret > 1);
			if (!pages_done)
				pages_done = ret;
			break;
		}
		nr_pages--;
		pages_done++;
		if (!nr_pages)
			break;
		pages++;
		start += PAGE_SIZE;
	}
	if (lock_dropped && *locked) {
		/*
		 * We must let the caller know we temporarily dropped the lock
		 * and so the critical section protected by it was lost.
		 */
		up_read(&mm->mmap_sem);
		*locked = 0;
	}
	return pages_done;
}

/*
 * We can leverage the VM_FAULT_RETRY functionality in the page fault
 * paths better by using either get_user_pages_locked() or
 * get_user_pages_unlocked().
 *
 * get_user_pages_locked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages(tsk, mm, ..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 *  to:
 *
 *      int locked = 1;
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages_locked(tsk, mm, ..., pages, &locked);
 *      if (locked)
 *          up_read(&mm->mmap_sem);
 */
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
			   unsigned int gup_flags, struct page **pages,
			   int *locked)
{
	return __get_user_pages_locked(current, current->mm, start, nr_pages,
				       pages, NULL, locked,
				       gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_locked);

/*
 * get_user_pages_unlocked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      get_user_pages(tsk, mm, ..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 *  with:
 *
 *      get_user_pages_unlocked(tsk, mm, ..., pages);
 *
 * It is functionally equivalent to get_user_pages_fast so
 * get_user_pages_fast should be used instead if specific gup_flags
 * (e.g. FOLL_FORCE) are not required.
 */
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
			     struct page **pages, unsigned int gup_flags)
{
	struct mm_struct *mm = current->mm;
	int locked = 1;
	long ret;

	down_read(&mm->mmap_sem);
	ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL,
				      &locked, gup_flags | FOLL_TOUCH);
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(get_user_pages_unlocked);

/*
 * get_user_pages_remote() - pin user pages in memory
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying lookup behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @locked:	pointer to lock flag indicating whether lock is held and
 *		subsequently whether VM_FAULT_RETRY functionality can be
 *		utilised. Lock must initially be held.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
 * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
 * be called after the page is finished with, and before put_page is called.
 *
 * get_user_pages is typically used for fewer-copy IO operations, to get a
 * handle on the memory by some means other than accesses via the user virtual
 * addresses. The pages may be submitted for DMA to devices or accessed via
 * their kernel linear mapping (via the kmap APIs). Care should be taken to
 * use the correct cache flushing APIs.
 *
 * See also get_user_pages_fast, for performance critical applications.
 *
 * get_user_pages should be phased out in favor of
 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
 * should use get_user_pages because it cannot pass
 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
 */
long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *locked)
{
	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
				       locked,
				       gup_flags | FOLL_TOUCH | FOLL_REMOTE);
}
EXPORT_SYMBOL(get_user_pages_remote);

/*
 * This is the same as get_user_pages_remote(), just with a
 * less-flexible calling convention where we assume that the task
 * and mm being operated on are the current task's and don't allow
 * passing of a locked parameter.  We also obviously don't pass
 * FOLL_REMOTE in here.
 */
long get_user_pages(unsigned long start, unsigned long nr_pages,
		    unsigned int gup_flags, struct page **pages,
		    struct vm_area_struct **vmas)
{
	return __get_user_pages_locked(current, current->mm, start, nr_pages,
				       pages, vmas, NULL,
				       gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages);

#ifdef CONFIG_FS_DAX
/*
 * This is the same as get_user_pages() in that it assumes we are
 * operating on the current task's mm, but it goes further to validate
 * that the vmas associated with the address range are suitable for
 * longterm elevated page reference counts. For example, filesystem-dax
 * mappings are subject to the lifetime enforced by the filesystem and
 * we need guarantees that longterm users like RDMA and V4L2 only
 * establish mappings that have a kernel enforced revocation mechanism.
 *
 * "longterm" == userspace controlled elevated page count lifetime.
 * Contrast this to iov_iter_get_pages() usages which are transient.
 */
long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
			     unsigned int gup_flags, struct page **pages,
			     struct vm_area_struct **vmas_arg)
{
	struct vm_area_struct **vmas = vmas_arg;
	struct vm_area_struct *vma_prev = NULL;
	long rc, i;

	if (!pages)
		return -EINVAL;

	if (!vmas) {
		vmas = kcalloc(nr_pages, sizeof(struct vm_area_struct *),
			       GFP_KERNEL);
		if (!vmas)
			return -ENOMEM;
	}

	rc = get_user_pages(start, nr_pages, gup_flags, pages, vmas);

	for (i = 0; i < rc; i++) {
		struct vm_area_struct *vma = vmas[i];

		if (vma == vma_prev)
			continue;

		vma_prev = vma;

		if (vma_is_fsdax(vma))
			break;
	}

	/*
	 * Either get_user_pages() failed, or the vma validation
	 * succeeded, in either case we don't need to put_page() before
	 * returning.
	 */
	if (i >= rc)
		goto out;

	for (i = 0; i < rc; i++)
		put_page(pages[i]);
	rc = -EOPNOTSUPP;
out:
	if (vmas != vmas_arg)
		kfree(vmas);
	return rc;
}
EXPORT_SYMBOL(get_user_pages_longterm);
#endif /* CONFIG_FS_DAX */

/**
 * populate_vma_page_range() -  populate a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 * @nonblocking: if non-NULL, the mmap_sem may be released and *@nonblocking
 *		 set to 0 (see the description below); may be NULL
 *
 * This takes care of mlocking the pages too if VM_LOCKED is set.
 *
 * return 0 on success, negative error code on error.
 *
 * vma->vm_mm->mmap_sem must be held.
 *
 * If @nonblocking is NULL, it may be held for read or write and will
 * be unperturbed.
 *
 * If @nonblocking is non-NULL, it must held for read only and may be
 * released.  If it's released, *@nonblocking will be set to 0.
 */
long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end   & ~PAGE_MASK);
	VM_BUG_ON_VMA(start < vma->vm_start, vma);
	VM_BUG_ON_VMA(end   > vma->vm_end, vma);
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);

	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
	if (vma->vm_flags & VM_LOCKONFAULT)
		gup_flags &= ~FOLL_POPULATE;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
		gup_flags |= FOLL_FORCE;

	/*
	 * We made sure addr is within a VMA, so the following will
	 * not result in a stack expansion that recurses back here.
	 */
	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
				NULL, NULL, nonblocking);
}

/*
 * __mm_populate - populate and/or mlock pages within a range of address space.
 *
 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
 * flags. VMAs must be already marked with the desired vm_flags, and
 * mmap_sem must not be held.
 */
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
{
	struct mm_struct *mm = current->mm;
	unsigned long end, nstart, nend;
	struct vm_area_struct *vma = NULL;
	int locked = 0;
	long ret = 0;

	end = start + len;

	for (nstart = start; nstart < end; nstart = nend) {
		/*
		 * We want to fault in pages for [nstart; end) address range.
		 * Find first corresponding VMA.
		 */
		if (!locked) {
			locked = 1;
			down_read(&mm->mmap_sem);
			vma = find_vma(mm, nstart);
		} else if (nstart >= vma->vm_end)
			vma = vma->vm_next;
		if (!vma || vma->vm_start >= end)
			break;
		/*
		 * Set [nstart; nend) to intersection of desired address
		 * range with the first VMA. Also, skip undesirable VMA types.
		 */
		nend = min(end, vma->vm_end);
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			continue;
		if (nstart < vma->vm_start)
			nstart = vma->vm_start;
		/*
		 * Now fault in a range of pages. populate_vma_page_range()
		 * double checks the vma flags, so that it won't mlock pages
		 * if the vma was already munlocked.
		 */
		ret = populate_vma_page_range(vma, nstart, nend, &locked);
		if (ret < 0) {
			if (ignore_errors) {
				ret = 0;
				continue;	/* continue at next VMA */
			}
			break;
		}
		nend = nstart + ret * PAGE_SIZE;
		ret = 0;
	}
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;	/* 0 or negative error code */
}

/**
 * get_dump_page() - pin user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns struct page pointer of user page pinned for dump,
 * to be freed afterwards by put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 * allowing a hole to be left in the corefile to save diskspace.
 *
 * Called without mmap_sem, but after all other threads have been killed.
 */
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;

	if (__get_user_pages(current, current->mm, addr, 1,
			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
			     NULL) < 1)
		return NULL;
	flush_cache_page(vma, addr, page_to_pfn(page));
	return page;
}
#endif /* CONFIG_ELF_CORE */

/*
 * Generic Fast GUP
 *
 * get_user_pages_fast attempts to pin user pages by walking the page
 * tables directly and avoids taking locks. Thus the walker needs to be
 * protected from page table pages being freed from under it, and should
 * block any THP splits.
 *
 * One way to achieve this is to have the walker disable interrupts, and
 * rely on IPIs from the TLB flushing code blocking before the page table
 * pages are freed. This is unsuitable for architectures that do not need
 * to broadcast an IPI when invalidating TLBs.
 *
 * Another way to achieve this is to batch up page table containing pages
 * belonging to more than one mm_user, then rcu_sched a callback to free those
 * pages. Disabling interrupts will allow the fast_gup walker to both block
 * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
 * (which is a relatively rare event). The code below adopts this strategy.
 *
 * Before activating this code, please be aware that the following assumptions
 * are currently made:
 *
 *  *) Either HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
 *  free pages containing page tables or TLB flushing requires IPI broadcast.
 *
 *  *) ptes can be read atomically by the architecture.
 *
 *  *) access_ok is sufficient to validate userspace address ranges.
 *
 * The last two assumptions can be relaxed by the addition of helper functions.
 *
 * This code is based heavily on the PowerPC implementation by Nick Piggin.
 */
#ifdef CONFIG_HAVE_GENERIC_GUP

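/*
 * Rough sketch of how the strategy above is used by the fast-GUP entry
 * points that appear later in this file: the page-table walk itself runs
 * with interrupts disabled, so neither the rcu_sched callback freeing page
 * tables nor the THP-split IPI can run on this CPU while the tables are
 * being walked.  Roughly:
 *
 *	local_irq_save(flags);
 *	gup_pgd_range(start, end, write, pages, &nr);
 *	local_irq_restore(flags);
 */
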
|  | 1366 | #ifndef gup_get_pte | 
|  | 1367 | /* | 
|  | 1368 | * We assume that the PTE can be read atomically. If this is not the case for | 
|  | 1369 | * your architecture, please provide the helper. | 
|  | 1370 | */ | 
|  | 1371 | static inline pte_t gup_get_pte(pte_t *ptep) | 
|  | 1372 | { | 
|  | 1373 | return READ_ONCE(*ptep); | 
|  | 1374 | } | 
|  | 1375 | #endif | 
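|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative sketch (not built): an architecture whose ptes span two machine | 
|  |  | * words, such as x86 PAE, cannot use the READ_ONCE() fallback above and | 
|  |  | * instead provides its own gup_get_pte() that rereads until both halves are | 
|  |  | * observed consistently.  The pte_low/pte_high field names are an assumption | 
|  |  | * borrowed from the x86 PAE pte layout. | 
|  |  | */ | 
|  |  | #if 0 | 
|  |  | static inline pte_t gup_get_pte(pte_t *ptep) | 
|  |  | { | 
|  |  | pte_t pte; | 
|  |  |  | 
|  |  | do { | 
|  |  | pte.pte_low = ptep->pte_low; | 
|  |  | smp_rmb(); | 
|  |  | pte.pte_high = ptep->pte_high; | 
|  |  | smp_rmb(); | 
|  |  | } while (unlikely(pte.pte_low != ptep->pte_low)); | 
|  |  |  | 
|  |  | return pte; | 
|  |  | } | 
|  |  | #endif | 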
|  | 1376 |  | 
|  | 1377 | static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start, | 
|  | 1378 | struct page **pages) | 
|  | 1379 | { | 
|  | 1380 | while ((*nr) - nr_start) { | 
|  | 1381 | struct page *page = pages[--(*nr)]; | 
|  | 1382 |  | 
|  | 1383 | ClearPageReferenced(page); | 
|  | 1384 | put_page(page); | 
|  | 1385 | } | 
|  | 1386 | } | 
|  | 1387 |  | 
|  | 1388 | /* | 
|  | 1389 | * Return the compound head page with ref appropriately incremented, | 
|  | 1390 | * or NULL if that failed. | 
|  | 1391 | */ | 
|  | 1392 | static inline struct page *try_get_compound_head(struct page *page, int refs) | 
|  | 1393 | { | 
|  | 1394 | struct page *head = compound_head(page); | 
|  | 1395 | if (WARN_ON_ONCE(page_ref_count(head) < 0)) | 
|  | 1396 | return NULL; | 
|  | 1397 | if (unlikely(!page_cache_add_speculative(head, refs))) | 
|  | 1398 | return NULL; | 
|  | 1399 | return head; | 
|  | 1400 | } | 
|  | 1401 |  | 
|  | 1402 | #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL | 
|  | 1403 | static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, | 
|  | 1404 | int write, struct page **pages, int *nr) | 
|  | 1405 | { | 
|  | 1406 | struct dev_pagemap *pgmap = NULL; | 
|  | 1407 | int nr_start = *nr, ret = 0; | 
|  | 1408 | pte_t *ptep, *ptem; | 
|  | 1409 |  | 
|  | 1410 | ptem = ptep = pte_offset_map(&pmd, addr); | 
|  | 1411 | do { | 
|  | 1412 | pte_t pte = gup_get_pte(ptep); | 
|  | 1413 | struct page *head, *page; | 
|  | 1414 |  | 
|  | 1415 | /* | 
|  | 1416 | * Similar to the PMD case below, NUMA hinting must take the slow | 
|  | 1417 | * path using the pte_protnone check. | 
|  | 1418 | */ | 
|  | 1419 | if (pte_protnone(pte)) | 
|  | 1420 | goto pte_unmap; | 
|  | 1421 |  | 
|  | 1422 | if (!pte_access_permitted(pte, write)) | 
|  | 1423 | goto pte_unmap; | 
|  | 1424 |  | 
|  | 1425 | if (pte_devmap(pte)) { | 
|  | 1426 | pgmap = get_dev_pagemap(pte_pfn(pte), pgmap); | 
|  | 1427 | if (unlikely(!pgmap)) { | 
|  | 1428 | undo_dev_pagemap(nr, nr_start, pages); | 
|  | 1429 | goto pte_unmap; | 
|  | 1430 | } | 
|  | 1431 | } else if (pte_special(pte)) | 
|  | 1432 | goto pte_unmap; | 
|  | 1433 |  | 
|  | 1434 | VM_BUG_ON(!pfn_valid(pte_pfn(pte))); | 
|  | 1435 | page = pte_page(pte); | 
|  | 1436 |  | 
|  | 1437 | head = try_get_compound_head(page, 1); | 
|  | 1438 | if (!head) | 
|  | 1439 | goto pte_unmap; | 
|  | 1440 |  | 
|  | 1441 | if (unlikely(pte_val(pte) != pte_val(*ptep))) { | 
|  | 1442 | put_page(head); | 
|  | 1443 | goto pte_unmap; | 
|  | 1444 | } | 
|  | 1445 |  | 
|  | 1446 | VM_BUG_ON_PAGE(compound_head(page) != head, page); | 
|  | 1447 |  | 
|  | 1448 | SetPageReferenced(page); | 
|  | 1449 | pages[*nr] = page; | 
|  | 1450 | (*nr)++; | 
|  | 1451 |  | 
|  | 1452 | } while (ptep++, addr += PAGE_SIZE, addr != end); | 
|  | 1453 |  | 
|  | 1454 | ret = 1; | 
|  | 1455 |  | 
|  | 1456 | pte_unmap: | 
|  | 1457 | if (pgmap) | 
|  | 1458 | put_dev_pagemap(pgmap); | 
|  | 1459 | pte_unmap(ptem); | 
|  | 1460 | return ret; | 
|  | 1461 | } | 
|  | 1462 | #else | 
|  | 1463 |  | 
|  | 1464 | /* | 
|  | 1465 | * If we can't determine whether or not a pte is special, then fail immediately | 
|  | 1466 | * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not | 
|  | 1467 | * to be special. | 
|  | 1468 | * | 
|  | 1469 | * For a futex to be placed on a THP tail page, get_futex_key requires a | 
|  | 1470 | * __get_user_pages_fast implementation that can pin pages. Thus it's still | 
|  | 1471 | * useful to have gup_huge_pmd even if we can't operate on ptes. | 
|  | 1472 | */ | 
|  | 1473 | static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, | 
|  | 1474 | int write, struct page **pages, int *nr) | 
|  | 1475 | { | 
|  | 1476 | return 0; | 
|  | 1477 | } | 
|  | 1478 | #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ | 
|  | 1479 |  | 
|  | 1480 | #if defined(__HAVE_ARCH_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE) | 
|  | 1481 | static int __gup_device_huge(unsigned long pfn, unsigned long addr, | 
|  | 1482 | unsigned long end, struct page **pages, int *nr) | 
|  | 1483 | { | 
|  | 1484 | int nr_start = *nr; | 
|  | 1485 | struct dev_pagemap *pgmap = NULL; | 
|  | 1486 |  | 
|  | 1487 | do { | 
|  | 1488 | struct page *page = pfn_to_page(pfn); | 
|  | 1489 |  | 
|  | 1490 | pgmap = get_dev_pagemap(pfn, pgmap); | 
|  | 1491 | if (unlikely(!pgmap)) { | 
|  | 1492 | undo_dev_pagemap(nr, nr_start, pages); | 
|  | 1493 | return 0; | 
|  | 1494 | } | 
|  | 1495 | SetPageReferenced(page); | 
|  | 1496 | pages[*nr] = page; | 
|  | 1497 | get_page(page); | 
|  | 1498 | (*nr)++; | 
|  | 1499 | pfn++; | 
|  | 1500 | } while (addr += PAGE_SIZE, addr != end); | 
|  | 1501 |  | 
|  | 1502 | if (pgmap) | 
|  | 1503 | put_dev_pagemap(pgmap); | 
|  | 1504 | return 1; | 
|  | 1505 | } | 
|  | 1506 |  | 
|  | 1507 | static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, | 
|  | 1508 | unsigned long end, struct page **pages, int *nr) | 
|  | 1509 | { | 
|  | 1510 | unsigned long fault_pfn; | 
|  | 1511 | int nr_start = *nr; | 
|  | 1512 |  | 
|  | 1513 | fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); | 
|  | 1514 | if (!__gup_device_huge(fault_pfn, addr, end, pages, nr)) | 
|  | 1515 | return 0; | 
|  | 1516 |  | 
|  | 1517 | if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { | 
|  | 1518 | undo_dev_pagemap(nr, nr_start, pages); | 
|  | 1519 | return 0; | 
|  | 1520 | } | 
|  | 1521 | return 1; | 
|  | 1522 | } | 
|  | 1523 |  | 
|  | 1524 | static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, | 
|  | 1525 | unsigned long end, struct page **pages, int *nr) | 
|  | 1526 | { | 
|  | 1527 | unsigned long fault_pfn; | 
|  | 1528 | int nr_start = *nr; | 
|  | 1529 |  | 
|  | 1530 | fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); | 
|  | 1531 | if (!__gup_device_huge(fault_pfn, addr, end, pages, nr)) | 
|  | 1532 | return 0; | 
|  | 1533 |  | 
|  | 1534 | if (unlikely(pud_val(orig) != pud_val(*pudp))) { | 
|  | 1535 | undo_dev_pagemap(nr, nr_start, pages); | 
|  | 1536 | return 0; | 
|  | 1537 | } | 
|  | 1538 | return 1; | 
|  | 1539 | } | 
|  | 1540 | #else | 
|  | 1541 | static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, | 
|  | 1542 | unsigned long end, struct page **pages, int *nr) | 
|  | 1543 | { | 
|  | 1544 | BUILD_BUG(); | 
|  | 1545 | return 0; | 
|  | 1546 | } | 
|  | 1547 |  | 
|  | 1548 | static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr, | 
|  | 1549 | unsigned long end, struct page **pages, int *nr) | 
|  | 1550 | { | 
|  | 1551 | BUILD_BUG(); | 
|  | 1552 | return 0; | 
|  | 1553 | } | 
|  | 1554 | #endif | 
|  | 1555 |  | 
|  | 1556 | static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, | 
|  | 1557 | unsigned long end, int write, struct page **pages, int *nr) | 
|  | 1558 | { | 
|  | 1559 | struct page *head, *page; | 
|  | 1560 | int refs; | 
|  | 1561 |  | 
|  | 1562 | if (!pmd_access_permitted(orig, write)) | 
|  | 1563 | return 0; | 
|  | 1564 |  | 
|  | 1565 | if (pmd_devmap(orig)) | 
|  | 1566 | return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr); | 
|  | 1567 |  | 
|  | 1568 | refs = 0; | 
|  | 1569 | page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); | 
|  | 1570 | do { | 
|  | 1571 | pages[*nr] = page; | 
|  | 1572 | (*nr)++; | 
|  | 1573 | page++; | 
|  | 1574 | refs++; | 
|  | 1575 | } while (addr += PAGE_SIZE, addr != end); | 
|  | 1576 |  | 
|  | 1577 | head = try_get_compound_head(pmd_page(orig), refs); | 
|  | 1578 | if (!head) { | 
|  | 1579 | *nr -= refs; | 
|  | 1580 | return 0; | 
|  | 1581 | } | 
|  | 1582 |  | 
|  | 1583 | if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { | 
|  | 1584 | *nr -= refs; | 
|  | 1585 | while (refs--) | 
|  | 1586 | put_page(head); | 
|  | 1587 | return 0; | 
|  | 1588 | } | 
|  | 1589 |  | 
|  | 1590 | SetPageReferenced(head); | 
|  | 1591 | return 1; | 
|  | 1592 | } | 
|  | 1593 |  | 
|  | 1594 | static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, | 
|  | 1595 | unsigned long end, int write, struct page **pages, int *nr) | 
|  | 1596 | { | 
|  | 1597 | struct page *head, *page; | 
|  | 1598 | int refs; | 
|  | 1599 |  | 
|  | 1600 | if (!pud_access_permitted(orig, write)) | 
|  | 1601 | return 0; | 
|  | 1602 |  | 
|  | 1603 | if (pud_devmap(orig)) | 
|  | 1604 | return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr); | 
|  | 1605 |  | 
|  | 1606 | refs = 0; | 
|  | 1607 | page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); | 
|  | 1608 | do { | 
|  | 1609 | pages[*nr] = page; | 
|  | 1610 | (*nr)++; | 
|  | 1611 | page++; | 
|  | 1612 | refs++; | 
|  | 1613 | } while (addr += PAGE_SIZE, addr != end); | 
|  | 1614 |  | 
|  | 1615 | head = try_get_compound_head(pud_page(orig), refs); | 
|  | 1616 | if (!head) { | 
|  | 1617 | *nr -= refs; | 
|  | 1618 | return 0; | 
|  | 1619 | } | 
|  | 1620 |  | 
|  | 1621 | if (unlikely(pud_val(orig) != pud_val(*pudp))) { | 
|  | 1622 | *nr -= refs; | 
|  | 1623 | while (refs--) | 
|  | 1624 | put_page(head); | 
|  | 1625 | return 0; | 
|  | 1626 | } | 
|  | 1627 |  | 
|  | 1628 | SetPageReferenced(head); | 
|  | 1629 | return 1; | 
|  | 1630 | } | 
|  | 1631 |  | 
|  | 1632 | static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr, | 
|  | 1633 | unsigned long end, int write, | 
|  | 1634 | struct page **pages, int *nr) | 
|  | 1635 | { | 
|  | 1636 | int refs; | 
|  | 1637 | struct page *head, *page; | 
|  | 1638 |  | 
|  | 1639 | if (!pgd_access_permitted(orig, write)) | 
|  | 1640 | return 0; | 
|  | 1641 |  | 
|  | 1642 | BUILD_BUG_ON(pgd_devmap(orig)); | 
|  | 1643 | refs = 0; | 
|  | 1644 | page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT); | 
|  | 1645 | do { | 
|  | 1646 | pages[*nr] = page; | 
|  | 1647 | (*nr)++; | 
|  | 1648 | page++; | 
|  | 1649 | refs++; | 
|  | 1650 | } while (addr += PAGE_SIZE, addr != end); | 
|  | 1651 |  | 
|  | 1652 | head = try_get_compound_head(pgd_page(orig), refs); | 
|  | 1653 | if (!head) { | 
|  | 1654 | *nr -= refs; | 
|  | 1655 | return 0; | 
|  | 1656 | } | 
|  | 1657 |  | 
|  | 1658 | if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) { | 
|  | 1659 | *nr -= refs; | 
|  | 1660 | while (refs--) | 
|  | 1661 | put_page(head); | 
|  | 1662 | return 0; | 
|  | 1663 | } | 
|  | 1664 |  | 
|  | 1665 | SetPageReferenced(head); | 
|  | 1666 | return 1; | 
|  | 1667 | } | 
|  | 1668 |  | 
|  | 1669 | static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, | 
|  | 1670 | int write, struct page **pages, int *nr) | 
|  | 1671 | { | 
|  | 1672 | unsigned long next; | 
|  | 1673 | pmd_t *pmdp; | 
|  | 1674 |  | 
|  | 1675 | pmdp = pmd_offset(&pud, addr); | 
|  | 1676 | do { | 
|  | 1677 | pmd_t pmd = READ_ONCE(*pmdp); | 
|  | 1678 |  | 
|  | 1679 | next = pmd_addr_end(addr, end); | 
|  | 1680 | if (!pmd_present(pmd)) | 
|  | 1681 | return 0; | 
|  | 1682 |  | 
|  | 1683 | if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) || | 
|  | 1684 | pmd_devmap(pmd))) { | 
|  | 1685 | /* | 
|  | 1686 | * NUMA hinting faults need to be handled in the GUP | 
|  | 1687 | * slowpath for accounting purposes and so that they | 
|  | 1688 | * can be serialised against THP migration. | 
|  | 1689 | */ | 
|  | 1690 | if (pmd_protnone(pmd)) | 
|  | 1691 | return 0; | 
|  | 1692 |  | 
|  | 1693 | if (!gup_huge_pmd(pmd, pmdp, addr, next, write, | 
|  | 1694 | pages, nr)) | 
|  | 1695 | return 0; | 
|  | 1696 |  | 
|  | 1697 | } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) { | 
|  | 1698 | /* | 
|  | 1699 | * architectures can use a different format for the | 
|  | 1700 | * hugetlbfs pmd than for the THP pmd | 
|  | 1701 | */ | 
|  | 1702 | if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr, | 
|  | 1703 | PMD_SHIFT, next, write, pages, nr)) | 
|  | 1704 | return 0; | 
|  | 1705 | } else if (!gup_pte_range(pmd, addr, next, write, pages, nr)) | 
|  | 1706 | return 0; | 
|  | 1707 | } while (pmdp++, addr = next, addr != end); | 
|  | 1708 |  | 
|  | 1709 | return 1; | 
|  | 1710 | } | 
|  | 1711 |  | 
|  | 1712 | static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end, | 
|  | 1713 | int write, struct page **pages, int *nr) | 
|  | 1714 | { | 
|  | 1715 | unsigned long next; | 
|  | 1716 | pud_t *pudp; | 
|  | 1717 |  | 
|  | 1718 | pudp = pud_offset(&p4d, addr); | 
|  | 1719 | do { | 
|  | 1720 | pud_t pud = READ_ONCE(*pudp); | 
|  | 1721 |  | 
|  | 1722 | next = pud_addr_end(addr, end); | 
|  | 1723 | if (pud_none(pud)) | 
|  | 1724 | return 0; | 
|  | 1725 | if (unlikely(pud_huge(pud))) { | 
|  | 1726 | if (!gup_huge_pud(pud, pudp, addr, next, write, | 
|  | 1727 | pages, nr)) | 
|  | 1728 | return 0; | 
|  | 1729 | } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) { | 
|  | 1730 | if (!gup_huge_pd(__hugepd(pud_val(pud)), addr, | 
|  | 1731 | PUD_SHIFT, next, write, pages, nr)) | 
|  | 1732 | return 0; | 
|  | 1733 | } else if (!gup_pmd_range(pud, addr, next, write, pages, nr)) | 
|  | 1734 | return 0; | 
|  | 1735 | } while (pudp++, addr = next, addr != end); | 
|  | 1736 |  | 
|  | 1737 | return 1; | 
|  | 1738 | } | 
|  | 1739 |  | 
|  | 1740 | static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end, | 
|  | 1741 | int write, struct page **pages, int *nr) | 
|  | 1742 | { | 
|  | 1743 | unsigned long next; | 
|  | 1744 | p4d_t *p4dp; | 
|  | 1745 |  | 
|  | 1746 | p4dp = p4d_offset(&pgd, addr); | 
|  | 1747 | do { | 
|  | 1748 | p4d_t p4d = READ_ONCE(*p4dp); | 
|  | 1749 |  | 
|  | 1750 | next = p4d_addr_end(addr, end); | 
|  | 1751 | if (p4d_none(p4d)) | 
|  | 1752 | return 0; | 
|  | 1753 | BUILD_BUG_ON(p4d_huge(p4d)); | 
|  | 1754 | if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) { | 
|  | 1755 | if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr, | 
|  | 1756 | P4D_SHIFT, next, write, pages, nr)) | 
|  | 1757 | return 0; | 
|  | 1758 | } else if (!gup_pud_range(p4d, addr, next, write, pages, nr)) | 
|  | 1759 | return 0; | 
|  | 1760 | } while (p4dp++, addr = next, addr != end); | 
|  | 1761 |  | 
|  | 1762 | return 1; | 
|  | 1763 | } | 
|  | 1764 |  | 
|  | 1765 | static void gup_pgd_range(unsigned long addr, unsigned long end, | 
|  | 1766 | int write, struct page **pages, int *nr) | 
|  | 1767 | { | 
|  | 1768 | unsigned long next; | 
|  | 1769 | pgd_t *pgdp; | 
|  | 1770 |  | 
|  | 1771 | pgdp = pgd_offset(current->mm, addr); | 
|  | 1772 | do { | 
|  | 1773 | pgd_t pgd = READ_ONCE(*pgdp); | 
|  | 1774 |  | 
|  | 1775 | next = pgd_addr_end(addr, end); | 
|  | 1776 | if (pgd_none(pgd)) | 
|  | 1777 | return; | 
|  | 1778 | if (unlikely(pgd_huge(pgd))) { | 
|  | 1779 | if (!gup_huge_pgd(pgd, pgdp, addr, next, write, | 
|  | 1780 | pages, nr)) | 
|  | 1781 | return; | 
|  | 1782 | } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) { | 
|  | 1783 | if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr, | 
|  | 1784 | PGDIR_SHIFT, next, write, pages, nr)) | 
|  | 1785 | return; | 
|  | 1786 | } else if (!gup_p4d_range(pgd, addr, next, write, pages, nr)) | 
|  | 1787 | return; | 
|  | 1788 | } while (pgdp++, addr = next, addr != end); | 
|  | 1789 | } | 
|  | 1790 |  | 
|  | 1791 | #ifndef gup_fast_permitted | 
|  | 1792 | /* | 
|  | 1793 | * Check if it's allowed to use __get_user_pages_fast() for the range, or | 
|  | 1794 | * whether we need to fall back to the slow version: | 
|  | 1795 | */ | 
|  | 1796 | bool gup_fast_permitted(unsigned long start, int nr_pages, int write) | 
|  | 1797 | { | 
|  | 1798 | unsigned long len, end; | 
|  | 1799 |  | 
|  | 1800 | len = (unsigned long) nr_pages << PAGE_SHIFT; | 
|  | 1801 | end = start + len; | 
|  | 1802 | return end >= start; | 
|  | 1803 | } | 
|  | 1804 | #endif | 
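|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative sketch (not built): an architecture can override the check | 
|  |  | * above with something stricter.  The version below is modeled on the x86-64 | 
|  |  | * helper, which also rejects ranges extending beyond the user portion of the | 
|  |  | * address space; __VIRTUAL_MASK_SHIFT is an x86-64 symbol and an assumption | 
|  |  | * here. | 
|  |  | */ | 
|  |  | #if 0 | 
|  |  | static inline bool gup_fast_permitted(unsigned long start, int nr_pages, | 
|  |  | int write) | 
|  |  | { | 
|  |  | unsigned long len, end; | 
|  |  |  | 
|  |  | len = (unsigned long) nr_pages << PAGE_SHIFT; | 
|  |  | end = start + len; | 
|  |  | if (end < start) | 
|  |  | return false; | 
|  |  | if (end >> __VIRTUAL_MASK_SHIFT) | 
|  |  | return false; | 
|  |  | return true; | 
|  |  | } | 
|  |  | #endif | 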
|  | 1805 |  | 
|  | 1806 | /* | 
|  | 1807 | * Like get_user_pages_fast() except it's IRQ-safe, in that it won't fall back to | 
|  | 1808 | * the regular GUP. | 
|  | 1809 | * Note one difference from get_user_pages_fast(): this always returns the | 
|  | 1810 | * number of pages pinned, or 0 if no pages were pinned. | 
|  | 1811 | */ | 
|  | 1812 | int __get_user_pages_fast(unsigned long start, int nr_pages, int write, | 
|  | 1813 | struct page **pages) | 
|  | 1814 | { | 
|  | 1815 | unsigned long addr, len, end; | 
|  | 1816 | unsigned long flags; | 
|  | 1817 | int nr = 0; | 
|  | 1818 |  | 
|  | 1819 | start &= PAGE_MASK; | 
|  | 1820 | addr = start; | 
|  | 1821 | len = (unsigned long) nr_pages << PAGE_SHIFT; | 
|  | 1822 | end = start + len; | 
|  | 1823 |  | 
|  | 1824 | if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, | 
|  | 1825 | (void __user *)start, len))) | 
|  | 1826 | return 0; | 
|  | 1827 |  | 
|  | 1828 | /* | 
|  | 1829 | * Disable interrupts.  We use the nested form as we can already have | 
|  | 1830 | * interrupts disabled by get_futex_key. | 
|  | 1831 | * | 
|  | 1832 | * With interrupts disabled, we block page table pages from being | 
|  | 1833 | * freed from under us. See mmu_gather_tlb in asm-generic/tlb.h | 
|  | 1834 | * for more details. | 
|  | 1835 | * | 
|  | 1836 | * We do not adopt an rcu_read_lock(.) here as we also want to | 
|  | 1837 | * block IPIs that come from THPs splitting. | 
|  | 1838 | */ | 
|  | 1839 |  | 
|  | 1840 | if (gup_fast_permitted(start, nr_pages, write)) { | 
|  | 1841 | local_irq_save(flags); | 
|  | 1842 | gup_pgd_range(addr, end, write, pages, &nr); | 
|  | 1843 | local_irq_restore(flags); | 
|  | 1844 | } | 
|  | 1845 |  | 
|  | 1846 | return nr; | 
|  | 1847 | } | 
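|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative sketch (not built): a typical __get_user_pages_fast() caller | 
|  |  | * runs in a context where taking mmap_sem or sleeping is not acceptable, tries | 
|  |  | * to pin a single page, and falls back to its own slow path when nothing was | 
|  |  | * pinned.  The helper name is hypothetical and assumes the value read does not | 
|  |  | * straddle a page boundary. | 
|  |  | */ | 
|  |  | #if 0 | 
|  |  | static bool read_user_word_fast(unsigned long addr, unsigned long *val) | 
|  |  | { | 
|  |  | struct page *page; | 
|  |  | void *kaddr; | 
|  |  |  | 
|  |  | /* Returns the number of pages pinned: 1 on success, 0 otherwise. */ | 
|  |  | if (__get_user_pages_fast(addr & PAGE_MASK, 1, 0, &page) != 1) | 
|  |  | return false; | 
|  |  |  | 
|  |  | kaddr = kmap_atomic(page); | 
|  |  | *val = *(unsigned long *)(kaddr + (addr & ~PAGE_MASK)); | 
|  |  | kunmap_atomic(kaddr); | 
|  |  | put_page(page); | 
|  |  | return true; | 
|  |  | } | 
|  |  | #endif | 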
|  | 1848 |  | 
|  | 1849 | /** | 
|  | 1850 | * get_user_pages_fast() - pin user pages in memory | 
|  | 1851 | * @start:	starting user address | 
|  | 1852 | * @nr_pages:	number of pages from start to pin | 
|  | 1853 | * @write:	whether pages will be written to | 
|  | 1854 | * @pages:	array that receives pointers to the pages pinned. | 
|  | 1855 | *		Should be at least nr_pages long. | 
|  | 1856 | * | 
|  | 1857 | * Attempt to pin user pages in memory without taking mm->mmap_sem. | 
|  | 1858 | * If not successful, it will fall back to taking the lock and | 
|  | 1859 | * calling get_user_pages(). | 
|  | 1860 | * | 
|  | 1861 | * Returns number of pages pinned. This may be fewer than the number | 
|  | 1862 | * requested. If nr_pages is 0 or negative, returns 0. If no pages | 
|  | 1863 | * were pinned, returns -errno. | 
|  | 1864 | */ | 
|  | 1865 | int get_user_pages_fast(unsigned long start, int nr_pages, int write, | 
|  | 1866 | struct page **pages) | 
|  | 1867 | { | 
|  | 1868 | unsigned long addr, len, end; | 
|  | 1869 | int nr = 0, ret = 0; | 
|  | 1870 |  | 
|  | 1871 | start &= PAGE_MASK; | 
|  | 1872 | addr = start; | 
|  | 1873 | len = (unsigned long) nr_pages << PAGE_SHIFT; | 
|  | 1874 | end = start + len; | 
|  | 1875 |  | 
|  | 1876 | if (nr_pages <= 0) | 
|  | 1877 | return 0; | 
|  | 1878 |  | 
|  | 1879 | if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, | 
|  | 1880 | (void __user *)start, len))) | 
|  | 1881 | return -EFAULT; | 
|  | 1882 |  | 
|  | 1883 | if (gup_fast_permitted(start, nr_pages, write)) { | 
|  | 1884 | local_irq_disable(); | 
|  | 1885 | gup_pgd_range(addr, end, write, pages, &nr); | 
|  | 1886 | local_irq_enable(); | 
|  | 1887 | ret = nr; | 
|  | 1888 | } | 
|  | 1889 |  | 
|  | 1890 | if (nr < nr_pages) { | 
|  | 1891 | /* Try to get the remaining pages with get_user_pages */ | 
|  | 1892 | start += nr << PAGE_SHIFT; | 
|  | 1893 | pages += nr; | 
|  | 1894 |  | 
|  | 1895 | ret = get_user_pages_unlocked(start, nr_pages - nr, pages, | 
|  | 1896 | write ? FOLL_WRITE : 0); | 
|  | 1897 |  | 
|  | 1898 | /* Have to be a bit careful with return values */ | 
|  | 1899 | if (nr > 0) { | 
|  | 1900 | if (ret < 0) | 
|  | 1901 | ret = nr; | 
|  | 1902 | else | 
|  | 1903 | ret += nr; | 
|  | 1904 | } | 
|  | 1905 | } | 
|  | 1906 |  | 
|  | 1907 | return ret; | 
|  | 1908 | } | 
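|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative sketch (not built): the common driver pattern for pinning a | 
|  |  | * user buffer, e.g. before mapping it for DMA, and releasing the pages on | 
|  |  | * failure.  All names below are hypothetical. | 
|  |  | */ | 
|  |  | #if 0 | 
|  |  | static int pin_user_buffer_example(unsigned long uaddr, size_t size, | 
|  |  | struct page **pages, int max_pages) | 
|  |  | { | 
|  |  | int nr_pages = DIV_ROUND_UP((uaddr & ~PAGE_MASK) + size, PAGE_SIZE); | 
|  |  | int i, pinned; | 
|  |  |  | 
|  |  | if (nr_pages > max_pages) | 
|  |  | return -EINVAL; | 
|  |  |  | 
|  |  | /* write = 1: the device will write into the buffer. */ | 
|  |  | pinned = get_user_pages_fast(uaddr, nr_pages, 1, pages); | 
|  |  | if (pinned < 0) | 
|  |  | return pinned; | 
|  |  | if (pinned < nr_pages) { | 
|  |  | /* Partial pin: drop what we got and report failure. */ | 
|  |  | for (i = 0; i < pinned; i++) | 
|  |  | put_page(pages[i]); | 
|  |  | return -EFAULT; | 
|  |  | } | 
|  |  | return pinned; | 
|  |  | } | 
|  |  | #endif | 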
|  | 1909 |  | 
|  | 1910 | #endif /* CONFIG_HAVE_GENERIC_GUP */ |