/*
 * KVM guest address space mapping code
 *
 * Copyright IBM Corp. 2007, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>

#define GMAP_SHADOW_FAKE_TABLE 1ULL

/**
 * gmap_alloc - allocate and initialize a guest address space
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */
static struct gmap *gmap_alloc(unsigned long limit)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;
	unsigned long etype, atype;

	if (limit < _REGION3_SIZE) {
		limit = _REGION3_SIZE - 1;
		atype = _ASCE_TYPE_SEGMENT;
		etype = _SEGMENT_ENTRY_EMPTY;
	} else if (limit < _REGION2_SIZE) {
		limit = _REGION2_SIZE - 1;
		atype = _ASCE_TYPE_REGION3;
		etype = _REGION3_ENTRY_EMPTY;
	} else if (limit < _REGION1_SIZE) {
		limit = _REGION1_SIZE - 1;
		atype = _ASCE_TYPE_REGION2;
		etype = _REGION2_ENTRY_EMPTY;
	} else {
		limit = -1UL;
		atype = _ASCE_TYPE_REGION1;
		etype = _REGION1_ENTRY_EMPTY;
	}
	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	INIT_LIST_HEAD(&gmap->children);
	INIT_LIST_HEAD(&gmap->pt_list);
	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
	INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC);
	spin_lock_init(&gmap->guest_table_lock);
	spin_lock_init(&gmap->shadow_lock);
	atomic_set(&gmap->ref_count, 1);
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		goto out_free;
	page->index = 0;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, etype);
	gmap->table = table;
	gmap->asce = atype | _ASCE_TABLE_LENGTH |
		_ASCE_USER_BITS | __pa(table);
	gmap->asce_end = limit;
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}

/**
 * gmap_create - create a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum size of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
	struct gmap *gmap;
	unsigned long gmap_asce;

	gmap = gmap_alloc(limit);
	if (!gmap)
		return NULL;
	gmap->mm = mm;
	spin_lock(&mm->context.lock);
	list_add_rcu(&gmap->list, &mm->context.gmap_list);
	if (list_is_singular(&mm->context.gmap_list))
		gmap_asce = gmap->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
	spin_unlock(&mm->context.lock);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_create);
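
/*
 * Usage sketch (illustrative, not part of this file): a caller such as
 * KVM typically creates one gmap per guest and tears it down again with
 * gmap_remove(). The limit below is an arbitrary example value.
 *
 *	struct gmap *g;
 *
 *	g = gmap_create(current->mm, (1UL << 44) - 1);
 *	if (!g)
 *		return -ENOMEM;
 *	...
 *	gmap_remove(g);
 */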

static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(gmap->asce);
	else
		__tlb_flush_global();
}

static void gmap_radix_tree_free(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void __rcu **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			radix_tree_delete(root, index);
		}
	} while (nr > 0);
}

static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
{
	struct gmap_rmap *rmap, *rnext, *head;
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void __rcu **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			head = radix_tree_delete(root, index);
			gmap_for_each_rmap_safe(rmap, rnext, head)
				kfree(rmap);
		}
	} while (nr > 0);
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 *
 * No locks required. There are no references to this gmap anymore.
 */
static void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;

	/* Flush tlb of all gmaps (if not already done for shadows) */
	if (!(gmap_is_shadow(gmap) && gmap->removed))
		gmap_flush_tlb(gmap);
	/* Free all segment & region tables. */
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
		__free_pages(page, CRST_ALLOC_ORDER);
	gmap_radix_tree_free(&gmap->guest_to_host);
	gmap_radix_tree_free(&gmap->host_to_guest);

	/* Free additional data for a shadow gmap */
	if (gmap_is_shadow(gmap)) {
		/* Free all page tables. */
		list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
			page_table_free_pgste(page);
		gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
		/* Release reference to the parent */
		gmap_put(gmap->parent);
	}

	kfree(gmap);
}

/**
 * gmap_get - increase reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Returns the gmap pointer
 */
struct gmap *gmap_get(struct gmap *gmap)
{
	atomic_inc(&gmap->ref_count);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_get);

/**
 * gmap_put - decrease reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * If the reference counter reaches zero the guest address space is freed.
 */
void gmap_put(struct gmap *gmap)
{
	if (atomic_dec_return(&gmap->ref_count) == 0)
		gmap_free(gmap);
}
EXPORT_SYMBOL_GPL(gmap_put);
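
/*
 * Reference counting sketch (illustrative): a secondary user keeps the
 * gmap alive with an extra reference and drops it again later; the
 * structure is only freed once the last reference is gone.
 *
 *	struct gmap *g2 = gmap_get(g);	(ref_count 1 -> 2)
 *	...
 *	gmap_put(g2);			(ref_count 2 -> 1)
 */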

/**
 * gmap_remove - remove a guest address space but do not free it yet
 * @gmap: pointer to the guest address space structure
 */
void gmap_remove(struct gmap *gmap)
{
	struct gmap *sg, *next;
	unsigned long gmap_asce;

	/* Remove all shadow gmaps linked to this gmap */
	if (!list_empty(&gmap->children)) {
		spin_lock(&gmap->shadow_lock);
		list_for_each_entry_safe(sg, next, &gmap->children, list) {
			list_del(&sg->list);
			gmap_put(sg);
		}
		spin_unlock(&gmap->shadow_lock);
	}
	/* Remove gmap from the per-mm list */
	spin_lock(&gmap->mm->context.lock);
	list_del_rcu(&gmap->list);
	if (list_empty(&gmap->mm->context.gmap_list))
		gmap_asce = 0;
	else if (list_is_singular(&gmap->mm->context.gmap_list))
		gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
					     struct gmap, list)->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
	spin_unlock(&gmap->mm->context.lock);
	synchronize_rcu();
	/* Put reference */
	gmap_put(gmap);
}
EXPORT_SYMBOL_GPL(gmap_remove);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/**
 * gmap_get_enabled - get a pointer to the currently enabled gmap
 *
 * Returns a pointer to the currently enabled gmap, or NULL if none is
 * enabled.
 */
struct gmap *gmap_get_enabled(void)
{
	return (struct gmap *) S390_lowcore.gmap;
}
EXPORT_SYMBOL_GPL(gmap_get_enabled);

/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
			    unsigned long init, unsigned long gaddr)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	spin_lock(&gmap->guest_table_lock);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
		page->index = gaddr;
		page = NULL;
	}
	spin_unlock(&gmap->guest_table_lock);
	if (page)
		__free_pages(page, CRST_ALLOC_ORDER);
	return 0;
}

/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
	struct page *page;
	unsigned long offset, mask;

	offset = (unsigned long) entry / sizeof(unsigned long);
	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
	mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	page = virt_to_page((void *)((unsigned long) entry & mask));
	return page->index + offset;
}
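
/*
 * Worked example (illustrative): if the segment table page was
 * installed for guest address 0x80000000 (i.e. page->index is
 * 0x80000000) and @entry points at the eighth entry of that table
 * (&table[7]), the entry index is 7 and the returned guest address is
 * 0x80000000 + 7 * PMD_SIZE.
 */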

/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
	unsigned long *entry;
	int flush = 0;

	BUG_ON(gmap_is_shadow(gmap));
	spin_lock(&gmap->guest_table_lock);
	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
	if (entry) {
		flush = (*entry != _SEGMENT_ENTRY_EMPTY);
		*entry = _SEGMENT_ENTRY_EMPTY;
	}
	spin_unlock(&gmap->guest_table_lock);
	return flush;
}

/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE)
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len < from || to + len < to ||
	    from + len - 1 > TASK_SIZE_MAX || to + len - 1 > gmap->asce_end)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Remove old translation */
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
		/* Store new translation */
		if (radix_tree_insert(&gmap->guest_to_host,
				      (to + off) >> PMD_SHIFT,
				      (void *) from + off))
			break;
	}
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	if (off >= len)
		return 0;
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
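
/*
 * Usage sketch (illustrative, example values made up): map 16 MB of the
 * parent address space at guest address 0 and undo the mapping again.
 * All three values must be aligned to the 1 MB segment size.
 *
 *	rc = gmap_map_segment(g, 0x10000000, 0x0, 0x1000000);
 *	if (rc)
 *		return rc;
 *	...
 *	gmap_unmap_segment(g, 0x0, 0x1000000);
 */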

/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 *
 * Note: Can also be called for shadow gmaps.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long)
		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
	/* Note: guest_to_host is empty for a shadow gmap */
	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(gmap, gaddr);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
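
/*
 * Usage sketch (illustrative): look up the user space address backing a
 * guest address. The return value is an address on success and -EFAULT,
 * detectable via IS_ERR_VALUE(), if no mapping exists.
 *
 *	unsigned long vmaddr;
 *
 *	vmaddr = gmap_translate(g, gaddr);
 *	if (IS_ERR_VALUE(vmaddr))
 *		return (long) vmaddr;
 */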

/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
		 unsigned long vmaddr)
{
	struct gmap *gmap;
	int flush;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
		if (flush)
			gmap_flush_tlb(gmap);
	}
	rcu_read_unlock();
}

/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned long *table;
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	/* Create higher level tables in the gmap page table */
	table = gmap->table;
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
				     gaddr & _REGION1_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
				     gaddr & _REGION2_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
				     gaddr & _REGION3_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	/* Walk the parent mm page table */
	mm = gmap->mm;
	pgd = pgd_offset(mm, vmaddr);
	VM_BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, vmaddr);
	VM_BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, vmaddr);
	VM_BUG_ON(pud_none(*pud));
	/* large puds cannot yet be handled */
	if (pud_large(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, vmaddr);
	VM_BUG_ON(pmd_none(*pmd));
	/* large pmds cannot yet be handled */
	if (pmd_large(*pmd))
		return -EFAULT;
	/* Link gmap segment table entry location to page table. */
	rc = radix_tree_preload(GFP_KERNEL);
	if (rc)
		return rc;
	ptl = pmd_lock(mm, pmd);
	spin_lock(&gmap->guest_table_lock);
	if (*table == _SEGMENT_ENTRY_EMPTY) {
		rc = radix_tree_insert(&gmap->host_to_guest,
				       vmaddr >> PMD_SHIFT, table);
		if (!rc)
			*table = pmd_val(*pmd);
	} else
		rc = 0;
	spin_unlock(&gmap->guest_table_lock);
	spin_unlock(ptl);
	radix_tree_preload_end();
	return rc;
}

/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
	       unsigned int fault_flags)
{
	unsigned long vmaddr;
	int rc;
	bool unlocked;

	down_read(&gmap->mm->mmap_sem);

retry:
	unlocked = false;
	vmaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(vmaddr)) {
		rc = vmaddr;
		goto out_up;
	}
	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
			     &unlocked)) {
		rc = -EFAULT;
		goto out_up;
	}
	/*
	 * In the case that fixup_user_fault unlocked the mmap_sem during
	 * fault-in, redo __gmap_translate to avoid racing with a
	 * map/unmap_segment.
	 */
	if (unlocked)
		goto retry;

	rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
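
/*
 * Usage sketch (illustrative): resolve a guest write fault at gaddr by
 * faulting the backing page in and linking it into the gmap.
 *
 *	rc = gmap_fault(g, gaddr, FAULT_FLAG_WRITE);
 *	if (rc)
 *		return rc;	(-EFAULT or -ENOMEM)
 */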

/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;

	/* Find the vm address for the guest address */
	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	if (vmaddr) {
		vmaddr |= gaddr & ~PMD_MASK;
		/* Get pointer to the page table entry */
		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
		if (likely(ptep))
			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
		pte_unmap_unlock(ptep, ptl);
	}
}
EXPORT_SYMBOL_GPL(__gmap_zap);

void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
	unsigned long gaddr, vmaddr, size;
	struct vm_area_struct *vma;

	down_read(&gmap->mm->mmap_sem);
	for (gaddr = from; gaddr < to;
	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
		/* Find the vm address for the guest address */
		vmaddr = (unsigned long)
			radix_tree_lookup(&gmap->guest_to_host,
					  gaddr >> PMD_SHIFT);
		if (!vmaddr)
			continue;
		vmaddr |= gaddr & ~PMD_MASK;
		/* Find vma in the parent mm */
		vma = find_vma(gmap->mm, vmaddr);
		if (!vma)
			continue;
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range(vma, vmaddr, size);
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);
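
/*
 * Usage sketch (illustrative): zap the host pages backing the first
 * 16 MB of the guest address space, e.g. in response to a discard hint
 * from the guest. The gmap mappings themselves stay intact.
 *
 *	gmap_discard(g, 0x0, 0x1000000);
 */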

static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_pte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add_rcu(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);

/**
 * gmap_unregister_pte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_rcu(&nb->list);
	spin_unlock(&gmap_notifier_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);
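
/*
 * Usage sketch (illustrative, names made up): a minimal invalidation
 * callback. Registered callbacks run whenever a pte that was armed via
 * PGSTE_IN_BIT changes.
 *
 *	static void my_notifier_call(struct gmap *gmap, unsigned long start,
 *				     unsigned long end)
 *	{
 *		... react to the invalidated guest range [start, end] ...
 *	}
 *
 *	static struct gmap_notifier my_nb = {
 *		.notifier_call = my_notifier_call,
 *	};
 *
 *	gmap_register_pte_notifier(&my_nb);
 *	...
 *	gmap_unregister_pte_notifier(&my_nb);
 */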

/**
 * gmap_call_notifier - call all registered invalidation callbacks
 * @gmap: pointer to guest mapping meta data structure
 * @start: start virtual address in the guest address space
 * @end: end virtual address in the guest address space
 */
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
			       unsigned long end)
{
	struct gmap_notifier *nb;

	list_for_each_entry(nb, &gmap_notifier_list, list)
		nb->notifier_call(gmap, start, end);
}

/**
 * gmap_table_walk - walk the gmap page tables
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @level: page table level to stop at
 *
 * Returns a table entry pointer for the given guest address and @level
 * @level=0 : returns a pointer to a page table entry (or NULL)
 * @level=1 : returns a pointer to a segment table entry (or NULL)
 * @level=2 : returns a pointer to a region-3 table entry (or NULL)
 * @level=3 : returns a pointer to a region-2 table entry (or NULL)
 * @level=4 : returns a pointer to a region-1 table entry (or NULL)
 *
 * Returns NULL if the gmap page tables could not be walked to the
 * requested level.
 *
 * Note: Can also be called for shadow gmaps.
 */
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
					     unsigned long gaddr, int level)
{
	const int asce_type = gmap->asce & _ASCE_TYPE_MASK;
	unsigned long *table;

	if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4))
		return NULL;
	if (gmap_is_shadow(gmap) && gmap->removed)
		return NULL;

	if (asce_type != _ASCE_TYPE_REGION1 &&
	    gaddr & (-1UL << (31 + (asce_type >> 2) * 11)))
		return NULL;

	table = gmap->table;
	switch (gmap->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (level == 4)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_REGION2:
		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (level == 3)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_REGION3:
		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (level == 2)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (level == 1)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
		table += (gaddr & _PAGE_INDEX) >> _PAGE_SHIFT;
	}
	return table;
}

/**
 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
 *		      and return the pte pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @ptl: pointer to the spinlock pointer
 *
 * Returns a pointer to the locked pte for a guest address, or NULL
 *
 * Note: Can also be called for shadow gmaps.
 */
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
			       spinlock_t **ptl)
{
	unsigned long *table;

	if (gmap_is_shadow(gmap))
		spin_lock(&gmap->guest_table_lock);
	/* Walk the gmap page table, lock and get pte pointer */
	table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
	if (!table || *table & _SEGMENT_ENTRY_INVALID) {
		if (gmap_is_shadow(gmap))
			spin_unlock(&gmap->guest_table_lock);
		return NULL;
	}
	if (gmap_is_shadow(gmap)) {
		*ptl = &gmap->guest_table_lock;
		return pte_offset_map((pmd_t *) table, gaddr);
	}
	return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
}

/**
 * gmap_pte_op_fixup - force a page in and connect the gmap page table
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @vmaddr: address in the host process address space
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if the caller can retry __gmap_translate (might fail again),
 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
 * up or connecting the gmap page table.
 */
static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
			     unsigned long vmaddr, int prot)
{
	struct mm_struct *mm = gmap->mm;
	unsigned int fault_flags;
	bool unlocked = false;

	BUG_ON(gmap_is_shadow(gmap));
	fault_flags = (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
	if (fixup_user_fault(current, mm, vmaddr, fault_flags, &unlocked))
		return -EFAULT;
	if (unlocked)
		/* lost mmap_sem, caller has to retry __gmap_translate */
		return 0;
	/* Connect the page tables */
	return __gmap_link(gmap, gaddr, vmaddr);
}

/**
 * gmap_pte_op_end - release the page table lock
 * @ptl: pointer to the spinlock pointer
 */
static void gmap_pte_op_end(spinlock_t *ptl)
{
	spin_unlock(ptl);
}

/*
 * gmap_protect_range - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: pgste notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
 *
 * Called with sg->mm->mmap_sem in read.
 *
 * Note: Can also be called for shadow gmaps.
 */
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
			      unsigned long len, int prot, unsigned long bits)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	while (len) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, bits);
			gmap_pte_op_end(ptl);
		}
		if (rc) {
			vmaddr = __gmap_translate(gmap, gaddr);
			if (IS_ERR_VALUE(vmaddr))
				return vmaddr;
			rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
			if (rc)
				return rc;
			continue;
		}
		gaddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}

/**
 * gmap_mprotect_notify - change access rights for a range of ptes and
 *			  call the notifier if any pte changes again
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if for each page in the given range a gmap mapping exists,
 * the new access rights could be set and the notifier could be armed.
 * If the gmap mapping is missing for one or more pages -EFAULT is
 * returned. If no memory could be allocated -ENOMEM is returned.
 * This function establishes missing page table entries.
 */
int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
			 unsigned long len, int prot)
{
	int rc;

	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
		return -EINVAL;
	if (!MACHINE_HAS_ESOP && prot == PROT_READ)
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	rc = gmap_protect_range(gmap, gaddr, len, prot, PGSTE_IN_BIT);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
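
/*
 * Usage sketch (illustrative): write-protect a single guest page with
 * PROT_READ and arm the invalidation notifier for it, so that the
 * registered callbacks fire on the next change to the backing pte.
 *
 *	rc = gmap_mprotect_notify(g, gaddr & PAGE_MASK, PAGE_SIZE,
 *				  PROT_READ);
 */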

/**
 * gmap_read_table - get an unsigned long value from a guest page table using
 *		     absolute addressing, without marking the page referenced.
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @val: pointer to the unsigned long value to return
 *
 * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
 * if reading using the virtual address failed.
 *
 * Called with gmap->mm->mmap_sem in read.
 */
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
{
	unsigned long address, vmaddr;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int rc;

	while (1) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			pte = *ptep;
			if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
				address = pte_val(pte) & PAGE_MASK;
				address += gaddr & ~PAGE_MASK;
				*val = *(unsigned long *) address;
				pte_val(*ptep) |= _PAGE_YOUNG;
				/* Do *NOT* clear the _PAGE_INVALID bit! */
				rc = 0;
			}
			gmap_pte_op_end(ptl);
		}
		if (!rc)
			break;
		vmaddr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, PROT_READ);
		if (rc)
			break;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_read_table);
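
/*
 * Usage sketch (illustrative): peek at a guest-table word, e.g. while
 * walking guest DAT tables, without marking the backing page as
 * referenced. The caller must hold gmap->mm->mmap_sem in read.
 *
 *	unsigned long entry;
 *
 *	rc = gmap_read_table(g, gaddr, &entry);
 *	if (rc)
 *		return rc;
 */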

/**
 * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: vm address associated with the rmap
 * @rmap: pointer to the rmap structure
 *
 * Called with the sg->guest_table_lock
 */
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
				    struct gmap_rmap *rmap)
{
	void __rcu **slot;

	BUG_ON(!gmap_is_shadow(sg));
	slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	if (slot) {
		rmap->next = radix_tree_deref_slot_protected(slot,
							&sg->guest_table_lock);
		radix_tree_replace_slot(&sg->host_to_rmap, slot, rmap);
	} else {
		rmap->next = NULL;
		radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
				  rmap);
	}
}

/**
 * gmap_protect_rmap - modify access rights to memory and create an rmap
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow gmap
 * @paddr: address in the parent guest address space
 * @len: length of the memory area to protect
 * @prot: indicates access rights: none, read-only or read-write
 *
 * Returns 0 if successfully protected and the rmap was created, -ENOMEM
 * if out of memory and -EFAULT if paddr is invalid.
 */
static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
			     unsigned long paddr, unsigned long len, int prot)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	while (len) {
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr))
			return vmaddr;
		rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
		if (!rmap)
			return -ENOMEM;
		rmap->raddr = raddr;
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc) {
			kfree(rmap);
			return rc;
		}
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (ptep) {
			spin_lock(&sg->guest_table_lock);
			rc = ptep_force_prot(parent->mm, paddr, ptep, prot,
					     PGSTE_VSIE_BIT);
			if (!rc)
				gmap_insert_rmap(sg, vmaddr, rmap);
			spin_unlock(&sg->guest_table_lock);
			gmap_pte_op_end(ptl);
		}
		radix_tree_preload_end();
		if (rc) {
			kfree(rmap);
			rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
			if (rc)
				return rc;
			continue;
		}
		paddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}

#define _SHADOW_RMAP_MASK	0x7
#define _SHADOW_RMAP_REGION1	0x5
#define _SHADOW_RMAP_REGION2	0x4
#define _SHADOW_RMAP_REGION3	0x3
#define _SHADOW_RMAP_SEGMENT	0x2
#define _SHADOW_RMAP_PGTABLE	0x1

/**
 * gmap_idte_one - invalidate a single region or segment table entry
 * @asce: region or segment table *origin* + table-type bits
 * @vaddr: virtual address to identify the table entry to flush
 *
 * The invalid bit of a single region or segment table entry is set
 * and the associated TLB entries depending on the entry are flushed.
 * The table-type of the @asce identifies the portion of the @vaddr
 * that is used as the invalidation index.
 */
static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
{
	asm volatile(
		"	.insn	rrf,0xb98e0000,%0,%1,0,0"
		: : "a" (asce), "a" (vaddr) : "cc", "memory");
}

/**
 * gmap_unshadow_page - remove a page from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
	if (!table || *table & _PAGE_INVALID)
		return;
	gmap_call_notifier(sg, raddr, raddr + _PAGE_SIZE - 1);
	ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
}

/**
 * __gmap_unshadow_pgt - remove all entries from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @pgt: pointer to the start of a shadow page table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
				unsigned long *pgt)
{
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _PAGE_ENTRIES; i++, raddr += _PAGE_SIZE)
		pgt[i] = _PAGE_INVALID;
}

/**
 * gmap_unshadow_pgt - remove a shadow page table from a segment entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long sto, *ste, *pgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
	if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _SEGMENT_SIZE - 1);
	sto = (unsigned long) (ste - ((raddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT));
	gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
	pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
	*ste = _SEGMENT_ENTRY_EMPTY;
	__gmap_unshadow_pgt(sg, raddr, pgt);
	/* Free page table */
	page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	page_table_free_pgste(page);
}

/**
 * __gmap_unshadow_sgt - remove all entries from a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @sgt: pointer to the start of a shadow segment table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
				unsigned long *sgt)
{
	unsigned long asce, *pgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) sgt | _ASCE_TYPE_SEGMENT;
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _SEGMENT_SIZE) {
		if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
			continue;
		pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
		sgt[i] = _SEGMENT_ENTRY_EMPTY;
		__gmap_unshadow_pgt(sg, raddr, pgt);
		/* Free page table */
		page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		page_table_free_pgste(page);
	}
}

/**
 * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the shadow->guest_table_lock
 */
static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long r3o, *r3e, *sgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
	if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION3_SIZE - 1);
	r3o = (unsigned long) (r3e - ((raddr & _REGION3_INDEX) >> _REGION3_SHIFT));
	gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
	sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
	*r3e = _REGION3_ENTRY_EMPTY;
	__gmap_unshadow_sgt(sg, raddr, sgt);
	/* Free segment table */
	page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}

/**
 * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 * @r3t: pointer to the start of a shadow region-3 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
				unsigned long *r3t)
{
	unsigned long asce, *sgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r3t | _ASCE_TYPE_REGION3;
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION3_SIZE) {
		if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
		r3t[i] = _REGION3_ENTRY_EMPTY;
		__gmap_unshadow_sgt(sg, raddr, sgt);
		/* Free segment table */
		page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}

/**
 * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r2o, *r2e, *r3t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
	if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION2_SIZE - 1);
	r2o = (unsigned long) (r2e - ((raddr & _REGION2_INDEX) >> _REGION2_SHIFT));
	gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
	r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
	*r2e = _REGION2_ENTRY_EMPTY;
	__gmap_unshadow_r3t(sg, raddr, r3t);
	/* Free region 3 table */
	page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}

/**
 * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r2t: pointer to the start of a shadow region-2 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
				unsigned long *r2t)
{
	unsigned long asce, *r3t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r2t | _ASCE_TYPE_REGION2;
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION2_SIZE) {
		if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
		r2t[i] = _REGION2_ENTRY_EMPTY;
		__gmap_unshadow_r3t(sg, raddr, r3t);
		/* Free region 3 table */
		page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}

/**
 * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r1o, *r1e, *r2t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
	if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION1_SIZE - 1);
	r1o = (unsigned long) (r1e - ((raddr & _REGION1_INDEX) >> _REGION1_SHIFT));
	gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
	r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
	*r1e = _REGION1_ENTRY_EMPTY;
	__gmap_unshadow_r2t(sg, raddr, r2t);
	/* Free region 2 table */
	page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}

/**
 * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r1t: pointer to the start of a shadow region-1 table
 *
 * Called with the shadow->guest_table_lock
 */
static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
				unsigned long *r1t)
{
	unsigned long asce, *r2t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION1_SIZE) {
		if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
		__gmap_unshadow_r2t(sg, raddr, r2t);
		/* Clear entry and flush translation r1t -> r2t */
		gmap_idte_one(asce, raddr);
		r1t[i] = _REGION1_ENTRY_EMPTY;
		/* Free region 2 table */
		page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}

/**
 * gmap_unshadow - remove a shadow page table completely
 * @sg: pointer to the shadow guest address space structure
 *
 * Called with sg->guest_table_lock
 */
static void gmap_unshadow(struct gmap *sg)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	if (sg->removed)
		return;
	sg->removed = 1;
	gmap_call_notifier(sg, 0, -1UL);
	gmap_flush_tlb(sg);
	table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
	switch (sg->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		__gmap_unshadow_r1t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION2:
		__gmap_unshadow_r2t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION3:
		__gmap_unshadow_r3t(sg, 0, table);
		break;
	case _ASCE_TYPE_SEGMENT:
		__gmap_unshadow_sgt(sg, 0, table);
		break;
	}
}

/**
 * gmap_find_shadow - find a specific asce in the list of shadow tables
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns the pointer to a gmap if a shadow table with the given asce is
 * already available, ERR_PTR(-EAGAIN) if another one is just being created,
 * otherwise NULL
 */
static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
				     int edat_level)
{
	struct gmap *sg;

	list_for_each_entry(sg, &parent->children, list) {
		if (sg->orig_asce != asce || sg->edat_level != edat_level ||
		    sg->removed)
			continue;
		if (!sg->initialized)
			return ERR_PTR(-EAGAIN);
		atomic_inc(&sg->ref_count);
		return sg;
	}
	return NULL;
}

/**
 * gmap_shadow_valid - check if a shadow guest address space matches the
 *		       given properties and is still valid
 * @sg: pointer to the shadow guest address space structure
 * @asce: ASCE for which the shadow table is requested
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns 1 if the gmap shadow is still valid and matches the given
 * properties, the caller can continue using it. Returns 0 otherwise, the
 * caller has to request a new shadow gmap in this case.
 */
int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
{
	if (sg->removed)
		return 0;
	return sg->orig_asce == asce && sg->edat_level == edat_level;
}
EXPORT_SYMBOL_GPL(gmap_shadow_valid);

/**
 * gmap_shadow - create/find a shadow guest address space
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * The pages of the top level page table referred by the asce parameter
 * will be set to read-only and marked in the PGSTEs of the kvm process.
 * The shadow table will be removed automatically on any change to the
 * PTE mapping for the source table.
 *
 * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
 * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
 * parent gmap table could not be protected.
 */
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
			 int edat_level)
{
	struct gmap *sg, *new;
	unsigned long limit;
	int rc;

	BUG_ON(gmap_is_shadow(parent));
	spin_lock(&parent->shadow_lock);
	sg = gmap_find_shadow(parent, asce, edat_level);
	spin_unlock(&parent->shadow_lock);
	if (sg)
		return sg;
	/* Create a new shadow gmap */
	limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
	if (asce & _ASCE_REAL_SPACE)
		limit = -1UL;
	new = gmap_alloc(limit);
	if (!new)
		return ERR_PTR(-ENOMEM);
	new->mm = parent->mm;
	new->parent = gmap_get(parent);
	new->orig_asce = asce;
	new->edat_level = edat_level;
	new->initialized = false;
	spin_lock(&parent->shadow_lock);
	/* Recheck if another CPU created the same shadow */
	sg = gmap_find_shadow(parent, asce, edat_level);
	if (sg) {
		spin_unlock(&parent->shadow_lock);
		gmap_free(new);
		return sg;
	}
	if (asce & _ASCE_REAL_SPACE) {
		/* only allow one real-space gmap shadow */
		list_for_each_entry(sg, &parent->children, list) {
			if (sg->orig_asce & _ASCE_REAL_SPACE) {
				spin_lock(&sg->guest_table_lock);
				gmap_unshadow(sg);
				spin_unlock(&sg->guest_table_lock);
				list_del(&sg->list);
				gmap_put(sg);
				break;
			}
		}
	}
	atomic_set(&new->ref_count, 2);
	list_add(&new->list, &parent->children);
	if (asce & _ASCE_REAL_SPACE) {
		/* nothing to protect, return right away */
		new->initialized = true;
		spin_unlock(&parent->shadow_lock);
		return new;
	}
	spin_unlock(&parent->shadow_lock);
	/* protect after insertion, so it will get properly invalidated */
	down_read(&parent->mm->mmap_sem);
	rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
				((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
				PROT_READ, PGSTE_VSIE_BIT);
	up_read(&parent->mm->mmap_sem);
	spin_lock(&parent->shadow_lock);
	new->initialized = true;
	if (rc) {
		list_del(&new->list);
		gmap_free(new);
		new = ERR_PTR(rc);
	}
	spin_unlock(&parent->shadow_lock);
	return new;
}
EXPORT_SYMBOL_GPL(gmap_shadow);
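
/*
 * Usage sketch (illustrative): obtain a shadow gmap for a nested guest
 * ASCE, retrying on -EAGAIN, and drop the reference again with
 * gmap_put() once the shadow is no longer needed.
 *
 *	sg = gmap_shadow(parent, asce, edat_level);
 *	if (IS_ERR(sg))
 *		return PTR_ERR(sg);	(-ENOMEM, -EAGAIN or -EFAULT)
 *	...
 *	gmap_put(sg);
 */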

/**
 * gmap_shadow_r2t - create an empty shadow region 2 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r2t: parent gmap address of the region 2 table to get shadowed
 * @fake: r2t references contiguous guest memory block, not a r2t
 *
 * The r2t parameter specifies the address of the source table. The
 * four pages of the source table are made read-only in the parent gmap
 * address space. A write to the source table area @r2t will automatically
 * remove the shadow r2 table and all of its descendants.
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r2t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region second table */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	page->index = r2t & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_r2t = (unsigned long *) page_to_phys(page);
	/* Install shadow region second table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_r2t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= (r2t & _REGION_ENTRY_PROTECT);
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make r2t read-only in parent gmap page table */
	raddr = (saddr & _REGION1_MASK) | _SHADOW_RMAP_REGION1;
	origin = r2t & _REGION_ENTRY_ORIGIN;
	offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
	len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 4);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
			      (unsigned long) s_r2t)
			rc = -EAGAIN;		/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_r2t(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, CRST_ALLOC_ORDER);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r2t);

/**
 * gmap_shadow_r3t - create a shadow region 3 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r3t: parent gmap address of the region 3 table to get shadowed
 * @fake: r3t references contiguous guest memory block, not a r3t
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r3t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region third table */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	page->index = r3t & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_r3t = (unsigned long *) page_to_phys(page);
	/* Install shadow region third table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_r3t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= (r3t & _REGION_ENTRY_PROTECT);
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make r3t read-only in parent gmap page table */
	raddr = (saddr & _REGION2_MASK) | _SHADOW_RMAP_REGION2;
	origin = r3t & _REGION_ENTRY_ORIGIN;
	offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
	len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 3);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
			      (unsigned long) s_r3t)
			rc = -EAGAIN;		/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_r3t(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, CRST_ALLOC_ORDER);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r3t);

/**
 * gmap_shadow_sgt - create a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @sgt: parent gmap address of the segment table to get shadowed
 * @fake: sgt references contiguous guest memory block, not a sgt
 *
 * Returns: 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_sgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
	/* Allocate a shadow segment table */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	page->index = sgt & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_sgt = (unsigned long *) page_to_phys(page);
	/* Install shadow segment table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_sgt | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= sgt & _REGION_ENTRY_PROTECT;
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make sgt read-only in parent gmap page table */
	raddr = (saddr & _REGION3_MASK) | _SHADOW_RMAP_REGION3;
	origin = sgt & _REGION_ENTRY_ORIGIN;
	offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
	len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 2);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
			      (unsigned long) s_sgt)
			rc = -EAGAIN;	/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_sgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, CRST_ALLOC_ORDER);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
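
/*
 * Editor's sketch: the BUG_ON above rejects large (2 GB) region-third
 * entries; those are handled via @fake instead. A hypothetical caller
 * with EDAT-2 available might derive the arguments as below; rtte_val
 * and edat2 are made-up names for the guest's region-third entry and
 * the facility check, not identifiers from this file.
 */
#if 0	/* illustrative sketch, not compiled */
	if ((rtte_val & _REGION3_ENTRY_LARGE) && edat2) {
		/* 2 GB frame: pass the block origin and fake = 1 */
		rc = gmap_shadow_sgt(sg, saddr, rtte_val & ~0x7fffffffUL, 1);
	} else {
		/* real segment table; protect bit is taken from the entry */
		rc = gmap_shadow_sgt(sg, saddr, rtte_val, 0);
	}
#endif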

/**
 * gmap_shadow_pgt_lookup - find a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: the address in the shadow guest address space
 * @pgt: parent gmap address of the page table to get shadowed
 * @dat_protection: if the pgtable is marked as protected by dat
 * @fake: pgt references a contiguous guest memory block, not a pgtable
 *
 * Returns 0 if the shadow page table was found and -EAGAIN if the page
 * table was not found.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
			   unsigned long *pgt, int *dat_protection,
			   int *fake)
{
	unsigned long *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
		/* Shadow page tables are full pages (pte+pgste) */
		page = pfn_to_page(*table >> PAGE_SHIFT);
		*pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
		*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
		*fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
		rc = 0;
	} else {
		rc = -EAGAIN;
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
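
/*
 * Editor's sketch of the intended lookup/instantiate interplay: a
 * hypothetical shadow-fault handler first tries the lookup and only
 * walks the guest's DAT tables when no shadow page table exists yet.
 * gmap_shadow_pgt() is defined below; resolve_guest_sgt_entry() is a
 * made-up placeholder for reading the guest's segment table entry.
 */
#if 0	/* illustrative sketch, not compiled */
	unsigned long pgt;
	int dat_protection, fake, rc;

	rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
	if (rc == -EAGAIN) {
		/* no shadow page table yet: fetch pgt from the guest */
		rc = resolve_guest_sgt_entry(sg, saddr, &pgt, &fake);
		if (!rc)
			rc = gmap_shadow_pgt(sg, saddr, pgt, fake);
	}
#endif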

/**
 * gmap_shadow_pgt - instantiate a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pgt: parent gmap address of the page table to get shadowed
 * @fake: pgt references a contiguous guest memory block, not a pgtable
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
		    int fake)
{
	unsigned long raddr, origin;
	unsigned long *s_pgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE));
	/* Allocate a shadow page table */
	page = page_table_alloc_pgste(sg->mm);
	if (!page)
		return -ENOMEM;
	page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_pgt = (unsigned long *) page_to_phys(page);
	/* Install shadow page table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _SEGMENT_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _SEGMENT_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
		 (pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
	list_add(&page->lru, &sg->pt_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_SEGMENT_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make pgt read-only in parent gmap page table (not the pgste) */
	raddr = (saddr & _SEGMENT_MASK) | _SHADOW_RMAP_SEGMENT;
	origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
	rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 1);
		if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) !=
			      (unsigned long) s_pgt)
			rc = -EAGAIN;	/* Race with unshadow */
		else
			*table &= ~_SEGMENT_ENTRY_INVALID;
	} else {
		gmap_unshadow_pgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	page_table_free_pgste(page);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt);

/**
 * gmap_shadow_page - create a shadow page mapping
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pte: pte in parent gmap address space to get shadowed
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr, paddr;
	spinlock_t *ptl;
	pte_t *sptep, *tptep;
	int prot;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	prot = (pte_val(pte) & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE;

	rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
	if (!rmap)
		return -ENOMEM;
	rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;

	while (1) {
		paddr = pte_val(pte) & PAGE_MASK;
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc)
			break;
		rc = -EAGAIN;
		sptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (sptep) {
			spin_lock(&sg->guest_table_lock);
			/* Get page table pointer */
			tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
			if (!tptep) {
				spin_unlock(&sg->guest_table_lock);
				gmap_pte_op_end(ptl);
				radix_tree_preload_end();
				break;
			}
			rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
			if (rc > 0) {
				/* Success and a new mapping */
				gmap_insert_rmap(sg, vmaddr, rmap);
				rmap = NULL;
				rc = 0;
			}
			gmap_pte_op_end(ptl);
			spin_unlock(&sg->guest_table_lock);
		}
		radix_tree_preload_end();
		if (!rc)
			break;
		rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
		if (rc)
			break;
	}
	kfree(rmap);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_page);
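
/*
 * Editor's sketch of the final step of a shadow fault: once the shadow
 * page table exists, the pte read from the guest's page table is merged
 * with the accumulated dat protection and shadowed. guest_pte_val and
 * dat_protection are hypothetical names, not identifiers from this file.
 */
#if 0	/* illustrative sketch, not compiled */
	pte_t pte = __pte(guest_pte_val |
			  (dat_protection ? _PAGE_PROTECT : 0));

	rc = gmap_shadow_page(sg, saddr, pte);	/* retried on -EAGAIN */
#endif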

/**
 * gmap_shadow_notify - handle notifications for shadow gmap
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: affected host virtual address in the parent mm
 * @gaddr: affected guest address in the parent gmap
 * @pte: pointer to the invalidated page table entry
 *
 * Called with sg->parent->shadow_lock held.
 */
static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
			       unsigned long gaddr, pte_t *pte)
{
	struct gmap_rmap *rmap, *rnext, *head;
	unsigned long start, end, bits, raddr;

	BUG_ON(!gmap_is_shadow(sg));

	spin_lock(&sg->guest_table_lock);
	if (sg->removed) {
		spin_unlock(&sg->guest_table_lock);
		return;
	}
	/* Check for top level table */
	start = sg->orig_asce & _ASCE_ORIGIN;
	end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE;
	if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
	    gaddr < end) {
		/* The complete shadow table has to go */
		gmap_unshadow(sg);
		spin_unlock(&sg->guest_table_lock);
		list_del(&sg->list);
		gmap_put(sg);
		return;
	}
	/* Remove the page table tree for one specific entry */
	head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	gmap_for_each_rmap_safe(rmap, rnext, head) {
		bits = rmap->raddr & _SHADOW_RMAP_MASK;
		raddr = rmap->raddr ^ bits;
		switch (bits) {
		case _SHADOW_RMAP_REGION1:
			gmap_unshadow_r2t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION2:
			gmap_unshadow_r3t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION3:
			gmap_unshadow_sgt(sg, raddr);
			break;
		case _SHADOW_RMAP_SEGMENT:
			gmap_unshadow_pgt(sg, raddr);
			break;
		case _SHADOW_RMAP_PGTABLE:
			gmap_unshadow_page(sg, raddr);
			break;
		}
		kfree(rmap);
	}
	spin_unlock(&sg->guest_table_lock);
}
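
/*
 * Editor's note (illustrative): each rmap encodes the shadow address
 * together with the level of the table that referenced the protected
 * parent page, e.g. as set up in gmap_shadow_pgt() above:
 *
 *	rmap->raddr = (saddr & _SEGMENT_MASK) | _SHADOW_RMAP_SEGMENT;
 *
 * The decode in gmap_shadow_notify() masks out the level bits with
 * _SHADOW_RMAP_MASK and clears them via xor, so the unshadow call runs
 * at the matching table level.
 */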

/**
 * ptep_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 * @pte: pointer to the page table entry
 * @bits: bits from the pgste that caused the notify call
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
		 pte_t *pte, unsigned long bits)
{
	unsigned long offset, gaddr = 0;
	unsigned long *table;
	struct gmap *gmap, *sg, *next;

	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	offset = offset * (PAGE_SIZE / sizeof(pte_t));
	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		spin_lock(&gmap->guest_table_lock);
		table = radix_tree_lookup(&gmap->host_to_guest,
					  vmaddr >> PMD_SHIFT);
		if (table)
			gaddr = __gmap_segment_gaddr(table) + offset;
		spin_unlock(&gmap->guest_table_lock);
		if (!table)
			continue;

		if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
			spin_lock(&gmap->shadow_lock);
			list_for_each_entry_safe(sg, next,
						 &gmap->children, list)
				gmap_shadow_notify(sg, vmaddr, gaddr, pte);
			spin_unlock(&gmap->shadow_lock);
		}
		if (bits & PGSTE_IN_BIT)
			gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ptep_notify);
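
/*
 * Editor's worked example for the offset arithmetic above, assuming
 * 4 KB pages and 8-byte ptes (as on s390): a 2 KB page table holds 256
 * entries, so (pte & (255 * sizeof(pte_t))) is the pte pointer's byte
 * offset within its table. Each entry maps PAGE_SIZE bytes, hence the
 * guest-address offset within the 1 MB segment is
 *
 *	byte_offset * (PAGE_SIZE / sizeof(pte_t)) = byte_offset * 512
 *
 * e.g. entry 5: byte offset 40, guest offset 40 * 512 = 0x5000, i.e.
 * five pages into the segment.
 */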

static inline void thp_split_mm(struct mm_struct *mm)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct vm_area_struct *vma;
	unsigned long addr;

	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE)
			follow_page(vma, addr, FOLL_SPLIT);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
	}
	mm->def_flags |= VM_NOHUGEPAGE;
#endif
}
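
/*
 * Editor's note (illustrative): FOLL_SPLIT makes follow_page() split any
 * transparent huge page mapped at the given address, so afterwards the
 * mm is backed by normal ptes, which the pte+pgste layout used for KVM
 * guests requires; VM_NOHUGEPAGE keeps khugepaged from re-collapsing
 * the ranges later.
 */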

/*
 * Remove all empty zero pages from the mapping for lazy refaulting
 * - This must be called after mm->context.has_pgste is set, to avoid
 *   future creation of zero pages
 * - This must be called after THP was disabled
 */
static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
			    unsigned long end, struct mm_walk *walk)
{
	unsigned long addr;

	for (addr = start; addr != end; addr += PAGE_SIZE) {
		pte_t *ptep;
		spinlock_t *ptl;

		ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
		if (is_zero_pfn(pte_pfn(*ptep)))
			ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
		pte_unmap_unlock(ptep, ptl);
	}
	return 0;
}

static inline void zap_zero_pages(struct mm_struct *mm)
{
	struct mm_walk walk = { .pmd_entry = __zap_zero_pages };

	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);
}

/*
 * Switch on pgstes for the current userspace process (for KVM).
 */
int s390_enable_sie(void)
{
	struct mm_struct *mm = current->mm;

	/* Do we have pgstes? If yes, we are done. */
	if (mm_has_pgste(mm))
		return 0;
	/* Fail if the page tables are 2K */
	if (!mm_alloc_pgste(mm))
		return -EINVAL;
	down_write(&mm->mmap_sem);
	mm->context.has_pgste = 1;
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	zap_zero_pages(mm);
	up_write(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
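
/*
 * Editor's sketch of the typical bring-up order (hypothetical caller,
 * e.g. KVM vm init): pgstes must be enabled for current->mm before a
 * gmap is created for it; guest_memory_limit is a made-up name.
 */
#if 0	/* illustrative sketch, not compiled */
	struct gmap *gmap;

	if (s390_enable_sie())
		return -EINVAL;
	gmap = gmap_create(current->mm, guest_memory_limit);
	if (!gmap)
		return -ENOMEM;
#endif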

/*
 * Enable storage key handling from now on and initialize the storage
 * keys with the default key.
 */
static int __s390_enable_skey(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	/* Clear storage key */
	ptep_zap_key(walk->mm, addr, pte);
	return 0;
}

int s390_enable_skey(void)
{
	struct mm_walk walk = { .pte_entry = __s390_enable_skey };
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc = 0;

	down_write(&mm->mmap_sem);
	if (mm_use_skey(mm))
		goto out_up;

	mm->context.use_skey = 1;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
				MADV_UNMERGEABLE, &vma->vm_flags)) {
			mm->context.use_skey = 0;
			rc = -ENOMEM;
			goto out_up;
		}
	}
	mm->def_flags &= ~VM_MERGEABLE;

	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);

out_up:
	up_write(&mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(s390_enable_skey);
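
/*
 * Editor's note (illustrative): storage keys live per physical page
 * frame, so pages merged by KSM cannot carry distinct guest keys; that
 * is why every VMA is madvised MADV_UNMERGEABLE and VM_MERGEABLE is
 * cleared from def_flags before the walk clears the keys.
 */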

/*
 * Reset CMMA state, make all pages stable again.
 */
static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	ptep_zap_unused(walk->mm, addr, pte, 1);
	return 0;
}

void s390_reset_cmma(struct mm_struct *mm)
{
	struct mm_walk walk = { .pte_entry = __s390_reset_cmma };

	down_write(&mm->mmap_sem);
	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);
	up_write(&mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);