#include <linux/pfn.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <xen/interface/memory.h>

#include "multicalls.h"
#include "mmu.h"

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);

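/*
 * Return the machine frame number backing an arbitrary kernel virtual
 * address, by shifting down the full machine address looked up below.
 */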
unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}

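/*
 * Translate an arbitrary kernel virtual address (direct-mapped or not)
 * into the corresponding machine address.
 */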
xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned int offset;

	/*
	 * If the address is in the linearly mapped range, we can just use
	 * the (quick) virt_to_machine() p2m lookup.
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* Otherwise we have to do a (slower) full page-table walk. */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);

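/*
 * Flush the entire TLB of all vcpus of the current domain by issuing a
 * single MMUEXT_TLB_FLUSH_ALL mmuext_op through the multicall machinery.
 */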
static noinline void xen_flush_tlb_all(void)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_TLB_FLUSH_ALL;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

#define REMAP_BATCH_SIZE 16

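/*
 * State shared with the per-pte callback below: the (array of) frame
 * number(s) to map, the mapping mode, and a cursor into the pending
 * mmu_update batch.
 */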
struct remap_data {
	xen_pfn_t *pfn;
	bool contiguous;
	bool no_translate;
	pgprot_t prot;
	struct mmu_update *mmu_update;
};

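/*
 * Called once per pte by apply_to_page_range(): queue one mmu_update
 * entry mapping the current frame at @addr with the requested protection.
 */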
static int remap_area_pfn_pte_fn(pte_t *ptep, pgtable_t token,
				 unsigned long addr, void *data)
{
	struct remap_data *rmd = data;
	pte_t pte = pte_mkspecial(mfn_pte(*rmd->pfn, rmd->prot));

	/*
	 * If we have a contiguous range, just increment the pfn itself;
	 * otherwise advance the pointer to the next pfn in the array.
	 */
	if (rmd->contiguous)
		(*rmd->pfn)++;
	else
		rmd->pfn++;

	rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
	rmd->mmu_update->ptr |= rmd->no_translate ?
		MMU_PT_UPDATE_NO_TRANSLATE :
		MMU_NORMAL_PT_UPDATE;
	rmd->mmu_update->val = pte_val_ma(pte);
	rmd->mmu_update++;

	return 0;
}

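/*
 * Core of the remap paths below: map @nr frames starting at @addr in
 * @vma, batching up to REMAP_BATCH_SIZE page-table updates per
 * hypercall. @pfn is either a pointer to the first frame of a
 * contiguous range (@err_ptr == NULL) or an array of @nr frames with
 * per-frame error reporting via @err_ptr.
 */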
static int do_remap_pfn(struct vm_area_struct *vma,
			unsigned long addr,
			xen_pfn_t *pfn, int nr,
			int *err_ptr, pgprot_t prot,
			unsigned int domid,
			bool no_translate,
			struct page **pages)
{
	int err = 0;
	struct remap_data rmd;
	struct mmu_update mmu_update[REMAP_BATCH_SIZE];
	unsigned long range;
	int mapped = 0;

	BUG_ON((vma->vm_flags & (VM_PFNMAP | VM_IO)) != (VM_PFNMAP | VM_IO));

	rmd.pfn = pfn;
	rmd.prot = prot;
	/*
	 * We use err_ptr to indicate whether we are doing a contiguous
	 * mapping or a discontiguous mapping.
	 */
	rmd.contiguous = !err_ptr;
	rmd.no_translate = no_translate;

	while (nr) {
		int index = 0;
		int done = 0;
		int batch = min(REMAP_BATCH_SIZE, nr);
		int batch_left = batch;

		range = (unsigned long)batch << PAGE_SHIFT;

		rmd.mmu_update = mmu_update;
		err = apply_to_page_range(vma->vm_mm, addr, range,
					  remap_area_pfn_pte_fn, &rmd);
		if (err)
			goto out;

		/*
		 * We record the error for each page that fails, but
		 * continue mapping until the whole set is done.
		 */
		do {
			int i;

			err = HYPERVISOR_mmu_update(&mmu_update[index],
						    batch_left, &done, domid);

			/*
			 * @err_ptr may be the same buffer as @gfn, so
			 * only clear it after each chunk of @gfn is
			 * used.
			 */
			if (err_ptr) {
				for (i = index; i < index + done; i++)
					err_ptr[i] = 0;
			}
			if (err < 0) {
				if (!err_ptr)
					goto out;
				/*
				 * The loop above left i == index + done,
				 * i.e. pointing at the frame that failed.
				 */
				err_ptr[i] = err;
				done++; /* Skip failed frame. */
			} else
				mapped += done;
			batch_left -= done;
			index += done;
		} while (batch_left);

		nr -= batch;
		addr += range;
		if (err_ptr)
			err_ptr += batch;
		cond_resched();
	}
out:

	xen_flush_tlb_all();

	return err < 0 ? err : mapped;
}

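/*
 * Map a contiguous range of @nr guest frames starting at @gfn into
 * @vma. Not supported (-EOPNOTSUPP) on auto-translated domains.
 */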
int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t gfn, int nr,
			       pgprot_t prot, unsigned int domid,
			       struct page **pages)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -EOPNOTSUPP;

	return do_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false,
			    pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);

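/*
 * Map an array of @nr guest frames into @vma, reporting per-frame
 * status through @err_ptr. Auto-translated domains are handed off to
 * xen_xlate_remap_gfn_array() instead.
 */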
int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t *gfn, int nr,
			       int *err_ptr, pgprot_t prot,
			       unsigned int domid, struct page **pages)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
						 prot, domid, pages);

	/*
	 * We BUG_ON because it's a programmer error to pass a NULL
	 * err_ptr, and the consequences later are otherwise very hard to
	 * trace back to "wrong memory was mapped in".
	 */
	BUG_ON(err_ptr == NULL);
	return do_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
			    false, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);

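/*
 * Like xen_remap_domain_gfn_array(), but the frames are machine frame
 * numbers mapped without physmap translation
 * (MMU_PT_UPDATE_NO_TRANSLATE), so this is only available to PV domains.
 */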
int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t *mfn, int nr,
			       int *err_ptr, pgprot_t prot,
			       unsigned int domid, struct page **pages)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -EOPNOTSUPP;

	return do_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
			    true, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);

/* Returns: 0 on success */
int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
			       int nr, struct page **pages)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return xen_xlate_unmap_gfn_range(vma, nr, pages);

	/* PV guests have no page array to tear down here. */
	if (!pages)
		return 0;

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);