// SPDX-License-Identifier: GPL-2.0
/*
 * handling diagnose instructions
 *
 * Copyright IBM Corp. 2008, 2011
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/virtio-ccw.h>
#include "kvm-s390.h"
#include "trace.h"
#include "trace-s390.h"
#include "gaccess.h"

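/*
 * DIAG 0x10: the guest reports a page range as unused so the host can
 * discard the backing storage. The two guest pages swapped in by the
 * prefix register get special treatment below.
 */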
static int diag_release_pages(struct kvm_vcpu *vcpu)
{
	unsigned long start, end;
	unsigned long prefix  = kvm_s390_get_prefix(vcpu);

	start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
	end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + PAGE_SIZE;
	vcpu->stat.diagnose_10++;

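	/*
	 * The range must be page aligned and non-empty, and it must not
	 * start within the first two guest pages.
	 */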
	if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end
	    || start < 2 * PAGE_SIZE)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end);

	/*
	 * We checked for start >= end above, so let's check for the
	 * fast path (no prefix swap page involved).
	 */
	if (end <= prefix || start >= prefix + 2 * PAGE_SIZE) {
		gmap_discard(vcpu->arch.gmap, start, end);
	} else {
		/*
		 * This is the slow path. gmap_discard treats an empty or
		 * inverted range as a NOP, so let's split the request into
		 * the parts before the prefix, the prefix pages themselves
		 * and the part after the prefix, and let gmap_discard skip
		 * the pieces that do not apply.
		 */
		gmap_discard(vcpu->arch.gmap, start, prefix);
		if (start <= prefix)
			gmap_discard(vcpu->arch.gmap, 0, PAGE_SIZE);
		if (end > prefix + PAGE_SIZE)
			gmap_discard(vcpu->arch.gmap, PAGE_SIZE, 2 * PAGE_SIZE);
		gmap_discard(vcpu->arch.gmap, prefix + 2 * PAGE_SIZE, end);
	}
	return 0;
}

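/*
 * DIAG 0x258: page-reference services. Subcode 0 (TOKEN) establishes the
 * pseudo-page-fault handshake and records the token address and masks;
 * subcode 1 (CANCEL) tears the handshake down again.
 */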
static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
{
	struct prs_parm {
		u16 code;
		u16 subcode;
		u16 parm_len;
		u16 parm_version;
		u64 token_addr;
		u64 select_mask;
		u64 compare_mask;
		u64 zarch;
	};
	struct prs_parm parm;
	int rc;
	u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4;
	u16 ry = (vcpu->arch.sie_block->ipa & 0x0f);

	VCPU_EVENT(vcpu, 3, "diag page reference parameter block at 0x%llx",
		   vcpu->run->s.regs.gprs[rx]);
	vcpu->stat.diagnose_258++;
	if (vcpu->run->s.regs.gprs[rx] & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (parm.subcode) {
	case 0: /* TOKEN */
		VCPU_EVENT(vcpu, 3, "pageref token addr 0x%llx "
			   "select mask 0x%llx compare mask 0x%llx",
			   parm.token_addr, parm.select_mask, parm.compare_mask);
		if (vcpu->arch.pfault_token != KVM_S390_PFAULT_TOKEN_INVALID) {
			/*
			 * If the pagefault handshake is already activated,
			 * the token must not be changed.  We have to return
			 * decimal 8 instead, as mandated in SC24-6084.
			 */
			vcpu->run->s.regs.gprs[ry] = 8;
			return 0;
		}

		if ((parm.compare_mask & parm.select_mask) != parm.compare_mask ||
		    parm.token_addr & 7 || parm.zarch != 0x8000000000000000ULL)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

		if (kvm_is_error_gpa(vcpu->kvm, parm.token_addr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		vcpu->arch.pfault_token = parm.token_addr;
		vcpu->arch.pfault_select = parm.select_mask;
		vcpu->arch.pfault_compare = parm.compare_mask;
		vcpu->run->s.regs.gprs[ry] = 0;
		rc = 0;
		break;
	case 1: /*
		 * CANCEL
		 * The specification allows already-pending tokens to survive
		 * the cancel, so to reduce code complexity we assume that all
		 * outstanding tokens are already pending.
		 */
		VCPU_EVENT(vcpu, 3, "pageref cancel addr 0x%llx", parm.token_addr);
		if (parm.token_addr || parm.select_mask ||
		    parm.compare_mask || parm.zarch)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

		vcpu->run->s.regs.gprs[ry] = 0;
		/*
		 * If pfault handling was not established or is already
		 * canceled, SC24-6084 requires us to return decimal 4.
		 */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			vcpu->run->s.regs.gprs[ry] = 4;
		else
			vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;

		rc = 0;
		break;
	default:
		rc = -EOPNOTSUPP;
		break;
	}

	return rc;
}

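/*
 * DIAG 0x44: voluntary time slice end. The vcpu gives up the remainder of
 * its time slice, which we map to kvm_vcpu_on_spin().
 */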
static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
	vcpu->stat.diagnose_44++;
	kvm_vcpu_on_spin(vcpu, true);
	return 0;
}

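/*
 * DIAG 0x9c: directed yield. The guest asks to give its remaining time
 * slice to the vcpu whose id is passed in the register named by the
 * instruction; yielding to oneself is a NOP.
 */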
static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *tcpu;
	int tid;

	tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
	vcpu->stat.diagnose_9c++;
	VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d", tid);

	if (tid == vcpu->vcpu_id)
		return 0;

	tcpu = kvm_get_vcpu_by_id(vcpu->kvm, tid);
	if (tcpu)
		kvm_vcpu_yield_to(tcpu);
	return 0;
}

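/*
 * DIAG 0x308: IPL (reboot) functions. Only subcodes 3 and 4 are handled:
 * both request a re-IPL, subcode 3 additionally asks for memory to be
 * cleared. The actual reset is left to userspace via KVM_EXIT_S390_RESET.
 */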
static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
{
	unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;
	unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff;

	VCPU_EVENT(vcpu, 3, "diag ipl functions, subcode %lx", subcode);
	vcpu->stat.diagnose_308++;
	switch (subcode) {
	case 3:
		vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
		break;
	case 4:
		vcpu->run->s390_reset_flags = 0;
		break;
	default:
		return -EOPNOTSUPP;
	}

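	/*
	 * Unless userspace controls the cpu state itself, stop the vcpu
	 * before the reset request is handed over to userspace.
	 */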
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
	vcpu->run->exit_reason = KVM_EXIT_S390_RESET;
	VCPU_EVENT(vcpu, 3, "requesting userspace resets %llx",
		   vcpu->run->s390_reset_flags);
	trace_kvm_s390_request_resets(vcpu->run->s390_reset_flags);
	return -EREMOTE;
}

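/*
 * DIAG 0x500: KVM virtio hypercall. The only function handled here is
 * KVM_S390_VIRTIO_CCW_NOTIFY, which forwards a queue notification to the
 * matching device on the virtio-ccw notification bus.
 */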
static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
{
	int ret;

	vcpu->stat.diagnose_500++;
	/* No virtio-ccw notification? Get out quickly. */
	if (!vcpu->kvm->arch.css_support ||
	    (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
		return -EOPNOTSUPP;

	VCPU_EVENT(vcpu, 4, "diag 0x500 schid 0x%8.8x queue 0x%x cookie 0x%llx",
		   (u32) vcpu->run->s.regs.gprs[2],
		   (u32) vcpu->run->s.regs.gprs[3],
		   vcpu->run->s.regs.gprs[4]);

	/*
	 * The layout is as follows:
	 * - gpr 2 contains the subchannel id (passed as addr)
	 * - gpr 3 contains the virtqueue index (passed as datamatch)
	 * - gpr 4 contains the index on the bus (optionally)
	 */
	ret = kvm_io_bus_write_cookie(vcpu, KVM_VIRTIO_CCW_NOTIFY_BUS,
				      vcpu->run->s.regs.gprs[2] & 0xffffffff,
				      8, &vcpu->run->s.regs.gprs[3],
				      vcpu->run->s.regs.gprs[4]);

	/*
	 * Return cookie in gpr 2, but don't overwrite the register if the
	 * diagnose will be handled by userspace.
	 */
	if (ret != -EOPNOTSUPP)
		vcpu->run->s.regs.gprs[2] = ret;
	/* kvm_io_bus_write_cookie returns -EOPNOTSUPP if it found no match. */
	return ret < 0 ? ret : 0;
}

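/*
 * Dispatch a diagnose call. DIAGNOSE is a privileged instruction, so a
 * guest running in problem state gets a privileged-operation exception.
 */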
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
{
	int code = kvm_s390_get_base_disp_rs(vcpu, NULL) & 0xffff;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	trace_kvm_s390_handle_diag(vcpu, code);
	switch (code) {
	case 0x10:
		return diag_release_pages(vcpu);
	case 0x44:
		return __diag_time_slice_end(vcpu);
	case 0x9c:
		return __diag_time_slice_end_directed(vcpu);
	case 0x258:
		return __diag_page_ref_service(vcpu);
	case 0x308:
		return __diag_ipl_functions(vcpu);
	case 0x500:
		return __diag_virtio_hypercall(vcpu);
	default:
		vcpu->stat.diagnose_other++;
		return -EOPNOTSUPP;
	}
}