/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2015
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/dis.h>
#include <linux/uaccess.h>
#include <asm/sclp.h>
#include <asm/isc.h>
#include <asm/gmap.h>
#include <asm/switch_to.h>
#include <asm/nmi.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00

/* handle external calls via sigp interpretation facility */
static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
{
        int c, scn;

        if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
                return 0;

        BUG_ON(!kvm_s390_use_sca_entries());
        read_lock(&vcpu->kvm->arch.sca_lock);
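        /* the entry layout differs between the basic and the extended SCA */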
        if (vcpu->kvm->arch.use_esca) {
                struct esca_block *sca = vcpu->kvm->arch.sca;
                union esca_sigp_ctrl sigp_ctrl =
                        sca->cpu[vcpu->vcpu_id].sigp_ctrl;

                c = sigp_ctrl.c;
                scn = sigp_ctrl.scn;
        } else {
                struct bsca_block *sca = vcpu->kvm->arch.sca;
                union bsca_sigp_ctrl sigp_ctrl =
                        sca->cpu[vcpu->vcpu_id].sigp_ctrl;

                c = sigp_ctrl.c;
                scn = sigp_ctrl.scn;
        }
        read_unlock(&vcpu->kvm->arch.sca_lock);

        if (src_id)
                *src_id = scn;

        return c;
}

static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
{
        int expect, rc;

        BUG_ON(!kvm_s390_use_sca_entries());
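        /*
         * Try to flip the SIGP control's call-pending bit (c) from 0 to 1
         * with cmpxchg(); if another external call is already pending, the
         * compare fails and we bail out with -EBUSY below.
         */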
        read_lock(&vcpu->kvm->arch.sca_lock);
        if (vcpu->kvm->arch.use_esca) {
                struct esca_block *sca = vcpu->kvm->arch.sca;
                union esca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

                new_val.scn = src_id;
                new_val.c = 1;
                old_val.c = 0;

                expect = old_val.value;
                rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
        } else {
                struct bsca_block *sca = vcpu->kvm->arch.sca;
                union bsca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

                new_val.scn = src_id;
                new_val.c = 1;
                old_val.c = 0;

                expect = old_val.value;
                rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
        }
        read_unlock(&vcpu->kvm->arch.sca_lock);

        if (rc != expect) {
                /* another external call is pending */
                return -EBUSY;
        }
        atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
        return 0;
}

static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc, expect;

        if (!kvm_s390_use_sca_entries())
                return;
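        /* drop the pending flag first, then clear the SIGP control entry */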
        atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
        read_lock(&vcpu->kvm->arch.sca_lock);
        if (vcpu->kvm->arch.use_esca) {
                struct esca_block *sca = vcpu->kvm->arch.sca;
                union esca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union esca_sigp_ctrl old = *sigp_ctrl;

                expect = old.value;
                rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
        } else {
                struct bsca_block *sca = vcpu->kvm->arch.sca;
                union bsca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union bsca_sigp_ctrl old = *sigp_ctrl;

                expect = old.value;
                rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
        }
        read_unlock(&vcpu->kvm->arch.sca_lock);
        WARN_ON(rc != expect); /* cannot clear? */
}

int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
        return psw_extint_disabled(vcpu) &&
               psw_ioint_disabled(vcpu) &&
               psw_mchk_disabled(vcpu);
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
        if (psw_extint_disabled(vcpu) ||
            !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
                return 0;
        if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
                /* No timer interrupts when single stepping */
                return 0;
        return 1;
}

static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
        const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
        const u64 ckc = vcpu->arch.sie_block->ckc;

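        /* with the clock-comparator sign control bit set in CR0, compare signed */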
        if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
                if ((s64)ckc >= (s64)now)
                        return 0;
        } else if (ckc >= now) {
                return 0;
        }
        return ckc_interrupts_enabled(vcpu);
}

static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
{
        return !psw_extint_disabled(vcpu) &&
               (vcpu->arch.sie_block->gcr[0] & 0x400ul);
}

static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
{
        if (!cpu_timer_interrupts_enabled(vcpu))
                return 0;
        return kvm_s390_get_cpu_timer(vcpu) >> 63;
}

static inline int is_ioirq(unsigned long irq_type)
{
        return ((irq_type >= IRQ_PEND_IO_ISC_0) &&
                (irq_type <= IRQ_PEND_IO_ISC_7));
}

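/*
 * Convert an interruption subclass (0-7) to its single-bit mask as used
 * in CR6: ISC 0 maps to the leftmost bit of the subclass mask.
 */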
static uint64_t isc_to_isc_bits(int isc)
{
        return (0x80 >> isc) << 24;
}

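/* the ISC is stored in bits 2-4 of the I/O interruption word */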
static inline u8 int_word_to_isc(u32 int_word)
{
        return (int_word & 0x38000000) >> 27;
}

static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->arch.float_int.pending_irqs |
               vcpu->arch.local_int.pending_irqs;
}

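/* remove pending I/O interrupts whose subclass is masked off in CR6 */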
static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
                                  unsigned long active_mask)
{
        int i;

        for (i = 0; i <= MAX_ISC; i++)
                if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
                        active_mask &= ~(1UL << (IRQ_PEND_IO_ISC_0 + i));

        return active_mask;
}

static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
{
        unsigned long active_mask;

        active_mask = pending_irqs(vcpu);
        if (!active_mask)
                return 0;

        if (psw_extint_disabled(vcpu))
                active_mask &= ~IRQ_PEND_EXT_MASK;
        if (psw_ioint_disabled(vcpu))
                active_mask &= ~IRQ_PEND_IO_MASK;
        else
                active_mask = disable_iscs(vcpu, active_mask);
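        /*
         * Drop external interrupt subclasses whose subclass-mask bits
         * are not set in the guest's CR0.
         */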
        if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
                __clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
                __clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
                __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
                __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
                __clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
        if (psw_mchk_disabled(vcpu))
                active_mask &= ~IRQ_PEND_MCHK_MASK;
        /*
         * Check both the floating and the local interrupts' cr14 because
         * bit IRQ_PEND_MCHK_REP could be set in both cases.
         */
        if (!(vcpu->arch.sie_block->gcr[14] &
              (vcpu->kvm->arch.float_int.mchk.cr14 |
               vcpu->arch.local_int.irq.mchk.cr14)))
                __clear_bit(IRQ_PEND_MCHK_REP, &active_mask);

        /*
         * STOP irqs will never be actively delivered. They are triggered via
         * intercept requests and cleared when the stop intercept is performed.
         */
        __clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);

        return active_mask;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
        atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
        set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
        atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
        clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
        atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
                      &vcpu->arch.sie_block->cpuflags);
        vcpu->arch.sie_block->lctl = 0x0000;
        vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

        if (guestdbg_enabled(vcpu)) {
                vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
                                               LCTL_CR10 | LCTL_CR11);
                vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
        }
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
        atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
}

static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
{
        if (!(pending_irqs(vcpu) & IRQ_PEND_IO_MASK))
                return;
        else if (psw_ioint_disabled(vcpu))
                __set_cpuflag(vcpu, CPUSTAT_IO_INT);
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR6;
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
        if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK))
                return;
        if (psw_extint_disabled(vcpu))
                __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
        if (!(pending_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
                return;
        if (psw_mchk_disabled(vcpu))
                vcpu->arch.sie_block->ictl |= ICTL_LPSW;
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
        if (kvm_s390_is_stop_irq_pending(vcpu))
                __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
}

/* Set interception request for non-deliverable interrupts */
static void set_intercept_indicators(struct kvm_vcpu *vcpu)
{
        set_intercept_indicators_io(vcpu);
        set_intercept_indicators_ext(vcpu);
        set_intercept_indicators_mchk(vcpu);
        set_intercept_indicators_stop(vcpu);
}

static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
                                         0, 0);

        rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
                          (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
                                         0, 0);

        rc = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
                          (u16 __user *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_ext_info ext;
        int rc;

        spin_lock(&li->lock);
        ext = li->irq.ext;
        clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
        li->irq.ext.ext_params2 = 0;
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
                   ext.ext_params2);
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_INT_PFAULT_INIT,
                                         0, ext.ext_params2);

        rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
        return rc ? -EFAULT : 0;
}

static int __write_machine_check(struct kvm_vcpu *vcpu,
                                 struct kvm_s390_mchk_info *mchk)
{
        unsigned long ext_sa_addr;
        unsigned long lc;
        freg_t fprs[NUM_FPRS];
        union mci mci;
        int rc;

        mci.val = mchk->mcic;
        /* take care of lazy register loading */
        save_fpu_regs();
        save_access_regs(vcpu->run->s.regs.acrs);
        if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
                save_gs_cb(current->thread.gs_cb);

        /* Extended save area */
        rc = read_guest_lc(vcpu, __LC_MCESAD, &ext_sa_addr,
                           sizeof(unsigned long));
        /* Only bits 0 through 63-LC are used for address formation */
        lc = ext_sa_addr & MCESA_LC_MASK;
        if (test_kvm_facility(vcpu->kvm, 133)) {
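                /*
                 * The LC value encodes the size (and thus the alignment)
                 * of the extended save area: 0/10 -> 1K, 11 -> 2K, 12 -> 4K.
                 */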
                switch (lc) {
                case 0:
                case 10:
                        ext_sa_addr &= ~0x3ffUL;
                        break;
                case 11:
                        ext_sa_addr &= ~0x7ffUL;
                        break;
                case 12:
                        ext_sa_addr &= ~0xfffUL;
                        break;
                default:
                        ext_sa_addr = 0;
                        break;
                }
        } else {
                ext_sa_addr &= ~0x3ffUL;
        }

        if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
                if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs,
                                    512))
                        mci.vr = 0;
        } else {
                mci.vr = 0;
        }
        if (!rc && mci.gs && ext_sa_addr && test_kvm_facility(vcpu->kvm, 133)
            && (lc == 11 || lc == 12)) {
                if (write_guest_abs(vcpu, ext_sa_addr + 1024,
                                    &vcpu->run->s.regs.gscb, 32))
                        mci.gs = 0;
        } else {
                mci.gs = 0;
        }

        /* General interruption information */
        rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID);
        rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, mci.val, (u64 __user *) __LC_MCCK_CODE);

        /* Register-save areas */
        if (MACHINE_HAS_VX) {
                convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
                rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA, fprs, 128);
        } else {
                rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA,
                                     vcpu->run->s.regs.fprs, 128);
        }
        rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
                             vcpu->run->s.regs.gprs, 128);
        rc |= put_guest_lc(vcpu, current->thread.fpu.fpc,
                           (u32 __user *) __LC_FP_CREG_SAVE_AREA);
        rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
                           (u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
        rc |= put_guest_lc(vcpu, kvm_s390_get_cpu_timer(vcpu),
                           (u64 __user *) __LC_CPU_TIMER_SAVE_AREA);
        rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->ckc >> 8,
                           (u64 __user *) __LC_CLOCK_COMP_SAVE_AREA);
        rc |= write_guest_lc(vcpu, __LC_AREGS_SAVE_AREA,
                             &vcpu->run->s.regs.acrs, 64);
        rc |= write_guest_lc(vcpu, __LC_CREGS_SAVE_AREA,
                             &vcpu->arch.sie_block->gcr, 128);

        /* Extended interruption information */
        rc |= put_guest_lc(vcpu, mchk->ext_damage_code,
                           (u32 __user *) __LC_EXT_DAMAGE_CODE);
        rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
                           (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
        rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, &mchk->fixed_logout,
                             sizeof(mchk->fixed_logout));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_mchk_info mchk = {};
        int deliver = 0;
        int rc = 0;

        spin_lock(&fi->lock);
        spin_lock(&li->lock);
        if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
            test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
                /*
                 * If there was an exigent machine check pending, then any
                 * repressible machine checks that might have been pending
                 * are indicated along with it, so always clear bits for
                 * repressible and exigent interrupts
                 */
                mchk = li->irq.mchk;
                clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
                clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
                memset(&li->irq.mchk, 0, sizeof(mchk));
                deliver = 1;
        }
        /*
         * We indicate floating repressible conditions along with
         * other pending conditions. Channel Report Pending and Channel
         * Subsystem Damage are the only two, and both are indicated by
         * bits in mcic and masked in cr14.
         */
        if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
                mchk.mcic |= fi->mchk.mcic;
                mchk.cr14 |= fi->mchk.cr14;
                memset(&fi->mchk, 0, sizeof(mchk));
                deliver = 1;
        }
        spin_unlock(&li->lock);
        spin_unlock(&fi->lock);

        if (deliver) {
                VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
                           mchk.mcic);
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                                 KVM_S390_MCHK,
                                                 mchk.cr14, mchk.mcic);
                rc = __write_machine_check(vcpu, &mchk);
        }
        return rc;
}

static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
        vcpu->stat.deliver_restart_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

        rc = write_guest_lc(vcpu,
                            offsetof(struct lowcore, restart_old_psw),
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_prefix_info prefix;

        spin_lock(&li->lock);
        prefix = li->irq.prefix;
        li->irq.prefix.address = 0;
        clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
        spin_unlock(&li->lock);

        vcpu->stat.deliver_prefix_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_SIGP_SET_PREFIX,
                                         prefix.address, 0);

        kvm_s390_set_prefix(vcpu, prefix.address);
        return 0;
}

static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;
        int cpu_addr;

        spin_lock(&li->lock);
        cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
        clear_bit(cpu_addr, li->sigp_emerg_pending);
        if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
                clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
        vcpu->stat.deliver_emergency_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
                                         cpu_addr, 0);

        rc = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
                          (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_extcall_info extcall;
        int rc;

        spin_lock(&li->lock);
        extcall = li->irq.extcall;
        li->irq.extcall.code = 0;
        clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
        vcpu->stat.deliver_external_call++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_INT_EXTERNAL_CALL,
                                         extcall.code, 0);

        rc = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
                          (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
                            sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_pgm_info pgm_info;
        int rc = 0, nullifying = false;
        u16 ilen;

        spin_lock(&li->lock);
        pgm_info = li->irq.pgm;
        clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
        memset(&li->irq.pgm, 0, sizeof(pgm_info));
        spin_unlock(&li->lock);

        ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK;
        VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d",
                   pgm_info.code, ilen);
        vcpu->stat.deliver_program_int++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
                                         pgm_info.code, 0);

        switch (pgm_info.code & ~PGM_PER) {
        case PGM_AFX_TRANSLATION:
        case PGM_ASX_TRANSLATION:
        case PGM_EX_TRANSLATION:
        case PGM_LFX_TRANSLATION:
        case PGM_LSTE_SEQUENCE:
        case PGM_LSX_TRANSLATION:
        case PGM_LX_TRANSLATION:
        case PGM_PRIMARY_AUTHORITY:
        case PGM_SECONDARY_AUTHORITY:
                nullifying = true;
                /* fall through */
        case PGM_SPACE_SWITCH:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                break;
        case PGM_ALEN_TRANSLATION:
        case PGM_ALE_SEQUENCE:
        case PGM_ASTE_INSTANCE:
        case PGM_ASTE_SEQUENCE:
        case PGM_ASTE_VALIDITY:
        case PGM_EXTENDED_AUTHORITY:
                rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
                                  (u8 *)__LC_EXC_ACCESS_ID);
                nullifying = true;
                break;
        case PGM_ASCE_TYPE:
        case PGM_PAGE_TRANSLATION:
        case PGM_REGION_FIRST_TRANS:
        case PGM_REGION_SECOND_TRANS:
        case PGM_REGION_THIRD_TRANS:
        case PGM_SEGMENT_TRANSLATION:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
                                   (u8 *)__LC_EXC_ACCESS_ID);
                rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
                                   (u8 *)__LC_OP_ACCESS_ID);
                nullifying = true;
                break;
        case PGM_MONITOR:
                rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
                                  (u16 *)__LC_MON_CLASS_NR);
                rc |= put_guest_lc(vcpu, pgm_info.mon_code,
                                   (u64 *)__LC_MON_CODE);
                break;
        case PGM_VECTOR_PROCESSING:
        case PGM_DATA:
                rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
                                  (u32 *)__LC_DATA_EXC_CODE);
                break;
        case PGM_PROTECTION:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
                                   (u8 *)__LC_EXC_ACCESS_ID);
                break;
        case PGM_STACK_FULL:
        case PGM_STACK_EMPTY:
        case PGM_STACK_SPECIFICATION:
        case PGM_STACK_TYPE:
        case PGM_STACK_OPERATION:
        case PGM_TRACE_TABEL:
        case PGM_CRYPTO_OPERATION:
                nullifying = true;
                break;
        }

        if (pgm_info.code & PGM_PER) {
                rc |= put_guest_lc(vcpu, pgm_info.per_code,
                                   (u8 *) __LC_PER_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
                                   (u8 *)__LC_PER_ATMID);
                rc |= put_guest_lc(vcpu, pgm_info.per_address,
                                   (u64 *) __LC_PER_ADDRESS);
                rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
                                   (u8 *) __LC_PER_ACCESS_ID);
        }

        if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND))
                kvm_s390_rewind_psw(vcpu, ilen);

        /* bit 1+2 of the target are the ilc, so we can directly use ilen */
        rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
        rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
                           (u64 *) __LC_LAST_BREAK);
        rc |= put_guest_lc(vcpu, pgm_info.code,
                           (u16 *)__LC_PGM_INT_CODE);
        rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_ext_info ext;
        int rc = 0;

        spin_lock(&fi->lock);
        if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
                spin_unlock(&fi->lock);
                return 0;
        }
        ext = fi->srv_signal;
        memset(&fi->srv_signal, 0, sizeof(ext));
        clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
                   ext.ext_params);
        vcpu->stat.deliver_service_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
                                         ext.ext_params, 0);

        rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, ext.ext_params,
                           (u32 *)__LC_EXT_PARAMS);

        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_interrupt_info *inti;
        int rc = 0;

        spin_lock(&fi->lock);
        inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
                                        struct kvm_s390_interrupt_info,
                                        list);
        if (inti) {
                list_del(&inti->list);
                fi->counters[FIRQ_CNTR_PFAULT] -= 1;
        }
        if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
                clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        if (inti) {
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                                 KVM_S390_INT_PFAULT_DONE, 0,
                                                 inti->ext.ext_params2);
                VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
                           inti->ext.ext_params2);

                rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
                                  (u16 *)__LC_EXT_INT_CODE);
                rc |= put_guest_lc(vcpu, PFAULT_DONE,
                                   (u16 *)__LC_EXT_CPU_ADDR);
                rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                                     &vcpu->arch.sie_block->gpsw,
                                     sizeof(psw_t));
                rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                                    &vcpu->arch.sie_block->gpsw,
                                    sizeof(psw_t));
                rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
                                   (u64 *)__LC_EXT_PARAMS2);
                kfree(inti);
        }
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_interrupt_info *inti;
        int rc = 0;

        spin_lock(&fi->lock);
        inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
                                        struct kvm_s390_interrupt_info,
                                        list);
        if (inti) {
                VCPU_EVENT(vcpu, 4,
                           "deliver: virtio parm: 0x%x,parm64: 0x%llx",
                           inti->ext.ext_params, inti->ext.ext_params2);
                vcpu->stat.deliver_virtio_interrupt++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                                 inti->type,
                                                 inti->ext.ext_params,
                                                 inti->ext.ext_params2);
                list_del(&inti->list);
                fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
        }
        if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
                clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        if (inti) {
                rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
                                  (u16 *)__LC_EXT_INT_CODE);
                rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
                                   (u16 *)__LC_EXT_CPU_ADDR);
                rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                                     &vcpu->arch.sie_block->gpsw,
                                     sizeof(psw_t));
                rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                                    &vcpu->arch.sie_block->gpsw,
                                    sizeof(psw_t));
                rc |= put_guest_lc(vcpu, inti->ext.ext_params,
                                   (u32 *)__LC_EXT_PARAMS);
                rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
                                   (u64 *)__LC_EXT_PARAMS2);
                kfree(inti);
        }
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
                                     unsigned long irq_type)
{
        struct list_head *isc_list;
        struct kvm_s390_float_interrupt *fi;
        struct kvm_s390_interrupt_info *inti = NULL;
        int rc = 0;

        fi = &vcpu->kvm->arch.float_int;

        spin_lock(&fi->lock);
        isc_list = &fi->lists[irq_type - IRQ_PEND_IO_ISC_0];
        inti = list_first_entry_or_null(isc_list,
                                        struct kvm_s390_interrupt_info,
                                        list);
        if (inti) {
                if (inti->type & KVM_S390_INT_IO_AI_MASK)
                        VCPU_EVENT(vcpu, 4, "%s", "deliver: I/O (AI)");
                else
                        VCPU_EVENT(vcpu, 4, "deliver: I/O %x ss %x schid %04x",
                                   inti->io.subchannel_id >> 8,
                                   inti->io.subchannel_id >> 1 & 0x3,
                                   inti->io.subchannel_nr);

                vcpu->stat.deliver_io_int++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                                 inti->type,
                                                 ((__u32)inti->io.subchannel_id << 16) |
                                                 inti->io.subchannel_nr,
                                                 ((__u64)inti->io.io_int_parm << 32) |
                                                 inti->io.io_int_word);
                list_del(&inti->list);
                fi->counters[FIRQ_CNTR_IO] -= 1;
        }
        if (list_empty(isc_list))
                clear_bit(irq_type, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        if (inti) {
                rc = put_guest_lc(vcpu, inti->io.subchannel_id,
                                  (u16 *)__LC_SUBCHANNEL_ID);
                rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
                                   (u16 *)__LC_SUBCHANNEL_NR);
                rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
                                   (u32 *)__LC_IO_INT_PARM);
                rc |= put_guest_lc(vcpu, inti->io.io_int_word,
                                   (u32 *)__LC_IO_INT_WORD);
                rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
                                     &vcpu->arch.sie_block->gpsw,
                                     sizeof(psw_t));
                rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
                                    &vcpu->arch.sie_block->gpsw,
                                    sizeof(psw_t));
                kfree(inti);
        }

        return rc ? -EFAULT : 0;
}

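/*
 * Delivery functions, indexed by IRQ_PEND_* bit number; I/O interrupts
 * (IRQ_PEND_IO_ISC_0..7) are handled separately via __deliver_io().
 */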
typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);

static const deliver_irq_t deliver_irq_funcs[] = {
        [IRQ_PEND_MCHK_EX] = __deliver_machine_check,
        [IRQ_PEND_MCHK_REP] = __deliver_machine_check,
        [IRQ_PEND_PROG] = __deliver_prog,
        [IRQ_PEND_EXT_EMERGENCY] = __deliver_emergency_signal,
        [IRQ_PEND_EXT_EXTERNAL] = __deliver_external_call,
        [IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
        [IRQ_PEND_EXT_CPU_TIMER] = __deliver_cpu_timer,
        [IRQ_PEND_RESTART] = __deliver_restart,
        [IRQ_PEND_SET_PREFIX] = __deliver_set_prefix,
        [IRQ_PEND_PFAULT_INIT] = __deliver_pfault_init,
        [IRQ_PEND_EXT_SERVICE] = __deliver_service,
        [IRQ_PEND_PFAULT_DONE] = __deliver_pfault_done,
        [IRQ_PEND_VIRTIO] = __deliver_virtio,
};

/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        if (!sclp.has_sigpif)
                return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);

        return sca_ext_call_pending(vcpu, NULL);
}

int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
        if (deliverable_irqs(vcpu))
                return 1;

        if (kvm_cpu_has_pending_timer(vcpu))
                return 1;

        /* external call pending and deliverable */
        if (kvm_s390_ext_call_pending(vcpu) &&
            !psw_extint_disabled(vcpu) &&
            (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
                return 1;

        if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
                return 1;
        return 0;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
}

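/*
 * Return the number of ns until the next timer interrupt is due (clock
 * comparator or CPU timer, whichever expires first), or 0 if one of
 * them has already expired.
 */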
static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
{
        const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
        const u64 ckc = vcpu->arch.sie_block->ckc;
        u64 cputm, sltime = 0;

        if (ckc_interrupts_enabled(vcpu)) {
                if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
                        if ((s64)now < (s64)ckc)
                                sltime = tod_to_ns((s64)ckc - (s64)now);
                } else if (now < ckc) {
                        sltime = tod_to_ns(ckc - now);
                }
                /* already expired */
                if (!sltime)
                        return 0;
                if (cpu_timer_interrupts_enabled(vcpu)) {
                        cputm = kvm_s390_get_cpu_timer(vcpu);
                        /* already expired? */
                        if (cputm >> 63)
                                return 0;
                        return min(sltime, tod_to_ns(cputm));
                }
        } else if (cpu_timer_interrupts_enabled(vcpu)) {
                sltime = kvm_s390_get_cpu_timer(vcpu);
                /* already expired? */
                if (sltime >> 63)
                        return 0;
        }
        return sltime;
}

int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
        u64 sltime;

        vcpu->stat.exit_wait_state++;

        /* fast path */
        if (kvm_arch_vcpu_runnable(vcpu))
                return 0;

        if (psw_interrupts_disabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
                return -EOPNOTSUPP; /* disabled wait */
        }

        if (!ckc_interrupts_enabled(vcpu) &&
            !cpu_timer_interrupts_enabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
                __set_cpu_idle(vcpu);
                goto no_timer;
        }

        sltime = __calculate_sltime(vcpu);
        if (!sltime)
                return 0;

        __set_cpu_idle(vcpu);
        hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL);
        VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
no_timer:
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
        kvm_vcpu_block(vcpu);
        __unset_cpu_idle(vcpu);
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

        hrtimer_cancel(&vcpu->arch.ckc_timer);
        return 0;
}

void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
        /*
         * We cannot move this into the if, as the CPU might already be
         * in kvm_vcpu_block without having the waitqueue set (polling).
         */
        vcpu->valid_wakeup = true;
        if (swait_active(&vcpu->wq)) {
                /*
                 * The vcpu gave up the cpu voluntarily, mark it as a good
                 * yield-candidate.
                 */
                vcpu->preempted = true;
                swake_up(&vcpu->wq);
                vcpu->stat.halt_wakeup++;
        }
        /*
         * The VCPU might not be sleeping but is executing the VSIE. Let's
         * kick it, so it leaves the SIE to process the request.
         */
        kvm_s390_vsie_kick(vcpu);
}

enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;
        u64 sltime;

        vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
        sltime = __calculate_sltime(vcpu);

        /*
         * If the monotonic clock runs faster than the tod clock we might be
         * woken up too early and have to go back to sleep to avoid deadlocks.
         */
        if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime)))
                return HRTIMER_RESTART;
        kvm_s390_vcpu_wakeup(vcpu);
        return HRTIMER_NORESTART;
}

void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        spin_lock(&li->lock);
        li->pending_irqs = 0;
        bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
        memset(&li->irq, 0, sizeof(li->irq));
        spin_unlock(&li->lock);

        sca_clear_ext_call(vcpu);
}

int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        deliver_irq_t func;
        int rc = 0;
        unsigned long irq_type;
        unsigned long irqs;

        __reset_intercept_indicators(vcpu);

        /* pending ckc conditions might have been invalidated */
        clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
        if (ckc_irq_pending(vcpu))
                set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);

        /* pending cpu timer conditions might have been invalidated */
        clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
        if (cpu_timer_irq_pending(vcpu))
                set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);

        while ((irqs = deliverable_irqs(vcpu)) && !rc) {
                /* bits are in the order of interrupt priority */
                irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT);
                if (is_ioirq(irq_type)) {
                        rc = __deliver_io(vcpu, irq_type);
                } else {
                        func = deliver_irq_funcs[irq_type];
                        if (!func) {
                                WARN_ON_ONCE(func == NULL);
                                clear_bit(irq_type, &li->pending_irqs);
                                continue;
                        }
                        rc = func(vcpu);
                }
        }

        set_intercept_indicators(vcpu);

        return rc;
}

static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
                                   irq->u.pgm.code, 0);

        if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) {
                /* auto detection if no valid ILC was given */
                irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK;
                irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu);
                irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID;
        }

        if (irq->u.pgm.code == PGM_PER) {
                li->irq.pgm.code |= PGM_PER;
                li->irq.pgm.flags = irq->u.pgm.flags;
                /* only modify PER related information */
                li->irq.pgm.per_address = irq->u.pgm.per_address;
                li->irq.pgm.per_code = irq->u.pgm.per_code;
                li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
                li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
        } else if (!(irq->u.pgm.code & PGM_PER)) {
                li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
                                   irq->u.pgm.code;
                li->irq.pgm.flags = irq->u.pgm.flags;
                /* only modify non-PER information */
                li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
                li->irq.pgm.mon_code = irq->u.pgm.mon_code;
                li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
                li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
                li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
                li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
        } else {
                li->irq.pgm = irq->u.pgm;
        }
        set_bit(IRQ_PEND_PROG, &li->pending_irqs);
        return 0;
}

static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
                   irq->u.ext.ext_params2);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
                                   irq->u.ext.ext_params,
                                   irq->u.ext.ext_params2);

        li->irq.ext = irq->u.ext;
        set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
        atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
        uint16_t src_id = irq->u.extcall.code;

        VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
                   src_id);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
                                   src_id, 0);

        /* sending vcpu invalid */
        if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
                return -EINVAL;

        if (sclp.has_sigpif)
                return sca_inject_ext_call(vcpu, src_id);

        if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
                return -EBUSY;
        *extcall = irq->u.extcall;
        atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_prefix_info *prefix = &li->irq.prefix;

        VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
                   irq->u.prefix.address);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
                                   irq->u.prefix.address, 0);

        if (!is_vcpu_stopped(vcpu))
                return -EBUSY;

        *prefix = irq->u.prefix;
        set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
        return 0;
}

#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_stop_info *stop = &li->irq.stop;
        int rc = 0;

        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);

        if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
                return -EINVAL;

        if (is_vcpu_stopped(vcpu)) {
                if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
                        rc = kvm_s390_store_status_unloaded(vcpu,
                                                KVM_S390_STORE_STATUS_NOADDR);
                return rc;
        }

        if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
                return -EBUSY;
        stop->flags = irq->u.stop.flags;
        __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
        return 0;
}

static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
                                 struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

        set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
        return 0;
}

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
                                   struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
                   irq->u.emerg.code);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
                                   irq->u.emerg.code, 0);

        /* sending vcpu invalid */
        if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
                return -EINVAL;

        set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
        set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
        atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_mchk_info *mchk = &li->irq.mchk;

        VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
                   irq->u.mchk.mcic);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
                                   irq->u.mchk.mcic);

        /*
         * Because repressible machine checks can be indicated along with
         * exigent machine checks (PoP, Chapter 11, Interruption action)
         * we need to combine cr14, mcic and external damage code.
         * Failing storage address and the logout area should not be or'ed
         * together, we just indicate the last occurrence of the corresponding
         * machine check
         */
        mchk->cr14 |= irq->u.mchk.cr14;
        mchk->mcic |= irq->u.mchk.mcic;
        mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
        mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
        memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
               sizeof(mchk->fixed_logout));
        if (mchk->mcic & MCHK_EX_MASK)
                set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
        else if (mchk->mcic & MCHK_REP_MASK)
                set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
        return 0;
}

static int __inject_ckc(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
                                   0, 0);

        set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
        atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
                                   0, 0);

        set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
        atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
                                                  int isc, u32 schid)
{
        struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
        struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
        struct kvm_s390_interrupt_info *iter;
        u16 id = (schid & 0xffff0000U) >> 16;
        u16 nr = schid & 0x0000ffffU;

        spin_lock(&fi->lock);
        list_for_each_entry(iter, isc_list, list) {
                if (schid && (id != iter->io.subchannel_id ||
                              nr != iter->io.subchannel_nr))
                        continue;
                /* found an appropriate entry */
                list_del_init(&iter->list);
                fi->counters[FIRQ_CNTR_IO] -= 1;
                if (list_empty(isc_list))
                        clear_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
                spin_unlock(&fi->lock);
                return iter;
        }
        spin_unlock(&fi->lock);
        return NULL;
}

/*
 * Dequeue and return an I/O interrupt matching any of the interruption
 * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
 */
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
                                                    u64 isc_mask, u32 schid)
{
        struct kvm_s390_interrupt_info *inti = NULL;
        int isc;

        for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
                if (isc_mask & isc_to_isc_bits(isc))
                        inti = get_io_int(kvm, isc, schid);
        }
        return inti;
}

#define SCCB_MASK 0xFFFFFFF8
#define SCCB_EVENT_PENDING 0x3

static int __inject_service(struct kvm *kvm,
                            struct kvm_s390_interrupt_info *inti)
{
        struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

        spin_lock(&fi->lock);
        fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
        /*
         * Early versions of the QEMU s390 bios will inject several
         * service interrupts one after another without handling a
         * condition code indicating busy.
         * We will silently ignore those superfluous sccb values.
         * A future version of QEMU will take care of serialization
         * of servc requests.
         */
| 1454 | if (fi->srv_signal.ext_params & SCCB_MASK) |
| 1455 | goto out; |
| 1456 | fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK; |
| 1457 | set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs); |
| 1458 | out: |
| 1459 | spin_unlock(&fi->lock); |
| 1460 | kfree(inti); |
| 1461 | return 0; |
| 1462 | } |

static int __inject_virtio(struct kvm *kvm,
			   struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_VIRTIO] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
	set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

static int __inject_pfault_done(struct kvm *kvm,
				struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_PFAULT] >=
		(ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_PFAULT] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
	set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}
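
/*
 * Both injection paths above are bounded by per-type counters: once
 * KVM_S390_MAX_VIRTIO_IRQS resp. ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS
 * entries are queued, further injections fail with -EBUSY instead of
 * growing the kernel-side lists without limit.
 */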

#define CR_PENDING_SUBCLASS 28
static int __inject_float_mchk(struct kvm *kvm,
			       struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
	fi->mchk.mcic |= inti->mchk.mcic;
	set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	kfree(inti);
	return 0;
}
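
/*
 * Repressible machine checks are combined rather than queued: the MCIC
 * bits of all reported conditions are ORed together and CR14 only
 * accumulates the pending-subclass bit, so a single IRQ_PEND_MCHK_REP
 * indication can stand for several events.
 */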

static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi;
	struct list_head *list;
	int isc;

	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_IO] += 1;

	if (inti->type & KVM_S390_INT_IO_AI_MASK)
		VM_EVENT(kvm, 4, "%s", "inject: I/O (AI)");
	else
		VM_EVENT(kvm, 4, "inject: I/O %x ss %x schid %04x",
			inti->io.subchannel_id >> 8,
			inti->io.subchannel_id >> 1 & 0x3,
			inti->io.subchannel_nr);
	isc = int_word_to_isc(inti->io.io_int_word);
	list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	list_add_tail(&inti->list, list);
	set_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}
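
/*
 * The interruption subclass extracted from the I/O interruption word
 * selects one of the eight per-ISC lists; the corresponding
 * IRQ_PEND_IO_ISC_* bit simply mirrors whether that list is non-empty.
 */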

/*
 * Find a destination VCPU for a floating irq and kick it.
 */
static void __floating_irq_kick(struct kvm *kvm, u64 type)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu;
	int sigcpu, online_vcpus, nr_tries = 0;

	online_vcpus = atomic_read(&kvm->online_vcpus);
	if (!online_vcpus)
		return;

	/* find idle VCPUs first, then round robin */
	sigcpu = find_first_bit(fi->idle_mask, online_vcpus);
	if (sigcpu == online_vcpus) {
		do {
			sigcpu = fi->next_rr_cpu;
			fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus;
			/* avoid endless loops if all vcpus are stopped */
			if (nr_tries++ >= online_vcpus)
				return;
		} while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
	}
	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);

	/* make the VCPU drop out of the SIE, or wake it up if sleeping */
	li = &dst_vcpu->arch.local_int;
	spin_lock(&li->lock);
	switch (type) {
	case KVM_S390_MCHK:
		atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		atomic_or(CPUSTAT_IO_INT, li->cpuflags);
		break;
	default:
		atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
		break;
	}
	spin_unlock(&li->lock);
	kvm_s390_vcpu_wakeup(dst_vcpu);
}
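
/*
 * The CPUSTAT_* flag set above is what actually interrupts the target:
 * it makes a VCPU currently running in SIE exit, while
 * kvm_s390_vcpu_wakeup() takes care of a VCPU sleeping in its idle
 * wait.
 */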

static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	u64 type = READ_ONCE(inti->type);
	int rc;

	switch (type) {
	case KVM_S390_MCHK:
		rc = __inject_float_mchk(kvm, inti);
		break;
	case KVM_S390_INT_VIRTIO:
		rc = __inject_virtio(kvm, inti);
		break;
	case KVM_S390_INT_SERVICE:
		rc = __inject_service(kvm, inti);
		break;
	case KVM_S390_INT_PFAULT_DONE:
		rc = __inject_pfault_done(kvm, inti);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		rc = __inject_io(kvm, inti);
		break;
	default:
		rc = -EINVAL;
	}
	if (rc)
		return rc;

	__floating_irq_kick(kvm, type);
	return 0;
}
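
/*
 * inti->type is read once up front because __inject_service() and
 * __inject_float_mchk() free @inti on success; dereferencing it again
 * after the switch for the kick would be a use-after-free.
 */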

int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_interrupt_info *inti;
	int rc;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = s390int->type;
	switch (inti->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_INT_PFAULT_DONE:
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_MCHK:
		VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
			 s390int->parm64);
		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		inti->io.subchannel_id = s390int->parm >> 16;
		inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
		inti->io.io_int_parm = s390int->parm64 >> 32;
		inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
		break;
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
				 2);

	rc = __inject_vm(kvm, inti);
	if (rc)
		kfree(inti);
	return rc;
}
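
/*
 * Illustrative example (not taken from this file): a floating adapter
 * I/O interrupt for ISC 3 could be requested as
 *
 *	struct kvm_s390_interrupt s390int = {
 *		.type	= KVM_S390_INT_IO(1, 0, 0, 0),
 *		.parm	= 0,
 *		.parm64	= (3UL << 27) | 0x80000000UL,
 *	};
 *	kvm_s390_inject_vm(kvm, &s390int);
 *
 * For the I/O case, parm packs subchannel id and number while parm64
 * carries the interruption parameter in the upper and the interruption
 * word in the lower 32 bits.
 */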

int kvm_s390_reinject_io_int(struct kvm *kvm,
			     struct kvm_s390_interrupt_info *inti)
{
	return __inject_vm(kvm, inti);
}

int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
		       struct kvm_s390_irq *irq)
{
	irq->type = s390int->type;
	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.pgm.code = s390int->parm;
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		irq->u.prefix.address = s390int->parm;
		break;
	case KVM_S390_SIGP_STOP:
		irq->u.stop.flags = s390int->parm;
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.extcall.code = s390int->parm;
		break;
	case KVM_S390_INT_EMERGENCY:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.emerg.code = s390int->parm;
		break;
	case KVM_S390_MCHK:
		irq->u.mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_PFAULT_INIT:
		irq->u.ext.ext_params = s390int->parm;
		irq->u.ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_RESTART:
	case KVM_S390_INT_CLOCK_COMP:
	case KVM_S390_INT_CPU_TIMER:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
}

void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->irq.stop.flags = 0;
	clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
	spin_unlock(&li->lock);
}

static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	int rc;

	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		rc = __inject_prog(vcpu, irq);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		rc = __inject_set_prefix(vcpu, irq);
		break;
	case KVM_S390_SIGP_STOP:
		rc = __inject_sigp_stop(vcpu, irq);
		break;
	case KVM_S390_RESTART:
		rc = __inject_sigp_restart(vcpu, irq);
		break;
	case KVM_S390_INT_CLOCK_COMP:
		rc = __inject_ckc(vcpu);
		break;
	case KVM_S390_INT_CPU_TIMER:
		rc = __inject_cpu_timer(vcpu);
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		rc = __inject_extcall(vcpu, irq);
		break;
	case KVM_S390_INT_EMERGENCY:
		rc = __inject_sigp_emergency(vcpu, irq);
		break;
	case KVM_S390_MCHK:
		rc = __inject_mchk(vcpu, irq);
		break;
	case KVM_S390_INT_PFAULT_INIT:
		rc = __inject_pfault_init(vcpu, irq);
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	default:
		rc = -EINVAL;
	}

	return rc;
}

int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	spin_lock(&li->lock);
	rc = do_inject_vcpu(vcpu, irq);
	spin_unlock(&li->lock);
	if (!rc)
		kvm_s390_vcpu_wakeup(vcpu);
	return rc;
}
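
/*
 * Note that the floating types (SERVICE, VIRTIO and the I/O range) are
 * deliberately rejected by do_inject_vcpu(): they are machine-wide and
 * must be injected through kvm_s390_inject_vm() instead.
 */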

static inline void clear_irq_list(struct list_head *_list)
{
	struct kvm_s390_interrupt_info *inti, *n;

	list_for_each_entry_safe(inti, n, _list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
}

static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
			struct kvm_s390_irq *irq)
{
	irq->type = inti->type;
	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		irq->u.ext = inti->ext;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		irq->u.io = inti->io;
		break;
	}
}

void kvm_s390_clear_float_irqs(struct kvm *kvm)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	int i;

	spin_lock(&fi->lock);
	fi->pending_irqs = 0;
	memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
	memset(&fi->mchk, 0, sizeof(fi->mchk));
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		clear_irq_list(&fi->lists[i]);
	for (i = 0; i < FIRQ_MAX_COUNT; i++)
		fi->counters[i] = 0;
	spin_unlock(&fi->lock);
}

static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
{
	struct kvm_s390_interrupt_info *inti;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_irq *buf;
	struct kvm_s390_irq *irq;
	int max_irqs;
	int ret = 0;
	int n = 0;
	int i;

	if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
		return -EINVAL;

	/*
	 * We are already using -ENOMEM to signal
	 * userspace it may retry with a bigger buffer,
	 * so we need to use something else for this case
	 */
	buf = vzalloc(len);
	if (!buf)
		return -ENOBUFS;

	max_irqs = len / sizeof(struct kvm_s390_irq);

	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++) {
		list_for_each_entry(inti, &fi->lists[i], list) {
			if (n == max_irqs) {
				/* signal userspace to try again */
				ret = -ENOMEM;
				goto out;
			}
			inti_to_irq(inti, &buf[n]);
			n++;
		}
	}
	if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) {
		if (n == max_irqs) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			goto out;
		}
		irq = (struct kvm_s390_irq *) &buf[n];
		irq->type = KVM_S390_INT_SERVICE;
		irq->u.ext = fi->srv_signal;
		n++;
	}
	if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		if (n == max_irqs) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			goto out;
		}
		irq = (struct kvm_s390_irq *) &buf[n];
		irq->type = KVM_S390_MCHK;
		irq->u.mchk = fi->mchk;
		n++;
	}

out:
	spin_unlock(&fi->lock);
	if (!ret && n > 0) {
		if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
			ret = -EFAULT;
	}
	vfree(buf);

	return ret < 0 ? ret : n;
}
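
/*
 * Userspace reaches this through the FLIC device. A minimal sketch,
 * assuming @flic_fd was obtained via KVM_CREATE_DEVICE (illustrative
 * only, error handling omitted):
 *
 *	struct kvm_s390_irq irqs[256];
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_DEV_FLIC_GET_ALL_IRQS,
 *		.attr	= sizeof(irqs),
 *		.addr	= (__u64)(unsigned long)irqs,
 *	};
 *	int n = ioctl(flic_fd, KVM_GET_DEVICE_ATTR, &attr);
 *
 * attr.attr holds the buffer length in bytes. On success n is the
 * number of interrupts written; on failure errno is set, with ENOMEM
 * meaning "retry with a bigger buffer".
 */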

static int flic_ais_mode_get_all(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_ais_all ais;

	if (attr->attr < sizeof(ais))
		return -EINVAL;

	if (!test_kvm_facility(kvm, 72))
		return -EOPNOTSUPP;

	mutex_lock(&fi->ais_lock);
	ais.simm = fi->simm;
	ais.nimm = fi->nimm;
	mutex_unlock(&fi->ais_lock);

	if (copy_to_user((void __user *)attr->addr, &ais, sizeof(ais)))
		return -EFAULT;

	return 0;
}
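
/*
 * simm and nimm are the per-ISC mode masks of the adapter-interruption
 * suppression (AIS) facility: a set simm bit means the ISC operates in
 * single-interruption mode, a set nimm bit that delivery for the ISC
 * is currently suppressed altogether.
 */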

static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
		r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
					  attr->attr);
		break;
	case KVM_DEV_FLIC_AISM_ALL:
		r = flic_ais_mode_get_all(dev->kvm, attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
				     u64 addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	void *target = NULL;
	void __user *source;
	u64 size;

	if (get_user(inti->type, (u64 __user *)addr))
		return -EFAULT;

	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		target = (void *) &inti->ext;
		source = &uptr->u.ext;
		size = sizeof(inti->ext);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		target = (void *) &inti->io;
		source = &uptr->u.io;
		size = sizeof(inti->io);
		break;
	case KVM_S390_MCHK:
		target = (void *) &inti->mchk;
		source = &uptr->u.mchk;
		size = sizeof(inti->mchk);
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(target, source, size))
		return -EFAULT;

	return 0;
}

static int enqueue_floating_irq(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int r = 0;
	int len = attr->attr;

	if (len % sizeof(struct kvm_s390_irq) != 0)
		return -EINVAL;
	else if (len > KVM_S390_FLIC_MAX_BUFFER)
		return -EINVAL;

	while (len >= sizeof(struct kvm_s390_irq)) {
		inti = kzalloc(sizeof(*inti), GFP_KERNEL);
		if (!inti)
			return -ENOMEM;

		r = copy_irq_from_user(inti, attr->addr);
		if (r) {
			kfree(inti);
			return r;
		}
		r = __inject_vm(dev->kvm, inti);
		if (r) {
			kfree(inti);
			return r;
		}
		len -= sizeof(struct kvm_s390_irq);
		attr->addr += sizeof(struct kvm_s390_irq);
	}

	return r;
}
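
/*
 * If a copy or injection fails partway through the buffer, interrupts
 * that were already enqueued stay pending; userspace only gets the
 * error code, not a count of how many entries were consumed.
 */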

static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
{
	if (id >= MAX_S390_IO_ADAPTERS)
		return NULL;
	return kvm->arch.adapters[id];
}

static int register_io_adapter(struct kvm_device *dev,
			       struct kvm_device_attr *attr)
{
	struct s390_io_adapter *adapter;
	struct kvm_s390_io_adapter adapter_info;

	if (copy_from_user(&adapter_info,
			   (void __user *)attr->addr, sizeof(adapter_info)))
		return -EFAULT;

	if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
	    (dev->kvm->arch.adapters[adapter_info.id] != NULL))
		return -EINVAL;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	INIT_LIST_HEAD(&adapter->maps);
	init_rwsem(&adapter->maps_lock);
	atomic_set(&adapter->nr_maps, 0);
	adapter->id = adapter_info.id;
	adapter->isc = adapter_info.isc;
	adapter->maskable = adapter_info.maskable;
	adapter->masked = false;
	adapter->swap = adapter_info.swap;
	adapter->suppressible = (adapter_info.flags) &
				KVM_S390_ADAPTER_SUPPRESSIBLE;
	dev->kvm->arch.adapters[adapter->id] = adapter;

	return 0;
}

int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
{
	int ret;
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

	if (!adapter || !adapter->maskable)
		return -EINVAL;
	ret = adapter->masked;
	adapter->masked = masked;
	return ret;
}

static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map;
	int ret;

	if (!adapter || !addr)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&map->list);
	map->guest_addr = addr;
	map->addr = gmap_translate(kvm->arch.gmap, addr);
	if (map->addr == -EFAULT) {
		ret = -EFAULT;
		goto out;
	}
	ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
	if (ret < 0)
		goto out;
	BUG_ON(ret != 1);
	down_write(&adapter->maps_lock);
	if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
		list_add_tail(&map->list, &adapter->maps);
		ret = 0;
	} else {
		put_page(map->page);
		ret = -EINVAL;
	}
	up_write(&adapter->maps_lock);
out:
	if (ret)
		kfree(map);
	return ret;
}
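
/*
 * Mapping pins exactly one guest page via get_user_pages_fast() so
 * that adapter_indicators_set() can flip indicator bits through
 * page_address() without faulting; the pin is dropped again on unmap
 * or adapter destruction.
 */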

static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map, *tmp;
	int found = 0;

	if (!adapter || !addr)
		return -EINVAL;

	down_write(&adapter->maps_lock);
	list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
		if (map->guest_addr == addr) {
			found = 1;
			atomic_dec(&adapter->nr_maps);
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
			break;
		}
	}
	up_write(&adapter->maps_lock);

	return found ? 0 : -EINVAL;
}

void kvm_s390_destroy_adapters(struct kvm *kvm)
{
	int i;
	struct s390_map_info *map, *tmp;

	for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
		if (!kvm->arch.adapters[i])
			continue;
		list_for_each_entry_safe(map, tmp,
					 &kvm->arch.adapters[i]->maps, list) {
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
		}
		kfree(kvm->arch.adapters[i]);
	}
}

static int modify_io_adapter(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct kvm_s390_io_adapter_req req;
	struct s390_io_adapter *adapter;
	int ret;

	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;

	adapter = get_io_adapter(dev->kvm, req.id);
	if (!adapter)
		return -EINVAL;
	switch (req.type) {
	case KVM_S390_IO_ADAPTER_MASK:
		ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
		if (ret > 0)
			ret = 0;
		break;
	case KVM_S390_IO_ADAPTER_MAP:
		ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
		break;
	case KVM_S390_IO_ADAPTER_UNMAP:
		ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr)
{
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */
	u32 schid;

	if (attr->flags)
		return -EINVAL;
	if (attr->attr != sizeof(schid))
		return -EINVAL;
	if (copy_from_user(&schid, (void __user *) attr->addr, sizeof(schid)))
		return -EFAULT;
	kfree(kvm_s390_get_io_int(kvm, isc_mask, schid));
	/*
	 * If userspace is conforming to the architecture, we can have at most
	 * one pending I/O interrupt per subchannel, so this is effectively a
	 * clear all.
	 */
	return 0;
}

static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_ais_req req;
	int ret = 0;

	if (!test_kvm_facility(kvm, 72))
		return -EOPNOTSUPP;

	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;

	if (req.isc > MAX_ISC)
		return -EINVAL;

	trace_kvm_s390_modify_ais_mode(req.isc,
				       (fi->simm & AIS_MODE_MASK(req.isc)) ?
				       (fi->nimm & AIS_MODE_MASK(req.isc)) ?
				       2 : KVM_S390_AIS_MODE_SINGLE :
				       KVM_S390_AIS_MODE_ALL, req.mode);

	mutex_lock(&fi->ais_lock);
	switch (req.mode) {
	case KVM_S390_AIS_MODE_ALL:
		fi->simm &= ~AIS_MODE_MASK(req.isc);
		fi->nimm &= ~AIS_MODE_MASK(req.isc);
		break;
	case KVM_S390_AIS_MODE_SINGLE:
		fi->simm |= AIS_MODE_MASK(req.isc);
		fi->nimm &= ~AIS_MODE_MASK(req.isc);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&fi->ais_lock);

	return ret;
}

static int kvm_s390_inject_airq(struct kvm *kvm,
				struct s390_io_adapter *adapter)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_interrupt s390int = {
		.type = KVM_S390_INT_IO(1, 0, 0, 0),
		.parm = 0,
		.parm64 = (adapter->isc << 27) | 0x80000000,
	};
	int ret = 0;

	if (!test_kvm_facility(kvm, 72) || !adapter->suppressible)
		return kvm_s390_inject_vm(kvm, &s390int);

	mutex_lock(&fi->ais_lock);
	if (fi->nimm & AIS_MODE_MASK(adapter->isc)) {
		trace_kvm_s390_airq_suppressed(adapter->id, adapter->isc);
		goto out;
	}

	ret = kvm_s390_inject_vm(kvm, &s390int);
	if (!ret && (fi->simm & AIS_MODE_MASK(adapter->isc))) {
		fi->nimm |= AIS_MODE_MASK(adapter->isc);
		trace_kvm_s390_modify_ais_mode(adapter->isc,
					       KVM_S390_AIS_MODE_SINGLE, 2);
	}
out:
	mutex_unlock(&fi->ais_lock);
	return ret;
}
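
/*
 * In single-interruption mode the first successful delivery sets the
 * ISC's nimm bit, so follow-on adapter interrupts for that ISC are
 * suppressed (see the nimm check above) until the mode is reset
 * through KVM_DEV_FLIC_AISM or KVM_DEV_FLIC_AISM_ALL.
 */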

static int flic_inject_airq(struct kvm *kvm, struct kvm_device_attr *attr)
{
	unsigned int id = attr->attr;
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

	if (!adapter)
		return -EINVAL;

	return kvm_s390_inject_airq(kvm, adapter);
}

static int flic_ais_mode_set_all(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_ais_all ais;

	if (!test_kvm_facility(kvm, 72))
		return -EOPNOTSUPP;

	if (copy_from_user(&ais, (void __user *)attr->addr, sizeof(ais)))
		return -EFAULT;

	mutex_lock(&fi->ais_lock);
	fi->simm = ais.simm;
	fi->nimm = ais.nimm;
	mutex_unlock(&fi->ais_lock);

	return 0;
}

static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;
	unsigned int i;
	struct kvm_vcpu *vcpu;

	switch (attr->group) {
	case KVM_DEV_FLIC_ENQUEUE:
		r = enqueue_floating_irq(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IRQS:
		kvm_s390_clear_float_irqs(dev->kvm);
		break;
	case KVM_DEV_FLIC_APF_ENABLE:
		dev->kvm->arch.gmap->pfault_enabled = 1;
		break;
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
		dev->kvm->arch.gmap->pfault_enabled = 0;
		/*
		 * Make sure no async faults are in transition when
		 * clearing the queues, so we don't need to worry
		 * about late-coming workers.
		 */
		synchronize_srcu(&dev->kvm->srcu);
		kvm_for_each_vcpu(i, vcpu, dev->kvm)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
		r = register_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
		r = modify_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IO_IRQ:
		r = clear_io_irq(dev->kvm, attr);
		break;
	case KVM_DEV_FLIC_AISM:
		r = modify_ais_mode(dev->kvm, attr);
		break;
	case KVM_DEV_FLIC_AIRQ_INJECT:
		r = flic_inject_airq(dev->kvm, attr);
		break;
	case KVM_DEV_FLIC_AISM_ALL:
		r = flic_ais_mode_set_all(dev->kvm, attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static int flic_has_attr(struct kvm_device *dev,
			 struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
	case KVM_DEV_FLIC_ENQUEUE:
	case KVM_DEV_FLIC_CLEAR_IRQS:
	case KVM_DEV_FLIC_APF_ENABLE:
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
	case KVM_DEV_FLIC_CLEAR_IO_IRQ:
	case KVM_DEV_FLIC_AISM:
	case KVM_DEV_FLIC_AIRQ_INJECT:
	case KVM_DEV_FLIC_AISM_ALL:
		return 0;
	}
	return -ENXIO;
}

static int flic_create(struct kvm_device *dev, u32 type)
{
	if (!dev)
		return -EINVAL;
	if (dev->kvm->arch.flic)
		return -EINVAL;
	dev->kvm->arch.flic = dev;
	return 0;
}

static void flic_destroy(struct kvm_device *dev)
{
	dev->kvm->arch.flic = NULL;
	kfree(dev);
}

/* s390 floating irq controller (flic) */
struct kvm_device_ops kvm_flic_ops = {
	.name = "kvm-flic",
	.get_attr = flic_get_attr,
	.set_attr = flic_set_attr,
	.has_attr = flic_has_attr,
	.create = flic_create,
	.destroy = flic_destroy,
};

static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
{
	unsigned long bit;

	bit = bit_nr + (addr % PAGE_SIZE) * 8;

	return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
}
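
/*
 * Example: for a swapped (big-endian) indicator area, bit_nr 0 of an
 * aligned page maps to bit 63 of the first 64-bit word: XOR-ing with
 * BITS_PER_LONG - 1 converts MSB-0 numbering into the LSB-0 numbering
 * that set_bit() expects.
 */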

static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
					  u64 addr)
{
	struct s390_map_info *map;

	if (!adapter)
		return NULL;

	list_for_each_entry(map, &adapter->maps, list) {
		if (map->guest_addr == addr)
			return map;
	}
	return NULL;
}

static int adapter_indicators_set(struct kvm *kvm,
				  struct s390_io_adapter *adapter,
				  struct kvm_s390_adapter_int *adapter_int)
{
	unsigned long bit;
	int summary_set, idx;
	struct s390_map_info *info;
	void *map;

	info = get_map_info(adapter, adapter_int->ind_addr);
	if (!info)
		return -1;
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
	set_bit(bit, map);
	idx = srcu_read_lock(&kvm->srcu);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	info = get_map_info(adapter, adapter_int->summary_addr);
	if (!info) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -1;
	}
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->summary_offset,
			  adapter->swap);
	summary_set = test_and_set_bit(bit, map);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	srcu_read_unlock(&kvm->srcu, idx);
	return summary_set ? 0 : 1;
}

/*
 * < 0 - not injected due to error
 * = 0 - coalesced, summary indicator already active
 * > 0 - injected interrupt
 */
static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level,
			   bool line_status)
{
	int ret;
	struct s390_io_adapter *adapter;

	/* We're only interested in the 0->1 transition. */
	if (!level)
		return 0;
	adapter = get_io_adapter(kvm, e->adapter.adapter_id);
	if (!adapter)
		return -1;
	down_read(&adapter->maps_lock);
	ret = adapter_indicators_set(kvm, adapter, &e->adapter);
	up_read(&adapter->maps_lock);
	if ((ret > 0) && !adapter->masked) {
		ret = kvm_s390_inject_airq(kvm, adapter);
		if (ret == 0)
			ret = 1;
	}
	return ret;
}

/*
 * Inject the machine check into the guest.
 */
void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
				     struct mcck_volatile_info *mcck_info)
{
	struct kvm_s390_interrupt_info inti;
	struct kvm_s390_irq irq;
	struct kvm_s390_mchk_info *mchk;
	union mci mci;
	__u64 cr14 = 0; /* upper bits are not used */
	int rc;

	mci.val = mcck_info->mcic;
	if (mci.sr)
		cr14 |= MCCK_CR14_RECOVERY_SUB_MASK;
	if (mci.dg)
		cr14 |= MCCK_CR14_DEGRAD_SUB_MASK;
	if (mci.w)
		cr14 |= MCCK_CR14_WARN_SUB_MASK;

	mchk = mci.ck ? &inti.mchk : &irq.u.mchk;
	mchk->cr14 = cr14;
	mchk->mcic = mcck_info->mcic;
	mchk->ext_damage_code = mcck_info->ext_damage_code;
	mchk->failing_storage_address = mcck_info->failing_storage_address;
	if (mci.ck) {
		/* Inject the floating machine check */
		inti.type = KVM_S390_MCHK;
		rc = __inject_vm(vcpu->kvm, &inti);
	} else {
		/* Inject the machine check to the specified vcpu */
		irq.type = KVM_S390_MCHK;
		rc = kvm_s390_inject_vcpu(vcpu, &irq);
	}
	WARN_ON_ONCE(rc);
}

int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	int ret;

	switch (ue->type) {
	case KVM_IRQ_ROUTING_S390_ADAPTER:
		e->set = set_adapter_int;
		e->adapter.summary_addr = ue->u.adapter.summary_addr;
		e->adapter.ind_addr = ue->u.adapter.ind_addr;
		e->adapter.summary_offset = ue->u.adapter.summary_offset;
		e->adapter.ind_offset = ue->u.adapter.ind_offset;
		e->adapter.adapter_id = ue->u.adapter.adapter_id;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

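/*
 * There is no MSI on s390; the adapter entry handled above is the only
 * supported routing type, so plain MSI injection is simply rejected.
 */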
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
		int irq_source_id, int level, bool line_status)
{
	return -EINVAL;
}

int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_irq *buf;
	int r = 0;
	int n;

	buf = vmalloc(len);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user((void *) buf, irqstate, len)) {
		r = -EFAULT;
		goto out_free;
	}

	/*
	 * Don't allow setting the interrupt state
	 * when there are already interrupts pending
	 */
	spin_lock(&li->lock);
	if (li->pending_irqs) {
		r = -EBUSY;
		goto out_unlock;
	}

	for (n = 0; n < len / sizeof(*buf); n++) {
		r = do_inject_vcpu(vcpu, &buf[n]);
		if (r)
			break;
	}

out_unlock:
	spin_unlock(&li->lock);
out_free:
	vfree(buf);

	return r;
}

static void store_local_irq(struct kvm_s390_local_interrupt *li,
			    struct kvm_s390_irq *irq,
			    unsigned long irq_type)
{
	switch (irq_type) {
	case IRQ_PEND_MCHK_EX:
	case IRQ_PEND_MCHK_REP:
		irq->type = KVM_S390_MCHK;
		irq->u.mchk = li->irq.mchk;
		break;
	case IRQ_PEND_PROG:
		irq->type = KVM_S390_PROGRAM_INT;
		irq->u.pgm = li->irq.pgm;
		break;
	case IRQ_PEND_PFAULT_INIT:
		irq->type = KVM_S390_INT_PFAULT_INIT;
		irq->u.ext = li->irq.ext;
		break;
	case IRQ_PEND_EXT_EXTERNAL:
		irq->type = KVM_S390_INT_EXTERNAL_CALL;
		irq->u.extcall = li->irq.extcall;
		break;
	case IRQ_PEND_EXT_CLOCK_COMP:
		irq->type = KVM_S390_INT_CLOCK_COMP;
		break;
	case IRQ_PEND_EXT_CPU_TIMER:
		irq->type = KVM_S390_INT_CPU_TIMER;
		break;
	case IRQ_PEND_SIGP_STOP:
		irq->type = KVM_S390_SIGP_STOP;
		irq->u.stop = li->irq.stop;
		break;
	case IRQ_PEND_RESTART:
		irq->type = KVM_S390_RESTART;
		break;
	case IRQ_PEND_SET_PREFIX:
		irq->type = KVM_S390_SIGP_SET_PREFIX;
		irq->u.prefix = li->irq.prefix;
		break;
	}
}

int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
{
	int scn;
	unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	unsigned long pending_irqs;
	struct kvm_s390_irq irq;
	unsigned long irq_type;
	int cpuaddr;
	int n = 0;

	spin_lock(&li->lock);
	pending_irqs = li->pending_irqs;
	memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
	       sizeof(sigp_emerg_pending));
	spin_unlock(&li->lock);

	for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
		memset(&irq, 0, sizeof(irq));
		if (irq_type == IRQ_PEND_EXT_EMERGENCY)
			continue;
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
		for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
			memset(&irq, 0, sizeof(irq));
			if (n + sizeof(irq) > len)
				return -ENOBUFS;
			irq.type = KVM_S390_INT_EMERGENCY;
			irq.u.emerg.code = cpuaddr;
			if (copy_to_user(&buf[n], &irq, sizeof(irq)))
				return -EFAULT;
			n += sizeof(irq);
		}
	}

	if (sca_ext_call_pending(vcpu, &scn)) {
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		memset(&irq, 0, sizeof(irq));
		irq.type = KVM_S390_INT_EXTERNAL_CALL;
		irq.u.extcall.code = scn;
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	return n;
}
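
/*
 * SIGP emergency signals and a pending SCA external call are
 * reconstructed separately above because they are not stored as full
 * irq payloads: the emergencies live in a cpu-address bitmap and the
 * external call in the SIGP control block of the SCA.
 */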