// SPDX-License-Identifier: GPL-2.0
/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/log2.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/atomic.h>

#include <asm/paravirt.h>
#include <asm/qspinlock.h>

#include <xen/interface/xen.h>
#include <xen/events.h>

#include "xen-ops.h"
#include "debugfs.h"

static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(char *, irq_name);
static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest);
static bool xen_pvspin = true;

static void xen_qlock_kick(int cpu)
{
	int irq = per_cpu(lock_kicker_irq, cpu);

	/* Don't kick if the target's kicker interrupt is not initialized. */
	if (irq == -1)
		return;

	xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
}

/*
 * Halt the current CPU & release it back to the host
 */
static void xen_qlock_wait(u8 *byte, u8 val)
{
	int irq = __this_cpu_read(lock_kicker_irq);
	atomic_t *nest_cnt = this_cpu_ptr(&xen_qlock_wait_nest);

	/* If the kicker interrupt is not initialized yet, just spin. */
	if (irq == -1 || in_nmi())
		return;

	/* Detect reentry. */
	atomic_inc(nest_cnt);

	/* If the irq is already pending and this is not a nested call, clear it. */
	if (atomic_read(nest_cnt) == 1 && xen_test_irq_pending(irq)) {
		xen_clear_irq_pending(irq);
	} else if (READ_ONCE(*byte) == val) {
		/* Block until irq becomes pending (or a spurious wakeup) */
		xen_poll_irq(irq);
	}

	atomic_dec(nest_cnt);
}

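/*
 * The kicker IPI is only ever consumed by waiters polling the event
 * channel via xen_poll_irq(); the irq itself is kept disabled after it
 * is bound, so this handler should never actually run.
 */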
static irqreturn_t dummy_handler(int irq, void *dev_id)
{
	BUG();
	return IRQ_HANDLED;
}

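/*
 * Per-CPU setup: bind the XEN_SPIN_UNLOCK_VECTOR IPI to a (disabled)
 * event channel irq that xen_qlock_wait() can poll and xen_qlock_kick()
 * can raise.
 */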
void xen_init_lock_cpu(int cpu)
{
	int irq;
	char *name;

	if (!xen_pvspin) {
		if (cpu == 0)
			static_branch_disable(&virt_spin_lock_key);
		return;
	}

	WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
	     cpu, per_cpu(lock_kicker_irq, cpu));

	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
				     cpu,
				     dummy_handler,
				     IRQF_PERCPU|IRQF_NOBALANCING,
				     name,
				     NULL);

	if (irq >= 0) {
		disable_irq(irq); /* make sure it's never delivered */
		per_cpu(lock_kicker_irq, cpu) = irq;
		per_cpu(irq_name, cpu) = name;
	}

	printk("cpu %d spinlock event irq %d\n", cpu, irq);
}

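/*
 * Per-CPU teardown: release the kicker irq and the name string
 * allocated in xen_init_lock_cpu().
 */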
void xen_uninit_lock_cpu(int cpu)
{
	if (!xen_pvspin)
		return;

	unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
	per_cpu(lock_kicker_irq, cpu) = -1;
	kfree(per_cpu(irq_name, cpu));
	per_cpu(irq_name, cpu) = NULL;
}

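/*
 * Callee-save thunk for xen_vcpu_stolen(), wired up below as the
 * vcpu_is_preempted hook.
 */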
PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen);

/*
 * Our init of PV spinlocks is split into two init functions because we
 * use both paravirt patching and jump label patching, and all of this
 * has to be done before the SMP code is invoked.
 *
 * The paravirt patching needs to be done _before_ the alternative asm code
 * is started, otherwise we would not patch the core kernel code.
 */
void __init xen_init_spinlocks(void)
{
	/* No need for the pvqspinlock code if there is only 1 vCPU. */
	if (num_possible_cpus() == 1)
		xen_pvspin = false;

	if (!xen_pvspin) {
		printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
		return;
	}
	printk(KERN_DEBUG "xen: PV spinlocks enabled\n");

	__pv_init_lock_hash();
	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = xen_qlock_wait;
	pv_lock_ops.kick = xen_qlock_kick;
	pv_lock_ops.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
}

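/* Allow disabling PV spinlocks via the "xen_nopvspin" command line parameter. */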
static __init int xen_parse_nopvspin(char *arg)
{
	xen_pvspin = false;
	return 0;
}
early_param("xen_nopvspin", xen_parse_nopvspin);