// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SMP support for BPA machines.
 *
 * Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * Plus various changes from other IBM teams...
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/paca.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/rtas.h>
#include <asm/cputhreads.h>
#include <asm/code-patching.h>

#include "interrupt.h"
#include <asm/udbg.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

/*
 * The Primary thread of each non-boot processor was started from the OF client
 * interface by prom_hold_cpus and is spinning on secondary_hold_spinloop.
 */
static cpumask_t of_spin_map;

/**
 * smp_startup_cpu() - start the given cpu
 *
 * At boot time, there is nothing to do for primary threads which were
 * started from Open Firmware. For anything else, call RTAS with the
 * appropriate start location.
 *
 * Returns:
 *	0	- failure
 *	1	- success
 */
static inline int smp_startup_cpu(unsigned int lcpu)
{
	int status;
	unsigned long start_here =
		__pa(ppc_function_entry(generic_secondary_smp_init));
	unsigned int pcpu;
	int start_cpu;

	if (cpumask_test_cpu(lcpu, &of_spin_map))
		/* Already started by OF and sitting in spin loop */
		return 1;

	pcpu = get_hard_smp_processor_id(lcpu);

	/* Fixup atomic count: it exited inside IRQ handler. */
	task_thread_info(paca_ptrs[lcpu]->__current)->preempt_count = 0;

	/*
	 * If the RTAS start-cpu token does not exist then presume the
	 * cpu is already spinning.
	 */
	start_cpu = rtas_token("start-cpu");
	if (start_cpu == RTAS_UNKNOWN_SERVICE)
		return 1;

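	/*
	 * RTAS "start-cpu" takes three inputs: the hardware cpu id to
	 * start, the real address it should begin executing at, and the
	 * value the started cpu will find in r3.
	 */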
	status = rtas_call(start_cpu, 3, 1, NULL, pcpu, start_here, lcpu);
	if (status != 0) {
		printk(KERN_ERR "start-cpu failed: %i\n", status);
		return 0;
	}

	return 1;
}

static void smp_cell_setup_cpu(int cpu)
{
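	/*
	 * Enable the Cell Internal Interrupt Controller (IIC) on this cpu;
	 * the boot cpu's IIC was already enabled during platform interrupt
	 * setup.
	 */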
	if (cpu != boot_cpuid)
		iic_setup_cpu();

	/*
	 * change default DABRX to allow user watchpoints
	 */
	mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER);
}

static int smp_cell_kick_cpu(int nr)
{
	if (nr < 0 || nr >= nr_cpu_ids)
		return -EINVAL;

	if (!smp_startup_cpu(nr))
		return -ENOENT;

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	paca_ptrs[nr]->cpu_start = 1;

	return 0;
}

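/* SMP ops for Cell/BPA: IPIs are delivered through the Internal Interrupt Controller (IIC) */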
static struct smp_ops_t bpa_iic_smp_ops = {
	.message_pass	= iic_message_pass,
	.probe		= iic_request_IPIs,
	.kick_cpu	= smp_cell_kick_cpu,
	.setup_cpu	= smp_cell_setup_cpu,
	.cpu_bootable	= smp_generic_cpu_bootable,
};

/* This is called very early */
void __init smp_init_cell(void)
{
	int i;

	DBG(" -> smp_init_cell()\n");

	smp_ops = &bpa_iic_smp_ops;

	/* Mark threads which are still spinning in hold loops. */
	if (cpu_has_feature(CPU_FTR_SMT)) {
		for_each_present_cpu(i) {
			if (cpu_thread_in_core(i) == 0)
				cpumask_set_cpu(i, &of_spin_map);
		}
	} else
		cpumask_copy(&of_spin_map, cpu_present_mask);

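	/* The boot cpu is already running here, not held in the OF spin loop. */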
	cpumask_clear_cpu(boot_cpuid, &of_spin_map);

	/* Non-lpar has additional take/give timebase */
	if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
		smp_ops->give_timebase = rtas_give_timebase;
		smp_ops->take_timebase = rtas_take_timebase;
	}

	DBG(" <- smp_init_cell()\n");
}