| xj | b04a402 | 2021-11-25 15:01:52 +0800 | [diff] [blame] | 1 | /* | 
 | 2 |  * Copyright (c) 2013-2014 Linaro Ltd. | 
 | 3 |  * Copyright (c) 2013-2014 Hisilicon Limited. | 
 | 4 |  * | 
 | 5 |  * This program is free software; you can redistribute it and/or modify it | 
 | 6 |  * under the terms and conditions of the GNU General Public License, | 
 | 7 |  * version 2, as published by the Free Software Foundation. | 
 | 8 |  */ | 
 | 9 | #include <linux/init.h> | 
 | 10 | #include <linux/smp.h> | 
 | 11 | #include <linux/delay.h> | 
 | 12 | #include <linux/io.h> | 
 | 13 | #include <linux/memblock.h> | 
 | 14 | #include <linux/of_address.h> | 
 | 15 |  | 
 | 16 | #include <asm/cputype.h> | 
 | 17 | #include <asm/cp15.h> | 
 | 18 | #include <asm/cacheflush.h> | 
 | 19 | #include <asm/smp.h> | 
 | 20 | #include <asm/smp_plat.h> | 
 | 21 |  | 
 | 22 | #include "core.h" | 
 | 23 |  | 
 | 24 | /* bits definition in SC_CPU_RESET_REQ[x]/SC_CPU_RESET_DREQ[x] | 
 | 25 |  * 1 -- unreset; 0 -- reset | 
 | 26 |  */ | 
 | 27 | #define CORE_RESET_BIT(x)		(1 << x) | 
 | 28 | #define NEON_RESET_BIT(x)		(1 << (x + 4)) | 
 | 29 | #define CORE_DEBUG_RESET_BIT(x)		(1 << (x + 9)) | 
 | 30 | #define CLUSTER_L2_RESET_BIT		(1 << 8) | 
 | 31 | #define CLUSTER_DEBUG_RESET_BIT		(1 << 13) | 
 | 32 |  | 
 | 33 | /* | 
 | 34 |  * bits definition in SC_CPU_RESET_STATUS[x] | 
 | 35 |  * 1 -- reset status; 0 -- unreset status | 
 | 36 |  */ | 
 | 37 | #define CORE_RESET_STATUS(x)		(1 << x) | 
 | 38 | #define NEON_RESET_STATUS(x)		(1 << (x + 4)) | 
 | 39 | #define CORE_DEBUG_RESET_STATUS(x)	(1 << (x + 9)) | 
 | 40 | #define CLUSTER_L2_RESET_STATUS		(1 << 8) | 
 | 41 | #define CLUSTER_DEBUG_RESET_STATUS	(1 << 13) | 
 | 42 | #define CORE_WFI_STATUS(x)		(1 << (x + 16)) | 
 | 43 | #define CORE_WFE_STATUS(x)		(1 << (x + 20)) | 
 | 44 | #define CORE_DEBUG_ACK(x)		(1 << (x + 24)) | 
 | 45 |  | 
 | 46 | #define SC_CPU_RESET_REQ(x)		(0x520 + (x << 3))	/* reset */ | 
 | 47 | #define SC_CPU_RESET_DREQ(x)		(0x524 + (x << 3))	/* unreset */ | 
 | 48 | #define SC_CPU_RESET_STATUS(x)		(0x1520 + (x << 3)) | 
 | 49 |  | 
 | 50 | #define FAB_SF_MODE			0x0c | 
 | 51 | #define FAB_SF_INVLD			0x10 | 
 | 52 |  | 
 | 53 | /* bits definition in FB_SF_INVLD */ | 
 | 54 | #define FB_SF_INVLD_START		(1 << 8) | 
 | 55 |  | 
 | 56 | #define HIP04_MAX_CLUSTERS		4 | 
 | 57 | #define HIP04_MAX_CPUS_PER_CLUSTER	4 | 
 | 58 |  | 
 | 59 | #define POLL_MSEC	10 | 
 | 60 | #define TIMEOUT_MSEC	1000 | 
 | 61 |  | 
/* iomapped bases of the system controller and the fabric (hip04_smp_init) */
static void __iomem *sysctrl, *fabric;
/* per-core usage count: 0 == core down, >0 == core up or being booted */
static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER];
/* serializes hip04_cpu_table and the boot/kill register sequences */
static DEFINE_SPINLOCK(boot_lock);
static u32 fabric_phys_addr;
/*
 * [0]: bootwrapper physical address
 * [1]: bootwrapper size
 * [2]: relocation address
 * [3]: relocation size
 */
static u32 hip04_boot_method[4];
 | 73 |  | 
 | 74 | static bool hip04_cluster_is_down(unsigned int cluster) | 
 | 75 | { | 
 | 76 | 	int i; | 
 | 77 |  | 
 | 78 | 	for (i = 0; i < HIP04_MAX_CPUS_PER_CLUSTER; i++) | 
 | 79 | 		if (hip04_cpu_table[cluster][i]) | 
 | 80 | 			return false; | 
 | 81 | 	return true; | 
 | 82 | } | 
 | 83 |  | 
 | 84 | static void hip04_set_snoop_filter(unsigned int cluster, unsigned int on) | 
 | 85 | { | 
 | 86 | 	unsigned long data; | 
 | 87 |  | 
 | 88 | 	if (!fabric) | 
 | 89 | 		BUG(); | 
 | 90 | 	data = readl_relaxed(fabric + FAB_SF_MODE); | 
 | 91 | 	if (on) | 
 | 92 | 		data |= 1 << cluster; | 
 | 93 | 	else | 
 | 94 | 		data &= ~(1 << cluster); | 
 | 95 | 	writel_relaxed(data, fabric + FAB_SF_MODE); | 
 | 96 | 	do { | 
 | 97 | 		cpu_relax(); | 
 | 98 | 	} while (data != readl_relaxed(fabric + FAB_SF_MODE)); | 
 | 99 | } | 
 | 100 |  | 
/*
 * Power up one secondary core and release it from reset.
 *
 * @l_cpu: logical CPU number; translated to (cluster, cpu) via MPIDR
 *         affinity levels 1 and 0.
 *
 * Returns 0 on success, -ENODEV if the system controller was never
 * mapped, -EINVAL if the ids are out of range.
 */
static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle)
{
	unsigned int mpidr, cpu, cluster;
	unsigned long data;
	void __iomem *sys_dreq, *sys_status;

	mpidr = cpu_logical_map(l_cpu);
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	if (!sysctrl)
		return -ENODEV;
	if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
		return -EINVAL;

	spin_lock_irq(&boot_lock);

	/* Non-zero count: core already up (or being brought up); just count. */
	if (hip04_cpu_table[cluster][cpu])
		goto out;

	sys_dreq = sysctrl + SC_CPU_RESET_DREQ(cluster);
	sys_status = sysctrl + SC_CPU_RESET_STATUS(cluster);
	if (hip04_cluster_is_down(cluster)) {
		/*
		 * First core of the cluster: release cluster debug logic
		 * from reset, wait for the status bit to clear, then let
		 * the fabric snoop this cluster.
		 */
		data = CLUSTER_DEBUG_RESET_BIT;
		writel_relaxed(data, sys_dreq);
		do {
			cpu_relax();
			data = readl_relaxed(sys_status);
		} while (data & CLUSTER_DEBUG_RESET_STATUS);
		hip04_set_snoop_filter(cluster, 1);
	}

	/* De-assert the core, NEON and debug resets for this core ... */
	data = CORE_RESET_BIT(cpu) | NEON_RESET_BIT(cpu) | \
	       CORE_DEBUG_RESET_BIT(cpu);
	writel_relaxed(data, sys_dreq);
	/* ... and wait until the status register no longer matches them. */
	do {
		cpu_relax();
	} while (data == readl_relaxed(sys_status));

	/*
	 * We may fail to power up core again without this delay.
	 * It's not mentioned in document. It's found by test.
	 */
	udelay(20);

	/* Kick the freshly unreset core awake with the wakeup IPI. */
	arch_send_wakeup_ipi_mask(cpumask_of(l_cpu));

out:
	hip04_cpu_table[cluster][cpu]++;
	spin_unlock_irq(&boot_lock);

	return 0;
}
 | 154 |  | 
 | 155 | #ifdef CONFIG_HOTPLUG_CPU | 
/*
 * Runs on the dying CPU itself: drop the usage count, exit coherency
 * (flushing all cache levels when this is the last core of the cluster),
 * then spin in WFI until hip04_cpu_kill() on another CPU asserts reset.
 */
static void hip04_cpu_die(unsigned int l_cpu)
{
	unsigned int mpidr, cpu, cluster;
	bool last_man;

	mpidr = cpu_logical_map(l_cpu);
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	spin_lock(&boot_lock);
	hip04_cpu_table[cluster][cpu]--;
	if (hip04_cpu_table[cluster][cpu] == 1) {
		/* A power_up request went ahead of us. */
		spin_unlock(&boot_lock);
		return;
	} else if (hip04_cpu_table[cluster][cpu] > 1) {
		pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu);
		BUG();
	}

	last_man = hip04_cluster_is_down(cluster);
	spin_unlock(&boot_lock);
	if (last_man) {
		/* Since it's Cortex A15, disable L2 prefetching. */
		asm volatile(
		"mcr	p15, 1, %0, c15, c0, 3 \n\t"
		"isb	\n\t"
		"dsb	"
		: : "r" (0x400) );
		/* Last core down: flush and disable all cache levels. */
		v7_exit_coherency_flush(all);
	} else {
		/* Other cores still up: flush only to the Level of Unification. */
		v7_exit_coherency_flush(louis);
	}

	/* Park here; hip04_cpu_kill() will put this core back into reset. */
	for (;;)
		wfi();
}
 | 193 |  | 
/*
 * Runs on a surviving CPU to finish taking @l_cpu offline: poll until the
 * dying core reaches WFI, assert its resets, and stop the fabric snooping
 * the cluster if it was the last core there.
 *
 * Returns 1 on success, 0 on timeout or if the core was re-powered in the
 * meantime.
 */
static int hip04_cpu_kill(unsigned int l_cpu)
{
	unsigned int mpidr, cpu, cluster;
	unsigned int data, tries, count;

	mpidr = cpu_logical_map(l_cpu);
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	BUG_ON(cluster >= HIP04_MAX_CLUSTERS ||
	       cpu >= HIP04_MAX_CPUS_PER_CLUSTER);

	count = TIMEOUT_MSEC / POLL_MSEC;
	spin_lock_irq(&boot_lock);
	for (tries = 0; tries < count; tries++) {
		/* A concurrent power-up beat us to it; abort the kill. */
		if (hip04_cpu_table[cluster][cpu])
			goto err;
		cpu_relax();
		data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
		if (data & CORE_WFI_STATUS(cpu))
			break;
		/* Drop the lock while sleeping so boot/die paths can run. */
		spin_unlock_irq(&boot_lock);
		/* Wait for clean L2 when the whole cluster is down. */
		msleep(POLL_MSEC);
		spin_lock_irq(&boot_lock);
	}
	if (tries >= count)
		goto err;
	/* Core is in WFI: assert its core, NEON and debug resets. */
	data = CORE_RESET_BIT(cpu) | NEON_RESET_BIT(cpu) | \
	       CORE_DEBUG_RESET_BIT(cpu);
	writel_relaxed(data, sysctrl + SC_CPU_RESET_REQ(cluster));
	/* Poll until the status register confirms the core is in reset. */
	for (tries = 0; tries < count; tries++) {
		cpu_relax();
		data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
		if (data & CORE_RESET_STATUS(cpu))
			break;
	}
	if (tries >= count)
		goto err;
	/* Last core of the cluster gone: disable snooping for it. */
	if (hip04_cluster_is_down(cluster))
		hip04_set_snoop_filter(cluster, 0);
	spin_unlock_irq(&boot_lock);
	return 1;
err:
	spin_unlock_irq(&boot_lock);
	return 0;
}
 | 240 | #endif | 
 | 241 |  | 
/* SMP operations registered with the core code by hip04_smp_init(). */
static const struct smp_operations hip04_smp_ops __initconst = {
	.smp_boot_secondary	= hip04_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= hip04_cpu_die,
	.cpu_kill		= hip04_cpu_kill,
#endif
};
 | 249 |  | 
 | 250 | static bool __init hip04_cpu_table_init(void) | 
 | 251 | { | 
 | 252 | 	unsigned int mpidr, cpu, cluster; | 
 | 253 |  | 
 | 254 | 	mpidr = read_cpuid_mpidr(); | 
 | 255 | 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); | 
 | 256 | 	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); | 
 | 257 |  | 
 | 258 | 	if (cluster >= HIP04_MAX_CLUSTERS || | 
 | 259 | 	    cpu >= HIP04_MAX_CPUS_PER_CLUSTER) { | 
 | 260 | 		pr_err("%s: boot CPU is out of bound!\n", __func__); | 
 | 261 | 		return false; | 
 | 262 | 	} | 
 | 263 | 	hip04_set_snoop_filter(cluster, 1); | 
 | 264 | 	hip04_cpu_table[cluster][cpu] = 1; | 
 | 265 | 	return true; | 
 | 266 | } | 
 | 267 |  | 
/*
 * Map the bootwrapper, system controller and fabric resources described
 * in the device tree, record the boot CPU, publish the secondary entry
 * point through the relocation area and register the SMP operations.
 *
 * NOTE(review): the device_node references returned by
 * of_find_compatible_node() are never dropped with of_node_put();
 * presumably acceptable for one-shot boot-time init, but worth confirming.
 */
static int __init hip04_smp_init(void)
{
	struct device_node *np, *np_sctl, *np_fab;
	struct resource fab_res;
	void __iomem *relocation;
	int ret = -ENODEV;

	np = of_find_compatible_node(NULL, NULL, "hisilicon,hip04-bootwrapper");
	if (!np)
		goto err;
	/* boot-method = <bootwrapper addr, size, relocation addr, size> */
	ret = of_property_read_u32_array(np, "boot-method",
					 &hip04_boot_method[0], 4);
	if (ret)
		goto err;

	ret = -ENODEV;
	np_sctl = of_find_compatible_node(NULL, NULL, "hisilicon,sysctrl");
	if (!np_sctl)
		goto err;
	np_fab = of_find_compatible_node(NULL, NULL, "hisilicon,hip04-fabric");
	if (!np_fab)
		goto err;

	/* Keep the bootwrapper image out of the kernel's memory pool. */
	ret = memblock_reserve(hip04_boot_method[0], hip04_boot_method[1]);
	if (ret)
		goto err;

	relocation = ioremap(hip04_boot_method[2], hip04_boot_method[3]);
	if (!relocation) {
		pr_err("failed to map relocation space\n");
		ret = -ENOMEM;
		goto err_reloc;
	}
	sysctrl = of_iomap(np_sctl, 0);
	if (!sysctrl) {
		pr_err("failed to get sysctrl base\n");
		ret = -ENOMEM;
		goto err_sysctrl;
	}
	ret = of_address_to_resource(np_fab, 0, &fab_res);
	if (ret) {
		pr_err("failed to get fabric base phys\n");
		goto err_fabric;
	}
	fabric_phys_addr = fab_res.start;
	/* Push the fabric address to RAM so non-coherent observers see it. */
	sync_cache_w(&fabric_phys_addr);
	fabric = of_iomap(np_fab, 0);
	if (!fabric) {
		pr_err("failed to get fabric base\n");
		ret = -ENOMEM;
		goto err_fabric;
	}

	if (!hip04_cpu_table_init()) {
		ret = -EINVAL;
		goto err_table;
	}

	/*
	 * Fill the instruction address that is used after secondary core
	 * out of reset.
	 */
	writel_relaxed(hip04_boot_method[0], relocation);
	writel_relaxed(0xa5a5a5a5, relocation + 4);	/* magic number */
	writel_relaxed(__pa_symbol(secondary_startup), relocation + 8);
	writel_relaxed(0, relocation + 12);
	iounmap(relocation);

	smp_set_ops(&hip04_smp_ops);
	return ret;	/* ret is 0 here: last failable call succeeded */
err_table:
	iounmap(fabric);
err_fabric:
	iounmap(sysctrl);
err_sysctrl:
	iounmap(relocation);
err_reloc:
	memblock_free(hip04_boot_method[0], hip04_boot_method[1]);
err:
	return ret;
}
early_initcall(hip04_smp_init);