1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Contains CPU specific errata definitions
4 *
5 * Copyright (C) 2014 ARM Ltd.
6 */
7
8#include <linux/arm-smccc.h>
9#include <linux/psci.h>
10#include <linux/types.h>
11#include <linux/cpu.h>
12#include <asm/cpu.h>
13#include <asm/cputype.h>
14#include <asm/cpufeature.h>
15#include <asm/smp_plat.h>
16#include <asm/vectors.h>
17
18static bool __maybe_unused
19is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
20{
21 const struct arm64_midr_revidr *fix;
22 u32 midr = read_cpuid_id(), revidr;
23
24 WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
25 if (!is_midr_in_range(midr, &entry->midr_range))
26 return false;
27
28 midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
29 revidr = read_cpuid(REVIDR_EL1);
30 for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
31 if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
32 return false;
33
34 return true;
35}
36
37static bool __maybe_unused
38is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
39 int scope)
40{
41 WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
42 return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
43}
44
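/*
 * Qualcomm Kryo parts share the implementer, architecture and top
 * nibble of the part number; the remaining part-number bits differ
 * between Kryo variants, so only compare the common fields.
 */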
45static bool __maybe_unused
46is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
47{
48 u32 model;
49
50 WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
51
52 model = read_cpuid_id();
53 model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
54 MIDR_ARCHITECTURE_MASK;
55
56 return model == entry->midr_range.model;
57}
58
59static bool
60has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
61 int scope)
62{
63 u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
64 u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
65 u64 ctr_raw, ctr_real;
66
67 WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
68
69 /*
 70 * We want to make sure that all the CPUs in the system expose
 71 * a consistent CTR_EL0 so that applications behave correctly when
 72 * migrated between CPUs.
 73 *
 74 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
75 *
76 * 1) It is safe if the system doesn't support IDC, as CPU anyway
77 * reports IDC = 0, consistent with the rest.
78 *
79 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
80 * access on this CPU via the ARM64_HAS_CACHE_IDC capability.
81 *
82 * So, we need to make sure either the raw CTR_EL0 or the effective
83 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
84 */
85 ctr_raw = read_cpuid_cachetype() & mask;
86 ctr_real = read_cpuid_effective_cachetype() & mask;
87
88 return (ctr_real != sys) && (ctr_raw != sys);
89}
90
91static void
92cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
93{
94 u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
95 bool enable_uct_trap = false;
96
97 /* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
98 if ((read_cpuid_cachetype() & mask) !=
99 (arm64_ftr_reg_ctrel0.sys_val & mask))
100 enable_uct_trap = true;
101
102 /* ... or if the system is affected by an erratum */
103 if (cap->capability == ARM64_WORKAROUND_1542419)
104 enable_uct_trap = true;
105
106 if (enable_uct_trap)
107 sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
108}
109
110atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
111
112#include <asm/mmu_context.h>
113#include <asm/cacheflush.h>
114
115DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
116
117#ifdef CONFIG_KVM_INDIRECT_VECTORS
118extern char __smccc_workaround_1_smc_start[];
119extern char __smccc_workaround_1_smc_end[];
120extern char __smccc_workaround_3_smc_start[];
121extern char __smccc_workaround_3_smc_end[];
122extern char __spectre_bhb_loop_k8_start[];
123extern char __spectre_bhb_loop_k8_end[];
124extern char __spectre_bhb_loop_k24_start[];
125extern char __spectre_bhb_loop_k24_end[];
126extern char __spectre_bhb_loop_k32_start[];
127extern char __spectre_bhb_loop_k32_end[];
128extern char __spectre_bhb_clearbhb_start[];
129extern char __spectre_bhb_clearbhb_end[];
130
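/*
 * Copy a hardening template into the given 2K EL2 vector slot,
 * replicating it at each 128-byte vector entry, then invalidate
 * the I-cache for the slot.
 */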
131static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
132 const char *hyp_vecs_end)
133{
134 void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
135 int i;
136
137 for (i = 0; i < SZ_2K; i += 0x80)
138 memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
139
140 __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
141}
142
143static DEFINE_RAW_SPINLOCK(bp_lock);
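/*
 * Install the branch-predictor hardening callback for this CPU. When
 * hyp vector templates are provided, reuse an EL2 vector slot already
 * populated with the same callback, or allocate and fill a new one.
 */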
144static void install_bp_hardening_cb(bp_hardening_cb_t fn,
145 const char *hyp_vecs_start,
146 const char *hyp_vecs_end)
147{
148 int cpu, slot = -1;
149
150 /*
151 * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
152 * we're a guest. Skip the hyp-vectors work.
153 */
154 if (!hyp_vecs_start) {
155 __this_cpu_write(bp_hardening_data.fn, fn);
156 return;
157 }
158
159 raw_spin_lock(&bp_lock);
160 for_each_possible_cpu(cpu) {
161 if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
162 slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
163 break;
164 }
165 }
166
167 if (slot == -1) {
168 slot = atomic_inc_return(&arm64_el2_vector_last_slot);
169 BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
170 __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
171 }
172
173 if (fn != __this_cpu_read(bp_hardening_data.fn)) {
174 __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
175 __this_cpu_write(bp_hardening_data.fn, fn);
176 __this_cpu_write(bp_hardening_data.template_start,
177 hyp_vecs_start);
178 }
179 raw_spin_unlock(&bp_lock);
180}
181#else
182#define __smccc_workaround_1_smc_start NULL
183#define __smccc_workaround_1_smc_end NULL
184
185static void install_bp_hardening_cb(bp_hardening_cb_t fn,
186 const char *hyp_vecs_start,
187 const char *hyp_vecs_end)
188{
189 __this_cpu_write(bp_hardening_data.fn, fn);
190}
191#endif /* CONFIG_KVM_INDIRECT_VECTORS */
192
193#include <uapi/linux/psci.h>
194#include <linux/arm-smccc.h>
195#include <linux/psci.h>
196
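/*
 * Invalidate the branch predictor on the calling CPU through the
 * firmware's ARCH_WORKAROUND_1 call, using the SMC or HVC conduit.
 */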
197static void call_smc_arch_workaround_1(void)
198{
199 arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
200}
201
202static void call_hvc_arch_workaround_1(void)
203{
204 arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
205}
206
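/*
 * Overwrite the Falkor return-address (link) stack with benign entries
 * by issuing 16 branch-and-link instructions that target the very next
 * instruction, then restore the original link register.
 */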
207static void qcom_link_stack_sanitization(void)
208{
209 u64 tmp;
210
211 asm volatile("mov %0, x30 \n"
212 ".rept 16 \n"
213 "bl . + 4 \n"
214 ".endr \n"
215 "mov x30, %0 \n"
216 : "=&r" (tmp));
217}
218
219static bool __nospectre_v2;
220static int __init parse_nospectre_v2(char *str)
221{
222 __nospectre_v2 = true;
223 return 0;
224}
225early_param("nospectre_v2", parse_nospectre_v2);
226
227/*
228 * -1: No workaround available from firmware
229 *  0: No workaround required
230 *  1: Workaround installed
231 */
232static int detect_harden_bp_fw(void)
233{
234 bp_hardening_cb_t cb;
235 void *smccc_start, *smccc_end;
236 struct arm_smccc_res res;
237 u32 midr = read_cpuid_id();
238
239 if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
240 return -1;
241
242 switch (psci_ops.conduit) {
243 case PSCI_CONDUIT_HVC:
244 arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
245 ARM_SMCCC_ARCH_WORKAROUND_1, &res);
246 switch ((int)res.a0) {
247 case 1:
248 /* Firmware says we're just fine */
249 return 0;
250 case 0:
251 cb = call_hvc_arch_workaround_1;
252 /* This is a guest, no need to patch KVM vectors */
253 smccc_start = NULL;
254 smccc_end = NULL;
255 break;
256 default:
257 return -1;
258 }
259 break;
260
261 case PSCI_CONDUIT_SMC:
262 arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
263 ARM_SMCCC_ARCH_WORKAROUND_1, &res);
264 switch ((int)res.a0) {
265 case 1:
266 /* Firmware says we're just fine */
267 return 0;
268 case 0:
269 cb = call_smc_arch_workaround_1;
270 smccc_start = __smccc_workaround_1_smc_start;
271 smccc_end = __smccc_workaround_1_smc_end;
272 break;
273 default:
274 return -1;
275 }
276 break;
277
278 default:
279 return -1;
280 }
281
282 if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
283 ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
284 cb = qcom_link_stack_sanitization;
285
286 if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
287 install_bp_hardening_cb(cb, smccc_start, smccc_end);
288
289 return 1;
290}
291
292DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
293
294int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
295static bool __ssb_safe = true;
296
297static const struct ssbd_options {
298 const char *str;
299 int state;
300} ssbd_options[] = {
301 { "force-on", ARM64_SSBD_FORCE_ENABLE, },
302 { "force-off", ARM64_SSBD_FORCE_DISABLE, },
303 { "kernel", ARM64_SSBD_KERNEL, },
304};
305
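/*
 * Parse the "ssbd=" kernel command-line option, e.g. "ssbd=force-off",
 * into the corresponding SSBD policy above.
 */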
306static int __init ssbd_cfg(char *buf)
307{
308 int i;
309
310 if (!buf || !buf[0])
311 return -EINVAL;
312
313 for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
314 int len = strlen(ssbd_options[i].str);
315
316 if (strncmp(buf, ssbd_options[i].str, len))
317 continue;
318
319 ssbd_state = ssbd_options[i].state;
320 return 0;
321 }
322
323 return -EINVAL;
324}
325early_param("ssbd", ssbd_cfg);
326
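/*
 * Alternative callback: rewrite the single patched instruction as an
 * HVC or SMC to match the SMCCC conduit reported by firmware; leave it
 * untouched if no conduit is available.
 */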
327void __init arm64_update_smccc_conduit(struct alt_instr *alt,
328 __le32 *origptr, __le32 *updptr,
329 int nr_inst)
330{
331 u32 insn;
332
333 BUG_ON(nr_inst != 1);
334
335 switch (psci_ops.conduit) {
336 case PSCI_CONDUIT_HVC:
337 insn = aarch64_insn_get_hvc_value();
338 break;
339 case PSCI_CONDUIT_SMC:
340 insn = aarch64_insn_get_smc_value();
341 break;
342 default:
343 return;
344 }
345
346 *updptr = cpu_to_le32(insn);
347}
348
349void __init arm64_enable_wa2_handling(struct alt_instr *alt,
350 __le32 *origptr, __le32 *updptr,
351 int nr_inst)
352{
353 BUG_ON(nr_inst != 1);
354 /*
355 * Only allow mitigation on EL1 entry/exit and guest
356 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
357 * be flipped.
358 */
359 if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
360 *updptr = cpu_to_le32(aarch64_insn_gen_nop());
361}
362
363void arm64_set_ssbd_mitigation(bool state)
364{
365 if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
366 pr_info_once("SSBD disabled by kernel configuration\n");
367 return;
368 }
369
370 if (this_cpu_has_cap(ARM64_SSBS)) {
371 if (state)
372 asm volatile(SET_PSTATE_SSBS(0));
373 else
374 asm volatile(SET_PSTATE_SSBS(1));
375
376 /*
377 * SSBS is self-synchronizing and is intended to affect
378 * subsequent speculative instructions, but some CPUs can
379 * speculate with a stale value of SSBS.
380 *
381 * Mitigate this with an unconditional speculation barrier, as
382 * CPUs could mis-speculate branches and bypass a conditional
383 * barrier.
384 */
385 if (IS_ENABLED(CONFIG_ARM64_ERRATUM_3194386))
386 spec_bar();
387
388 return;
389 }
390
391 switch (psci_ops.conduit) {
392 case PSCI_CONDUIT_HVC:
393 arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
394 break;
395
396 case PSCI_CONDUIT_SMC:
397 arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
398 break;
399
400 default:
401 WARN_ON_ONCE(1);
402 break;
403 }
404}
405
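/*
 * Decide whether this CPU needs the Speculative Store Bypass Disable
 * workaround: prefer the SSBS control when present, otherwise ask
 * firmware about ARCH_WORKAROUND_2 and honour the ssbd= policy.
 */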
406static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
407 int scope)
408{
409 struct arm_smccc_res res;
410 bool required = true;
411 s32 val;
412 bool this_cpu_safe = false;
413
414 WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
415
416 if (cpu_mitigations_off())
417 ssbd_state = ARM64_SSBD_FORCE_DISABLE;
418
419 /* delay setting __ssb_safe until we get a firmware response */
420 if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
421 this_cpu_safe = true;
422
423 if (this_cpu_has_cap(ARM64_SSBS)) {
424 if (!this_cpu_safe)
425 __ssb_safe = false;
426 required = false;
427 goto out_printmsg;
428 }
429
430 if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
431 ssbd_state = ARM64_SSBD_UNKNOWN;
432 if (!this_cpu_safe)
433 __ssb_safe = false;
434 return false;
435 }
436
437 switch (psci_ops.conduit) {
438 case PSCI_CONDUIT_HVC:
439 arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
440 ARM_SMCCC_ARCH_WORKAROUND_2, &res);
441 break;
442
443 case PSCI_CONDUIT_SMC:
444 arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
445 ARM_SMCCC_ARCH_WORKAROUND_2, &res);
446 break;
447
448 default:
449 ssbd_state = ARM64_SSBD_UNKNOWN;
450 if (!this_cpu_safe)
451 __ssb_safe = false;
452 return false;
453 }
454
455 val = (s32)res.a0;
456
457 switch (val) {
458 case SMCCC_RET_NOT_SUPPORTED:
459 ssbd_state = ARM64_SSBD_UNKNOWN;
460 if (!this_cpu_safe)
461 __ssb_safe = false;
462 return false;
463
464 /* machines with mixed mitigation requirements must not return this */
465 case SMCCC_RET_NOT_REQUIRED:
466 pr_info_once("%s mitigation not required\n", entry->desc);
467 ssbd_state = ARM64_SSBD_MITIGATED;
468 return false;
469
470 case SMCCC_RET_SUCCESS:
471 __ssb_safe = false;
472 required = true;
473 break;
474
475 case 1: /* Mitigation not required on this CPU */
476 required = false;
477 break;
478
479 default:
480 WARN_ON(1);
481 if (!this_cpu_safe)
482 __ssb_safe = false;
483 return false;
484 }
485
486 switch (ssbd_state) {
487 case ARM64_SSBD_FORCE_DISABLE:
488 arm64_set_ssbd_mitigation(false);
489 required = false;
490 break;
491
492 case ARM64_SSBD_KERNEL:
493 if (required) {
494 __this_cpu_write(arm64_ssbd_callback_required, 1);
495 arm64_set_ssbd_mitigation(true);
496 }
497 break;
498
499 case ARM64_SSBD_FORCE_ENABLE:
500 arm64_set_ssbd_mitigation(true);
501 required = true;
502 break;
503
504 default:
505 WARN_ON(1);
506 break;
507 }
508
509out_printmsg:
510 switch (ssbd_state) {
511 case ARM64_SSBD_FORCE_DISABLE:
512 pr_info_once("%s disabled from command-line\n", entry->desc);
513 break;
514
515 case ARM64_SSBD_FORCE_ENABLE:
516 pr_info_once("%s forced from command-line\n", entry->desc);
517 break;
518 }
519
520 return required;
521}
522
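/*
 * Re-run the matches() callback at enable time so that late-onlined
 * CPUs apply the mitigation, unless SSBD was force-disabled.
 */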
523static void cpu_enable_ssbd_mitigation(const struct arm64_cpu_capabilities *cap)
524{
525 if (ssbd_state != ARM64_SSBD_FORCE_DISABLE)
526 cap->matches(cap, SCOPE_LOCAL_CPU);
527}
528
529/* known invulnerable cores */
530static const struct midr_range arm64_ssb_cpus[] = {
531 MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
532 MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
533 MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
534 MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
535 {},
536};
537
538#ifdef CONFIG_ARM64_ERRATUM_1463225
539DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
540
541static bool
542has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
543 int scope)
544{
545 u32 midr = read_cpuid_id();
546 /* Cortex-A76 r0p0 - r3p1 */
547 struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);
548
549 WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
550 return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
551}
552#endif
553
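/*
 * Clearing SCTLR_EL1.UCI makes EL0 cache maintenance instructions
 * trap so that the kernel can emulate them.
 */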
554static void __maybe_unused
555cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
556{
557 sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
558}
559
560#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
561 .matches = is_affected_midr_range, \
562 .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
563
564#define CAP_MIDR_ALL_VERSIONS(model) \
565 .matches = is_affected_midr_range, \
566 .midr_range = MIDR_ALL_VERSIONS(model)
567
568#define MIDR_FIXED(rev, revidr_mask) \
569 .fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}
570
571#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
572 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
573 CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)
574
575#define CAP_MIDR_RANGE_LIST(list) \
576 .matches = is_affected_midr_range_list, \
577 .midr_range_list = list
578
579/* Errata affecting a range of revisions of a given model variant */
580#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max) \
581 ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)
582
583/* Errata affecting a single variant/revision of a model */
584#define ERRATA_MIDR_REV(model, var, rev) \
585 ERRATA_MIDR_RANGE(model, var, rev, var, rev)
586
587/* Errata affecting all variants/revisions of a given model */
588#define ERRATA_MIDR_ALL_VERSIONS(model) \
589 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
590 CAP_MIDR_ALL_VERSIONS(model)
591
592/* Errata affecting a list of MIDR ranges that share the same workaround */
593#define ERRATA_MIDR_RANGE_LIST(midr_list) \
594 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
595 CAP_MIDR_RANGE_LIST(midr_list)
596
597/* Track overall mitigation state. We are only mitigated if all booted cores are ok. */
598static bool __hardenbp_enab = true;
599static bool __spectrev2_safe = true;
600
601int get_spectre_v2_workaround_state(void)
602{
603 if (__spectrev2_safe)
604 return ARM64_BP_HARDEN_NOT_REQUIRED;
605
606 if (!__hardenbp_enab)
607 return ARM64_BP_HARDEN_UNKNOWN;
608
609 return ARM64_BP_HARDEN_WA_NEEDED;
610}
611
612/*
613 * List of CPUs that do not need any Spectre-v2 mitigation at all.
614 */
615static const struct midr_range spectre_v2_safe_list[] = {
616 MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
617 MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
618 MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
619 MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
620 MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
621 { /* sentinel */ }
622};
623
624/*
625 * Track overall bp hardening for all heterogeneous cores in the machine.
626 * We are only considered "safe" if all booted cores are known safe.
627 */
628static bool __maybe_unused
629check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
630{
631 int need_wa;
632
633 WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
634
635 /* If the CPU has CSV2 set, we're safe */
636 if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
637 ID_AA64PFR0_CSV2_SHIFT))
638 return false;
639
640 /* Alternatively, we have a list of unaffected CPUs */
641 if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
642 return false;
643
644 /* Fallback to firmware detection */
645 need_wa = detect_harden_bp_fw();
646 if (!need_wa)
647 return false;
648
649 __spectrev2_safe = false;
650
651 if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
652 pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
653 __hardenbp_enab = false;
654 return false;
655 }
656
657 /* forced off */
658 if (__nospectre_v2 || cpu_mitigations_off()) {
659 pr_info_once("spectrev2 mitigation disabled by command line option\n");
660 __hardenbp_enab = false;
661 return false;
662 }
663
664 if (need_wa < 0) {
665 pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
666 __hardenbp_enab = false;
667 }
668
669 return (need_wa > 0);
670}
671
672static void
673cpu_enable_branch_predictor_hardening(const struct arm64_cpu_capabilities *cap)
674{
675 cap->matches(cap, SCOPE_LOCAL_CPU);
676}
677
678static const __maybe_unused struct midr_range tx2_family_cpus[] = {
679 MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
680 MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
681 {},
682};
683
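/*
 * The TVM trap workaround is only needed when running with the
 * hypervisor available and SMT enabled, i.e. at least one CPU reports
 * a non-zero MPIDR affinity level 0.
 */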
684static bool __maybe_unused
685needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
686 int scope)
687{
688 int i;
689
690 if (!is_affected_midr_range_list(entry, scope) ||
691 !is_hyp_mode_available())
692 return false;
693
694 for_each_possible_cpu(i) {
695 if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
696 return true;
697 }
698
699 return false;
700}
701
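/*
 * Erratum 1542419 only needs the workaround on Neoverse-N1 parts that
 * advertise DIC in CTR_EL0 (no I-cache invalidation required for
 * instruction/data coherence).
 */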
702static bool __maybe_unused
703has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
704 int scope)
705{
706 u32 midr = read_cpuid_id();
707 bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT);
708 const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);
709
710 WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
711 return is_midr_in_range(midr, &range) && has_dic;
712}
713
714#ifdef CONFIG_HARDEN_EL2_VECTORS
715
716static const struct midr_range arm64_harden_el2_vectors[] = {
717 MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
718 MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
719 {},
720};
721
722#endif
723
724#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
725static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
726#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
727 {
728 ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
729 },
730 {
731 .midr_range.model = MIDR_QCOM_KRYO,
732 .matches = is_kryo_midr,
733 },
734#endif
735#ifdef CONFIG_ARM64_ERRATUM_1286807
736 {
737 ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
738 },
739#endif
740 {},
741};
742#endif
743
744#ifdef CONFIG_CAVIUM_ERRATUM_27456
745const struct midr_range cavium_erratum_27456_cpus[] = {
746 /* Cavium ThunderX, T88 pass 1.x - 2.1 */
747 MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
748 /* Cavium ThunderX, T81 pass 1.0 */
749 MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
750 {},
751};
752#endif
753
754#ifdef CONFIG_CAVIUM_ERRATUM_30115
755static const struct midr_range cavium_erratum_30115_cpus[] = {
756 /* Cavium ThunderX, T88 pass 1.x - 2.2 */
757 MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
758 /* Cavium ThunderX, T81 pass 1.0 - 1.2 */
759 MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
760 /* Cavium ThunderX, T83 pass 1.0 */
761 MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
762 {},
763};
764#endif
765
766#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
767static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
768 {
769 ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
770 },
771 {
772 .midr_range.model = MIDR_QCOM_KRYO,
773 .matches = is_kryo_midr,
774 },
775 {},
776};
777#endif
778
779#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
780static const struct midr_range workaround_clean_cache[] = {
781#if defined(CONFIG_ARM64_ERRATUM_826319) || \
782 defined(CONFIG_ARM64_ERRATUM_827319) || \
783 defined(CONFIG_ARM64_ERRATUM_824069)
784 /* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
785 MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
786#endif
787#ifdef CONFIG_ARM64_ERRATUM_819472
788 /* Cortex-A53 r0p[01] : ARM errata 819472 */
789 MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
790#endif
791 {},
792};
793#endif
794
795#ifdef CONFIG_ARM64_ERRATUM_1418040
796/*
797 * - 1188873 affects r0p0 to r2p0
798 * - 1418040 affects r0p0 to r3p1
799 */
800static const struct midr_range erratum_1418040_list[] = {
801 /* Cortex-A76 r0p0 to r3p1 */
802 MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
803 /* Neoverse-N1 r0p0 to r3p1 */
804 MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
805 {},
806};
807#endif
808
809#ifdef CONFIG_ARM64_ERRATUM_845719
810static const struct midr_range erratum_845719_list[] = {
811 /* Cortex-A53 r0p[01234] */
812 MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
813 /* Brahma-B53 r0p[0] */
814 MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
815 {},
816};
817#endif
818
819#ifdef CONFIG_ARM64_ERRATUM_843419
820static const struct arm64_cpu_capabilities erratum_843419_list[] = {
821 {
822 /* Cortex-A53 r0p[01234] */
823 .matches = is_affected_midr_range,
824 ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
825 MIDR_FIXED(0x4, BIT(8)),
826 },
827 {
828 /* Brahma-B53 r0p[0] */
829 .matches = is_affected_midr_range,
830 ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
831 },
832 {},
833};
834#endif
835
836#ifdef CONFIG_ARM64_ERRATUM_1742098
837static struct midr_range broken_aarch32_aes[] = {
838 MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
839 MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
840 {},
841};
842#endif
843
844#ifdef CONFIG_ARM64_ERRATUM_3194386
845static const struct midr_range erratum_spec_ssbs_list[] = {
846 MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
847 MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
848 MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
849 MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
850 MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
851 MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
852 MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
853 MIDR_ALL_VERSIONS(MIDR_CORTEX_A725),
854 MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
855 MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
856 MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
857 MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
858 MIDR_ALL_VERSIONS(MIDR_CORTEX_X4),
859 MIDR_ALL_VERSIONS(MIDR_CORTEX_X925),
860 MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
861 MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
862 MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N3),
863 MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
864 MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
865 MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
866 {}
867};
868#endif
869
870const struct arm64_cpu_capabilities arm64_errata[] = {
871#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
872 {
873 .desc = "ARM errata 826319, 827319, 824069, 819472",
874 .capability = ARM64_WORKAROUND_CLEAN_CACHE,
875 ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
876 .cpu_enable = cpu_enable_cache_maint_trap,
877 },
878#endif
879#ifdef CONFIG_ARM64_ERRATUM_832075
880 {
881 /* Cortex-A57 r0p0 - r1p2 */
882 .desc = "ARM erratum 832075",
883 .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
884 ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
885 0, 0,
886 1, 2),
887 },
888#endif
889#ifdef CONFIG_ARM64_ERRATUM_834220
890 {
891 /* Cortex-A57 r0p0 - r1p2 */
892 .desc = "ARM erratum 834220",
893 .capability = ARM64_WORKAROUND_834220,
894 ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
895 0, 0,
896 1, 2),
897 },
898#endif
899#ifdef CONFIG_ARM64_ERRATUM_843419
900 {
901 .desc = "ARM erratum 843419",
902 .capability = ARM64_WORKAROUND_843419,
903 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
904 .matches = cpucap_multi_entry_cap_matches,
905 .match_list = erratum_843419_list,
906 },
907#endif
908#ifdef CONFIG_ARM64_ERRATUM_845719
909 {
910 .desc = "ARM erratum 845719",
911 .capability = ARM64_WORKAROUND_845719,
912 ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
913 },
914#endif
915#ifdef CONFIG_CAVIUM_ERRATUM_23154
916 {
917 /* Cavium ThunderX, pass 1.x */
918 .desc = "Cavium erratum 23154",
919 .capability = ARM64_WORKAROUND_CAVIUM_23154,
920 ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
921 },
922#endif
923#ifdef CONFIG_CAVIUM_ERRATUM_27456
924 {
925 .desc = "Cavium erratum 27456",
926 .capability = ARM64_WORKAROUND_CAVIUM_27456,
927 ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
928 },
929#endif
930#ifdef CONFIG_CAVIUM_ERRATUM_30115
931 {
932 .desc = "Cavium erratum 30115",
933 .capability = ARM64_WORKAROUND_CAVIUM_30115,
934 ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
935 },
936#endif
937 {
938 .desc = "Mismatched cache type (CTR_EL0)",
939 .capability = ARM64_MISMATCHED_CACHE_TYPE,
940 .matches = has_mismatched_cache_type,
941 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
942 .cpu_enable = cpu_enable_trap_ctr_access,
943 },
944#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
945 {
946 .desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
947 .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
948 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
949 .matches = cpucap_multi_entry_cap_matches,
950 .match_list = qcom_erratum_1003_list,
951 },
952#endif
953#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
954 {
955 .desc = "Qualcomm erratum 1009, ARM erratum 1286807",
956 .capability = ARM64_WORKAROUND_REPEAT_TLBI,
957 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
958 .matches = cpucap_multi_entry_cap_matches,
959 .match_list = arm64_repeat_tlbi_list,
960 },
961#endif
962#ifdef CONFIG_ARM64_ERRATUM_858921
963 {
964 /* Cortex-A73 all versions */
965 .desc = "ARM erratum 858921",
966 .capability = ARM64_WORKAROUND_858921,
967 ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
968 },
969#endif
970 {
971 .desc = "Branch predictor hardening",
972 .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
973 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
974 .matches = check_branch_predictor,
975 .cpu_enable = cpu_enable_branch_predictor_hardening,
976 },
977#ifdef CONFIG_HARDEN_EL2_VECTORS
978 {
979 .desc = "EL2 vector hardening",
980 .capability = ARM64_HARDEN_EL2_VECTORS,
981 ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
982 },
983#endif
984 {
985 .desc = "Speculative Store Bypass Disable",
986 .capability = ARM64_SSBD,
987 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
988 .matches = has_ssbd_mitigation,
989 .cpu_enable = cpu_enable_ssbd_mitigation,
990 .midr_range_list = arm64_ssb_cpus,
991 },
992 {
993 .desc = "Spectre-BHB",
994 .capability = ARM64_SPECTRE_BHB,
995 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
996 .matches = is_spectre_bhb_affected,
997 .cpu_enable = spectre_bhb_enable_mitigation,
998 },
999#ifdef CONFIG_ARM64_ERRATUM_1418040
1000 {
1001 .desc = "ARM erratum 1418040",
1002 .capability = ARM64_WORKAROUND_1418040,
1003 ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
1004 /*
1005 * We need to allow affected CPUs to come in late, but
1006 * also need the non-affected CPUs to be able to come
1007 * in at any point in time. Wonderful.
1008 */
1009 .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
1010 },
1011#endif
1012#ifdef CONFIG_ARM64_ERRATUM_1165522
1013 {
1014 /* Cortex-A76 r0p0 to r2p0 */
1015 .desc = "ARM erratum 1165522",
1016 .capability = ARM64_WORKAROUND_1165522,
1017 ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
1018 },
1019#endif
1020#ifdef CONFIG_ARM64_ERRATUM_1463225
1021 {
1022 .desc = "ARM erratum 1463225",
1023 .capability = ARM64_WORKAROUND_1463225,
1024 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
1025 .matches = has_cortex_a76_erratum_1463225,
1026 },
1027#endif
1028#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
1029 {
1030 .desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
1031 .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
1032 ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
1033 .matches = needs_tx2_tvm_workaround,
1034 },
1035 {
1036 .desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
1037 .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
1038 ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
1039 },
1040#endif
1041#ifdef CONFIG_ARM64_ERRATUM_1542419
1042 {
1043 /* we depend on the firmware portion for correctness */
1044 .desc = "ARM erratum 1542419 (kernel portion)",
1045 .capability = ARM64_WORKAROUND_1542419,
1046 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
1047 .matches = has_neoverse_n1_erratum_1542419,
1048 .cpu_enable = cpu_enable_trap_ctr_access,
1049 },
1050#endif
1051#ifdef CONFIG_ARM64_ERRATUM_1742098
1052 {
1053 .desc = "ARM erratum 1742098",
1054 .capability = ARM64_WORKAROUND_1742098,
1055 CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
1056 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
1057 },
1058#endif
1059#ifdef CONFIG_ARM64_ERRATUM_3194386
1060 {
1061 .desc = "SSBS not fully self-synchronizing",
1062 .capability = ARM64_WORKAROUND_SPECULATIVE_SSBS,
1063 ERRATA_MIDR_RANGE_LIST(erratum_spec_ssbs_list),
1064 },
1065#endif
1066 {
1067 }
1068};
1069
1070ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
1071 char *buf)
1072{
1073 return sprintf(buf, "Mitigation: __user pointer sanitization\n");
1074}
1075
1076static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
1077{
1078 switch (bhb_state) {
1079 case SPECTRE_UNAFFECTED:
1080 return "";
1081 default:
1082 case SPECTRE_VULNERABLE:
1083 return ", but not BHB";
1084 case SPECTRE_MITIGATED:
1085 return ", BHB";
1086 }
1087}
1088
1089ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
1090 char *buf)
1091{
1092 enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
1093 const char *bhb_str = get_bhb_affected_string(bhb_state);
1094 const char *v2_str = "Branch predictor hardening";
1095
1096 switch (get_spectre_v2_workaround_state()) {
1097 case ARM64_BP_HARDEN_NOT_REQUIRED:
1098 if (bhb_state == SPECTRE_UNAFFECTED)
1099 return sprintf(buf, "Not affected\n");
1100
1101 /*
1102 * Platforms affected by Spectre-BHB can't report
1103 * "Not affected" for Spectre-v2.
1104 */
1105 v2_str = "CSV2";
1106 fallthrough;
1107 case ARM64_BP_HARDEN_WA_NEEDED:
1108 return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
1109 case ARM64_BP_HARDEN_UNKNOWN:
1110 fallthrough;
1111 default:
1112 return sprintf(buf, "Vulnerable\n");
1113 }
1114}
1115
1116ssize_t cpu_show_spec_store_bypass(struct device *dev,
1117 struct device_attribute *attr, char *buf)
1118{
1119 if (__ssb_safe)
1120 return sprintf(buf, "Not affected\n");
1121
1122 switch (ssbd_state) {
1123 case ARM64_SSBD_KERNEL:
1124 case ARM64_SSBD_FORCE_ENABLE:
1125 if (IS_ENABLED(CONFIG_ARM64_SSBD))
1126 return sprintf(buf,
1127 "Mitigation: Speculative Store Bypass disabled via prctl\n");
1128 }
1129
1130 return sprintf(buf, "Vulnerable\n");
1131}
1132
1133/*
1134 * We try to ensure that the mitigation state can never change as the result of
1135 * onlining a late CPU.
1136 */
1137static void update_mitigation_state(enum mitigation_state *oldp,
1138 enum mitigation_state new)
1139{
1140 enum mitigation_state state;
1141
1142 do {
1143 state = READ_ONCE(*oldp);
1144 if (new <= state)
1145 break;
1146 } while (cmpxchg_relaxed(oldp, state, new) != state);
1147}
1148
1149/*
1150 * Spectre BHB.
1151 *
1152 * A CPU is either:
1153 * - Mitigated by a branchy loop run a CPU-specific number of times, and listed
1154 * in our "loop mitigated list".
1155 * - Mitigated in software by the firmware Spectre v2 call.
1156 * - Has the ClearBHB instruction to perform the mitigation.
1157 * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
1158 * software mitigation in the vectors is needed.
1159 * - Has CSV2.3, so is unaffected.
1160 */
1161static enum mitigation_state spectre_bhb_state;
1162
1163enum mitigation_state arm64_get_spectre_bhb_state(void)
1164{
1165 return spectre_bhb_state;
1166}
1167
1168/*
1169 * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
1170 * SCOPE_SYSTEM call will give the right answer.
1171 */
1172u8 spectre_bhb_loop_affected(int scope)
1173{
1174 u8 k = 0;
1175 static u8 max_bhb_k;
1176
1177 if (scope == SCOPE_LOCAL_CPU) {
1178 static const struct midr_range spectre_bhb_k32_list[] = {
1179 MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
1180 MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
1181 MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
1182 MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
1183 MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
1184 MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
1185 MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
1186 {},
1187 };
1188 static const struct midr_range spectre_bhb_k24_list[] = {
1189 MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
1190 MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
1191 MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
1192 {},
1193 };
1194 static const struct midr_range spectre_bhb_k11_list[] = {
1195 MIDR_ALL_VERSIONS(MIDR_AMPERE1),
1196 {},
1197 };
1198 static const struct midr_range spectre_bhb_k8_list[] = {
1199 MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
1200 MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
1201 {},
1202 };
1203
1204 if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
1205 k = 32;
1206 else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
1207 k = 24;
1208 else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
1209 k = 11;
1210 else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
1211 k = 8;
1212
1213 max_bhb_k = max(max_bhb_k, k);
1214 } else {
1215 k = max_bhb_k;
1216 }
1217
1218 return k;
1219}
1220
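/*
 * Ask firmware whether ARCH_WORKAROUND_3 is implemented for this CPU
 * to determine the Spectre-BHB firmware mitigation state.
 */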
1221static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
1222{
1223 int ret;
1224 struct arm_smccc_res res;
1225
1226 if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
1227 return SPECTRE_VULNERABLE;
1228
1229 switch (psci_ops.conduit) {
1230 case PSCI_CONDUIT_HVC:
1231 arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
1232 ARM_SMCCC_ARCH_WORKAROUND_3, &res);
1233 break;
1234
1235 case PSCI_CONDUIT_SMC:
1236 arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
1237 ARM_SMCCC_ARCH_WORKAROUND_3, &res);
1238 break;
1239
1240 default:
1241 return SPECTRE_VULNERABLE;
1242 }
1243
1244 ret = res.a0;
1245 switch (ret) {
1246 case SMCCC_RET_SUCCESS:
1247 return SPECTRE_MITIGATED;
1248 case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
1249 return SPECTRE_UNAFFECTED;
1250 default:
1251 fallthrough;
1252 case SMCCC_RET_NOT_SUPPORTED:
1253 return SPECTRE_VULNERABLE;
1254 }
1255}
1256
1257static bool is_spectre_bhb_fw_affected(int scope)
1258{
1259 static bool system_affected;
1260 enum mitigation_state fw_state;
1261 bool has_smccc = (psci_ops.smccc_version >= SMCCC_VERSION_1_1);
1262 static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
1263 MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
1264 MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
1265 {},
1266 };
1267 bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
1268 spectre_bhb_firmware_mitigated_list);
1269
1270 if (scope != SCOPE_LOCAL_CPU)
1271 return system_affected;
1272
1273 fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
1274 if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
1275 system_affected = true;
1276 return true;
1277 }
1278
1279 return false;
1280}
1281
1282static bool supports_ecbhb(int scope)
1283{
1284 u64 mmfr1;
1285
1286 if (scope == SCOPE_LOCAL_CPU)
1287 mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
1288 else
1289 mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
1290
1291 return cpuid_feature_extract_unsigned_field(mmfr1,
1292 ID_AA64MMFR1_ECBHB_SHIFT);
1293}
1294
1295bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
1296 int scope)
1297{
1298 WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
1299
1300 if (supports_csv2p3(scope))
1301 return false;
1302
1303 if (supports_clearbhb(scope))
1304 return true;
1305
1306 if (spectre_bhb_loop_affected(scope))
1307 return true;
1308
1309 if (is_spectre_bhb_fw_affected(scope))
1310 return true;
1311
1312 return false;
1313}
1314
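/*
 * Switch this CPU to the chosen Spectre-BHB mitigation vectors. When
 * KPTI is enabled the vectors are instead switched on the return to
 * user-space, so only the per-CPU vector pointer is updated here.
 */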
1315static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
1316{
1317 const char *v = arm64_get_bp_hardening_vector(slot);
1318
1319 if (slot < 0)
1320 return;
1321
1322 __this_cpu_write(this_cpu_vector, v);
1323
1324 /*
1325 * When KPTI is in use, the vectors are switched when exiting to
1326 * user-space.
1327 */
1328 if (arm64_kernel_unmapped_at_el0())
1329 return;
1330
1331 write_sysreg(v, vbar_el1);
1332 isb();
1333}
1334
1335#ifdef CONFIG_KVM_INDIRECT_VECTORS
1336static const char *kvm_bhb_get_vecs_end(const char *start)
1337{
1338 if (start == __smccc_workaround_3_smc_start)
1339 return __smccc_workaround_3_smc_end;
1340 else if (start == __spectre_bhb_loop_k8_start)
1341 return __spectre_bhb_loop_k8_end;
1342 else if (start == __spectre_bhb_loop_k24_start)
1343 return __spectre_bhb_loop_k24_end;
1344 else if (start == __spectre_bhb_loop_k32_start)
1345 return __spectre_bhb_loop_k32_end;
1346 else if (start == __spectre_bhb_clearbhb_start)
1347 return __spectre_bhb_clearbhb_end;
1348
1349 return NULL;
1350}
1351
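/*
 * Install the given Spectre-BHB template into an EL2 vector slot,
 * reusing a slot that already holds the same template when possible,
 * and record the slot for this CPU.
 */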
1352static void kvm_setup_bhb_slot(const char *hyp_vecs_start)
1353{
1354 int cpu, slot = -1;
1355 const char *hyp_vecs_end;
1356
1357 if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
1358 return;
1359
1360 hyp_vecs_end = kvm_bhb_get_vecs_end(hyp_vecs_start);
1361 if (WARN_ON_ONCE(!hyp_vecs_start || !hyp_vecs_end))
1362 return;
1363
1364 raw_spin_lock(&bp_lock);
1365 for_each_possible_cpu(cpu) {
1366 if (per_cpu(bp_hardening_data.template_start, cpu) == hyp_vecs_start) {
1367 slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
1368 break;
1369 }
1370 }
1371
1372 if (slot == -1) {
1373 slot = atomic_inc_return(&arm64_el2_vector_last_slot);
1374 BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
1375 __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
1376 }
1377
1378 if (hyp_vecs_start != __this_cpu_read(bp_hardening_data.template_start)) {
1379 __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
1380 __this_cpu_write(bp_hardening_data.template_start,
1381 hyp_vecs_start);
1382 }
1383 raw_spin_unlock(&bp_lock);
1384}
1385#else
1386#define __smccc_workaround_3_smc_start NULL
1387#define __spectre_bhb_loop_k8_start NULL
1388#define __spectre_bhb_loop_k24_start NULL
1389#define __spectre_bhb_loop_k32_start NULL
1390#define __spectre_bhb_clearbhb_start NULL
1391
1392static void kvm_setup_bhb_slot(const char *hyp_vecs_start) { }
1393#endif
1394
1395void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
1396{
1397 enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
1398
1399 if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
1400 return;
1401
1402 if (get_spectre_v2_workaround_state() == ARM64_BP_HARDEN_UNKNOWN) {
1403 /* No point mitigating Spectre-BHB alone. */
1404 } else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
1405 pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
1406 } else if (cpu_mitigations_off()) {
1407 pr_info_once("spectre-bhb mitigation disabled by command line option\n");
1408 } else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
1409 state = SPECTRE_MITIGATED;
1410 } else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
1411 kvm_setup_bhb_slot(__spectre_bhb_clearbhb_start);
1412 this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
1413
1414 state = SPECTRE_MITIGATED;
1415 } else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
1416 switch (spectre_bhb_loop_affected(SCOPE_SYSTEM)) {
1417 case 8:
1418 /*
1419 * A57/A72-r0 will already have selected the
1420 * spectre-indirect vector, which is sufficient
1421 * for BHB too.
1422 */
1423 if (!__this_cpu_read(bp_hardening_data.fn))
1424 kvm_setup_bhb_slot(__spectre_bhb_loop_k8_start);
1425 break;
1426 case 24:
1427 kvm_setup_bhb_slot(__spectre_bhb_loop_k24_start);
1428 break;
1429 case 32:
1430 kvm_setup_bhb_slot(__spectre_bhb_loop_k32_start);
1431 break;
1432 default:
1433 WARN_ON_ONCE(1);
1434 }
1435 this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
1436
1437 state = SPECTRE_MITIGATED;
1438 } else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
1439 fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
1440 if (fw_state == SPECTRE_MITIGATED) {
1441 kvm_setup_bhb_slot(__smccc_workaround_3_smc_start);
1442 this_cpu_set_vectors(EL1_VECTOR_BHB_FW);
1443
1444 /*
1445 * With WA3 in the vectors, the WA1 calls can be
1446 * removed.
1447 */
1448 __this_cpu_write(bp_hardening_data.fn, NULL);
1449
1450 state = SPECTRE_MITIGATED;
1451 }
1452 }
1453
1454 update_mitigation_state(&spectre_bhb_state, state);
1455}
1456
1457/* Patched to correct the immediate */
1458void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
1459 __le32 *origptr, __le32 *updptr, int nr_inst)
1460{
1461 u8 rd;
1462 u32 insn;
1463 u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);
1464
1465 BUG_ON(nr_inst != 1); /* MOV -> MOV */
1466
1467 if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
1468 return;
1469
1470 insn = le32_to_cpu(*origptr);
1471 rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
1472 insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
1473 AARCH64_INSN_VARIANT_64BIT,
1474 AARCH64_INSN_MOVEWIDE_ZERO);
1475 *updptr++ = cpu_to_le32(insn);
1476}