// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2017 Arm Ltd.
#define pr_fmt(fmt) "sdei: " fmt

#include <acpi/ghes.h>
#include <linux/acpi.h>
#include <linux/arm_sdei.h>
#include <linux/arm-smccc.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/cpuhotplug.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

/*
 * The call to use to reach the firmware.
 */
static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
		unsigned long arg0, unsigned long arg1,
		unsigned long arg2, unsigned long arg3,
		unsigned long arg4, struct arm_smccc_res *res);

/* entry point from firmware to arch asm code */
static unsigned long sdei_entry_point;

static int sdei_hp_state;

struct sdei_event {
	/* These three are protected by the sdei_list_lock */
	struct list_head list;
	bool reregister;
	bool reenable;

	u32 event_num;
	u8 type;
	u8 priority;

	/* This pointer is handed to firmware as the event argument. */
	union {
		/* Shared events */
		struct sdei_registered_event *registered;

		/* CPU private events */
		struct sdei_registered_event __percpu *private_registered;
	};
};

/* Take the mutex for any API call or modification. Take the mutex first. */
static DEFINE_MUTEX(sdei_events_lock);

/* and then hold this when modifying the list */
static DEFINE_SPINLOCK(sdei_list_lock);
static LIST_HEAD(sdei_list);

/* Private events are registered/enabled via IPI passing one of these */
struct sdei_crosscall_args {
	struct sdei_event *event;
	atomic_t errors;
	int first_error;
};

#define CROSSCALL_INIT(arg, event)	(arg.event = event, \
					 arg.first_error = 0, \
					 atomic_set(&arg.errors, 0))

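/*
 * Private events exist independently on each CPU, so registering, enabling
 * or unregistering one must be done on every online CPU. Run @fn everywhere
 * via on_each_cpu() and report the first error any CPU saw;
 * sdei_cross_call_return() uses the atomic counter to keep only that first
 * error.
 */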
static inline int sdei_do_cross_call(void *fn, struct sdei_event *event)
{
	struct sdei_crosscall_args arg;

	CROSSCALL_INIT(arg, event);
	on_each_cpu(fn, &arg, true);

	return arg.first_error;
}

static inline void
sdei_cross_call_return(struct sdei_crosscall_args *arg, int err)
{
	if (err && (atomic_inc_return(&arg->errors) == 1))
		arg->first_error = err;
}

static int sdei_to_linux_errno(unsigned long sdei_err)
{
	switch (sdei_err) {
	case SDEI_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case SDEI_INVALID_PARAMETERS:
		return -EINVAL;
	case SDEI_DENIED:
		return -EPERM;
	case SDEI_PENDING:
		return -EINPROGRESS;
	case SDEI_OUT_OF_RESOURCE:
		return -ENOMEM;
	}

	/* Not an error value ... */
	return sdei_err;
}

/*
 * If x0 is any of these values, the call failed; use sdei_to_linux_errno()
 * to translate.
 */
static int sdei_is_err(struct arm_smccc_res *res)
{
	switch (res->a0) {
	case SDEI_NOT_SUPPORTED:
	case SDEI_INVALID_PARAMETERS:
	case SDEI_DENIED:
	case SDEI_PENDING:
	case SDEI_OUT_OF_RESOURCE:
		return true;
	}

	return false;
}

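/*
 * Single entry point for every SDEI firmware call. Returns a Linux errno
 * (0 on success) and, when @result is non-NULL, the raw x0 value returned by
 * firmware. -EIO means the conduit is gone; callers use it to stay quiet
 * rather than warn.
 */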
static int invoke_sdei_fn(unsigned long function_id, unsigned long arg0,
			  unsigned long arg1, unsigned long arg2,
			  unsigned long arg3, unsigned long arg4,
			  u64 *result)
{
	int err = 0;
	struct arm_smccc_res res;

	if (sdei_firmware_call) {
		sdei_firmware_call(function_id, arg0, arg1, arg2, arg3, arg4,
				   &res);
		if (sdei_is_err(&res))
			err = sdei_to_linux_errno(res.a0);
	} else {
		/*
		 * !sdei_firmware_call means we failed to probe or called
		 * sdei_mark_interface_broken(). -EIO is not an error returned
		 * by sdei_to_linux_errno() and is used to suppress messages
		 * from this driver.
		 */
		err = -EIO;
		res.a0 = SDEI_NOT_SUPPORTED;
	}

	if (result)
		*result = res.a0;

	return err;
}
NOKPROBE_SYMBOL(invoke_sdei_fn);

static struct sdei_event *sdei_event_find(u32 event_num)
{
	struct sdei_event *e, *found = NULL;

	lockdep_assert_held(&sdei_events_lock);

	spin_lock(&sdei_list_lock);
	list_for_each_entry(e, &sdei_list, list) {
		if (e->event_num == event_num) {
			found = e;
			break;
		}
	}
	spin_unlock(&sdei_list_lock);

	return found;
}

int sdei_api_event_context(u32 query, u64 *result)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_CONTEXT, query, 0, 0, 0, 0,
			      result);
}
NOKPROBE_SYMBOL(sdei_api_event_context);

static int sdei_api_event_get_info(u32 event, u32 info, u64 *result)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_GET_INFO, event, info, 0,
			      0, 0, result);
}

static struct sdei_event *sdei_event_create(u32 event_num,
					    sdei_event_callback *cb,
					    void *cb_arg)
{
	int err;
	u64 result;
	struct sdei_event *event;
	struct sdei_registered_event *reg;

	lockdep_assert_held(&sdei_events_lock);

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&event->list);
	event->event_num = event_num;

	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
				      &result);
	if (err) {
		kfree(event);
		return ERR_PTR(err);
	}
	event->priority = result;

	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_TYPE,
				      &result);
	if (err) {
		kfree(event);
		return ERR_PTR(err);
	}
	event->type = result;

	if (event->type == SDEI_EVENT_TYPE_SHARED) {
		reg = kzalloc(sizeof(*reg), GFP_KERNEL);
		if (!reg) {
			kfree(event);
			return ERR_PTR(-ENOMEM);
		}

		reg->event_num = event_num;
		reg->priority = event->priority;

		reg->callback = cb;
		reg->callback_arg = cb_arg;
		event->registered = reg;
	} else {
		int cpu;
		struct sdei_registered_event __percpu *regs;

		regs = alloc_percpu(struct sdei_registered_event);
		if (!regs) {
			kfree(event);
			return ERR_PTR(-ENOMEM);
		}

		for_each_possible_cpu(cpu) {
			reg = per_cpu_ptr(regs, cpu);

			reg->event_num = event->event_num;
			reg->priority = event->priority;
			reg->callback = cb;
			reg->callback_arg = cb_arg;
		}

		event->private_registered = regs;
	}

	if (sdei_event_find(event_num)) {
		kfree(event->registered);
		kfree(event);
		event = ERR_PTR(-EBUSY);
	} else {
		spin_lock(&sdei_list_lock);
		list_add(&event->list, &sdei_list);
		spin_unlock(&sdei_list_lock);
	}

	return event;
}

static void sdei_event_destroy(struct sdei_event *event)
{
	lockdep_assert_held(&sdei_events_lock);

	spin_lock(&sdei_list_lock);
	list_del(&event->list);
	spin_unlock(&sdei_list_lock);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		kfree(event->registered);
	else
		free_percpu(event->private_registered);

	kfree(event);
}

static int sdei_api_get_version(u64 *version)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_VERSION, 0, 0, 0, 0, 0, version);
}

int sdei_mask_local_cpu(void)
{
	int err;

	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL);
	if (err && err != -EIO) {
		pr_warn_once("failed to mask CPU[%u]: %d\n",
			     smp_processor_id(), err);
		return err;
	}

	return 0;
}

static void _ipi_mask_cpu(void *ignored)
{
	WARN_ON_ONCE(preemptible());
	sdei_mask_local_cpu();
}

int sdei_unmask_local_cpu(void)
{
	int err;

	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL);
	if (err && err != -EIO) {
		pr_warn_once("failed to unmask CPU[%u]: %d\n",
			     smp_processor_id(), err);
		return err;
	}

	return 0;
}

static void _ipi_unmask_cpu(void *ignored)
{
	WARN_ON_ONCE(preemptible());
	sdei_unmask_local_cpu();
}

static void _ipi_private_reset(void *ignored)
{
	int err;

	WARN_ON_ONCE(preemptible());

	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0,
			     NULL);
	if (err && err != -EIO)
		pr_warn_once("failed to reset CPU[%u]: %d\n",
			     smp_processor_id(), err);
}

static int sdei_api_shared_reset(void)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_SHARED_RESET, 0, 0, 0, 0, 0,
			      NULL);
}

static void sdei_mark_interface_broken(void)
{
	pr_err("disabling SDEI firmware interface\n");
	on_each_cpu(&_ipi_mask_cpu, NULL, true);
	sdei_firmware_call = NULL;
}

static int sdei_platform_reset(void)
{
	int err;

	on_each_cpu(&_ipi_private_reset, NULL, true);
	err = sdei_api_shared_reset();
	if (err) {
		pr_err("Failed to reset platform: %d\n", err);
		sdei_mark_interface_broken();
	}

	return err;
}

static int sdei_api_event_enable(u32 event_num)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_ENABLE, event_num, 0, 0, 0,
			      0, NULL);
}

/* Called directly by the hotplug callbacks */
static void _local_event_enable(void *data)
{
	int err;
	struct sdei_crosscall_args *arg = data;

	err = sdei_api_event_enable(arg->event->event_num);

	sdei_cross_call_return(arg, err);
}

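/*
 * sdei_event_enable() - enable a registered SDEI event.
 * @event_num: event to enable.
 *
 * Shared events need a single firmware call; private events are enabled on
 * every online CPU via a cross-call. On success the event is marked
 * 'reenable' so the CPU hotplug and hibernate paths turn it back on.
 */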
int sdei_event_enable(u32 event_num)
{
	int err = -EINVAL;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	event = sdei_event_find(event_num);
	if (!event) {
		mutex_unlock(&sdei_events_lock);
		return -ENOENT;
	}

	cpus_read_lock();
	if (event->type == SDEI_EVENT_TYPE_SHARED)
		err = sdei_api_event_enable(event->event_num);
	else
		err = sdei_do_cross_call(_local_event_enable, event);

	if (!err) {
		spin_lock(&sdei_list_lock);
		event->reenable = true;
		spin_unlock(&sdei_list_lock);
	}
	cpus_read_unlock();
	mutex_unlock(&sdei_events_lock);

	return err;
}
EXPORT_SYMBOL(sdei_event_enable);

static int sdei_api_event_disable(u32 event_num)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_DISABLE, event_num, 0, 0,
			      0, 0, NULL);
}

static void _ipi_event_disable(void *data)
{
	int err;
	struct sdei_crosscall_args *arg = data;

	err = sdei_api_event_disable(arg->event->event_num);

	sdei_cross_call_return(arg, err);
}

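/*
 * sdei_event_disable() - disable a registered SDEI event.
 * @event_num: event to disable.
 *
 * Clears 'reenable' first so the hotplug and hibernate paths won't re-enable
 * the event, then disables it, on every CPU for private events.
 */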
int sdei_event_disable(u32 event_num)
{
	int err = -EINVAL;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	event = sdei_event_find(event_num);
	if (!event) {
		mutex_unlock(&sdei_events_lock);
		return -ENOENT;
	}

	spin_lock(&sdei_list_lock);
	event->reenable = false;
	spin_unlock(&sdei_list_lock);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		err = sdei_api_event_disable(event->event_num);
	else
		err = sdei_do_cross_call(_ipi_event_disable, event);
	mutex_unlock(&sdei_events_lock);

	return err;
}
EXPORT_SYMBOL(sdei_event_disable);

static int sdei_api_event_unregister(u32 event_num)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_UNREGISTER, event_num, 0,
			      0, 0, 0, NULL);
}

/* Called directly by the hotplug callbacks */
static void _local_event_unregister(void *data)
{
	int err;
	struct sdei_crosscall_args *arg = data;

	err = sdei_api_event_unregister(arg->event->event_num);

	sdei_cross_call_return(arg, err);
}

static int _sdei_event_unregister(struct sdei_event *event)
{
	lockdep_assert_held(&sdei_events_lock);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		return sdei_api_event_unregister(event->event_num);

	return sdei_do_cross_call(_local_event_unregister, event);
}

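/*
 * sdei_event_unregister() - unregister an event and free its bookkeeping.
 * @event_num: event to unregister.
 *
 * May return -EINPROGRESS (SDEI_PENDING) if the event is still being handled
 * on another CPU; callers such as sdei_unregister_ghes() retry in that case.
 */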
int sdei_event_unregister(u32 event_num)
{
	int err;
	struct sdei_event *event;

	WARN_ON(in_nmi());

	mutex_lock(&sdei_events_lock);
	event = sdei_event_find(event_num);
	do {
		if (!event) {
			pr_warn("Event %u not registered\n", event_num);
			err = -ENOENT;
			break;
		}

		spin_lock(&sdei_list_lock);
		event->reregister = false;
		event->reenable = false;
		spin_unlock(&sdei_list_lock);

		err = _sdei_event_unregister(event);
		if (err)
			break;

		sdei_event_destroy(event);
	} while (0);
	mutex_unlock(&sdei_events_lock);

	return err;
}
EXPORT_SYMBOL(sdei_event_unregister);

/*
 * Unregister events, but don't destroy them as they are re-registered by
 * sdei_reregister_shared().
 */
static int sdei_unregister_shared(void)
{
	int err = 0;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type != SDEI_EVENT_TYPE_SHARED)
			continue;

		err = _sdei_event_unregister(event);
		if (err)
			break;
	}
	spin_unlock(&sdei_list_lock);
	mutex_unlock(&sdei_events_lock);

	return err;
}

static int sdei_api_event_register(u32 event_num, unsigned long entry_point,
				   void *arg, u64 flags, u64 affinity)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_REGISTER, event_num,
			      (unsigned long)entry_point, (unsigned long)arg,
			      flags, affinity, NULL);
}

/* Called directly by the hotplug callbacks */
static void _local_event_register(void *data)
{
	int err;
	struct sdei_registered_event *reg;
	struct sdei_crosscall_args *arg = data;

	reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id());
	err = sdei_api_event_register(arg->event->event_num, sdei_entry_point,
				      reg, 0, 0);

	sdei_cross_call_return(arg, err);
}

static int _sdei_event_register(struct sdei_event *event)
{
	int err;

	lockdep_assert_held(&sdei_events_lock);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		return sdei_api_event_register(event->event_num,
					       sdei_entry_point,
					       event->registered,
					       SDEI_EVENT_REGISTER_RM_ANY, 0);

	err = sdei_do_cross_call(_local_event_register, event);
	if (err)
		sdei_do_cross_call(_local_event_unregister, event);

	return err;
}

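/*
 * sdei_event_register() - register a callback for an SDEI event number.
 * @event_num: event number advertised by firmware.
 * @cb: callback invoked from the SDEI entry path when the event fires.
 * @arg: opaque argument handed back to @cb.
 *
 * Shared events are registered with SDEI_EVENT_REGISTER_RM_ANY so firmware
 * may route them to any CPU; private events are registered on every online
 * CPU. Registration does not enable the event: callers follow up with
 * sdei_event_enable(), as sdei_register_ghes() below does.
 */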
int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
{
	int err;
	struct sdei_event *event;

	WARN_ON(in_nmi());

	mutex_lock(&sdei_events_lock);
	do {
		if (sdei_event_find(event_num)) {
			pr_warn("Event %u already registered\n", event_num);
			err = -EBUSY;
			break;
		}

		event = sdei_event_create(event_num, cb, arg);
		if (IS_ERR(event)) {
			err = PTR_ERR(event);
			pr_warn("Failed to create event %u: %d\n", event_num,
				err);
			break;
		}

		cpus_read_lock();
		err = _sdei_event_register(event);
		if (err) {
			sdei_event_destroy(event);
			pr_warn("Failed to register event %u: %d\n", event_num,
				err);
		} else {
			spin_lock(&sdei_list_lock);
			event->reregister = true;
			spin_unlock(&sdei_list_lock);
		}
		cpus_read_unlock();
	} while (0);
	mutex_unlock(&sdei_events_lock);

	return err;
}
EXPORT_SYMBOL(sdei_event_register);

static int sdei_reregister_event(struct sdei_event *event)
{
	int err;

	lockdep_assert_held(&sdei_events_lock);

	err = _sdei_event_register(event);
	if (err) {
		pr_err("Failed to re-register event %u\n", event->event_num);
		sdei_event_destroy(event);
		return err;
	}

	if (event->reenable) {
		if (event->type == SDEI_EVENT_TYPE_SHARED)
			err = sdei_api_event_enable(event->event_num);
		else
			err = sdei_do_cross_call(_local_event_enable, event);
	}

	if (err)
		pr_err("Failed to re-enable event %u\n", event->event_num);

	return err;
}

static int sdei_reregister_shared(void)
{
	int err = 0;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type != SDEI_EVENT_TYPE_SHARED)
			continue;

		if (event->reregister) {
			err = sdei_reregister_event(event);
			if (err)
				break;
		}
	}
	spin_unlock(&sdei_list_lock);
	mutex_unlock(&sdei_events_lock);

	return err;
}

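/*
 * CPU hotplug callbacks: private events only make sense on a running CPU, so
 * they are unregistered (and the CPU masked) on the way down, then
 * re-registered and re-enabled (and the CPU unmasked) on the way up. The
 * cross-call helpers are called directly as this CPU is the only target.
 */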
static int sdei_cpuhp_down(unsigned int cpu)
{
	struct sdei_event *event;
	struct sdei_crosscall_args arg;

	/* un-register private events */
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type == SDEI_EVENT_TYPE_SHARED)
			continue;

		CROSSCALL_INIT(arg, event);
		/* call the cross-call function locally... */
		_local_event_unregister(&arg);
		if (arg.first_error)
			pr_err("Failed to unregister event %u: %d\n",
			       event->event_num, arg.first_error);
	}
	spin_unlock(&sdei_list_lock);

	return sdei_mask_local_cpu();
}

static int sdei_cpuhp_up(unsigned int cpu)
{
	struct sdei_event *event;
	struct sdei_crosscall_args arg;

	/* re-register/enable private events */
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type == SDEI_EVENT_TYPE_SHARED)
			continue;

		if (event->reregister) {
			CROSSCALL_INIT(arg, event);
			/* call the cross-call function locally... */
			_local_event_register(&arg);
			if (arg.first_error)
				pr_err("Failed to re-register event %u: %d\n",
				       event->event_num, arg.first_error);
		}

		if (event->reenable) {
			CROSSCALL_INIT(arg, event);
			_local_event_enable(&arg);
			if (arg.first_error)
				pr_err("Failed to re-enable event %u: %d\n",
				       event->event_num, arg.first_error);
		}
	}
	spin_unlock(&sdei_list_lock);

	return sdei_unmask_local_cpu();
}

/* When entering idle, mask/unmask events for this cpu */
static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action,
			    void *data)
{
	int rv;

	WARN_ON_ONCE(preemptible());

	switch (action) {
	case CPU_PM_ENTER:
		rv = sdei_mask_local_cpu();
		break;
	case CPU_PM_EXIT:
	case CPU_PM_ENTER_FAILED:
		rv = sdei_unmask_local_cpu();
		break;
	default:
		return NOTIFY_DONE;
	}

	if (rv)
		return notifier_from_errno(rv);

	return NOTIFY_OK;
}

static struct notifier_block sdei_pm_nb = {
	.notifier_call = sdei_pm_notifier,
};

static int sdei_device_suspend(struct device *dev)
{
	on_each_cpu(_ipi_mask_cpu, NULL, true);

	return 0;
}

static int sdei_device_resume(struct device *dev)
{
	on_each_cpu(_ipi_unmask_cpu, NULL, true);

	return 0;
}

/*
 * We need all events to be reregistered when we resume from hibernate.
 *
 * The sequence is freeze->thaw. Reboot. freeze->restore. We unregister
 * events during freeze, then re-register and re-enable them during thaw
 * and restore.
 */
static int sdei_device_freeze(struct device *dev)
{
	int err;

	/* unregister private events */
	cpuhp_remove_state(sdei_hp_state);

	err = sdei_unregister_shared();
	if (err)
		return err;

	return 0;
}

static int sdei_device_thaw(struct device *dev)
{
	int err;

	/* re-register shared events */
	err = sdei_reregister_shared();
	if (err) {
		pr_warn("Failed to re-register shared events...\n");
		sdei_mark_interface_broken();
		return err;
	}

	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
				&sdei_cpuhp_up, &sdei_cpuhp_down);
	if (err < 0) {
		pr_warn("Failed to re-register CPU hotplug notifier...\n");
		return err;
	}

	sdei_hp_state = err;
	return 0;
}

static int sdei_device_restore(struct device *dev)
{
	int err;

	err = sdei_platform_reset();
	if (err)
		return err;

	return sdei_device_thaw(dev);
}

static const struct dev_pm_ops sdei_pm_ops = {
	.suspend = sdei_device_suspend,
	.resume = sdei_device_resume,
	.freeze = sdei_device_freeze,
	.thaw = sdei_device_thaw,
	.restore = sdei_device_restore,
};

/*
 * Mask all CPUs and unregister all events on panic, reboot or kexec.
 */
static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
				void *data)
{
	/*
	 * We are going to reset the interface; after this there is no point
	 * doing work when we take CPUs offline.
	 */
	cpuhp_remove_state(sdei_hp_state);

	sdei_platform_reset();

	return NOTIFY_OK;
}

static struct notifier_block sdei_reboot_nb = {
	.notifier_call = sdei_reboot_notifier,
};

static void sdei_smccc_smc(unsigned long function_id,
			   unsigned long arg0, unsigned long arg1,
			   unsigned long arg2, unsigned long arg3,
			   unsigned long arg4, struct arm_smccc_res *res)
{
	arm_smccc_smc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
}
NOKPROBE_SYMBOL(sdei_smccc_smc);

static void sdei_smccc_hvc(unsigned long function_id,
			   unsigned long arg0, unsigned long arg1,
			   unsigned long arg2, unsigned long arg3,
			   unsigned long arg4, struct arm_smccc_res *res)
{
	arm_smccc_hvc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
}
NOKPROBE_SYMBOL(sdei_smccc_hvc);

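/*
 * sdei_register_ghes() - hook a GHES error source up to its SDEI event.
 *
 * The event number is the notification vector from the error source's HEST
 * entry; event 0 is rejected because the specification reserves it for
 * SDEI_EVENT_SIGNAL. The critical or normal callback is picked from the
 * event's priority, then the event is registered and enabled.
 */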
int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb,
		       sdei_event_callback *critical_cb)
{
	int err;
	u64 result;
	u32 event_num;
	sdei_event_callback *cb;

	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
		return -EOPNOTSUPP;

	event_num = ghes->generic->notify.vector;
	if (event_num == 0) {
		/*
		 * Event 0 is reserved by the specification for
		 * SDEI_EVENT_SIGNAL.
		 */
		return -EINVAL;
	}

	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
				      &result);
	if (err)
		return err;

	if (result == SDEI_EVENT_PRIORITY_CRITICAL)
		cb = critical_cb;
	else
		cb = normal_cb;

	err = sdei_event_register(event_num, cb, ghes);
	if (!err)
		err = sdei_event_enable(event_num);

	return err;
}

int sdei_unregister_ghes(struct ghes *ghes)
{
	int i;
	int err;
	u32 event_num = ghes->generic->notify.vector;

	might_sleep();

	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
		return -EOPNOTSUPP;

	/*
	 * The event may be running on another CPU. Disable it
	 * to stop new events, then try to unregister a few times.
	 */
	err = sdei_event_disable(event_num);
	if (err)
		return err;

	for (i = 0; i < 3; i++) {
		err = sdei_event_unregister(event_num);
		if (err != -EINPROGRESS)
			break;

		schedule();
	}

	return err;
}

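/*
 * Pick the SMCCC conduit used to reach firmware. On DT systems this comes
 * from the "method" property ("hvc" or "smc"); on ACPI systems it follows
 * the PSCI conduit. On failure sdei_firmware_call is left NULL, which makes
 * sdei_probe() bail out early.
 */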
static int sdei_get_conduit(struct platform_device *pdev)
{
	const char *method;
	struct device_node *np = pdev->dev.of_node;

	sdei_firmware_call = NULL;
	if (np) {
		if (of_property_read_string(np, "method", &method)) {
			pr_warn("missing \"method\" property\n");
			return CONDUIT_INVALID;
		}

		if (!strcmp("hvc", method)) {
			sdei_firmware_call = &sdei_smccc_hvc;
			return CONDUIT_HVC;
		} else if (!strcmp("smc", method)) {
			sdei_firmware_call = &sdei_smccc_smc;
			return CONDUIT_SMC;
		}

		pr_warn("invalid \"method\" property: %s\n", method);
	} else if (IS_ENABLED(CONFIG_ACPI) && !acpi_disabled) {
		if (acpi_psci_use_hvc()) {
			sdei_firmware_call = &sdei_smccc_hvc;
			return CONDUIT_HVC;
		} else {
			sdei_firmware_call = &sdei_smccc_smc;
			return CONDUIT_SMC;
		}
	}

	return CONDUIT_INVALID;
}

static int sdei_probe(struct platform_device *pdev)
{
	int err;
	u64 ver = 0;
	int conduit;

	conduit = sdei_get_conduit(pdev);
	if (!sdei_firmware_call)
		return 0;

	err = sdei_api_get_version(&ver);
	if (err == -EOPNOTSUPP)
		pr_err("advertised but not implemented in platform firmware\n");
	if (err) {
		pr_err("Failed to get SDEI version: %d\n", err);
		sdei_mark_interface_broken();
		return err;
	}

	pr_info("SDEIv%d.%d (0x%x) detected in firmware.\n",
		(int)SDEI_VERSION_MAJOR(ver), (int)SDEI_VERSION_MINOR(ver),
		(int)SDEI_VERSION_VENDOR(ver));

	if (SDEI_VERSION_MAJOR(ver) != 1) {
		pr_warn("Conflicting SDEI version detected.\n");
		sdei_mark_interface_broken();
		return -EINVAL;
	}

	err = sdei_platform_reset();
	if (err)
		return err;

	sdei_entry_point = sdei_arch_get_entry_point(conduit);
	if (!sdei_entry_point) {
		/* Not supported due to hardware or boot configuration */
		sdei_mark_interface_broken();
		return 0;
	}

	err = cpu_pm_register_notifier(&sdei_pm_nb);
	if (err) {
		pr_warn("Failed to register CPU PM notifier...\n");
		goto error;
	}

	err = register_reboot_notifier(&sdei_reboot_nb);
	if (err) {
		pr_warn("Failed to register reboot notifier...\n");
		goto remove_cpupm;
	}

	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
				&sdei_cpuhp_up, &sdei_cpuhp_down);
	if (err < 0) {
		pr_warn("Failed to register CPU hotplug notifier...\n");
		goto remove_reboot;
	}

	sdei_hp_state = err;

	return 0;

remove_reboot:
	unregister_reboot_notifier(&sdei_reboot_nb);

remove_cpupm:
	cpu_pm_unregister_notifier(&sdei_pm_nb);

error:
	sdei_mark_interface_broken();
	return err;
}

static const struct of_device_id sdei_of_match[] = {
	{ .compatible = "arm,sdei-1.0" },
	{}
};

static struct platform_driver sdei_driver = {
	.driver = {
		.name = "sdei",
		.pm = &sdei_pm_ops,
		.of_match_table = sdei_of_match,
	},
	.probe = sdei_probe,
};

static bool __init sdei_present_dt(void)
{
	struct device_node *np, *fw_np;

	fw_np = of_find_node_by_name(NULL, "firmware");
	if (!fw_np)
		return false;

	np = of_find_matching_node(fw_np, sdei_of_match);
	if (!np)
		return false;
	of_node_put(np);

	return true;
}

static bool __init sdei_present_acpi(void)
{
	acpi_status status;
	struct platform_device *pdev;
	struct acpi_table_header *sdei_table_header;

	if (acpi_disabled)
		return false;

	status = acpi_get_table(ACPI_SIG_SDEI, 0, &sdei_table_header);
	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
		const char *msg = acpi_format_exception(status);

		pr_info("Failed to get ACPI:SDEI table, %s\n", msg);
	}
	if (ACPI_FAILURE(status))
		return false;

	pdev = platform_device_register_simple(sdei_driver.driver.name, 0, NULL,
					       0);
	if (IS_ERR(pdev))
		return false;

	return true;
}

static int __init sdei_init(void)
{
	if (sdei_present_dt() || sdei_present_acpi())
		platform_driver_register(&sdei_driver);

	return 0;
}

/*
 * On an ACPI system SDEI needs to be ready before HEST:GHES tries to register
 * its events. ACPI is initialised from a subsys_initcall() and GHES is
 * initialised by device_initcall(). We want to be called in the middle.
 */
subsys_initcall_sync(sdei_init);

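/*
 * sdei_event_handler() - C entry point for a firmware-delivered event.
 *
 * Called from the arch-specific SDEI entry code with the registered_event
 * structure that was handed to firmware at registration time. The uaccess
 * address limit is switched to USER_DS around the callback and restored
 * afterwards, so the callback never runs with an inherited set_fs(KERNEL_DS).
 */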
int sdei_event_handler(struct pt_regs *regs,
		       struct sdei_registered_event *arg)
{
	int err;
	mm_segment_t orig_addr_limit;
	u32 event_num = arg->event_num;

	orig_addr_limit = get_fs();
	set_fs(USER_DS);

	err = arg->callback(event_num, regs, arg->callback_arg);
	if (err)
		pr_err_ratelimited("event %u on CPU %u failed with error: %d\n",
				   event_num, smp_processor_id(), err);

	set_fs(orig_addr_limit);

	return err;
}
NOKPROBE_SYMBOL(sdei_event_handler);