// SPDX-License-Identifier: GPL-2.0
#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm_types.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/memory.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

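/*
 * Implemented in sleep.S: the low-level suspend entry point and the
 * stub that runs once the MMU has been re-enabled on resume.
 */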
extern int __cpu_suspend(unsigned long, int (*)(unsigned long), u32 cpuid);
extern void cpu_resume_mmu(void);

#ifdef CONFIG_MMU
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
	struct mm_struct *mm = current->active_mm;
	u32 __mpidr = cpu_logical_map(smp_processor_id());
	int ret;

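	/* Resuming requires the identity map to turn the MMU back on. */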
	if (!idmap_pgd)
		return -EINVAL;

	/*
	 * Function graph tracer state gets inconsistent when the kernel
	 * calls functions that never return (aka suspend finishers), hence
	 * disable graph tracing during their execution.
	 */
	pause_graph_tracing();

	/*
	 * Provide a temporary page table with an identity mapping for
	 * the MMU-enable code, required for resuming. On successful
	 * resume (indicated by a zero return code), we need to switch
	 * back to the correct page tables.
	 */
	ret = __cpu_suspend(arg, fn, __mpidr);

	unpause_graph_tracing();

	if (ret == 0) {
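		/*
		 * The CPU may have lost state entirely: restore this task's
		 * page tables, drop stale branch predictor and TLB entries,
		 * and re-apply the per-CPU bug workarounds.
		 */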
		cpu_switch_mm(mm->pgd, mm);
		local_flush_bp_all();
		local_flush_tlb_all();
		check_other_bugs();
	}

	return ret;
}
#else
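/*
 * Without an MMU there are no page tables to restore or TLBs to flush;
 * just run the finisher with graph tracing paused.
 */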
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
	u32 __mpidr = cpu_logical_map(smp_processor_id());
	int ret;

	pause_graph_tracing();
	ret = __cpu_suspend(arg, fn, __mpidr);
	unpause_graph_tracing();

	return ret;
}
#define	idmap_pgd	NULL
#endif

/*
 * This is called by __cpu_suspend() to save the state, and do whatever
 * flushing is required to ensure that when the CPU goes to sleep we have
 * the necessary data available when the caches are not searched.
 */
void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
{
	u32 *ctx = ptr;

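	/*
	 * Stash the physical address of the context area so that the
	 * resume code can locate it with the MMU off.
	 */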
	*save_ptr = virt_to_phys(ptr);

	/* This must correspond to the LDM in cpu_resume() assembly */
	*ptr++ = virt_to_phys(idmap_pgd);
	*ptr++ = sp;
	*ptr++ = virt_to_phys(cpu_do_resume);

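	/* Processor-specific register state is saved after the words above. */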
	cpu_do_suspend(ptr);

	flush_cache_louis();

	/*
	 * flush_cache_louis does not guarantee that
	 * save_ptr and ptr are cleaned to main memory,
	 * just up to the Level of Unification Inner Shareable.
	 * Since the context pointer and context itself
	 * are to be retrieved with the MMU off, that
	 * data must be cleaned from all cache levels
	 * to main memory using "area" cache primitives.
	 */
	__cpuc_flush_dcache_area(ctx, ptrsz);
	__cpuc_flush_dcache_area(save_ptr, sizeof(*save_ptr));

	outer_clean_range(*save_ptr, *save_ptr + ptrsz);
	outer_clean_range(virt_to_phys(save_ptr),
			  virt_to_phys(save_ptr) + sizeof(*save_ptr));
}

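/*
 * Shared with the resume assembly: a per-CPU stash of saved-context
 * physical addresses, indexed by MPIDR hash.
 */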
extern struct sleep_save_sp sleep_save_sp;

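/*
 * Allocate the stash array (one u32 physical address per MPIDR hash
 * slot) and make its location visible to the resume path.
 */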
static int cpu_suspend_alloc_sp(void)
{
	void *ctx_ptr;
	/* ctx_ptr is an array of physical addresses */
	ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(u32), GFP_KERNEL);

	if (WARN_ON(!ctx_ptr))
		return -ENOMEM;
	sleep_save_sp.save_ptr_stash = ctx_ptr;
	sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
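	/*
	 * Clean the updated pointers out to memory so that the resume
	 * code can read them with the caches off.
	 */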
	sync_cache_w(&sleep_save_sp);
	return 0;
}
early_initcall(cpu_suspend_alloc_sp);