/*
 * linux/arch/arm/mach-mmp/mmp_cpuidle.c
 *
 * Author: Fangsuo Wu <fswu@marvell.com>
 * Copyright: (C) 2013 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/cpu_pm.h>
#include <linux/cpuidle.h>
#include <linux/init.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/kernel.h>
#include <linux/pm_qos.h>
#include <soc/asr/asrdcstat.h>
#include <linux/cputype.h>
#include <linux/delay.h>
#include <asm/io.h>
#include <asm/mcpm.h>
#include <trace/events/pxa.h>

#include "help_v7.h"
#include <soc/asr/mmp_cpuidle.h>
#include "reset.h"
#include "pm.h"

#include <asm/cacheflush.h>

#define LPM_NUM			16
#define INVALID_LPM		-1
#define DEFAULT_LPM_FLAG	0xFFFFFFFF

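/*
 * Module-global low-power-mode bookkeeping: mmp_idle points at the platform
 * descriptor registered via mmp_platform_power_register(), mmp_wake_saved
 * flags that the wakeup configuration was saved by the last CPU down,
 * asr_cpu_target_lpm holds each CPU's vote as a bitmask of allowed states,
 * asr_cpu_entered_state_ptr stores an optional back-pointer used to report
 * the state actually entered, and asr_cluster_state tracks whether each
 * cluster is up or down.
 */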
struct platform_idle *mmp_idle;
static int mmp_wake_saved;
static int asr_cpu_target_lpm[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
static unsigned int asr_cpu_entered_state_ptr[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
static int asr_cluster_state[MAX_NR_CLUSTERS];

/*
 * find_coupled_state - Find the deepest state the whole platform can enter
 *
 * @index: pointer to the variable that receives the resulting state
 * @cluster: cluster number
 *
 * Must be called with mmp_lpm_lock held.
 */
static void find_coupled_state(int *index, int cluster)
{
	int i;
	int platform_lpm = DEFAULT_LPM_FLAG;

	for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++)
		platform_lpm &= asr_cpu_target_lpm[cluster][i];

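	/*
	 * ANDing the per-CPU votes leaves a bit set only for states that
	 * every CPU in the cluster allows, so the first clear bit is the
	 * shallowest state somebody forbids. Clamp that by the cpuidle
	 * PM QoS constraint and subtract one to get the deepest state
	 * common to all CPUs.
	 */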
	*index = min(find_first_zero_bit((void *)&platform_lpm, LPM_NUM),
		     pm_qos_request(PM_QOS_CPUIDLE_BLOCK)) - 1;
}

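/*
 * Record a CPU's vote for the deepest low-power state it is willing to
 * enter. The vote is encoded as a bitmask with bits 0..target_state set, so
 * that ANDing all votes yields the states acceptable to every CPU in the
 * cluster. The optional entered_state_ptr is remembered so the state picked
 * by the last CPU down can be reported back via asr_set_entered_state().
 */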
void asr_set_target_lpm_state(unsigned int cpu, unsigned int cluster,
			      int target_state, int *entered_state_ptr)
{
	asr_cpu_target_lpm[cluster][cpu] = (1 << (target_state + 1)) - 1;
	asr_cpu_entered_state_ptr[cluster][cpu] = (unsigned int)entered_state_ptr;
}

static void asr_set_entered_state(unsigned int cpu, unsigned int cluster,
				  int entered_state)
{
	int *state;

	if (asr_cpu_entered_state_ptr[cluster][cpu]) {
		state = (int *)asr_cpu_entered_state_ptr[cluster][cpu];
		*state = entered_state;
	}
}

static void asr_clear_lpm_state(unsigned int cpu, unsigned int cluster)
{
	asr_cpu_target_lpm[cluster][cpu] = 0;
	asr_cpu_entered_state_ptr[cluster][cpu] = 0;
}

static void asr_cpu_cache_disable(void)
{
	v7_exit_coherency_flush(louis);
}

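/*
 * Cache teardown for the last CPU in a cluster: when the coupled state is
 * deep enough to require an L2 flush (>= l2_flush_state), flush the whole
 * cache hierarchy; otherwise flushing to the level of unification is enough.
 */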
static void asr_cluster_cache_disable(void)
{
	int mpidr, this_cluster, state;

	mpidr = read_cpuid_mpidr();
	this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	find_coupled_state(&state, this_cluster);

	if (state >= mmp_idle->l2_flush_state)
		v7_exit_coherency_flush(all);
	else
		v7_exit_coherency_flush(louis);
}

static int asr_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
{
	return 0;
}

static void asr_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
{
	int state = mmp_idle->cpudown_state;

	if (asr_cpu_target_lpm[cluster][cpu] == 0)
		asr_set_target_lpm_state(cpu, cluster, mmp_idle->hotplug_state, NULL);

	asr_set_entered_state(cpu, cluster, state);

	if (mmp_idle->ops->set_pmu)
		mmp_idle->ops->set_pmu(cpu, state);

	trace_pxa_cpu_idle(LPM_ENTRY(state), cpu, cluster);
	cpu_dcstat_event(cpu_dcstat_clk, cpu, CPU_IDLE_ENTER, state);
}

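/*
 * Last-man-down path for a cluster: notify the cluster PM notifier chain,
 * mark the cluster down, pick the deepest state all of its CPUs agreed on,
 * save the wakeup configuration for states in the
 * [wakeup_state, l2_flush_state) range, program the PMU and record the
 * duty-cycle statistics for the chosen state.
 */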
static void asr_cluster_powerdown_prepare(unsigned int cluster)
{
	int mpidr, cpu, this_cluster;
	int state = 0;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	BUG_ON(cluster != this_cluster);

	cpu_cluster_pm_enter();

	asr_cluster_state[this_cluster] = CLUSTER_DOWN;

	find_coupled_state(&state, cluster);
	asr_set_entered_state(cpu, this_cluster, state);

	if (state >= mmp_idle->wakeup_state &&
	    state < mmp_idle->l2_flush_state &&
	    mmp_idle->ops->save_wakeup) {
		mmp_wake_saved = 1;
		mmp_idle->ops->save_wakeup();
	}

	if (mmp_idle->ops->set_pmu)
		mmp_idle->ops->set_pmu(cpu, state);

	if ((state >= mmp_idle->cpudown_state) && (state != LPM_D2_UDR)) {
		cpu_dcstat_event(cpu_dcstat_clk, cpu, CPU_M2_OR_DEEPER_ENTER,
				 state);
		vol_dcstat_event(state);
		vol_ledstatus_event(state);
	}

	trace_pxa_cpu_idle(LPM_ENTRY(state), cpu, cluster);
	cpu_dcstat_event(cpu_dcstat_clk, cpu, CPU_IDLE_ENTER, state);
}

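/*
 * Booting with "up_mode" on the kernel command line keeps the system
 * uniprocessor: secondary CPUs are never powered up through MCPM.
 */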
static int up_mode;
static int __init __init_up(char *arg)
{
	up_mode = 1;
	return 1;
}
__setup("up_mode", __init_up);

static int mmp_pm_power_up(unsigned int cpu, unsigned int cluster)
{
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);

	if (cluster >= MAX_NR_CLUSTERS || cpu >= MAX_CPUS_PER_CLUSTER) {
		pr_info("!!!%s: cpu %u cluster %u\n", __func__, cpu, cluster);
		return -EINVAL;
	}

	if (up_mode)
		return -EINVAL;

	cpu_dcstat_event(cpu_dcstat_clk, cpu, CPU_IDLE_EXIT, MAX_LPM_INDEX);

	mmp_cpu_power_up(cpu, cluster);

	return 0;
}

static void asr_cluster_is_up(unsigned int cluster)
{
	if (asr_cluster_state[cluster] != CLUSTER_DOWN)
		return;

	asr_cluster_state[cluster] = CLUSTER_UP;

	vol_dcstat_event(MAX_LPM_INDEX);
	vol_ledstatus_event(MAX_LPM_INDEX);

	if (mmp_wake_saved && mmp_idle->ops->restore_wakeup) {
		mmp_wake_saved = 0;
		mmp_idle->ops->restore_wakeup();
	}

	cpu_cluster_pm_exit();
}

static void asr_cpu_is_up(unsigned int cpu, unsigned int cluster)
{
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= MAX_NR_CLUSTERS || cpu >= MAX_CPUS_PER_CLUSTER);

	cpu_dcstat_event(cpu_dcstat_clk, cpu, CPU_IDLE_EXIT, MAX_LPM_INDEX);

	trace_pxa_cpu_idle(LPM_EXIT(0), cpu, cluster);

	asr_clear_lpm_state(cpu, cluster);

	if (mmp_idle->ops->clr_pmu)
		mmp_idle->ops->clr_pmu(cpu);
}

/**
 * mmp_platform_power_register - register platform power ops
 *
 * @idle: platform_idle structure pointing to the platform power ops
 *
 * Returns an error if a registration has already been done.
 */
int __init mmp_platform_power_register(struct platform_idle *idle)
{
	if (mmp_idle)
		return -EBUSY;
	mmp_idle = idle;

#ifdef CONFIG_CPU_IDLE_MMP_V7
	mcpm_platform_state_register(mmp_idle->states, mmp_idle->state_count);
#endif

	return 0;
}
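
/*
 * Typical use from SoC setup code (an illustrative sketch only; "foo_idle",
 * its FOO_* state indices, its state table and its ops structure are
 * hypothetical names, not part of this file). The descriptor must be
 * registered before mmp_pm_init() runs, since that early initcall
 * dereferences mmp_idle->ops:
 *
 *	static struct platform_idle foo_idle = {
 *		.cpudown_state	= FOO_CORE_POWERDOWN_STATE,
 *		.hotplug_state	= FOO_HOTPLUG_STATE,
 *		.wakeup_state	= FOO_WAKEUP_SAVE_STATE,
 *		.l2_flush_state	= FOO_L2_OFF_STATE,
 *		.ops		= &foo_idle_ops,
 *		.states		= foo_lpm_states,
 *		.state_count	= ARRAY_SIZE(foo_lpm_states),
 *	};
 *
 *	mmp_platform_power_register(&foo_idle);
 */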

static const struct mcpm_platform_ops mmp_pm_power_ops = {
	.cpu_powerup			= mmp_pm_power_up,
	.cluster_powerdown_prepare	= asr_cluster_powerdown_prepare,
	.cpu_powerdown_prepare		= asr_cpu_powerdown_prepare,
	.cluster_cache_disable		= asr_cluster_cache_disable,
	.cpu_cache_disable		= asr_cpu_cache_disable,
	.cluster_is_up			= asr_cluster_is_up,
	.cpu_is_up			= asr_cpu_is_up,
	.wait_for_powerdown		= asr_wait_for_powerdown,
};

static int __init mmp_pm_init(void)
{
	int ret;

	/*
	 * TODO: Should check if hardware is initialized here.
	 * See vexpress_spc_check_loaded().
	 */
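	/*
	 * memset() fills byte-by-byte, so the 0xFF low byte of
	 * DEFAULT_LPM_FLAG is replicated into every word of
	 * asr_cpu_target_lpm, giving the intended 0xFFFFFFFF initial value.
	 */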
	memset(asr_cpu_target_lpm, DEFAULT_LPM_FLAG, sizeof(asr_cpu_target_lpm));
	memset(asr_cpu_entered_state_ptr, 0, sizeof(asr_cpu_entered_state_ptr));
	memset(asr_cluster_state, 0, sizeof(asr_cluster_state));

	ret = mcpm_platform_register(&mmp_pm_power_ops);
	if (ret)
		pr_warning("Power ops have already been initialized\n");

	if (mmp_idle->ops->power_up_setup) {
		ret = mcpm_sync_init(mmp_idle->ops->power_up_setup);
		if (!ret)
			pr_info("mmp power management initialized\n");
	} else {
		pr_warning("mmp power_up_setup function is NULL!\n");
	}

	return ret;
}
early_initcall(mmp_pm_init);