// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Power Interface (SCMI) based CPUFreq Interface driver
 *
 * Copyright (C) 2018 ARM Ltd.
 * Sudeep Holla <sudeep.holla@arm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/cpu_cooling.h>
#include <linux/energy_model.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/scmi_protocol.h>
#include <linux/types.h>

struct scmi_data {
	int domain_id;
	struct device *cpu_dev;
	struct thermal_cooling_device *cdev;
};

static const struct scmi_handle *handle;

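/*
 * Read the current frequency of @cpu back from the firmware; freq_get
 * reports the rate in Hz, while cpufreq expects kHz.
 */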
static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
	struct scmi_perf_ops *perf_ops = handle->perf_ops;
	struct scmi_data *priv = policy->driver_data;
	unsigned long rate;
	int ret;

	ret = perf_ops->freq_get(handle, priv->domain_id, &rate, false);
	if (ret)
		return 0;
	return rate / 1000;
}

/*
 * perf_ops->freq_set is not synchronous; the actual OPP change happens
 * asynchronously, and a notification can be received if the relevant
 * events are subscribed for with the SCMI firmware.
 */
static int
scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
{
	int ret;
	struct scmi_data *priv = policy->driver_data;
	struct scmi_perf_ops *perf_ops = handle->perf_ops;
	u64 freq = policy->freq_table[index].frequency;

	ret = perf_ops->freq_set(handle, priv->domain_id, freq * 1000, false);
	if (!ret)
		arch_set_freq_scale(policy->related_cpus, freq,
				    policy->cpuinfo.max_freq);
	return ret;
}

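/*
 * Fast switching counterpart of scmi_cpufreq_set_target(): the request is
 * issued with polled completion (the final 'true' argument to freq_set), so
 * no sleeping is involved. Returns the requested frequency on success and
 * 0 on failure, as the cpufreq fast-switch contract requires.
 */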
static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy,
					     unsigned int target_freq)
{
	struct scmi_data *priv = policy->driver_data;
	struct scmi_perf_ops *perf_ops = handle->perf_ops;

	if (!perf_ops->freq_set(handle, priv->domain_id,
				target_freq * 1000, true)) {
		arch_set_freq_scale(policy->related_cpus, target_freq,
				    policy->cpuinfo.max_freq);
		return target_freq;
	}

	return 0;
}

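/*
 * Mark in @cpumask every other possible CPU that belongs to the same SCMI
 * performance domain as @cpu_dev.
 */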
static int
scmi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
{
	int cpu, domain, tdomain;
	struct device *tcpu_dev;

	domain = handle->perf_ops->device_domain_id(cpu_dev);
	if (domain < 0)
		return domain;

	for_each_possible_cpu(cpu) {
		if (cpu == cpu_dev->id)
			continue;

		tcpu_dev = get_cpu_device(cpu);
		if (!tcpu_dev)
			continue;

		tdomain = handle->perf_ops->device_domain_id(tcpu_dev);
		if (tdomain == domain)
			cpumask_set_cpu(cpu, cpumask);
	}

	return 0;
}

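/*
 * Energy Model callback: ask the firmware for the power cost of @cpu at
 * frequency *@KHz, updating *@KHz with the frequency of the OPP the
 * firmware actually used for the estimate.
 */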
static int __maybe_unused
scmi_get_cpu_power(unsigned long *power, unsigned long *KHz, int cpu)
{
	struct device *cpu_dev = get_cpu_device(cpu);
	unsigned long Hz;
	int ret, domain;

	if (!cpu_dev) {
		pr_err("failed to get cpu%d device\n", cpu);
		return -ENODEV;
	}

	domain = handle->perf_ops->device_domain_id(cpu_dev);
	if (domain < 0)
		return domain;

	/* Get the power cost of the performance domain. */
	Hz = *KHz * 1000;
	ret = handle->perf_ops->est_power_get(handle, domain, &Hz, power);
	if (ret)
		return ret;

	/* The EM framework specifies the frequency in KHz. */
	*KHz = Hz / 1000;

	return 0;
}

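/*
 * Per-policy init: discover the CPU's performance domain, populate and
 * share the OPP table across all CPUs in that domain, build the cpufreq
 * frequency table and register the domain with the Energy Model framework.
 */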
static int scmi_cpufreq_init(struct cpufreq_policy *policy)
{
	int ret, nr_opp;
	unsigned int latency;
	struct device *cpu_dev;
	struct scmi_data *priv;
	struct cpufreq_frequency_table *freq_table;
	struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("failed to get cpu%d device\n", policy->cpu);
		return -ENODEV;
	}

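	/* Have the firmware-described OPPs added to the device's OPP table. */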
	ret = handle->perf_ops->device_opps_add(handle, cpu_dev);
	if (ret) {
		dev_warn(cpu_dev, "failed to add opps to the device\n");
		return ret;
	}

	ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus);
	if (ret) {
		dev_warn(cpu_dev, "failed to get sharing cpumask\n");
		return ret;
	}

	ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
	if (ret) {
		dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
			__func__, ret);
		return ret;
	}

	ret = dev_pm_opp_get_opp_count(cpu_dev);
	if (ret <= 0) {
		dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
		ret = -EPROBE_DEFER;
		goto out_free_opp;
	}
	nr_opp = ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto out_free_opp;
	}

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret) {
		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
		goto out_free_priv;
	}

	priv->cpu_dev = cpu_dev;
	priv->domain_id = handle->perf_ops->device_domain_id(cpu_dev);

	policy->driver_data = priv;
	policy->freq_table = freq_table;

	/* SCMI allows DVFS request for any domain from any CPU */
	policy->dvfs_possible_from_any_cpu = true;

	latency = handle->perf_ops->transition_latency_get(handle, cpu_dev);
	if (!latency)
		latency = CPUFREQ_ETERNAL;

	policy->cpuinfo.transition_latency = latency;

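	/* Let governors use the fast switch path (see scmi_cpufreq_fast_switch()). */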
	policy->fast_switch_possible = true;

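	/* Register the perf domain with the Energy Model via the power callback. */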
	em_register_perf_domain(policy->cpus, nr_opp, &em_cb);

	return 0;

out_free_priv:
	kfree(priv);
out_free_opp:
	dev_pm_opp_cpumask_remove_table(policy->cpus);

	return ret;
}

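/* Per-policy teardown: undo everything done in scmi_cpufreq_init(). */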
static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct scmi_data *priv = policy->driver_data;

	cpufreq_cooling_unregister(priv->cdev);
	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
	kfree(priv);
	dev_pm_opp_cpumask_remove_table(policy->related_cpus);

	return 0;
}

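/*
 * The policy is fully initialised by the time the cpufreq core calls this,
 * so it is safe to register the CPU cooling device against it.
 */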
static void scmi_cpufreq_ready(struct cpufreq_policy *policy)
{
	struct scmi_data *priv = policy->driver_data;

	priv->cdev = of_cpufreq_cooling_register(policy);
}

static struct cpufreq_driver scmi_cpufreq_driver = {
	.name		= "scmi",
	.flags		= CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
			  CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify		= cpufreq_generic_frequency_table_verify,
	.attr		= cpufreq_generic_attr,
	.target_index	= scmi_cpufreq_set_target,
	.fast_switch	= scmi_cpufreq_fast_switch,
	.get		= scmi_cpufreq_get_rate,
	.init		= scmi_cpufreq_init,
	.exit		= scmi_cpufreq_exit,
	.ready		= scmi_cpufreq_ready,
};

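/*
 * Probe runs once the SCMI core has matched a device implementing the perf
 * protocol; registering the cpufreq driver then has the cpufreq core set up
 * each policy through scmi_cpufreq_init().
 */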
static int scmi_cpufreq_probe(struct scmi_device *sdev)
{
	int ret;

	handle = sdev->handle;

	if (!handle || !handle->perf_ops)
		return -ENODEV;

	ret = cpufreq_register_driver(&scmi_cpufreq_driver);
	if (ret) {
		dev_err(&sdev->dev, "%s: registering cpufreq failed, err: %d\n",
			__func__, ret);
	}

	return ret;
}

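/* Unregister the cpufreq driver when the underlying SCMI device goes away. */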
static void scmi_cpufreq_remove(struct scmi_device *sdev)
{
	cpufreq_unregister_driver(&scmi_cpufreq_driver);
}

static const struct scmi_device_id scmi_id_table[] = {
	{ SCMI_PROTOCOL_PERF },
	{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);

static struct scmi_driver scmi_cpufreq_drv = {
	.name		= "scmi-cpufreq",
	.probe		= scmi_cpufreq_probe,
	.remove		= scmi_cpufreq_remove,
	.id_table	= scmi_id_table,
};
module_scmi_driver(scmi_cpufreq_drv);

MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI CPUFreq interface driver");
MODULE_LICENSE("GPL v2");