// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 MediaTek Inc.
 * Author: Ming-Fan Chen <ming-fan.chen@mediatek.com>
 */

#include <dt-bindings/interconnect/mtk,mmqos.h>
#include <linux/clk.h>
#include <linux/interconnect-provider.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk_mmdvfs.h>
#include <soc/mediatek/smi.h>

#include "mmqos-mtk.h"

#define SHIFT_ROUND(a, b)	((((a) - 1) >> (b)) + 1)
#define icc_to_MBps(x)		((x) / 1000)

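/*
 * Program the SMI common-port bandwidth register for one common port.
 *
 * The register value is the aggregated (mix) bandwidth scaled by the current
 * common clock rate (comm_bw = (mix_bw << 8) / freq), capped at 0xfff. The
 * upper flag bits (0x1000 vs. 0x3000, or 0x1200 when no bandwidth is
 * requested) select the arbitration behaviour; their exact meaning is owned
 * by the SMI hardware and mtk_smi_common_bw_set(), not by this file.
 */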
static void mmqos_update_comm_bw(struct device *dev,
	u32 comm_port, u32 freq, u64 mix_bw, u64 bw_peak, bool qos_bound)
{
	u32 comm_bw = 0;
	u32 value;

	if (!freq || !dev)
		return;

	if (mix_bw)
		comm_bw = (mix_bw << 8) / freq;

	if (comm_bw)
		value = ((comm_bw > 0xfff) ? 0xfff : comm_bw) |
			((bw_peak > 0 || !qos_bound) ? 0x1000 : 0x3000);
	else
		value = 0x1200;

	mtk_smi_common_bw_set(dev, comm_port, value);

	dev_dbg(dev, "comm port=%d bw=%d freq=%d qos_bound=%d value=%#x\n",
		comm_port, comm_bw, freq, qos_bound, value);
}

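/*
 * MMDVFS notifier callback: the common clock rate may have changed, so
 * refresh the cached frequency of every common node and reprogram the
 * bandwidth registers of all common ports that currently request bandwidth.
 */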
static int update_mm_clk(struct notifier_block *nb,
	unsigned long value, void *v)
{
	struct mtk_mmqos *mmqos =
		container_of(nb, struct mtk_mmqos, nb);
	struct common_node *comm_node;
	struct common_port_node *comm_port;

	list_for_each_entry(comm_node, &mmqos->comm_list, list) {
		comm_node->freq = clk_get_rate(comm_node->clk) / 1000000;
		list_for_each_entry(comm_port,
				    &comm_node->comm_port_list, list) {
			mutex_lock(&comm_port->bw_lock);
			if (comm_port->latest_mix_bw ||
			    comm_port->latest_peak_bw) {
				mmqos_update_comm_bw(comm_port->larb_dev,
					comm_port->base->icc_node->id & 0xff,
					comm_port->common->freq,
					icc_to_MBps(comm_port->latest_mix_bw),
					icc_to_MBps(comm_port->latest_peak_bw),
					mmqos->qos_bound);
			}
			mutex_unlock(&comm_port->bw_lock);
		}
	}
	return 0;
}

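/*
 * Deferred work: sum the latest average and peak bandwidth of all ports
 * hanging off a common node and forward the totals to the parent
 * interconnect path (typically towards the EMI/DRAM provider).
 */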
static void set_comm_icc_bw_handler(struct work_struct *work)
{
	struct common_node *comm_node = container_of(
		work, struct common_node, work);
	struct common_port_node *comm_port_node;
	u32 avg_bw = 0, peak_bw = 0;

	list_for_each_entry(comm_port_node, &comm_node->comm_port_list, list) {
		mutex_lock(&comm_port_node->bw_lock);
		avg_bw += comm_port_node->latest_avg_bw;
		peak_bw += (comm_port_node->latest_peak_bw &
			    ~(MTK_MMQOS_MAX_BW));
		mutex_unlock(&comm_port_node->bw_lock);
	}
	icc_set_bw(comm_node->icc_path, avg_bw, peak_bw);
}

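/*
 * icc provider .set() hook. The destination node id encodes the node type
 * in its upper 16 bits:
 *  - COMMON: queue the work item that pushes the summed bandwidth upstream.
 *  - COMMON_PORT: cache the aggregated bandwidth and reprogram the SMI
 *    common-port register.
 *  - LARB: scale the source port's mix bandwidth by its bw_ratio, clamp it
 *    to max_ratio and hand it to mtk_smi_larb_bw_set(). The extra writes to
 *    ports 9 and 11 of LARB 1 look like fixed platform settings; their
 *    rationale is not documented in this file.
 */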
static int mtk_mmqos_set(struct icc_node *src, struct icc_node *dst)
{
	struct larb_node *larb_node;
	struct larb_port_node *larb_port_node;
	struct common_port_node *comm_port_node;
	struct common_node *comm_node;
	struct mtk_mmqos *mmqos = container_of(dst->provider,
					       struct mtk_mmqos, prov);
	u32 value = 1;

	switch (dst->id >> 16) {
	case MTK_MMQOS_NODE_COMMON:
		comm_node = (struct common_node *)dst->data;
		queue_work(mmqos->wq, &comm_node->work);
		break;
	case MTK_MMQOS_NODE_COMMON_PORT:
		comm_port_node = (struct common_port_node *)dst->data;
		mutex_lock(&comm_port_node->bw_lock);
		comm_port_node->latest_mix_bw = comm_port_node->base->mix_bw;
		comm_port_node->latest_peak_bw = dst->peak_bw;
		comm_port_node->latest_avg_bw = dst->avg_bw;
		mmqos_update_comm_bw(comm_port_node->larb_dev,
			dst->id & 0xff, comm_port_node->common->freq,
			icc_to_MBps(comm_port_node->latest_mix_bw),
			icc_to_MBps(comm_port_node->latest_peak_bw),
			mmqos->qos_bound);
		mutex_unlock(&comm_port_node->bw_lock);
		break;
	case MTK_MMQOS_NODE_LARB:
		larb_port_node = (struct larb_port_node *)src->data;
		larb_node = (struct larb_node *)dst->data;
		if (larb_port_node->base->mix_bw)
			value = SHIFT_ROUND(
				icc_to_MBps(larb_port_node->base->mix_bw),
				larb_port_node->bw_ratio);
		if (value > mmqos->max_ratio)
			value = mmqos->max_ratio;
		mtk_smi_larb_bw_set(
			larb_node->larb_dev,
			src->id & 0xff, value);

		if ((dst->id & 0xff) == 1) {
			mtk_smi_larb_bw_set(
				larb_node->larb_dev, 9, 8);
			mtk_smi_larb_bw_set(
				larb_node->larb_dev, 11, 8);
		}
		break;
	default:
		break;
	}
	return 0;
}

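/*
 * icc provider .aggregate() hook. Peak requests from larb ports are inflated
 * by 1.5x (SHIFT_ROUND(peak_bw * 3, 1)) and folded into the node's mix_bw,
 * which .set() later consumes. MTK_MMQOS_MAX_BW is treated as a flag rather
 * than an additive value when summing the peak bandwidth.
 */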
static int mtk_mmqos_aggregate(struct icc_node *node,
	u32 avg_bw, u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
{
	struct mmqos_base_node *base_node = NULL;
	u32 mix_bw = peak_bw;

	switch (node->id >> 16) {
	case MTK_MMQOS_NODE_LARB_PORT:
		base_node = ((struct larb_port_node *)node->data)->base;
		if (peak_bw)
			mix_bw = SHIFT_ROUND(peak_bw * 3, 1);
		break;
	case MTK_MMQOS_NODE_COMMON_PORT:
		base_node = ((struct common_port_node *)node->data)->base;
		break;
	default:
		return 0;
	}

	if (base_node) {
		if (*agg_avg == 0 && *agg_peak == 0)
			base_node->mix_bw = 0;
		base_node->mix_bw += peak_bw ? mix_bw : avg_bw;
	}

	*agg_avg += avg_bw;
	if (peak_bw == MTK_MMQOS_MAX_BW)
		*agg_peak |= MTK_MMQOS_MAX_BW;
	else
		*agg_peak += peak_bw;
	return 0;
}

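/*
 * icc provider .xlate() hook: map a phandle argument to the icc_node with
 * the matching id in the provider's onecell data.
 */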
static struct icc_node *mtk_mmqos_xlate(
	struct of_phandle_args *spec, void *data)
{
	struct icc_onecell_data *icc_data;
	s32 i;

	if (!spec || !data)
		return ERR_PTR(-EPROBE_DEFER);

	icc_data = (struct icc_onecell_data *)data;

	for (i = 0; i < icc_data->num_nodes; i++)
		if (icc_data->nodes[i]->id == spec->args[0])
			return icc_data->nodes[i];

	pr_notice("%s: invalid index %u\n", __func__, spec->args[0]);
	return ERR_PTR(-EINVAL);
}

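/*
 * Probe helper exported for the SoC-specific mmqos platform drivers. It
 * resolves the "mediatek,larbs" phandles to their SMI larb devices,
 * registers an interconnect provider and builds the node topology described
 * by the per-SoC mtk_mmqos_desc match data.
 *
 * A SoC driver is expected to wrap these helpers roughly as follows
 * (hypothetical sketch, the compatible string and descriptor name are
 * illustrative only):
 *
 *	static const struct of_device_id mmqos_mtxxxx_of_ids[] = {
 *		{ .compatible = "mediatek,mtxxxx-mmqos",
 *		  .data = &mmqos_desc_mtxxxx },
 *		{}
 *	};
 *
 *	static struct platform_driver mmqos_mtxxxx_driver = {
 *		.probe = mtk_mmqos_probe,
 *		.remove = mtk_mmqos_remove,
 *		.driver = {
 *			.name = "mtk-mtxxxx-mmqos",
 *			.of_match_table = mmqos_mtxxxx_of_ids,
 *		},
 *	};
 *	module_platform_driver(mmqos_mtxxxx_driver);
 */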
int mtk_mmqos_probe(struct platform_device *pdev)
{
	struct mtk_mmqos *mmqos;
	struct of_phandle_iterator it;
	struct icc_onecell_data *data;
	struct icc_node *node, *temp;
	struct mmqos_base_node *base_node;
	struct common_node *comm_node;
	struct common_port_node *comm_port_node;
	struct larb_node *larb_node;
	struct larb_port_node *larb_port_node;
	struct mtk_smi_iommu smi_imu;
	int i, id, num_larbs = 0, ret;
	const struct mtk_mmqos_desc *mmqos_desc;
	const struct mtk_node_desc *node_desc;
	struct device *larb_dev;
	struct mmqos_hrt *hrt;

	mmqos = devm_kzalloc(&pdev->dev, sizeof(*mmqos), GFP_KERNEL);
	if (!mmqos)
		return -ENOMEM;
	mmqos->dev = &pdev->dev;

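	/*
	 * Resolve the "mediatek,larbs" phandles to their platform devices so
	 * the larb nodes created below can be wired to the right SMI larb.
	 * Defer probing until every referenced larb driver has bound.
	 */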
	of_for_each_phandle(
		&it, ret, pdev->dev.of_node, "mediatek,larbs", NULL, 0) {
		struct device_node *np;
		struct platform_device *larb_pdev;

		np = of_node_get(it.node);
		if (!of_device_is_available(np))
			continue;

		larb_pdev = of_find_device_by_node(np);
		if (!larb_pdev) {
			larb_pdev = of_platform_device_create(
				np, NULL, platform_bus_type.dev_root);
			if (!larb_pdev || !larb_pdev->dev.driver) {
				of_node_put(np);
				return -EPROBE_DEFER;
			}
		}

		if (of_property_read_u32(np, "mediatek,larb-id", &id))
			id = num_larbs;
		smi_imu.larb_imu[id].dev = &larb_pdev->dev;
		num_larbs += 1;
	}

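	/*
	 * Register the interconnect provider first; the icc nodes created
	 * below are then added to it one by one.
	 */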
	INIT_LIST_HEAD(&mmqos->comm_list);

	INIT_LIST_HEAD(&mmqos->prov.nodes);
	mmqos->prov.set = mtk_mmqos_set;
	mmqos->prov.aggregate = mtk_mmqos_aggregate;
	mmqos->prov.xlate = mtk_mmqos_xlate;
	mmqos->prov.dev = &pdev->dev;

	ret = icc_provider_add(&mmqos->prov);
	if (ret) {
		dev_notice(&pdev->dev, "icc_provider_add failed:%d\n", ret);
		return ret;
	}

	mmqos_desc = (struct mtk_mmqos_desc *)
		of_device_get_match_data(&pdev->dev);
	if (!mmqos_desc) {
		ret = -EINVAL;
		goto err;
	}

	data = devm_kzalloc(&pdev->dev,
		sizeof(*data) + mmqos_desc->num_nodes * sizeof(node),
		GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto err;
	}

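	/*
	 * Create one icc node per entry in the per-SoC node table, link it to
	 * its parent and attach the type-specific private data.
	 */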
	for (i = 0; i < mmqos_desc->num_nodes; i++) {
		node_desc = &mmqos_desc->nodes[i];
		node = icc_node_create(node_desc->id);
		if (IS_ERR(node)) {
			ret = PTR_ERR(node);
			goto err;
		}
		icc_node_add(node, &mmqos->prov);

		if (node_desc->link != MMQOS_NO_LINK) {
			ret = icc_link_create(node, node_desc->link);
			if (ret)
				goto err;
		}
		node->name = node_desc->name;

		base_node = devm_kzalloc(
			&pdev->dev, sizeof(*base_node), GFP_KERNEL);
		if (!base_node) {
			ret = -ENOMEM;
			goto err;
		}
		base_node->icc_node = node;

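		/* The upper 16 bits of the node id select the node type. */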
		switch (node->id >> 16) {
		case MTK_MMQOS_NODE_COMMON:
			comm_node = devm_kzalloc(
				&pdev->dev, sizeof(*comm_node), GFP_KERNEL);
			if (!comm_node) {
				ret = -ENOMEM;
				goto err;
			}
			INIT_WORK(&comm_node->work, set_comm_icc_bw_handler);
			comm_node->clk = devm_clk_get(&pdev->dev,
				mmqos_desc->comm_muxes[node->id & 0xff]);
			if (IS_ERR(comm_node->clk)) {
				dev_notice(&pdev->dev, "get clk fail:%s\n",
					   mmqos_desc->comm_muxes[
						node->id & 0xff]);
				ret = -EINVAL;
				goto err;
			}
			comm_node->freq = clk_get_rate(comm_node->clk) / 1000000;
			INIT_LIST_HEAD(&comm_node->list);
			list_add_tail(&comm_node->list, &mmqos->comm_list);
			INIT_LIST_HEAD(&comm_node->comm_port_list);
			comm_node->icc_path = of_icc_get(&pdev->dev,
				mmqos_desc->comm_icc_path_names[
					node->id & 0xff]);
			if (IS_ERR_OR_NULL(comm_node->icc_path)) {
				dev_notice(&pdev->dev,
					   "get icc_path fail:%s\n",
					   mmqos_desc->comm_icc_path_names[
						node->id & 0xff]);
				ret = -EINVAL;
				goto err;
			}
			comm_node->base = base_node;
			node->data = (void *)comm_node;
			break;
		case MTK_MMQOS_NODE_COMMON_PORT:
			comm_port_node = devm_kzalloc(&pdev->dev,
				sizeof(*comm_port_node), GFP_KERNEL);
			if (!comm_port_node) {
				ret = -ENOMEM;
				goto err;
			}
			mutex_init(&comm_port_node->bw_lock);
			comm_port_node->common = node->links[0]->data;
			INIT_LIST_HEAD(&comm_port_node->list);
			list_add_tail(&comm_port_node->list,
				      &comm_port_node->common->comm_port_list);
			comm_port_node->base = base_node;
			node->data = (void *)comm_port_node;
			break;
		case MTK_MMQOS_NODE_LARB:
			larb_node = devm_kzalloc(
				&pdev->dev, sizeof(*larb_node), GFP_KERNEL);
			if (!larb_node) {
				ret = -ENOMEM;
				goto err;
			}
			comm_port_node = node->links[0]->data;
			larb_dev = smi_imu.larb_imu[node->id &
						    (MTK_LARB_NR_MAX - 1)].dev;
			if (larb_dev) {
				comm_port_node->larb_dev = larb_dev;
				larb_node->larb_dev = larb_dev;
			}
			larb_node->base = base_node;
			node->data = (void *)larb_node;
			break;
		case MTK_MMQOS_NODE_LARB_PORT:
			larb_port_node = devm_kzalloc(&pdev->dev,
				sizeof(*larb_port_node), GFP_KERNEL);
			if (!larb_port_node) {
				ret = -ENOMEM;
				goto err;
			}
			larb_port_node->bw_ratio = node_desc->bw_ratio;
			larb_port_node->base = base_node;
			node->data = (void *)larb_port_node;
			break;
		default:
			dev_notice(&pdev->dev,
				   "invalid node id:%#x\n", node->id);
			ret = -EINVAL;
			goto err;
		}
		data->nodes[i] = node;
	}

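	/*
	 * Topology is in place: expose it through the provider data, start
	 * the bandwidth work queue, seed the HRT settings and hook into
	 * MMDVFS clock-change notifications.
	 */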
	data->num_nodes = mmqos_desc->num_nodes;
	mmqos->prov.data = data;
	mmqos->max_ratio = mmqos_desc->max_ratio;

	mmqos->wq = create_singlethread_workqueue("mmqos_work_queue");
	if (!mmqos->wq) {
		dev_notice(&pdev->dev, "work queue create fail\n");
		ret = -ENOMEM;
		goto err;
	}

	hrt = devm_kzalloc(&pdev->dev, sizeof(*hrt), GFP_KERNEL);
	if (!hrt) {
		ret = -ENOMEM;
		goto err;
	}
	memcpy(hrt, &mmqos_desc->hrt, sizeof(mmqos_desc->hrt));
	mtk_mmqos_init_hrt(hrt);

	mmqos->nb.notifier_call = update_mm_clk;
	register_mmdvfs_notifier(&mmqos->nb);

	ret = mtk_mmqos_register_hrt_sysfs(&pdev->dev);
	if (ret)
		dev_notice(&pdev->dev, "sysfs create fail\n");

	platform_set_drvdata(pdev, mmqos);

	return 0;

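/* Error path: tear down the icc nodes created so far and drop the provider. */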
err:
	list_for_each_entry_safe(node, temp, &mmqos->prov.nodes, node_list) {
		icc_node_del(node);
		icc_node_destroy(node->id);
	}
	icc_provider_del(&mmqos->prov);
	return ret;
}
EXPORT_SYMBOL_GPL(mtk_mmqos_probe);

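/*
 * Remove helper for the SoC-specific drivers: destroy the icc topology,
 * unregister the provider, and release the notifier, work queue and sysfs
 * entries set up in probe.
 */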
int mtk_mmqos_remove(struct platform_device *pdev)
{
	struct mtk_mmqos *mmqos = platform_get_drvdata(pdev);
	struct icc_node *node, *temp;

	list_for_each_entry_safe(node, temp, &mmqos->prov.nodes, node_list) {
		icc_node_del(node);
		icc_node_destroy(node->id);
	}
	icc_provider_del(&mmqos->prov);
	unregister_mmdvfs_notifier(&mmqos->nb);
	destroy_workqueue(mmqos->wq);
	mtk_mmqos_unregister_hrt_sysfs(&pdev->dev);
	return 0;
}
EXPORT_SYMBOL_GPL(mtk_mmqos_remove);

MODULE_LICENSE("GPL v2");