// SPDX-License-Identifier: GPL-2.0
/*
 * Support for the ASR DDR devfreq (DDR frequency scaling) driver
 *
 * Copyright (C) 2021 ASR Micro Limited
 *
 */
8
9#include <linux/clk.h>
10#include <linux/delay.h>
11#include <linux/devfreq.h>
12#include <linux/io.h>
13#include <linux/module.h>
14#include <linux/platform_device.h>
15#include <linux/pm_qos.h>
16#include <linux/of.h>
17#include <linux/slab.h>
18#include <linux/string.h>
19#include <linux/sched.h>
20#include <linux/platform_data/devfreq-pxa.h>
21#include <linux/interrupt.h>
22#include <linux/irq.h>
23#include <linux/sched/clock.h>
24#include <linux/clk-provider.h>
25#include <soc/asr/regs-addr.h>
26#include <trace/events/pxa.h>
27#include <linux/miscdevice.h>
28#include <linux/cputype.h>
29#include <soc/asr/asrdcstat.h>
30#include <linux/cpu_pm.h>
31
32#ifdef CONFIG_DEVFREQ_GOV_THROUGHPUT
33#include <linux/cpufreq.h>
34#endif
35#include "asr_memorybus.h"
36
37#define DDR_DEVFREQ_UPTHRESHOLD 65
38#define DDR_DEVFREQ_DOWNDIFFERENTIAL 5
39
40#define DDR_DEVFREQ_HIGHCPUFREQ 800000
41#define DDR_DEVFREQ_HIGHCPUFREQ_UPTHRESHOLD 30
42#define DDR_DEVFREQ_EFFICIENCY 90
43
44#define KHZ_TO_HZ 1000
45
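/*
 * evoc is set when either the GPU or the CPU is running above its switch
 * point; the upthreshold notifier below uses it to select the aggressive
 * upthreshold.
 */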
46#define generate_evoc(gpu, cpu) ((gpu) || (cpu))
47extern struct pm_qos_object *pm_qos_array[];
48
49#ifdef CONFIG_DEVFREQ_GOV_THROUGHPUT
50static struct ddr_devfreq_data *ddrfreq_driver_data;
51
52/* default using 65% as upthreshold and 5% as downdifferential */
53static struct devfreq_throughput_data devfreq_throughput_data = {
54 .upthreshold = DDR_DEVFREQ_UPTHRESHOLD,
55 .downdifferential = DDR_DEVFREQ_DOWNDIFFERENTIAL,
56 .ddr_efficiency = DDR_DEVFREQ_EFFICIENCY,
57};
58
59static struct ddr_devfreq_data *ddrfreq_data;
60
61#define BIT_31 (0x1 << 31)
62#define BIT_2 (0x1 << 2)
63#define BIT_3 (0x1 << 3)
64#define BIT_4 (0x1 << 4)
65#define BIT_5 (0x1 << 5)
66
67typedef enum
68{
69 Samsung_ID = 0x01, // 0000 0001B
70 Qimonda_ID = 0x02, // 0000 0010B
71 Elpida_ID = 0x03, // 0000 0011B
72 Etron_ID = 0x04, // 0000 0100B
73 Nanya_ID = 0x05, // 0000 0101B
74 Hynix_ID = 0x06, // 0000 0110B
75 Mosel_ID = 0x07, // 0000 0111B
76 Winbond_ID = 0x08, // 0000 1000B
77 ESMT_ID = 0x09, // 0000 1001B
78 Reserved_ID = 0x0A, // 0000 1010B
79 Spansion_ID = 0x0B, // 0000 1011B
80 SST_ID = 0x0C, // 0000 1100B
81 ZMOS_ID = 0x0D, // 0000 1101B
82 Intel_ID = 0x0E, // 0000 1110B
83 UNIC_ID = 0x1A, // 0001 1010B
84 JSC_ID = 0x1C, // 0001 1100B
85 Fidelix_ID = 0xF8, // 1111 1000B
86 ESMT2_ID = 0xFD, // 1111 1101B
87 Numonyx_ID = 0xFE, // 1111 1110B
88 Micron_ID = 0xFF, // 1111 1111B
	Toshiba_ID = 0x03	// same value as the Elpida ID
90} DDR_Manufacturer_ID;
91
92static u32 soc_chipid;
93static u32 soc_ddrid;
94
95static unsigned long ddr_set_rate(struct ddr_devfreq_data *data,
96 unsigned long tgt_rate);
97static unsigned long old_ddrclk;
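/*
 * Drop DDR to its lowest frequency and hold devfreq->lock across the mode
 * register reads so the governor cannot change the rate in between; the
 * previous rate is restored by ddr_unlock_set_old_freq().
 */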
98static int ddr_lock_set_min_freq(void)
99{
100 unsigned long new_ddrclk;
101 struct ddr_devfreq_data *data;
102
103 data = ddrfreq_data;
104 new_ddrclk = data->ddr_freq_tbl[0];
105 mutex_lock(&data->devfreq->lock);
106 /* scaling to the min frequency before read ddr id */
107 old_ddrclk = clk_get_rate(data->ddr_clk) / KHZ_TO_HZ;
108 ddr_set_rate(data, new_ddrclk);
109 pr_info("Change ddr freq to lowest value. (%luKhz)\n", clk_get_rate(data->ddr_clk) / KHZ_TO_HZ);
110
111 return 0;
112}
113
114static int ddr_unlock_set_old_freq(void)
115{
116 struct ddr_devfreq_data *data;
117
118 data = ddrfreq_data;
119 ddr_set_rate(data, old_ddrclk);
120 mutex_unlock(&data->devfreq->lock);
121 pr_info("Change ddr freq to old value. (%luKhz)\n", old_ddrclk);
122
123 return 0;
124}
125
126
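/*
 * Busy-wait in 200 us chunks; udelay() should not be called with very large
 * arguments, so long delays are split into short ones here. The sub-200 us
 * remainder is dropped, which is fine for the multi-millisecond delays used
 * below.
 */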
127static void delay_long_us(u32 us)
128{
129 int i, loops;
130
131 loops = us / 200;
132
133 for (i = 0; i < loops; i++)
134 udelay(200);
135}
136
137static void soc_init_ddrid(void __iomem *mc_base)
138{
139 u32 value, cs_value1, cs_value2, temp_value[3];
140 u32 cs0_size, ddr_size;
141 u32 temp_value1 = 0xff, cs1_valid = 0;
142 u8 mrr_temp_value = 0;
143
144 ddr_lock_set_min_freq();
	/*
	 * MR8 only reports the density of one DDR die, so the total DDR size
	 * has to be calculated from it.
	 *
	 * MR8 layout:
	 *
	 * Type      Read-only OP<1:0>  00B: S4 SDRAM
	 *                              01B: S2 SDRAM
	 *                              10B: N NVM
	 *                              11B: Reserved
	 *
	 * Density   Read-only OP<5:2>  0000B: 64Mb    0001B: 128Mb
	 *                              0010B: 256Mb   0011B: 512Mb
	 *                              0100B: 1Gb     0101B: 2Gb
	 *                              0110B: 4Gb     0111B: 8Gb
	 *                              1000B: 16Gb    1001B: 32Gb
	 *                              all others: reserved
	 *
	 * I/O width Read-only OP<7:6>  00B: x32
	 *                              01B: x16
	 *                              10B: x8
	 *                              11B: not used
	 */
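	/*
	 * Example: a density field of 0101B (2 Gb per die) gives cs0_size =
	 * 16, i.e. 16 x 16 MB = 256 MB for CS0; the total is doubled below if
	 * CS1 turns out to be populated.
	 */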
174
175 /*
176 * step 1: read out the CS1 MR8
177 */
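	/*
	 * The writes to +0x024 below issue mode register read (MRR) commands
	 * through the memory controller's user command register, and the
	 * result is latched at +0x370: the 0x12xxxxxx vs 0x11xxxxxx values
	 * appear to select CS1 vs CS0, with the low byte selecting the mode
	 * register (MR8 here, MR5 later). CS1 MR8 is read twice because the
	 * read-back data is undefined when CS1 is not populated, so two
	 * mismatching reads mark CS1 as invalid.
	 */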
178 writel(0x12010008, (mc_base + 0x024));
179 delay_long_us(5000);
180
181 temp_value[0] = readl((mc_base + 0x370));
182 pr_info("CS1 MR8 1: 0x%08x\n", temp_value[0]);
183
184 writel(0x12010008, (mc_base + 0x024));
185 delay_long_us(5000);
186 temp_value[1] = readl((mc_base + 0x370));
187 pr_info("CS1 MR8 2: 0x%08x\n", temp_value[1]);
188
189 if (temp_value[0] != temp_value[1]) {
190 temp_value1 = 0xff;
191 cs_value1 = temp_value1;
192 } else {
193 temp_value1 = 0x00;
194 cs_value1 = temp_value[0];
195 }
196
197 /*
198 * step 2: read out the CS0 MR8
199 */
200 writel(0x11010008, (mc_base + 0x024));
201 delay_long_us(5000);
202
203 cs_value2 = readl((mc_base + 0x370));
204 pr_info("CS0 MR8: 0x%08x\n", cs_value2);
205
206 if (cs_value2 & BIT_31) {
207 mrr_temp_value = cs_value2 & 0xff;
208 pr_info("mrr_temp_value: 0x%08x\n", mrr_temp_value);
209
210 switch ((mrr_temp_value & (BIT_2 | BIT_3 | BIT_4 | BIT_5)) >> 2) {
211 case 0x1: // 128Mb
212 cs0_size = 1; // 1*16MB
213 break;
214
215 case 0x2: // 256Mb
216 cs0_size = 2; // 2*16MB
217 break;
218
219 case 0x3: // 512Mb
220 cs0_size = 4; // 4*16MB
221 break;
222
223 case 0x4: // 1Gb
224 cs0_size = 8; // 8*16MB
225 break;
226
227 case 0x5: // 2Gb
228 cs0_size = 16; // 16*16MB
229 break;
230
231 case 0x6: // 4Gb
232 cs0_size = 32; // 32*16MB
233 break;
234
235 case 0x0: // 64Mb, No such little DDR here
236 case 0x7: // No such large DDR here
237 case 0x8: // No such large DDR here
238 case 0x9: // No such large DDR here
239 default:
240 pr_info("If you see me, there should be something wrong!\n");
241 cs0_size = 0;
242 break;
243 }
244 } else {
245 cs0_size = 0;
246 pr_info("If you see me, there should be something wrong!\n");
247 }
248
	/*
	 * The CS1 MR8 value may be random data left in the DDR PHY buffer if
	 * CS1 does not exist, so require CS1 MR8 to match CS0 MR8 (which is
	 * always valid) before treating CS1 as populated.
	 */
253 if (temp_value1 == 0x00) {
254 if (cs_value1 != cs_value2) {
255 cs1_valid = 0;
256 } else {
257 cs1_valid = 1;
258 }
259 } else {
260 cs1_valid = 0; // CS1 invalid
261 }
262
263 if (cs1_valid)
264 ddr_size = cs0_size * 2;
265 else
266 ddr_size = cs0_size;
267
268 pr_info("ddr_size: 0x%08x\n", ddr_size);
269
270 /*
271 * step 3: read out the CS0 MR5
272 */
273 writel(0x11010005, (mc_base + 0x024));
274 delay_long_us(5000);
275 value = readl((mc_base + 0x370));
276 pr_info("CS0 MR5: 0x%08x\n", value);
277
278 if (value & BIT_31) {
279 mrr_temp_value = value & 0xFF;
280 pr_info("mrr_temp_value: 0x%08x\n", mrr_temp_value);
281
282 switch (mrr_temp_value) {
283 case Samsung_ID:
284 pr_info("DDR Vendor: Samsung\n");
285 break;
286
287 case Qimonda_ID:
288 pr_info("DDR Vendor: Qimonda\n");
289 break;
290
		/*
		 * NOTE:
		 *
		 * The Toshiba vendor ID is the same as Elpida's, so they cannot be
		 * distinguished using MR5 alone. Only the Elpida ID is used for now.
		 *
		 * Todo: more information may be needed to tell them apart.
		 */
299 case Elpida_ID: // Toshiba_ID
300 pr_info("DDR Vendor: Elpida\n");
301 break;
302
303 case Etron_ID:
304 pr_info("DDR Vendor: Etron\n");
305 break;
306
307 case Nanya_ID:
308 pr_info("DDR Vendor: Nanya\n");
309 break;
310
311 case Hynix_ID:
312 pr_info("DDR Vendor: Hynix\n");
313 break;
314
315 case Mosel_ID:
316 pr_info("DDR Vendor: Mosel\n");
317 break;
318
319 case Winbond_ID:
320 pr_info("DDR Vendor: Winbond\n");
321 break;
322
323 case ESMT_ID:
324 pr_info("DDR Vendor: ESMT\n");
325 break;
326
327 case ESMT2_ID:
328 mrr_temp_value = ESMT_ID;
329 pr_info("DDR Vendor: ESMT\n");
330 break;
331
332 case Reserved_ID:
333 pr_info("DDR Vendor: Reserved\n");
334 break;
335
336 case Spansion_ID:
337 pr_info("DDR Vendor: Spansion\n");
338 break;
339
340 case SST_ID:
341 pr_info("DDR Vendor: SST\n");
342 break;
343
344 case ZMOS_ID:
345 pr_info("DDR Vendor: ZMOS\n");
346 break;
347
348 case Intel_ID:
349 pr_info("DDR Vendor: Intel\n");
350 break;
351
352 case JSC_ID:
353 mrr_temp_value = 0x13; /* to match ddr package id */
354 pr_info("DDR Vendor: JSC\n");
355 break;
356
357 case Numonyx_ID:
358 pr_info("DDR Vendor: Numonyx\n");
359 break;
360
361 case Micron_ID:
362 pr_info("DDR Vendor: Micron\n");
363 break;
364
365 case Fidelix_ID:
366 pr_info("DDR Vendor: Fidelix\n");
367 break;
368
369 case UNIC_ID:
370 mrr_temp_value = ZMOS_ID;
371 pr_info("DDR Vendor: UNIC\n");
372 break;
373 default:
374 pr_info("Unsupported DDR Vendor ID: 0x%08x\n", (value & 0xFF));
375 break;
376 }
377 }
378
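	/*
	 * soc_ddrid layout: bits [15:8] hold the (possibly remapped) vendor
	 * ID from MR5, bits [7:0] hold the total DDR size in 16 MB units
	 * derived from MR8.
	 */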
379 soc_ddrid = (mrr_temp_value << 8) | (ddr_size & 0xFF);
380
381 ddr_unlock_set_old_freq();
382
383 pr_info("soc_ddrid: 0x%08x\n", soc_ddrid);
384}
385
386static void soc_init_ids(struct ddr_devfreq_data *data)
387{
388 void __iomem *mc_base = data->dmc.hw_base;
389
390 soc_init_ddrid(mc_base);
391
392 soc_chipid = readl(regs_addr_get_va(REGS_ADDR_CIU));
393 pr_info("soc_chipid: 0x%08x\n", soc_chipid);
394 soc_chipid &= 0xffffff;
395}
396
397static ssize_t soc_id_read(struct file *filp, char __user *user_buf,
398 size_t count, loff_t *f_pos)
399{
400 ssize_t ret, len = 0;
401 char *kbuf;
402
403 if (*f_pos)
404 return 0;
405
406 kbuf = kzalloc(128, GFP_KERNEL);
407 if (!kbuf) {
408 pr_err("Cannot allocate buffer!\n");
409 return -ENOMEM;
410 }
411
412 /* allow multiple calls to validate the correctness of ddrid */
413 soc_init_ids(ddrfreq_data);
414
415 len += sprintf(kbuf + len, "chip_id: %08x\n", soc_chipid);
416 len += sprintf(kbuf + len, "ddr_id: %08x\n", soc_ddrid);
417
418 ret = simple_read_from_buffer(user_buf, count, f_pos, kbuf, len);
419 kfree(kbuf);
420
421 return ret;
422}
423
424static const struct file_operations soc_id_fops = {
425 .owner = THIS_MODULE,
426 .read = soc_id_read,
427};
428
429static struct miscdevice soc_id_miscdev = {
430 MISC_DYNAMIC_MINOR,
431 "soc_id",
432 &soc_id_fops
433};
434
435static void axi_mon_init(void * __iomem aximon_base)
436{
437 int i;
438
439 for (i = 0; i < NR_AXI_MON_PORT; i++)
440 writel(0x80000020, aximon_base + (AXI_MON_CTRL + (i << 4)));
441}
442
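/*
 * Read one statistic per control-word write for each AXI monitor port.
 * Judging by the fields they are stored into, control values 0x80000065,
 * 0x8000006D, 0x80000070 and 0x80000072 select max read latency, max write
 * latency, read byte count and write byte count respectively; the counters
 * are then reset and re-armed for the next sampling window.
 */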
443static void get_aximon_data(void * __iomem aximon_base, struct ddr_stats_data *data)
444{
445 int i;
446
447 for (i = 0; i < NR_AXI_MON_PORT; i++) {
448 writel(0x80000065, aximon_base + (AXI_MON_CTRL + (i << 4)));
449 data->max_read_latency[i] = readl(aximon_base + (AXI_MON_DATA + (i << 4)));
450
451 writel(0x8000006D, aximon_base + (AXI_MON_CTRL + (i << 4)));
452 data->max_write_latency[i] = readl(aximon_base + (AXI_MON_DATA + (i << 4)));
453
454 writel(0x80000070, aximon_base + (AXI_MON_CTRL + (i << 4)));
455 data->axi_read_bytes[i] = readl(aximon_base + (AXI_MON_DATA + (i << 4)));
456
457 writel(0x80000072, aximon_base + (AXI_MON_CTRL + (i << 4)));
458 data->axi_write_bytes[i] = readl(aximon_base + (AXI_MON_DATA + (i << 4)));
459 }
460
461 /* reset and begin new sampling of axi monitor counters */
462 for (i = 0; i < NR_AXI_MON_PORT; i++) {
463 writel(0x00000000, aximon_base + (AXI_MON_CTRL + (i << 4)));
464 writel(0x80000020, aximon_base + (AXI_MON_CTRL + (i << 4)));
465 }
466}
467
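/*
 * Recompute the throughput thresholds for every entry of the frequency
 * table: up is upthreshold percent of the (scaled) frequency and down is
 * (upthreshold - downdifferential) percent. In 4x mode the frequency is
 * additionally derated by ddr_efficiency, mirroring the table setup in
 * ddr_devfreq_probe().
 */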
468static inline void __update_dev_upthreshold(unsigned int upthrd,
469 struct devfreq_throughput_data *gov_data)
470{
471 int i;
472
473 for (i = 0; i < gov_data->table_len; i++) {
474 if (ddrfreq_driver_data->mode_4x_en) {
475 gov_data->throughput_table[i].up =
476 upthrd * devfreq_throughput_data.ddr_efficiency
477 * (gov_data->freq_table[i] / 100) / 100;
478 gov_data->throughput_table[i].down =
479 (upthrd - gov_data->downdifferential) *
480 devfreq_throughput_data.ddr_efficiency
481 * (gov_data->freq_table[i] / 100) / 100;
482 } else {
483 gov_data->throughput_table[i].up =
484 upthrd * gov_data->freq_table[i] / 100;
485 gov_data->throughput_table[i].down =
486 (upthrd - gov_data->downdifferential) *
487 gov_data->freq_table[i] / 100;
488 }
489 }
490}
491
/*
 * cpufreq transition notifier used to change the devfreq governor's
 * upthreshold: when the new CPU frequency crosses high_upthrd_swp, the
 * aggressive (lower) upthreshold is applied so DDR scales up earlier.
 */
493static int upthreshold_freq_notifer_call(struct notifier_block *nb,
494 unsigned long val, void *data)
495{
496 struct cpufreq_freqs *freq = data;
497 struct ddr_devfreq_data *cur_data =
498 container_of(nb, struct ddr_devfreq_data, freq_transition);
499 struct devfreq *devfreq = cur_data->devfreq;
500 struct devfreq_throughput_data *gov_data;
501 int evoc = 0;
502 unsigned int upthrd;
503
504 if (val != CPUFREQ_POSTCHANGE/* &&
505 val != GPUFREQ_POSTCHANGE_UP &&
506 val != GPUFREQ_POSTCHANGE_DOWN */)
507 return NOTIFY_OK;
508
509 mutex_lock(&devfreq->lock);
510
511 gov_data = devfreq->data;
512#if 0
513 if (val == GPUFREQ_POSTCHANGE_UP)
514 cur_data->gpu_up = 1;
515 else if (val == GPUFREQ_POSTCHANGE_DOWN)
516 cur_data->gpu_up = 0;
517 else
518#endif
519 if (freq->new >= cur_data->high_upthrd_swp)
520 cur_data->cpu_up = 1;
521 else
522 cur_data->cpu_up = 0;
523
524 evoc = generate_evoc(cur_data->gpu_up, cur_data->cpu_up);
525
526 if (evoc)
527 upthrd = cur_data->high_upthrd;
528 else
529 upthrd = gov_data->upthreshold;
530
531 __update_dev_upthreshold(upthrd, gov_data);
532
533 mutex_unlock(&devfreq->lock);
534
535 trace_pxa_ddr_upthreshold(upthrd);
536
537 return NOTIFY_OK;
538}
539
540int gpufeq_register_dev_notifier(struct srcu_notifier_head *gpu_notifier_chain)
541{
542 return srcu_notifier_chain_register(gpu_notifier_chain,
543 &ddrfreq_driver_data->freq_transition);
544}
545EXPORT_SYMBOL(gpufeq_register_dev_notifier);
546
#endif /* CONFIG_DEVFREQ_GOV_THROUGHPUT */
548
549static int ddr_max;
550
551static int __init ddr_max_setup(char *str)
552{
553 int freq;
554
555 if (!get_option(&str, &freq))
556 return 0;
557 ddr_max = freq;
558 return 1;
559}
560
561__setup("ddr_max=", ddr_max_setup);
562
563static int ddr_min = 104000;
564
565static int __init ddr_min_setup(char *str)
566{
567 int freq;
568
569 if (!get_option(&str, &freq))
570 return 0;
571 ddr_min = freq;
572 return 1;
573}
574
575__setup("ddr_min=", ddr_min_setup);
576
577static void write_static_register(unsigned int val, unsigned int expected_val,
578 void *reg, unsigned int ver)
579{
580 if (ver == MCK5 || ver == NZAS_MC)
581 writel(val, reg);
582 else
583 /* this should never happen */
584 BUG_ON(1);
585}
586
587static unsigned int read_static_register(void *reg, unsigned int ver)
588{
589 unsigned int ret;
590
591 if (ver == MCK5 || ver == NZAS_MC)
592 ret = readl(reg);
593 else
594 /* this should never happen */
595 BUG_ON(1);
596 return ret;
597}
598
599static void stop_ddr_performance_counter(struct ddr_devfreq_data *data)
600{
601 void __iomem *mc_base = data->dmc.hw_base;
602 struct mck_pmu_regs_offset *regs = &data->dmc.mck_regs;
603 unsigned int ver = data->dmc.version;
604
605 /*
606 * Write to Performance Counter Configuration Register to
607 * disable counters.
608 */
609 write_static_register(0x0, 0x0, mc_base + regs->cfg, ver);
610}
611
612static void start_ddr_performance_counter(struct ddr_devfreq_data *data)
613{
614 void __iomem *mc_base = data->dmc.hw_base;
615 struct mck_pmu_regs_offset *regs = &data->dmc.mck_regs;
616 unsigned int ver = data->dmc.version;
617 unsigned int val;
618
619 /*
620 * Write to Performance Counter Configuration Register to
621 * enable counters and choose the events for counters.
622 */
623 switch (ver) {
624 case MCK5:
625 /*
626 * cnt1, event=0x00, clock cycles
627 * cnt2, event=0x1A, Read + Write command count
628 * cnt3, event=0x18, busy cycles
629 */
630 val = ((0x00 | 0x00) << 0) | ((0x80 | 0x00) << 8) |
631 ((0x80 | 0x1A) << 16) | ((0x80 | 0x18) << 24);
632 break;
633 case NZAS_MC:
634 /*
635 * cnt1, event=0x00, clock cycles
636 * cnt2, event=0x56, Read + Write command count
637 * cnt3, event=0x39, busy cycles
638 */
639 val = ((0x00 | 0x00) << 0) | ((0x80 | 0x00) << 8) |
640 ((0x80 | 0x56) << 16) | ((0x80 | 0x39) << 24);
641 break;
642 default:
643 /* this should never happen */
644 BUG_ON(1);
645 }
646
647 write_static_register(val, val, mc_base + regs->cfg, ver);
648}
649
650static void init_ddr_performance_counter(struct ddr_devfreq_data *data)
651{
652 unsigned int i;
653 void __iomem *mc_base = data->dmc.hw_base;
654 struct mck_pmu_regs_offset *regs = &data->dmc.mck_regs;
655 unsigned int ver = data->dmc.version;
656
657 /*
658 * Step1: Write to Performance Counter Configuration Register to
659 * disable counters.
660 */
661 write_static_register(0x0, 0x0, mc_base + regs->cfg, ver);
662
663 /*
664 * Step2: Write to Performance Counter Register to set the starting
665 * value.
666 */
667 for (i = 0; i < data->dmc.pmucnt_in_use; i++) {
668 write_static_register(0x0, 0x0,
669 mc_base + regs->cnt_base + i * 4, ver);
670 }
671
672 /*
673 * Step3: Write to Performance Counter Status Register to clear
674 * overflow flag.
675 */
676 write_static_register(0xf, 0x0, mc_base + regs->cnt_stat, ver);
677
678 /*
679 * Step4: Write to Performance Counter Control Register to select
680 * the desired settings
681 * bit18:16 0x0 = Divide clock by 1
682 * bit4 0x1 = Continue counting on any counter overflow
683 * bit0 0x0 = Enabled counters begin counting
684 */
685 write_static_register(0x10, 0x10, mc_base + regs->ctrl, ver);
686
687 /* Step5: Enable Performance Counter interrupt */
688 write_static_register(data->intr_en_val, data->intr_en_val, mc_base + regs->intr_en, ver);
689}
690
691static int ddr_rate2_index(struct ddr_devfreq_data *data)
692{
693 unsigned int rate;
694 int i;
695
696 rate = clk_get_rate(data->ddr_clk) / KHZ_TO_HZ;
697 for (i = 0; i < data->ddr_freq_tbl_len; i++)
698 if (data->ddr_freq_tbl[i] == rate)
699 return i;
	dev_err(&data->devfreq->dev, "unknown ddr rate %u\n", rate);
701 return -1;
702}
703
704static unsigned int ddr_index2_rate(struct ddr_devfreq_data *data, int index)
705{
706 if ((index >= 0) && (index < data->ddr_freq_tbl_len))
707 return data->ddr_freq_tbl[index];
708 else {
		dev_err(&data->devfreq->dev,
			"unknown ddr index %d\n", index);
711 return 0;
712 }
713}
714
/*
 * overflow: 1 means a counter overflow should be handled, 0 means not.
 * start: 1 means the performance counters are restarted after the update,
 *        0 means not.
 */
719static void ddr_perf_cnt_update(struct ddr_devfreq_data *data, u32 overflow,
720 u32 start)
721{
722 struct perf_counters *ddr_ticks = data->dmc.ddr_ticks;
723 void *mc_base = (void *)data->dmc.hw_base;
724 struct mck_pmu_regs_offset *regs = &data->dmc.mck_regs;
725 unsigned int cnt, i, overflow_flag;
726 unsigned int ddr_idx = data->cur_ddr_idx;
727 unsigned long flags;
728 unsigned int ver = data->dmc.version;
729
730 if ((overflow != 1 && overflow != 0) || (start != 1 && start != 0)) {
731 dev_err(&data->devfreq->dev, "%s: parameter is not correct.\n",
732 __func__);
733 return;
734 }
735
736 if (ddr_idx >= data->ddr_freq_tbl_len) {
737 dev_err(&data->devfreq->dev, "%s: invalid ddr_idx %u\n",
738 __func__, ddr_idx);
739 return;
740 }
741
	/*
	 * To keep things simple, overflow is only handled in the IRQ->work
	 * path, so the overflow parameter is 1 only in that path.
	 * If overflow is 0 and an overflow is found pending, poll here until
	 * the IRQ->work path has completed.
	 * The spin_unlock ensures the polling does not block the path we are
	 * waiting for.
	 */
750 while (1) {
751 spin_lock_irqsave(&data->lock, flags);
752
753 /* stop counters, to keep data synchronized */
754 stop_ddr_performance_counter(data);
755
756 overflow_flag =
757 read_static_register(mc_base + regs->cnt_stat, ver)
758 & 0xf;
759
760 /* If overflow, bypass the polling */
761 if (overflow)
762 break;
763
		/*
		 * If no overflow is pending, proceed; otherwise keep polling
		 * until the overflow handler has finished.
		 */
765 if (!overflow_flag)
766 break;
767
768 spin_unlock_irqrestore(&data->lock, flags);
769
		/* Take a breath here to let the overflow work get the CPU */
771 usleep_range(100, 1000);
772 }
773
774 /* If overflow, clear pended overflow flag in MC */
775 if (overflow)
776 write_static_register(overflow_flag, 0x0,
777 mc_base + regs->cnt_stat, ver);
778
779 for (i = 0; i < data->dmc.pmucnt_in_use; i++) {
780 cnt = read_static_register(mc_base + regs->cnt_base + i * 4,
781 ver);
782
783 if (overflow_flag & (1 << i)) {
784 dev_dbg(&data->devfreq->dev,
785 "DDR perf counter overflow!\n");
786 ddr_ticks[ddr_idx].reg[i] += (1LLU << 32);
787 }
788 ddr_ticks[ddr_idx].reg[i] += cnt;
789
790 /* reset performance counter to 0x0 */
791 write_static_register(0x0, 0x0,
792 mc_base + regs->cnt_base + i * 4, ver);
793 }
794
795 if (start)
796 start_ddr_performance_counter(data);
797
798 spin_unlock_irqrestore(&data->lock, flags);
799}
800
801static int __init ddr_perf_cnt_init(struct ddr_devfreq_data *data)
802{
803 unsigned long flags;
804
805 spin_lock_irqsave(&data->lock, flags);
806 init_ddr_performance_counter(data);
807 start_ddr_performance_counter(data);
808 spin_unlock_irqrestore(&data->lock, flags);
809
810 data->cur_ddr_idx = ddr_rate2_index(data);
811
812 return 0;
813}
814
815static inline void ddr_perf_cnt_restart(struct ddr_devfreq_data *data)
816{
817 unsigned long flags;
818
819 spin_lock_irqsave(&data->lock, flags);
820 start_ddr_performance_counter(data);
821 spin_unlock_irqrestore(&data->lock, flags);
822}
823
824/*
825 * get the mck total_ticks, data_ticks, speed.
826 */
827static void get_ddr_cycles(struct ddr_devfreq_data *data,
828 unsigned long *total_ticks, unsigned long *data_ticks, int *speed)
829{
830 unsigned long flags;
831 unsigned int diff_ms;
832 unsigned long long time_stamp_cur;
833 static unsigned long long time_stamp_old;
834 struct perf_counters *ddr_ticks = data->dmc.ddr_ticks;
835 int i;
836 u64 *total_ticks_base = data->ddr_profiler.total_ticks_base;
837 u64 *data_ticks_base = data->ddr_profiler.data_ticks_base;
838
839 spin_lock_irqsave(&data->lock, flags);
840 *total_ticks = *data_ticks = 0;
841 for (i = 0; i < data->ddr_freq_tbl_len; i++) {
842 *total_ticks += ddr_ticks[i].reg[1] - total_ticks_base[i];
843 *data_ticks += ddr_ticks[i].reg[2] - data_ticks_base[i];
844 total_ticks_base[i] = ddr_ticks[i].reg[1];
845 data_ticks_base[i] = ddr_ticks[i].reg[2];
846 }
847
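	/*
	 * reg[1] counts DDR clock cycles and reg[2] counts read+write
	 * commands (see start_ddr_performance_counter()). Each command moves
	 * a burst of bst_len beats, i.e. bst_len/2 clock cycles of data at
	 * double data rate. The doubling of total_ticks in 4x mode presumably
	 * compensates for the counter clock running at half the effective
	 * data rate in that mode.
	 */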
848 if (data->mode_4x_en)
849 *total_ticks = (*total_ticks) << 1;
850 *data_ticks = *data_ticks * data->bst_len / 2;
851 spin_unlock_irqrestore(&data->lock, flags);
852
853 time_stamp_cur = sched_clock();
854 diff_ms = (unsigned int)div_u64(time_stamp_cur - time_stamp_old,
855 1000000);
856 time_stamp_old = time_stamp_cur;
857
858 if (diff_ms != 0)
859 *speed = *data_ticks / diff_ms;
860 else
861 *speed = -1;
862}
863
864static int ddr_get_dev_status(struct device *dev,
865 struct devfreq_dev_status *stat)
866{
867 struct ddr_devfreq_data *data = dev_get_drvdata(dev);
868 struct devfreq *df = data->devfreq;
869 unsigned int workload;
870 unsigned long polling_jiffies;
871 unsigned long now = jiffies;
872
873 stat->current_frequency = clk_get_rate(data->ddr_clk) / KHZ_TO_HZ;
874 /*
875 * ignore the profiling if it is not from devfreq_monitor
876 * or there is no profiling
877 */
878 polling_jiffies = msecs_to_jiffies(df->profile->polling_ms);
879 if (!polling_jiffies || (polling_jiffies && data->last_polled_at &&
880 time_before(now, (data->last_polled_at + polling_jiffies)))) {
881 dev_dbg(dev,
882 "No profiling or interval is not expired %lu, %lu, %lu\n",
883 polling_jiffies, now, data->last_polled_at);
884 return -EINVAL;
885 }
886
887 ddr_perf_cnt_update(data, 0, 1);
888 get_ddr_cycles(data, &stat->total_time,
889 &stat->busy_time, &stat->throughput);
890 if (data->axi_mon_base) {
891 get_aximon_data(data->axi_mon_base, &data->ddr_stats);
892 data->ddr_stats.ddr_cycles = stat->total_time;
893 data->ddr_stats.data_cycles = stat->busy_time;
894 }
895 data->last_polled_at = now;
896
	/*
	 * Adjust the workload calculation here to align with the devfreq
	 * governor: scale both counts down equally so the ratio is preserved
	 * while later 32-bit arithmetic cannot overflow.
	 */
898 if (stat->busy_time >= (1 << 24) || stat->total_time >= (1 << 24)) {
899 stat->busy_time >>= 7;
900 stat->total_time >>= 7;
901 }
902
903 workload = cal_workload(stat->busy_time, stat->total_time);
904 data->workload = workload;
905
906 dev_dbg(dev, "workload is %d precent\n", workload);
907 dev_dbg(dev, "busy time is 0x%x, %u\n", (unsigned int)stat->busy_time,
908 (unsigned int)stat->busy_time);
909 dev_dbg(dev, "total time is 0x%x, %u\n\n",
910 (unsigned int)stat->total_time,
911 (unsigned int)stat->total_time);
912 dev_dbg(dev, "throughput is 0x%x, throughput * 8 (speed) is %u\n\n",
913 (unsigned int)stat->throughput, 8 * stat->throughput);
914
915 trace_pxa_ddr_workload(workload, stat->current_frequency,
916 stat->throughput);
917 return 0;
918}
919
920static unsigned long ddr_set_rate(struct ddr_devfreq_data *data,
921 unsigned long tgt_rate)
922{
923 unsigned long cur_freq, tgt_freq;
924 int ddr_idx;
925
926 cur_freq = clk_get_rate(data->ddr_clk);
927 tgt_freq = tgt_rate * KHZ_TO_HZ;
928
929 dev_dbg(&data->devfreq->dev, "%s: curfreq %lu, tgtfreq %lu\n",
930 __func__, cur_freq, tgt_freq);
931
932 /* update performance data before ddr clock change */
933 ddr_perf_cnt_update(data, 0, 0);
934
935 /* clk_set_rate will find a frequency larger or equal tgt_freq */
936 clk_set_rate(data->ddr_clk, tgt_freq);
937
938 /* re-init ddr performance counters after ddr clock change */
939 ddr_perf_cnt_restart(data);
940
941 ddr_idx = ddr_rate2_index(data);
942 if (ddr_idx >= 0) {
943 data->cur_ddr_idx = ddr_idx;
944 return data->ddr_freq_tbl[ddr_idx];
945 } else
946 dev_err(&data->devfreq->dev, "Failed to do ddr freq change\n");
947
948 return tgt_freq;
949}
950
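/*
 * Pick a frequency from the table for the requested value: by default the
 * lowest entry that is >= *freq, or with DEVFREQ_FLAG_LEAST_UPPER_BOUND the
 * highest entry that is <= *freq, clamped to the table boundaries.
 */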
951static void find_best_freq(struct ddr_devfreq_data *data, unsigned long *freq,
952 u32 flags)
953{
954 int i;
955 unsigned long temp = *freq;
956
957 u32 *freq_table = data->ddr_freq_tbl;
958 u32 len = data->ddr_freq_tbl_len;
959
960 if (*freq < freq_table[0]) {
961 *freq = freq_table[0];
962 return;
963 }
964 if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
965 for (i = 1; i < len; i++)
966 if (freq_table[i - 1] <= temp
967 && freq_table[i] > temp) {
968 *freq = freq_table[i - 1];
969 break;
970 }
971 } else {
972 for (i = 0; freq_table[i]; i++)
973 if (freq_table[i] >= temp) {
974 *freq = freq_table[i];
975 break;
976 }
977 }
978
979 if (i == len)
980 *freq = freq_table[i - 1];
981}
982
983static int ddr_target(struct device *dev, unsigned long *freq,
984 unsigned int flags)
985{
986 struct platform_device *pdev;
987 struct ddr_devfreq_data *data;
988 struct devfreq *df;
989 unsigned int *ddr_freq_table, ddr_freq_len;
990
991 pdev = container_of(dev, struct platform_device, dev);
992 data = platform_get_drvdata(pdev);
993
994 /* in normal case ddr fc will NOT be disabled */
995 if (unlikely(atomic_read(&data->is_disabled))) {
996 df = data->devfreq;
997 /*
998 * this function is called with df->locked, it is safe to
999 * read the polling_ms here
1000 */
1001 if (df->profile->polling_ms)
1002 dev_err(dev, "[WARN] ddr ll fc is disabled from "
1003 "debug interface, suggest to disable "
1004 "the profiling at first!\n");
1005 *freq = clk_get_rate(data->ddr_clk) / KHZ_TO_HZ;
1006 return 0;
1007 }
1008
1009 ddr_freq_table = &data->ddr_freq_tbl[0];
1010 ddr_freq_len = data->ddr_freq_tbl_len;
1011 dev_dbg(dev, "%s: %u\n", __func__, (unsigned int)*freq);
1012
1013 find_best_freq(data, freq, flags);
1014 *freq = ddr_set_rate(data, *freq);
1015
1016 return 0;
1017}
1018
1019static int configure_mck_pmu_regs(struct ddr_devfreq_data *data)
1020{
1021 unsigned int ver = data->dmc.version;
1022
1023 switch (ver) {
1024 case MCK5:
1025 case NZAS_MC:
1026 data->dmc.mck_regs.cfg = MCK5_PERF_CONFIG;
1027 data->dmc.mck_regs.cnt_stat = MCK5_PERF_STATUS;
1028 data->dmc.mck_regs.ctrl = MCK5_PERF_CONTRL;
1029 data->dmc.mck_regs.cnt_base = MCK5_PERF_CNT_BASE;
1030 data->dmc.mck_regs.intr_stat = MCK5_INTR_STATUS;
1031 data->dmc.mck_regs.intr_en = MCK5_INTR_EN;
1032
1033#ifndef CONFIG_OPTEE
		/*
		 * nsaid / TrustZone range check enabled: also enable the
		 * access-decode-error interrupt and record the error-info
		 * register offsets; otherwise only the counter-overflow
		 * interrupt is enabled.
		 */
1035 if ((ver == NZAS_MC) && (read_static_register(data->dmc.hw_base + DDR_TZ_RANGE0_LOW, ver) & 0x1)) {
1036 data->dmc.mck_regs.adc_err_info = DDR_ADC_ERR_INFO;
1037 data->dmc.mck_regs.adc_err_addr_l = DDR_ADC_ERR_ADDR_L;
1038 data->dmc.mck_regs.adc_err_addr_h = DDR_ADC_ERR_ADDR_H;
1039 data->dmc.mck_regs.adc_err_id = DDR_ADC_ERR_ID;
1040 data->intr_en_val = DDR_INTR_EN_OVFL_ADC;
1041 } else {
1042 data->intr_en_val = DDR_INTR_EN_OVFL;
1043 }
1044#else
1045 data->intr_en_val = DDR_INTR_EN_OVFL;
1046#endif
1047
1048 pr_info("intr_en_val: 0x%x\n", data->intr_en_val);
1049 return 0;
1050 default:
1051 return -EINVAL;
1052 }
1053}
1054
1055static int ddr_get_cur_freq(struct device *dev, unsigned long *freq)
1056{
1057 struct platform_device *pdev;
1058 struct ddr_devfreq_data *data;
1059
1060 pdev = container_of(dev, struct platform_device, dev);
1061 data = platform_get_drvdata(pdev);
1062
1063 *freq = clk_get_rate(data->ddr_clk) / KHZ_TO_HZ;
1064
1065 return 0;
1066}
1067
1068static struct devfreq_dev_profile ddr_devfreq_profile = {
1069 /* Profiler is not enabled by default */
1070 .polling_ms = 0,
1071 .target = ddr_target,
1072 .get_dev_status = ddr_get_dev_status,
1073 .get_cur_freq = ddr_get_cur_freq,
1074};
1075
/* interface to change the switch point of the high aggressive upthreshold */
1077static ssize_t high_swp_store(struct device *dev, struct device_attribute *attr,
1078 const char *buf, size_t size)
1079{
1080 struct platform_device *pdev;
1081 struct ddr_devfreq_data *data;
1082 struct devfreq *devfreq;
1083 unsigned int swp;
1084
1085 pdev = container_of(dev, struct platform_device, dev);
1086 data = platform_get_drvdata(pdev);
1087 devfreq = data->devfreq;
1088
1089 if (0x1 != sscanf(buf, "%u", &swp)) {
1090 dev_err(dev, "<ERR> wrong parameter\n");
1091 return -E2BIG;
1092 }
1093
1094 mutex_lock(&devfreq->lock);
1095 data->high_upthrd_swp = swp;
1096 mutex_unlock(&devfreq->lock);
1097
1098 return size;
1099}
1100
1101static ssize_t high_swp_show(struct device *dev,
1102 struct device_attribute *attr,
1103 char *buf)
1104{
1105 struct platform_device *pdev;
1106 struct ddr_devfreq_data *data;
1107
1108 pdev = container_of(dev, struct platform_device, dev);
1109 data = platform_get_drvdata(pdev);
1110 return sprintf(buf, "%u\n", data->high_upthrd_swp);
1111}
1112
/* interface to change the aggressive upthreshold value */
1114static ssize_t high_upthrd_store(struct device *dev,
1115 struct device_attribute *attr,
1116 const char *buf, size_t size)
1117{
1118 struct platform_device *pdev;
1119 struct ddr_devfreq_data *data;
1120 struct devfreq *devfreq;
1121 unsigned int high_upthrd;
1122
1123 pdev = container_of(dev, struct platform_device, dev);
1124 data = platform_get_drvdata(pdev);
1125 devfreq = data->devfreq;
1126
1127 if (0x1 != sscanf(buf, "%u", &high_upthrd)) {
1128 dev_err(dev, "<ERR> wrong parameter\n");
1129 return -E2BIG;
1130 }
1131
1132 mutex_lock(&devfreq->lock);
1133 data->high_upthrd = high_upthrd;
1134 if (data->cpu_up)
1135 __update_dev_upthreshold(high_upthrd, devfreq->data);
1136 mutex_unlock(&devfreq->lock);
1137
1138 return size;
1139}
1140
1141static ssize_t high_upthrd_show(struct device *dev,
1142 struct device_attribute *attr,
1143 char *buf)
1144{
1145 struct platform_device *pdev;
1146 struct ddr_devfreq_data *data;
1147
1148 pdev = container_of(dev, struct platform_device, dev);
1149 data = platform_get_drvdata(pdev);
1150 return sprintf(buf, "%u\n", data->high_upthrd);
1151}
1152
1153/* debug interface used to totally disable ddr fc */
1154static ssize_t disable_store(struct device *dev, struct device_attribute *attr,
1155 const char *buf, size_t size)
1156{
1157 struct platform_device *pdev;
1158 struct ddr_devfreq_data *data;
1159 int is_disabled;
1160
1161 pdev = container_of(dev, struct platform_device, dev);
1162 data = platform_get_drvdata(pdev);
1163
1164 if (0x1 != sscanf(buf, "%d", &is_disabled)) {
1165 dev_err(dev, "<ERR> wrong parameter\n");
1166 return -E2BIG;
1167 }
1168
1169 is_disabled = !!is_disabled;
1170 if (is_disabled == atomic_read(&data->is_disabled)) {
1171 dev_info_ratelimited(dev, "[WARNING] ddr fc is already %s\n",
1172 atomic_read(&data->is_disabled) ?
1173 "disabled" : "enabled");
1174 return size;
1175 }
1176
1177 if (is_disabled)
1178 atomic_inc(&data->is_disabled);
1179 else
1180 atomic_dec(&data->is_disabled);
1181
1182 dev_info(dev, "[WARNING]ddr fc is %s from debug interface!\n",
1183 atomic_read(&data->is_disabled) ? "disabled" : "enabled");
1184 return size;
1185}
1186
1187static ssize_t disable_show(struct device *dev, struct device_attribute *attr,
1188 char *buf)
1189{
1190 struct platform_device *pdev;
1191 struct ddr_devfreq_data *data;
1192
1193 pdev = container_of(dev, struct platform_device, dev);
1194 data = platform_get_drvdata(pdev);
1195 return sprintf(buf, "ddr fc is_disabled = %d\n",
1196 atomic_read(&data->is_disabled));
1197}
1198
/*
 * Debug interface used to change the ddr rate directly.
 * It ignores all devfreq and QoS requests, so use the disable_ddr_fc
 * interface before using it.
 */
1204static ssize_t ddr_freq_store(struct device *dev, struct device_attribute *attr,
1205 const char *buf, size_t size)
1206{
1207 struct platform_device *pdev;
1208 struct ddr_devfreq_data *data;
1209 int freq;
1210
1211 pdev = container_of(dev, struct platform_device, dev);
1212 data = platform_get_drvdata(pdev);
1213
1214 if (!atomic_read(&data->is_disabled)) {
1215 dev_err(dev, "<ERR> It will change ddr rate,"
1216 "disable ddr fc at first\n");
1217 return -EPERM;
1218 }
1219
1220 if (0x1 != sscanf(buf, "%d", &freq)) {
1221 dev_err(dev, "<ERR> wrong parameter, "
1222 "echo freq > ddr_freq to set ddr rate(unit Khz)\n");
1223 return -E2BIG;
1224 }
1225 ddr_set_rate(data, freq);
1226
1227 dev_dbg(dev, "ddr freq read back: %lu\n",
1228 clk_get_rate(data->ddr_clk) / KHZ_TO_HZ);
1229
1230 return size;
1231}
1232
1233static ssize_t ddr_freq_show(struct device *dev, struct device_attribute *attr,
1234 char *buf)
1235{
1236 struct platform_device *pdev;
1237 struct ddr_devfreq_data *data;
1238
1239 pdev = container_of(dev, struct platform_device, dev);
1240 data = platform_get_drvdata(pdev);
1241 return sprintf(buf, "current ddr freq is: %lu\n",
1242 clk_get_rate(data->ddr_clk) / KHZ_TO_HZ);
1243}
1244
1245/* debug interface to enable/disable perf counter during AP suspend */
1246static ssize_t stop_perf_store(struct device *dev,
1247 struct device_attribute *attr, const char *buf, size_t size)
1248{
1249 struct platform_device *pdev;
1250 struct ddr_devfreq_data *data;
1251 int is_stopped;
1252
1253 pdev = container_of(dev, struct platform_device, dev);
1254 data = platform_get_drvdata(pdev);
1255
1256 if (0x1 != sscanf(buf, "%d", &is_stopped)) {
1257 dev_err(dev, "<ERR> wrong parameter\n");
1258 return -E2BIG;
1259 }
1260
1261 is_stopped = !!is_stopped;
1262 if (is_stopped == atomic_read(&data->is_stopped)) {
		dev_info(dev, "perf counter is already %s in suspend\n",
1264 atomic_read(&data->is_stopped) ? "off" : "on");
1265 return size;
1266 }
1267
1268 if (is_stopped)
1269 atomic_inc(&data->is_stopped);
1270 else
1271 atomic_dec(&data->is_stopped);
1272
1273 dev_info(dev, "perf counter is %s from debug interface!\n",
1274 atomic_read(&data->is_stopped) ? "off" : "on");
1275 return size;
1276}
1277
1278static ssize_t stop_perf_show(struct device *dev, struct device_attribute *attr,
1279 char *buf)
1280{
1281 struct platform_device *pdev;
1282 struct ddr_devfreq_data *data;
1283
1284 pdev = container_of(dev, struct platform_device, dev);
1285 data = platform_get_drvdata(pdev);
1286 return sprintf(buf, "perf counter is_stopped = %d\n",
1287 atomic_read(&data->is_stopped));
1288}
1289
1290
/* used to report the ddr duty-cycle counters collected over the sampling window (e.g. 20 ms) */
1292int ddr_profiling_show(struct clk_dc_stat_info *dc_stat_info)
1293{
1294 struct ddr_devfreq_data *data;
1295 struct perf_counters *ddr_ticks, *ddr_ticks_base, *ddr_ticks_diff;
1296 int i, j, k, len = 0;
1297 unsigned long flags;
1298 unsigned int ver;
1299 unsigned int glob_ratio, idle_ratio, busy_ratio, data_ratio, util_ratio;
1300 unsigned int tmp_total, tmp_rw_cmd, tmp_busy;
1301 unsigned int tmp_data_cycle, cnttime_ms, cnttime_ms_ddr;
1302 u64 glob_ticks;
1303
1304 data = ddrfreq_data;
1305 ddr_ticks = data->dmc.ddr_ticks;
1306 ddr_ticks_base = data->ddr_stats.ddr_ticks_base;
1307 ddr_ticks_diff = data->ddr_stats.ddr_ticks_diff;
1308 ver = data->dmc.version;
1309 idle_ratio = busy_ratio = data_ratio = util_ratio = 0;
1310
1311 /* If ddr stat is working, need get latest data */
1312 if (data->ddr_stats.is_ddr_stats_working) {
1313 ktime_get_ts64(&data->stop_ts);
1314 ddr_perf_cnt_update(data, 0, 1);
1315 spin_lock_irqsave(&data->lock, flags);
1316 for (i = 0; i < data->ddr_freq_tbl_len; i++)
1317 for (j = 0; j < data->dmc.pmucnt_in_use; j++)
1318 ddr_ticks_diff[i].reg[j] =
1319 ddr_ticks[i].reg[j] -
1320 ddr_ticks_base[i].reg[j];
1321 spin_unlock_irqrestore(&data->lock, flags);
1322 }
1323 cnttime_ms = (data->stop_ts.tv_sec - data->start_ts.tv_sec) * MSEC_PER_SEC +
1324 (data->stop_ts.tv_nsec - data->start_ts.tv_nsec) / NSEC_PER_MSEC;
1325
1326
1327 cnttime_ms_ddr = 0;
1328 for (i = 0; i < data->ddr_freq_tbl_len; i++) {
1329 cnttime_ms_ddr += div_u64(ddr_ticks_diff[i].reg[1],
1330 ddr_index2_rate(data, i));
1331 }
1332
1333 /* ddr duty cycle show */
1334 glob_ticks = 0;
1335
1336 spin_lock_irqsave(&data->lock, flags);
1337
1338 for (i = 0; i < data->ddr_freq_tbl_len; i++)
1339 glob_ticks += ddr_ticks_diff[i].reg[1];
1340
1341 k = 0;
1342 while ((glob_ticks >> k) > 0x7FFF)
1343 k++;
1344
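	/*
	 * Shift the 64-bit tick counters down (by k globally and j per OPP)
	 * so that the "* 100000" products below fit in 32 bits; the ratios
	 * are therefore expressed in units of 0.001%, and the "+ 5" looks
	 * like a rounding bias for a later divide by ten.
	 */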
1345 for (i = 0; i < data->ddr_freq_tbl_len; i++) {
1346 if ((u32)(glob_ticks >> k) != 0)
1347 glob_ratio = (u32)(ddr_ticks_diff[i].reg[1] >> k)
1348 * 100000 / (u32)(glob_ticks >> k) + 5;
1349 else
1350 glob_ratio = 0;
1351
1352 j = 0;
1353 while ((ddr_ticks_diff[i].reg[1] >> j) > 0x7FFF)
1354 j++;
1355
1356 tmp_total = ddr_ticks_diff[i].reg[1] >> j;
1357 tmp_rw_cmd = ddr_ticks_diff[i].reg[2] >> j;
1358
1359 if (ver == MCK5 || ver == NZAS_MC)
1360 tmp_busy = ddr_ticks_diff[i].reg[3] >> j;
1361 else
1362 /* this should never happen */
1363 BUG_ON(1);
1364
1365 if (tmp_total != 0) {
1366 tmp_data_cycle = tmp_rw_cmd * data->bst_len / 2;
1367 if (data->mode_4x_en)
1368 tmp_data_cycle = tmp_data_cycle >> 1;
1369
1370 data_ratio = tmp_data_cycle * 100000 / tmp_total + 5;
1371
1372 if (ver == MCK5 || ver == NZAS_MC) {
1373 busy_ratio = tmp_busy * 100000 / tmp_total + 5;
1374
1375 idle_ratio = (tmp_total - tmp_busy)
1376 * 100000 / tmp_total + 5;
1377
1378 util_ratio = tmp_data_cycle * 100000
1379 / tmp_busy + 5;
1380 }
1381 } else {
1382 idle_ratio = 0;
1383 busy_ratio = 0;
1384 data_ratio = 0;
1385 util_ratio = 0;
1386 }
1387
1388 dc_stat_info->ops_dcstat[i].ddr_glob_ratio = glob_ratio;
1389 dc_stat_info->ops_dcstat[i].ddr_idle_ratio = idle_ratio;
1390 dc_stat_info->ops_dcstat[i].ddr_busy_ratio = busy_ratio;
1391 dc_stat_info->ops_dcstat[i].ddr_data_ratio = data_ratio;
1392 dc_stat_info->ops_dcstat[i].ddr_util_ratio = util_ratio;
1393 }
1394 spin_unlock_irqrestore(&data->lock, flags);
1395
1396 return len;
1397}
1398
/* used to start/stop collection of the ddr counters over a user-controlled interval */
1400int ddr_profiling_store(int start)
1401{
1402 struct ddr_devfreq_data *data;
1403 unsigned int cap_flag, i, j;
1404 unsigned long flags;
1405 struct perf_counters *ddr_ticks_base;
1406 struct perf_counters *ddr_ticks_diff;
1407
1408 data = ddrfreq_data;
1409 ddr_ticks_base = data->ddr_stats.ddr_ticks_base;
1410 ddr_ticks_diff = data->ddr_stats.ddr_ticks_diff;
1411
1412 cap_flag = start;
1413
1414 if (cap_flag == 1) {
1415 ddr_perf_cnt_update(data, 0, 1);
1416 spin_lock_irqsave(&data->lock, flags);
1417 for (i = 0; i < data->ddr_freq_tbl_len; i++) {
1418 memcpy(ddr_ticks_base[i].reg,
1419 data->dmc.ddr_ticks[i].reg,
1420 sizeof(u64) * data->dmc.pmucnt_in_use);
1421 }
1422 spin_unlock_irqrestore(&data->lock, flags);
1423 ktime_get_ts64(&data->start_ts);
1424 data->ddr_stats.is_ddr_stats_working = 1;
1425 } else if (cap_flag == 0 && data->ddr_stats.is_ddr_stats_working == 1) {
1426 data->ddr_stats.is_ddr_stats_working = 0;
1427 ktime_get_ts64(&data->stop_ts);
1428 ddr_perf_cnt_update(data, 0, 1);
1429 /* When stop ddr stats, get a snapshot of current result */
1430 spin_lock_irqsave(&data->lock, flags);
1431 for (i = 0; i < data->ddr_freq_tbl_len; i++)
1432 for (j = 0; j < data->dmc.pmucnt_in_use; j++)
1433 ddr_ticks_diff[i].reg[j] =
1434 data->dmc.ddr_ticks[i].reg[j] -
1435 ddr_ticks_base[i].reg[j];
1436 spin_unlock_irqrestore(&data->lock, flags);
1437 }
1438
1439 return 0;
1440}
1441
1442static ssize_t normal_upthrd_show(struct device *dev,
1443 struct device_attribute *attr,
1444 char *buf)
1445{
1446 return sprintf(buf, "%u\n", devfreq_throughput_data.upthreshold);
1447}
1448
1449static ssize_t normal_upthrd_store(struct device *dev,
1450 struct device_attribute *attr,
1451 const char *buf, size_t size)
1452{
1453 struct platform_device *pdev;
1454 struct ddr_devfreq_data *data;
1455 struct devfreq *devfreq;
1456 unsigned int normal_upthrd;
1457
1458 pdev = container_of(dev, struct platform_device, dev);
1459 data = platform_get_drvdata(pdev);
1460 devfreq = data->devfreq;
1461
1462 if (0x1 != sscanf(buf, "%u", &normal_upthrd)) {
1463 dev_err(dev, "<ERR> wrong parameter\n");
1464 return -E2BIG;
1465 }
1466
1467 mutex_lock(&devfreq->lock);
1468
1469 devfreq_throughput_data.upthreshold = normal_upthrd;
1470
1471 if (!data->cpu_up)
1472 __update_dev_upthreshold(normal_upthrd, devfreq->data);
1473
1474 mutex_unlock(&devfreq->lock);
1475
1476 return size;
1477}
1478
1479static ssize_t upthrd_downdiff_show(struct device *dev,
1480 struct device_attribute *attr,
1481 char *buf)
1482{
1483 return sprintf(buf, "%u\n", devfreq_throughput_data.downdifferential);
1484}
1485
1486static ssize_t upthrd_downdiff_store(struct device *dev,
1487 struct device_attribute *attr,
1488 const char *buf, size_t size)
1489{
1490 struct platform_device *pdev;
1491 struct ddr_devfreq_data *data;
1492 struct devfreq *devfreq;
1493 unsigned int upthrd_downdiff;
1494
1495 pdev = container_of(dev, struct platform_device, dev);
1496 data = platform_get_drvdata(pdev);
1497 devfreq = data->devfreq;
1498
1499 if (0x1 != sscanf(buf, "%u", &upthrd_downdiff)) {
1500 dev_err(dev, "<ERR> wrong parameter\n");
1501 return -E2BIG;
1502 }
1503
1504 mutex_lock(&devfreq->lock);
1505
1506 devfreq_throughput_data.downdifferential = upthrd_downdiff;
1507
1508 if (data->cpu_up)
1509 __update_dev_upthreshold(data->high_upthrd, devfreq->data);
1510 else
1511 __update_dev_upthreshold(devfreq_throughput_data.upthreshold,
1512 devfreq->data);
1513
1514 mutex_unlock(&devfreq->lock);
1515
1516 return size;
1517}
1518
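/*
 * Report the per-port AXI throughput and latency statistics together with
 * the DDR workload. The byte counters are scaled by 16 when converting to
 * Kbytes/s, which suggests the AXI monitors count in 16-byte units; the
 * AXI_tp_rate and "ddr efficiency" lines express total AXI traffic as a
 * fraction (out of 100) of the DDR cycle / data-cycle counts.
 */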
1519static ssize_t ddr_workload_show(struct device *dev, struct device_attribute *attr,
1520 char *buf)
1521{
1522 struct platform_device *pdev;
1523 struct ddr_devfreq_data *data;
1524 int i, count = 0;
1525 u64 axi_total_bytes = 0;
1526
1527 pdev = container_of(dev, struct platform_device, dev);
1528 data = platform_get_drvdata(pdev);
1529 if (data->axi_mon_base && data->devfreq->profile->polling_ms) {
1530 count += sprintf(buf + count, "AXI R TPT:\n");
1531 for (i = 0; i < NR_AXI_MON_PORT; i++) {
1532 count += sprintf(buf + count, "Port%d: %15d Kbytes / s\n",
1533 i, (data->ddr_stats.axi_read_bytes[i] / 1024)
1534 * (16 * 1000 / data->devfreq->profile->polling_ms));
1535 axi_total_bytes += data->ddr_stats.axi_read_bytes[i];
1536 }
1537
1538 count += sprintf(buf + count, "AXI W TPT:\n");
1539 for (i = 0; i < NR_AXI_MON_PORT; i++) {
1540 count += sprintf(buf + count, "Port%d: %15d Kbytes / s\n",
1541 i, (data->ddr_stats.axi_write_bytes[i] / 1024)
1542 * (16 * 1000 / data->devfreq->profile->polling_ms));
1543 axi_total_bytes += data->ddr_stats.axi_write_bytes[i];
1544 }
1545 count += sprintf(buf + count, "AXI_tp_rate: %lld / 100\n",
1546 div_u64((axi_total_bytes * 25), (data->ddr_stats.ddr_cycles >> 4)));
1547 count += sprintf(buf + count, "ddr efficiency: %lld / 100\n\n",
1548 div_u64((axi_total_bytes * 25), (data->ddr_stats.data_cycles >> 4)));
1549
1550 count += sprintf(buf + count, "max read latency:\n");
1551 for (i = 0; i < NR_AXI_MON_PORT; i++)
1552 count += sprintf(buf + count, "Port%d: %15d\n",
1553 i, data->ddr_stats.max_read_latency[i]);
1554
1555 count += sprintf(buf + count, "max write latency:\n");
1556 for (i = 0; i < NR_AXI_MON_PORT; i++)
1557 count += sprintf(buf + count, "Port%d: %15d\n",
1558 i, data->ddr_stats.max_write_latency[i]);
1559 }
1560 count += sprintf(buf + count, "ddr workload: %3d / 100\n", data->workload);
1561 return count;
1562}
1563
1564static struct pm_qos_request ddrfreq_qos_boot_max;
1565static struct pm_qos_request ddrfreq_qos_boot_min;
1566
1567static DEVICE_ATTR(stop_perf_in_suspend, S_IRUGO | S_IWUSR,
1568 stop_perf_show, stop_perf_store);
1569static DEVICE_ATTR(high_upthrd_swp, S_IRUGO | S_IWUSR,
1570 high_swp_show, high_swp_store);
1571static DEVICE_ATTR(high_upthrd, S_IRUGO | S_IWUSR,
1572 high_upthrd_show, high_upthrd_store);
1573static DEVICE_ATTR(disable_ddr_fc, S_IRUGO | S_IWUSR,
1574 disable_show, disable_store);
1575static DEVICE_ATTR(ddr_freq, S_IRUGO | S_IWUSR,
1576 ddr_freq_show, ddr_freq_store);
1577static DEVICE_ATTR(normal_upthrd, S_IRUGO | S_IWUSR,
1578 normal_upthrd_show, normal_upthrd_store);
1579static DEVICE_ATTR(upthrd_downdiff, S_IRUGO | S_IWUSR,
1580 upthrd_downdiff_show, upthrd_downdiff_store);
1581static DEVICE_ATTR(workload, S_IRUGO, ddr_workload_show, NULL);
1582
/*
 * Overflow interrupt handler
 * Based on the DE's suggestion, the flow to clear the interrupt is:
 * 1. Disable the interrupt.
 * 2. Read the interrupt status to clear it.
 * 3. Enable the interrupt again.
 * The DE also suggested clearing the overflow flag here. After confirming,
 * the only side effect of not clearing the flag is that the next overflow
 * event, whether or not it is the same event that triggered this interrupt,
 * will not trigger another interrupt. Since the work will check and clear
 * all overflow events, it is safe not to clear the overflow flag in the
 * top half.
 */
1595static irqreturn_t ddrc_overflow_handler(int irq, void *dev_id)
1596{
1597 struct ddr_devfreq_data *data = dev_id;
1598 void *mc_base = (void *)data->dmc.hw_base;
1599 struct mck_pmu_regs_offset *regs = &data->dmc.mck_regs;
1600 unsigned int ver = data->dmc.version;
1601 u32 int_flag;
1602
1603 /*
1604 * Step1: Write to SDRAM Interrupt Enable Register to disable
1605 * interrupt
1606 */
1607 write_static_register(0x0, 0x0, mc_base + regs->intr_en, ver);
1608 /* Step2: Read SDRAM Interrupt Status Register to clear interrupt */
1609 int_flag = read_static_register(mc_base + regs->intr_stat, ver) & data->intr_en_val;
1610 if (NZAS_MC == ver)
1611 write_static_register(int_flag, 0x0, mc_base + regs->intr_stat, ver);
1612 if (!int_flag) {
1613 if (!cpu_is_asr1828())
1614 pr_err("No pended MC interrupt when handling it.\n"
1615 "This should not happen.\n");
1616 write_static_register(data->intr_en_val, data->intr_en_val, mc_base + regs->intr_en, ver);
1617 return IRQ_HANDLED;
1618 }
1619
1620#ifndef CONFIG_OPTEE
1621 if (int_flag & 0x4) {
1622 pr_err("ddr error: 0x%08x 0x%08x 0x%08x 0x%08x\n",
1623 read_static_register(mc_base + regs->adc_err_info, ver),
1624 read_static_register(mc_base + regs->adc_err_addr_l, ver),
1625 read_static_register(mc_base + regs->adc_err_addr_h, ver),
1626 read_static_register(mc_base + regs->adc_err_id, ver));
1627 /* clear error info */
1628 write_static_register((read_static_register(mc_base + regs->adc_err_info, ver) | DDR_ADC_INFO_CLR),
1629 (read_static_register(mc_base + regs->adc_err_info, ver) | DDR_ADC_INFO_CLR),
1630 (mc_base + regs->adc_err_info), ver);
1631 }
1632#endif
1633
1634 /*
1635 * Step3: Write to SDRAM Interrupt Enable Register to enable
1636 * interrupt again
1637 */
1638 write_static_register(data->intr_en_val, data->intr_en_val, mc_base + regs->intr_en, ver);
1639
1640 /* overflow */
1641 if (int_flag & 0x1)
1642 schedule_work(&data->overflow_work);
1643
1644 return IRQ_HANDLED;
1645}
1646
/*
 * Queued work for the overflow interrupt.
 * During the update, the overflow flags are checked and cleared.
 */
1651static void ddrc_overflow_worker(struct work_struct *work)
1652{
1653 struct ddr_devfreq_data *data = container_of(work,
1654 struct ddr_devfreq_data, overflow_work);
1655 u32 overflow_flag;
1656 void *mc_base = (void *)data->dmc.hw_base;
1657 struct mck_pmu_regs_offset *regs = &data->dmc.mck_regs;
1658 unsigned int ver = data->dmc.version;
1659
1660 /* Check if there is unexpected behavior */
1661 overflow_flag = read_static_register(mc_base + regs->cnt_stat, ver)
1662 & 0xf;
1663 if (!overflow_flag) {
1664 if (ver == MCK5 || ver == NZAS_MC) {
1665 pr_warn("No overflag pended when interrupt happen.\n"
1666 "This should rarely happen.\n");
1667 } else
1668 /* this should never happen */
1669 BUG_ON(1);
1670 }
1671
1672 /* update stat and clear overflow flag */
1673 ddr_perf_cnt_update(data, 1, 1);
1674}
1675
1676#ifdef CONFIG_CPU_ASR1903
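/*
 * CPU PM notifier hook: flush the accumulated counters before entering the
 * low-power state and re-initialize/restart them on exit, presumably because
 * the memory controller's counter setup does not survive the low-power state
 * on this SoC.
 */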
1677int devfreq_cpu_pm_notify(unsigned long pm_action)
1678{
1679 static bool devfreq_pm_flag = false;
1680
1681 if (ddrfreq_data) {
1682 if (pm_action == CPU_PM_ENTER) {
1683 ddr_perf_cnt_update(ddrfreq_data, 0, 0);
1684 devfreq_pm_flag = true;
1685 } else if ((pm_action == CPU_PM_EXIT) && (devfreq_pm_flag == true)) {
1686 init_ddr_performance_counter(ddrfreq_data);
1687 ddr_perf_cnt_restart(ddrfreq_data);
1688 devfreq_pm_flag = false;
1689 }
1690 }
1691 return NOTIFY_OK;
1692}
1693#endif
1694
1695static int ddr_devfreq_probe(struct platform_device *pdev)
1696{
1697 int i = 0, res;
1698 int ret = 0;
1699 struct device *dev = &pdev->dev;
1700 struct ddr_devfreq_data *data = NULL;
1701 struct devfreq_frequency_table *tbl;
1702 unsigned int reg_info[2];
1703 unsigned int freq_qos = 0;
1704 unsigned int tmp, ver, pmucnt_in_use;
1705 struct resource *irqres;
1706 void __iomem *apmu_base = NULL;
1707 struct resource *r;
1708
1709 data = devm_kzalloc(dev, sizeof(struct ddr_devfreq_data), GFP_KERNEL);
1710 if (data == NULL) {
1711 dev_err(dev, "Cannot allocate memory for devfreq data.\n");
1712 return -ENOMEM;
1713 }
1714
1715 data->ddr_clk = __clk_lookup("ddr");
1716 if (IS_ERR(data->ddr_clk)) {
1717 dev_err(dev, "Cannot get clk ptr.\n");
1718 return PTR_ERR(data->ddr_clk);
1719 }
1720
1721 if (IS_ENABLED(CONFIG_OF)) {
1722 if (of_property_read_u32_array(pdev->dev.of_node,
1723 "reg", reg_info, 2)) {
1724 dev_err(dev, "Failed to get register info\n");
1725 return -ENODATA;
1726 }
1727 } else {
1728 reg_info[0] = DEFAULT_MCK_BASE_ADDR;
1729 reg_info[1] = DEFAULT_MCK_REG_SIZE;
1730 }
1731
1732 /* axi monitor registers */
1733 r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1734 if (!r) {
1735 dev_err(&pdev->dev, "%s: no aximonitor defined\n", __func__);
1736 } else {
1737 data->axi_mon_base = ioremap(r->start, resource_size(r));
1738 axi_mon_init(data->axi_mon_base);
1739 }
1740
1741 if (cpu_is_asr1901() || cpu_is_asr1906() || cpu_is_asr18xx() || cpu_is_asr1903()) {
1742 apmu_base = regs_addr_get_va(REGS_ADDR_APMU);
1743 if (cpu_is_asr1901() || cpu_is_asr1906() || (apmu_base && (readl(apmu_base +
1744 APMU_MC_HW_SLP_TYPE) & MODE_4X_EN))) {
1745 data->mode_4x_en = 1;
1746 pr_info("ddr clk 4x mode enabled\n");
1747 }
1748 }
1749
1750 data->dmc.hw_base = ioremap(reg_info[0], reg_info[1]);
1751
1752 /* read MCK controller version */
1753 data->dmc.version = MCK_UNKNOWN;
1754
1755 tmp = readl(data->dmc.hw_base);
1756
1757 ver = (tmp & MCK5_VER_MASK) >> MCK5_VER_SHIFT;
1758 if (cpu_is_asr1901() || cpu_is_asr1906())
1759 ver = NZAS_MC;
1760 if (ver == MCK5 || ver == NZAS_MC) {
1761 data->dmc.version = ver;
1762 data->dmc.pmucnt_in_use = DEFAULT_PERCNT_IN_USE;
1763 }
1764
1765 if (data->dmc.version == MCK_UNKNOWN) {
1766 dev_err(dev, "Unsupported mck version!\n");
1767 return -EINVAL;
1768 }
1769 dev_info(dev, "dmcu%d controller is detected!\n", ver);
1770
1771 configure_mck_pmu_regs(data);
1772
1773 /* get ddr burst length */
1774 if (data->dmc.version == NZAS_MC) {
1775 data->bst_len = 1 << ((read_static_register(data->dmc.hw_base +
1776 NZAS_MC_MC_Control_0, ver) & NZAS_MC_MC_Control_0_BL_MASK)
1777 >> NZAS_MC_MC_Control_0_BL_SHIFT);
1778 } else if (data->dmc.version == MCK5) {
1779 data->bst_len = 1 << ((read_static_register(data->dmc.hw_base +
1780 MCK5_CH0_SDRAM_CFG1, ver) & MCK5_CH0_SDRAM_CFG1_BL_MASK)
1781 >> MCK5_CH0_SDRAM_CFG1_BL_SHIFT);
1782 }
1783
1784 dev_info(dev, "ddr burst length = %d\n", data->bst_len);
1785
1786 /* save ddr frequency tbl */
1787 i = 0;
1788 tbl = devfreq_frequency_get_table(DEVFREQ_DDR);
1789 if (tbl) {
1790 while (tbl->frequency != DEVFREQ_TABLE_END) {
1791 data->ddr_freq_tbl[i] = tbl->frequency;
1792 tbl++;
1793 i++;
1794 }
1795 data->ddr_freq_tbl_len = i;
1796 }
1797
1798 ddr_devfreq_profile.initial_freq =
1799 clk_get_rate(data->ddr_clk) / KHZ_TO_HZ;
1800
1801 /* set the frequency table of devfreq profile */
1802 if (data->ddr_freq_tbl_len) {
1803 ddr_devfreq_profile.freq_table = (unsigned long *)data->ddr_freq_tbl;
1804 ddr_devfreq_profile.max_state = data->ddr_freq_tbl_len;
1805 for (i = 0; i < data->ddr_freq_tbl_len; i++)
1806 dev_pm_opp_add(dev, data->ddr_freq_tbl[i], 1000);
1807 }
1808
	/* allocate memory for performance counter-related arrays */
1810 pmucnt_in_use = data->dmc.pmucnt_in_use;
1811 for (i = 0; i < data->ddr_freq_tbl_len; i++) {
1812 data->dmc.ddr_ticks[i].reg = devm_kzalloc(dev,
1813 sizeof(u64) * pmucnt_in_use, GFP_KERNEL);
1814 if (data->dmc.ddr_ticks[i].reg == NULL) {
1815 dev_err(dev, "Cannot allocate memory for perf_cnt.\n");
1816 return -ENOMEM;
1817 }
1818 data->ddr_stats.ddr_ticks_base[i].reg = devm_kzalloc(dev,
1819 sizeof(u64) * pmucnt_in_use, GFP_KERNEL);
1820 if (data->ddr_stats.ddr_ticks_base[i].reg == NULL) {
1821 dev_err(dev, "Cannot allocate memory for ddr_stats.\n");
1822 return -ENOMEM;
1823 }
1824 data->ddr_stats.ddr_ticks_diff[i].reg = devm_kzalloc(dev,
1825 sizeof(u64) * pmucnt_in_use, GFP_KERNEL);
1826 if (data->ddr_stats.ddr_ticks_diff[i].reg == NULL) {
1827 dev_err(dev, "Cannot allocate memory for ddr_stats.\n");
1828 return -ENOMEM;
1829 }
1830 }
1831
1832 irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1833 if (!irqres)
1834 return -ENODEV;
1835 data->irq = irqres->start;
1836 if (cpu_is_asr1828())
1837 ret = request_irq(data->irq, ddrc_overflow_handler, IRQF_SHARED, dev_name(dev),
1838 data);
1839 else
1840 ret = request_irq(data->irq, ddrc_overflow_handler, 0, dev_name(dev),
1841 data);
1842 if (ret) {
1843 dev_err(dev, "Cannot request irq for MC!\n");
1844 return -ENODEV;
1845 }
1846 INIT_WORK(&data->overflow_work, ddrc_overflow_worker);
1847
1848 /*
	 * Initialize the devfreq QoS if the freq-qos flag is enabled.
1850 * By default, the flag is disabled.
1851 */
1852 freq_qos = 0;
1853
1854 if (IS_ENABLED(CONFIG_OF)) {
1855 if (of_property_read_bool(pdev->dev.of_node, "marvell,qos"))
1856 freq_qos = 1;
1857 }
1858
1859 if (freq_qos) {
1860 ddr_devfreq_profile.min_qos_type = PM_QOS_DDR_DEVFREQ_MIN;
1861 ddr_devfreq_profile.max_qos_type = PM_QOS_DDR_DEVFREQ_MAX;
1862 }
1863
1864 /* by default, disable performance counter when AP enters suspend */
1865 atomic_set(&data->is_stopped, 1);
1866
1867#ifdef CONFIG_DEVFREQ_GOV_THROUGHPUT
1868 devfreq_throughput_data.freq_table = data->ddr_freq_tbl;
1869 devfreq_throughput_data.table_len = data->ddr_freq_tbl_len;
1870
1871 devfreq_throughput_data.throughput_table =
1872 kzalloc(devfreq_throughput_data.table_len
1873 * sizeof(struct throughput_threshold), GFP_KERNEL);
1874 if (NULL == devfreq_throughput_data.throughput_table) {
1875 dev_err(dev,
1876 "Cannot allocate memory for throughput table\n");
1877 return -ENOMEM;
1878 }
1879
1880 for (i = 0; i < devfreq_throughput_data.table_len; i++) {
1881 if (data->mode_4x_en) {
1882 devfreq_throughput_data.throughput_table[i].up =
1883 devfreq_throughput_data.upthreshold
1884 * devfreq_throughput_data.ddr_efficiency
1885 * (devfreq_throughput_data.freq_table[i] / 100) / 100;
1886 devfreq_throughput_data.throughput_table[i].down =
1887 (devfreq_throughput_data.upthreshold
1888 - devfreq_throughput_data.downdifferential)
1889 * devfreq_throughput_data.ddr_efficiency
1890 * (devfreq_throughput_data.freq_table[i] / 100) / 100;
1891 } else {
1892 devfreq_throughput_data.throughput_table[i].up =
1893 devfreq_throughput_data.upthreshold
1894 * devfreq_throughput_data.freq_table[i] / 100;
1895 devfreq_throughput_data.throughput_table[i].down =
1896 (devfreq_throughput_data.upthreshold
1897 - devfreq_throughput_data.downdifferential)
1898 * devfreq_throughput_data.freq_table[i] / 100;
1899 }
1900 }
1901#endif /* CONFIG_DEVFREQ_GOV_THROUGHPUT */
1902
1903 spin_lock_init(&data->lock);
1904
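/*
 * Register with the devfreq framework using the custom "throughput"
 * governor; the governor data carries the frequency table and the
 * per-frequency up/down thresholds computed above.
 */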
1905 data->devfreq = devfreq_add_device(&pdev->dev, &ddr_devfreq_profile,
1906 "throughput", &devfreq_throughput_data);
1907 if (IS_ERR(data->devfreq)) {
1908 dev_err(dev, "devfreq_add_device failed!\n");
1909 ret = PTR_ERR(data->devfreq);
1910 goto err_devfreq_add;
1911 }
1912
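/*
 * Governor tuning defaults: once the CPU frequency reported by the
 * transition notifier crosses high_upthrd_swp (800 MHz by default),
 * the notifier presumably switches the governor to the more aggressive
 * high_upthrd (30%) so DDR scales up sooner under heavy CPU load;
 * below the swap point the normal upthreshold applies.
 */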
1913 data->high_upthrd_swp = DDR_DEVFREQ_HIGHCPUFREQ;
1914 data->high_upthrd = DDR_DEVFREQ_HIGHCPUFREQ_UPTHRESHOLD;
1915 data->cpu_up = 0;
1916 data->gpu_up = 0;
1917#ifdef CONFIG_DEVFREQ_GOV_THROUGHPUT
1918 data->freq_transition.notifier_call = upthreshold_freq_notifer_call;
1919 ddrfreq_driver_data = data;
1920#endif /* CONFIG_DEVFREQ_GOV_THROUGHPUT */
1921 ddrfreq_data = data;
1922
1923 /* init default devfreq min_freq and max_freq */
1924 data->devfreq->min_freq = data->devfreq->qos_min_freq =
1925 data->ddr_freq_tbl[0];
1926 data->devfreq->max_freq = data->devfreq->qos_max_freq =
1927 data->ddr_freq_tbl[data->ddr_freq_tbl_len - 1];
1928 data->last_polled_at = jiffies;
1929
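/*
 * The attributes created below expose runtime tuning knobs in the
 * platform device's sysfs directory (the exact path depends on the DT
 * node name, typically somewhere under /sys/devices/platform/). A
 * hypothetical tuning session might look like:
 *
 *   cat ddr_freq                  # inspect DDR frequency information
 *   echo 70 > normal_upthrd       # raise the governor's upthreshold
 *   echo 1 > disable_ddr_fc       # freeze DDR frequency changes
 *
 * The exact value semantics are defined by the show/store handlers
 * attached to each attribute earlier in this file.
 */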
1930 res = device_create_file(&pdev->dev, &dev_attr_disable_ddr_fc);
1931 if (res) {
1932 dev_err(dev,
1933 "device attr disable_ddr_fc create fail: %d\n", res);
1934 ret = -ENOENT;
1935 goto err_file_create0;
1936 }
1937
1938 res = device_create_file(&pdev->dev, &dev_attr_ddr_freq);
1939 if (res) {
1940 dev_err(dev, "device attr ddr_freq create fail: %d\n", res);
1941 ret = -ENOENT;
1942 goto err_file_create1;
1943 }
1944
1945 res = device_create_file(&pdev->dev, &dev_attr_stop_perf_in_suspend);
1946 if (res) {
1947 dev_err(dev,
1948 "device attr stop_perf_in_suspend create fail: %d\n", res);
1949 ret = -ENOENT;
1950 goto err_file_create2;
1951 }
1952
1953#ifdef CONFIG_DEVFREQ_GOV_THROUGHPUT
1954 res = device_create_file(&pdev->dev, &dev_attr_high_upthrd_swp);
1955 if (res) {
1956 dev_err(dev,
1957 "device attr high_upthrd_swp create fail: %d\n", res);
1958 ret = -ENOENT;
1959 goto err_file_create3;
1960 }
1961
1962 res = device_create_file(&pdev->dev, &dev_attr_high_upthrd);
1963 if (res) {
1964 dev_err(dev,
1965 "device attr high_upthrd create fail: %d\n", res);
1966 ret = -ENOENT;
1967 goto err_file_create4;
1968 }
1969
1970 /*
1971 * register the notifier to cpufreq driver,
1972 * it is triggered when core freq-chg is done
1973 */
1974 cpufreq_register_notifier(&data->freq_transition,
1975 CPUFREQ_TRANSITION_NOTIFIER);
1976#endif
1977
1978 res = device_create_file(&pdev->dev, &dev_attr_normal_upthrd);
1979 if (res) {
1980 dev_err(dev,
1981 "device attr normal_upthrd create fail: %d\n", res);
1982 ret = -ENOENT;
1983 goto err_file_create5;
1984 }
1985
1986 res = device_create_file(&pdev->dev, &dev_attr_upthrd_downdiff);
1987 if (res) {
1988 dev_err(dev,
1989 "device attr upthrd_downdiff create fail: %d\n", res);
1990 ret = -ENOENT;
1991 goto err_file_create6;
1992 }
1993
1994 res = device_create_file(&pdev->dev, &dev_attr_workload);
1995 if (res) {
1996 dev_err(dev,
1997 "device attr workload create fail: %d\n", res);
1998 ret = -ENOENT;
1999 goto err_file_create7;
2000 }
2001
2002 platform_set_drvdata(pdev, data);
2003 ddr_perf_cnt_init(data);
2004
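/*
 * Honour optional boot-time limits (ddr_max/ddr_min, presumably parsed
 * from module or command-line parameters earlier in this file) by adding
 * PM QoS requests: the maximum is rounded down to the nearest table
 * entry, the minimum is rounded up.
 */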
2005 if (ddr_max) {
2006 tmp = data->ddr_freq_tbl[data->ddr_freq_tbl_len - 1];
2007 for (i = 1; i < data->ddr_freq_tbl_len; i++)
2008 if ((data->ddr_freq_tbl[i - 1] <= ddr_max) &&
2009 (data->ddr_freq_tbl[i] > ddr_max)) {
2010 tmp = data->ddr_freq_tbl[i - 1];
2011 break;
2012 }
2013
2014 ddrfreq_qos_boot_max.name = "boot_ddr_max";
2015 pm_qos_add_request(&ddrfreq_qos_boot_max,
2016 PM_QOS_DDR_DEVFREQ_MAX, tmp);
2017 }
2018
2019 if (ddr_min) {
2020 tmp = data->ddr_freq_tbl[0];
2021 for (i = 0; i < data->ddr_freq_tbl_len; i++)
2022 if (data->ddr_freq_tbl[i] >= ddr_min) {
2023 tmp = data->ddr_freq_tbl[i];
2024 break;
2025 }
2026
2027 ddrfreq_qos_boot_min.name = "boot_ddr_min";
2028 pm_qos_add_request(&ddrfreq_qos_boot_min,
2029 PM_QOS_DDR_DEVFREQ_MIN, tmp);
2030 }
2031
2032 ret = misc_register(&soc_id_miscdev);
2033 if (ret) {
2034 pr_err("%s: failed to register misc device\n", __func__);
/* err_file_create7 does not drop the workload attribute */
device_remove_file(&pdev->dev, &dev_attr_workload);
2035 goto err_file_create7;
2036 }
2037
2038 return 0;
2039
2040err_file_create7:
2041 device_remove_file(&pdev->dev, &dev_attr_upthrd_downdiff);
2042err_file_create6:
2043 device_remove_file(&pdev->dev, &dev_attr_normal_upthrd);
2044err_file_create5:
2045 device_remove_file(&pdev->dev, &dev_attr_high_upthrd);
2046err_file_create4:
2047 device_remove_file(&pdev->dev, &dev_attr_high_upthrd_swp);
2048err_file_create3:
2049 device_remove_file(&pdev->dev, &dev_attr_stop_perf_in_suspend);
2050err_file_create2:
2051 device_remove_file(&pdev->dev, &dev_attr_ddr_freq);
2052err_file_create1:
2053 device_remove_file(&pdev->dev, &dev_attr_disable_ddr_fc);
2054err_file_create0:
2055 devfreq_remove_device(data->devfreq);
2056err_devfreq_add:
2057
2058#ifdef CONFIG_DEVFREQ_GOV_THROUGHPUT
2059 kfree(devfreq_throughput_data.throughput_table);
2060#endif /* CONFIG_DEVFREQ_GOV_THROUGHPUT */
2061
2062 free_irq(data->irq, data);
2063
2064 return ret;
2065}
2066
2067static int ddr_devfreq_remove(struct platform_device *pdev)
2068{
2069 struct ddr_devfreq_data *data = platform_get_drvdata(pdev);
2070
2071 device_remove_file(&pdev->dev, &dev_attr_disable_ddr_fc);
2072 device_remove_file(&pdev->dev, &dev_attr_ddr_freq);
2073 device_remove_file(&pdev->dev, &dev_attr_stop_perf_in_suspend);
2074 device_remove_file(&pdev->dev, &dev_attr_high_upthrd_swp);
2075 device_remove_file(&pdev->dev, &dev_attr_high_upthrd);
2076 device_remove_file(&pdev->dev, &dev_attr_normal_upthrd);
2077 device_remove_file(&pdev->dev, &dev_attr_upthrd_downdiff);
device_remove_file(&pdev->dev, &dev_attr_workload);
misc_deregister(&soc_id_miscdev);
2078
2079 devfreq_remove_device(data->devfreq);
2080
2081#ifdef CONFIG_DEVFREQ_GOV_THROUGHPUT
cpufreq_unregister_notifier(&data->freq_transition,
CPUFREQ_TRANSITION_NOTIFIER);
2082 kfree(devfreq_throughput_data.throughput_table);
2083#endif /* CONFIG_DEVFREQ_GOV_THROUGHPUT */
2084
2085 free_irq(data->irq, data);
2086 cancel_work_sync(&data->overflow_work);
2087
2088 return 0;
2089}
2090
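/*
 * Minimal sketch of a matching device-tree node; the interrupt specifier
 * and any additional properties are board specific and shown here only
 * as placeholders:
 *
 *	devfreq-ddr {
 *		compatible = "marvell,devfreq-ddr";
 *		interrupts = <...>;
 *		marvell,qos;	// optional: hook devfreq min/max up to PM QoS
 *	};
 */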
2091static const struct of_device_id devfreq_ddr_dt_match[] = {
2092 {.compatible = "marvell,devfreq-ddr" },
2093 {},
2094};
2095MODULE_DEVICE_TABLE(of, devfreq_ddr_dt_match);
2096
2097#ifdef CONFIG_PM
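/*
 * System PM hooks: on suspend the DDR clock is parked at the lowest rate
 * in the table (or at the floor the CP has requested via PM QoS) and the
 * performance counters are optionally stopped; on resume the counters
 * are restarted and the pre-suspend rate is restored.
 */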
2098static unsigned long saved_ddrclk;
2099static int mck_suspend(struct device *dev)
2100{
2101 struct list_head *list_min;
2102 struct plist_node *node;
2103 struct pm_qos_request *req;
2104 unsigned int i = 0;
2105 unsigned long new_ddrclk, cp_request = 0;
2106 struct platform_device *pdev;
2107 struct ddr_devfreq_data *data;
2108 unsigned long flags;
2109
2110 pdev = container_of(dev, struct platform_device, dev);
2111 data = platform_get_drvdata(pdev);
2112
2113 new_ddrclk = data->ddr_freq_tbl[0];
2114
2115 mutex_lock(&data->devfreq->lock);
2116
2117 /* scaling to the min frequency before entering suspend */
2118 saved_ddrclk = clk_get_rate(data->ddr_clk) / KHZ_TO_HZ;
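/*
 * Walk the DDR min-frequency QoS constraint list looking for a request
 * owned by "cp" (the communication processor). If the CP asked for a
 * floor above the lowest table entry, suspend must not drop DDR below
 * that rate, so the matching table frequency is used instead.
 */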
2120 list_min = &pm_qos_array[PM_QOS_DDR_DEVFREQ_MIN]
2121 ->constraints->list.node_list;
2122 list_for_each_entry(node, list_min, node_list) {
2123 req = container_of(node, struct pm_qos_request, node);
2124 if (req->name && !strcmp(req->name, "cp") &&
2125 (node->prio > data->ddr_freq_tbl[0])) {
2126 dev_info(dev, "%s requests DDR min QoS\n",
2127 req->name);
2128 cp_request = 1;
2129 break;
2130 }
2131 }
2132
2133 /* if CP request QOS min, set rate as CP request */
2134 if (cp_request) {
2135 do {
2136 if (node->prio == data->ddr_freq_tbl[i]) {
2137 new_ddrclk = data->ddr_freq_tbl[i];
2138 break;
2139 }
2140 i++;
2141 } while (i < data->ddr_freq_tbl_len);
2142
2143 if (i == data->ddr_freq_tbl_len)
2144 dev_err(dev, "CP-requested DDR min QoS does not match any table entry!\n");
2145 }
2146
2147 ddr_set_rate(data, new_ddrclk);
2148 pr_pm_debug("Change ddr freq for suspend. (cur: %lukHz)\n",
2149 clk_get_rate(data->ddr_clk) / KHZ_TO_HZ);
2150
2151 if (atomic_read(&data->is_stopped)) {
2152 dev_dbg(dev, "disable perf_counter before suspend!\n");
2153 spin_lock_irqsave(&data->lock, flags);
2154 stop_ddr_performance_counter(data);
2155 spin_unlock_irqrestore(&data->lock, flags);
2156 }
2157
2158 mutex_unlock(&data->devfreq->lock);
2159
2160 return 0;
2161}
2162
2163static int mck_resume(struct device *dev)
2164{
2165 struct platform_device *pdev;
2166 struct ddr_devfreq_data *data;
2167 unsigned long flags;
2168
2169 pdev = container_of(dev, struct platform_device, dev);
2170 data = platform_get_drvdata(pdev);
2171
2172 mutex_lock(&data->devfreq->lock);
2173
2174 if (atomic_read(&data->is_stopped)) {
2175 dev_dbg(dev, "restart perf_counter after resume!\n");
2176 spin_lock_irqsave(&data->lock, flags);
2177 start_ddr_performance_counter(data);
2178 spin_unlock_irqrestore(&data->lock, flags);
2179 }
2180
2181 /* scaling to saved frequency after exiting suspend */
2182 ddr_set_rate(data, saved_ddrclk);
2183 pr_pm_debug("Change ddr freq to saved value. (cur: %lukHz)\n",
2184 clk_get_rate(data->ddr_clk) / KHZ_TO_HZ);
2185 mutex_unlock(&data->devfreq->lock);
2186 return 0;
2187}
2188
2189static const struct dev_pm_ops mck_pm_ops = {
2190 .suspend = mck_suspend,
2191 .resume = mck_resume,
2192};
2193#endif
2194
2195static struct platform_driver ddr_devfreq_driver = {
2196 .probe = ddr_devfreq_probe,
2197 .remove = ddr_devfreq_remove,
2198 .driver = {
2199 .name = "devfreq-ddr",
2200 .of_match_table = of_match_ptr(devfreq_ddr_dt_match),
2201 .owner = THIS_MODULE,
2202#ifdef CONFIG_PM
2203 .pm = &mck_pm_ops,
2204#endif
2205 },
2206};
2207
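/*
 * fs_initcall registers the driver earlier in boot than the default
 * device-level initcall, presumably so DDR frequency scaling is available
 * before its consumers probe; module_exit() still provides the unload
 * path when built as a module.
 */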
2208static int __init ddr_devfreq_init(void)
2209{
2210 return platform_driver_register(&ddr_devfreq_driver);
2211}
2212fs_initcall(ddr_devfreq_init);
2213
2214static void __exit ddr_devfreq_exit(void)
2215{
2216 platform_driver_unregister(&ddr_devfreq_driver);
2217}
2218module_exit(ddr_devfreq_exit);
2219
2220MODULE_LICENSE("GPL");
2221MODULE_DESCRIPTION("ASR memorybus devfreq driver");