/*
 * mmp core clock operation source file
 *
 * Copyright: (C) Copyright 2018 ASR Microelectronics (Shanghai) Co., Ltd.
 * Xuhong Gao <xuhonggao@marvell.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/cpufreq.h>
#include <linux/devfreq.h>
#include <soc/asr/asrdcstat.h>
#include <soc/asr/asrcpdvc.h>
#include "clk.h"
#include "clk-asr1803.h"
#include <linux/clk-provider.h>
#include <dt-bindings/clock/asr,asr1803.h>
#ifdef CONFIG_DEBUG_FS
#include <soc/asr/debugfs-asr.h>
#endif
#include <linux/cputype.h>
#include <trace/events/pxa.h>
#ifdef CONFIG_PXA_MIPSRAM
#include <linux/mipsram.h>
#include <mach/mipsram_pm_event.h>
#endif

#define APMU_REG(apmu_base, x) (apmu_base + (x))
#define APMU_MC_HW_SLP_TYPE(c) APMU_REG(c, 0x00B0)
#define APMU_CKPHY_FC_CTRL(c) APMU_REG(c, 0x015C)
#define MPMU_REG(mpmu_base, x) (mpmu_base + (x))
#define MPMU_CMPR0 (0x400)
#define MPMU_CMPR1 (0x404)
#define AP_DDR_ROP (MPMU_CMPR0)
#define CP_DDR_ROP (MPMU_CMPR1)

#define APMU_DDR_CLK_FC_REQ (0x1 << 24)

#define DDR_FC_RIPC_ADDR (0xd403d200)
#define DFC_LEVEL(c, i) APMU_REG(c, (0x190 + ((i) << 2)))
#define DFC_STATUS(c) APMU_REG(c, 0x188)
#define DFC_AP(c) APMU_REG(c, 0x180)

#define APMU_IMR(c) APMU_REG(c, 0x0098)
#define APMU_IRWC(c) APMU_REG(c, 0x009C)
#define APMU_ISR(c) APMU_REG(c, 0x00A0)
#define APMU_PLL_SEL_STATUS(c) APMU_REG(c, 0x00c4)

#define ASR1903_A0P_DCLK_SRC_MASK (0x1F << 16)
#define ASR1903_A0P_DCLK_SRC_SEL_SHIFT (18)
#define ASR1903_A0P_DCLK_SRC_DIV_SHIFT (16)
DEFINE_SPINLOCK(ripc_spinlock);
EXPORT_SYMBOL(ripc_spinlock);

static DEFINE_SPINLOCK(ddr_fc_seq_lock);
static struct task_struct *ddr_fc_seqlock_owner;
static int ddr_fc_seqlock_cnt;

static void __iomem *hwlock_addr;

/* parameter passed from cmdline to identify DDR mode */
enum ddr_type ddr_mode = DDR_400M;
static int __init __init_ddr_mode(char *arg)
{
	int n;

	if (!get_option(&arg, &n))
		return 0;

	if ((n >= DDR_TYPE_MAX) || (n < DDR_400M))
		pr_info("WARNING: unknown DDR type!\n");
	else
		ddr_mode = n;

	return 1;
}
__setup("ddr_mode=", __init_ddr_mode);

enum dfc_cause {
	CP_LPM_DFC = 0,
	AP_ACTIVE_DFC = 0x1,
	CP_ACTIVE_DFC = 0x2,
	DP_ACTIVE_DFC = 0x4,
};

union pmua_ckphy_fc_ctrl {
	struct {
		unsigned int dpll_div:4;
		unsigned int reserved1:28;
	} b;
	unsigned int v;
};

union dfc_ap {
	struct {
		unsigned int dfc_req:1;
		unsigned int fl:3;
		/* rsv bits */
		unsigned int reserved:28;
	} b;
	unsigned int v;
};

union dfc_status {
	struct {
		unsigned int dfc_status:1;
		unsigned int cfl:3;
		unsigned int tfl:3;
		unsigned int dfc_cause:4;
		unsigned int reserved:21;
	} b;
	unsigned int v;
};

/* lock declaration */
static LIST_HEAD(ddr_combined_clk_list);

static struct ddr_opt *cur_ddr_op;

static struct clk *clk_dclk;

static bool ddr_is_hwdfc = false;

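/*
 * Hardware lock guarding DDR frequency change against the CP.  The RIPC
 * register at DDR_FC_RIPC_ADDR is assumed to be read-to-acquire (a read
 * returning 0 means the lock was granted to this core) and
 * write-1-to-release, matching how the two helpers below use it.
 */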
int get_fc_ripc_lock(void)
{
	int cnt = 0;
	unsigned long flags;

	spin_lock_irqsave(&ripc_spinlock, flags);
	while (__raw_readl(hwlock_addr)) {
		spin_unlock_irqrestore(&ripc_spinlock, flags);
		cpu_relax();
		udelay(1);
		cnt++;
		if (cnt >= 200) {
			pr_warn("AP: failed to lock DDR_FC ripc!\n");
			return -EBUSY;
		}
		spin_lock_irqsave(&ripc_spinlock, flags);
	}
	spin_unlock_irqrestore(&ripc_spinlock, flags);
	return 0;
}

void put_fc_ripc_lock(void)
{
	unsigned long flags;

	spin_lock_irqsave(&ripc_spinlock, flags);
	__raw_writel(1, hwlock_addr);
	spin_unlock_irqrestore(&ripc_spinlock, flags);
}

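/*
 * Hand-rolled recursive spinlock: if the current task already owns
 * ddr_fc_seq_lock, only the nesting count is bumped, so the DDR FC
 * path may safely re-enter these helpers on the same task.
 */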
static void get_ddr_fc_spinlock(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (!spin_trylock(&ddr_fc_seq_lock)) {
		if (ddr_fc_seqlock_owner == current) {
			ddr_fc_seqlock_cnt++;
			local_irq_restore(flags);
			return;
		}
		spin_lock(&ddr_fc_seq_lock);
	}
	WARN_ON_ONCE(ddr_fc_seqlock_owner != NULL);
	WARN_ON_ONCE(ddr_fc_seqlock_cnt != 0);
	ddr_fc_seqlock_owner = current;
	ddr_fc_seqlock_cnt = 1;

	local_irq_restore(flags);
}

static void put_ddr_fc_spinlock(void)
{
	unsigned long flags;

	local_irq_save(flags);

	WARN_ON_ONCE(ddr_fc_seqlock_owner != current);
	WARN_ON_ONCE(ddr_fc_seqlock_cnt == 0);

	if (--ddr_fc_seqlock_cnt) {
		local_irq_restore(flags);
		return;
	}
	ddr_fc_seqlock_owner = NULL;
	spin_unlock(&ddr_fc_seq_lock);

	local_irq_restore(flags);
}

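/*
 * Wait for a hardware DFC requested by the AP to finish: first poll the
 * AP frequency-change done bit (bit 1) in APMU_ISR, then poll
 * DFC_STATUS.dfc_status until the engine goes idle, dumping the DFC
 * registers on timeout.  Only the AP done bit is cleared on exit.
 */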
static void wait_for_hwdfc_done(void __iomem *apmu_base)
{
	int timeout = 10000;
	int dfc_timeout;
	union dfc_status status;

	/* polling ISR */
	while (!((1 << 1) & __raw_readl(APMU_ISR(apmu_base))) && timeout)
		timeout--;
	if (timeout <= 0) {
		/* enhancement to check DFC related status */
		pr_err("APMU_ISR %x, CUR_DLV %d, DFC_AP %x, DFC_STATUS %x\n",
			__raw_readl(APMU_ISR(apmu_base)),
			cur_ddr_op->ddr_freq_level,
			__raw_readl(DFC_AP(apmu_base)),
			__raw_readl(DFC_STATUS(apmu_base)));
		WARN(1, "HWDFC frequency change timeout!\n");
		pr_err("APMU_ISR %x\n",
			__raw_readl(APMU_ISR(apmu_base)));
	}

	status.v = __raw_readl(DFC_STATUS(apmu_base));
	if (!status.b.dfc_status)
		goto out;

	/* polling hwdfc and may timeout */
	dfc_timeout = 8;
	while (status.b.dfc_status && dfc_timeout) {
		dfc_timeout--;
		udelay(10);
		status.v = __raw_readl(DFC_STATUS(apmu_base));
	}
	if (dfc_timeout <= 0) {
		/* enhancement to check DFC related status */
		pr_err("APMU_ISR %x, CUR_DLV %d, DFC_AP %x, DFC_STATUS %x\n",
			__raw_readl(APMU_ISR(apmu_base)),
			cur_ddr_op->ddr_freq_level,
			__raw_readl(DFC_AP(apmu_base)),
			__raw_readl(DFC_STATUS(apmu_base)));
		WARN(1, "HWDFC frequency change timeout!\n");
		pr_err("APMU_ISR %x\n",
			__raw_readl(APMU_ISR(apmu_base)));
	}

out:
	/* only clear AP fc done signal */
	__raw_writel(__raw_readl(APMU_ISR(apmu_base)) & ~(1 << 1),
		     APMU_ISR(apmu_base));
}

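/* Map a rate in MHz to the index of the lowest OP that satisfies it. */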
static unsigned int ddr_rate2_op_index(struct clk_hw *hw, unsigned int rate)
{
	unsigned int index;
	struct clk_ddr *ddr = to_clk_ddr(hw);
	struct ddr_opt *ddr_opt;
	unsigned int ddr_opt_size;

	ddr_opt = ddr->params->ddr_opt;
	ddr_opt_size = ddr->params->ddr_opt_size;

	if (unlikely(rate > ddr_opt[ddr_opt_size - 1].dclk))
		return ddr_opt_size - 1;

	for (index = 0; index < ddr_opt_size; index++)
		if (ddr_opt[index].dclk >= rate)
			break;

	return index;
}

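/*
 * Look up the DVC voltage level for a DDR frequency (in kHz) in the
 * platform hwdfc_freq_table; frequencies above the table maximum use
 * the last (highest) level.
 */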
static int get_ddr_volt_level(struct clk_ddr *ddr, unsigned long freq)
{
	int i;
	unsigned long *array = ddr->params->hwdfc_freq_table;
	int table_size = ddr->params->hwdfc_table_size;

	for (i = 0; i < table_size; i++)
		if (freq <= array[i])
			break;
	if (i == table_size)
		i--;
	return i;
}

static void get_cur_ddr_op(struct clk_hw *hw,
			   struct ddr_opt *cop)
{
	union pmua_ckphy_fc_ctrl ckphy_fc_ctrl;
	u32 clk_div;
	struct clk_ddr *ddr = to_clk_ddr(hw);
	void __iomem *apmu_base = ddr->params->apmu_base;

	ckphy_fc_ctrl.v = __raw_readl(APMU_CKPHY_FC_CTRL(apmu_base));
	clk_div = (ckphy_fc_ctrl.b.dpll_div >> 2);
	pr_debug("ckphy_fc_ctrl: 0x%x\n", ckphy_fc_ctrl.v);
	BUG_ON(!cop->ddr_parent);
	cop->ddr_clk_src = clk_get_rate(cop->ddr_parent) / MHZ;
	cop->dclk = cop->ddr_clk_src / (clk_div + 1) / 2;
}

#ifdef CONFIG_DDR_DEVFREQ
static struct devfreq_frequency_table *ddr_devfreq_tbl;

static void __init_ddr_devfreq_table(struct clk_hw *hw)
{
	struct ddr_opt *ddr_opt;
	unsigned int ddr_opt_size = 0, i = 0;
	struct clk_ddr *ddr = to_clk_ddr(hw);

	ddr_opt_size = ddr->params->ddr_opt_size;
	ddr_devfreq_tbl =
		kmalloc(sizeof(struct devfreq_frequency_table)
			* (ddr_opt_size + 1), GFP_KERNEL);
	if (!ddr_devfreq_tbl)
		return;

	ddr_opt = ddr->params->ddr_opt;
	for (i = 0; i < ddr_opt_size; i++) {
		ddr_devfreq_tbl[i].index = i;
		ddr_devfreq_tbl[i].frequency =
			ddr_opt[i].dclk * MHZ_TO_KHZ;
	}

	ddr_devfreq_tbl[i].index = i;
	ddr_devfreq_tbl[i].frequency = DEVFREQ_TABLE_END;

	devfreq_frequency_table_register(ddr_devfreq_tbl, DEVFREQ_DDR);
}
#endif

static void asr1803_ddr_fc_seq(struct clk_hw *hw, struct ddr_opt *cop,
			       struct ddr_opt *top);
static int __ddr_hwdfc_seq(struct clk_hw *hw, unsigned int level);
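
/*
 * One-time .init hook for the ddr clk: resolve parents, compute dividers
 * for every operating point, program the DFC_LEVEL registers (HWDFC
 * parts), sync HW and SW to the boot operating point, fill in the DVC
 * levels, and publish the devfreq table and the initial AP_DDR_ROP vote.
 */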
static void clk_ddr_init(struct clk_hw *hw)
{
	struct clk *parent, *clk;
	struct clk_ddr *ddr = to_clk_ddr(hw);
	struct ddr_opt *ddr_opt, *cop, cur_op;
	unsigned int ddr_opt_size = 0, i;
	unsigned int op_index;
	struct parents_table *parent_table = ddr->params->parent_table;
	int parent_table_size = ddr->params->parent_table_size;
#ifdef CONFIG_ASR_CLK_DCSTAT
	unsigned int idx = 0;
	unsigned long op[MAX_OP_NUM];
#endif
	unsigned int value, volt_level;
	unsigned int val;
	void __iomem *apmu_base = ddr->params->apmu_base;
	struct dfc_level_reg_offset *offset = ddr->params->dfc_level_reg_offset;

	ddr_opt = ddr->params->ddr_opt;
	ddr_opt_size = ddr->params->ddr_opt_size;

	for (i = 0; i < parent_table_size; i++) {
		clk = __clk_lookup(parent_table[i].parent_name);
		if (clk)
			parent_table[i].parent = clk;
		else
			pr_err("%s : can't find clk %s\n", __func__,
			       parent_table[i].parent_name);
	}

	pr_info("dclk(src:sel,div,tblindex):: ");
	for (i = 0; i < ddr_opt_size; i++) {
		cop = &ddr_opt[i];
		parent = hwsel2parent(parent_table, parent_table_size,
				      cop->ddr_clk_sel);
		BUG_ON(IS_ERR(parent));
		cop->ddr_parent = parent;
		cop->ddr_clk_src =
			clk_get_rate(parent) / MHZ;
		cop->dclk_div =
			cop->ddr_clk_src / (2 * cop->dclk) - 1;
		cop->dclk_div = (cop->dclk_div << 2);

		printk(KERN_CONT " %d(%d:%d,%d,%d); ",
			cop->dclk, cop->ddr_clk_src,
			cop->ddr_clk_sel, cop->dclk_div,
			cop->ddr_tbl_index);

		if (ddr->flags & MMP_DDR_HWDFC_FEAT) {
			cop->ddr_freq_level = i;
			value = __raw_readl(DFC_LEVEL(apmu_base, i));
#ifdef CONFIG_CPU_ASR1903
			if (cpu_is_asr1903() && (!cpu_is_asr1903_z1())) {
				value &= ~(MASK(offset->dclksel_width) <<
					offset->dclksel_shift);
				value |= (cop->ddr_clk_sel &
					MASK(offset->dclksel_width)) <<
					offset->dclksel_shift;
			}
#endif
			value &= ~(MASK(offset->ddr_clk_div_width) <<
				offset->ddr_clk_div_shift);
			value |= ((cop->dclk_div >> 2) &
				MASK(offset->ddr_clk_div_width)) <<
				offset->ddr_clk_div_shift;

			value &= ~(MASK(offset->mc_table_num_width) <<
				offset->mc_table_num_shift);
			value |= (cop->ddr_tbl_index &
				MASK(offset->mc_table_num_width)) <<
				offset->mc_table_num_shift;

			/* Dvc level is filled as 0 here */
			value &= ~(MASK(offset->volt_level_width) <<
				offset->volt_level_shift);
			value |= (0 & MASK(offset->volt_level_width)) <<
				offset->volt_level_shift;
			__raw_writel(value, DFC_LEVEL(apmu_base, i));
		}
	}
	printk(KERN_CONT "\n");
	cur_op = ddr_opt[0];
	get_cur_ddr_op(hw, &cur_op);
	op_index = ddr_rate2_op_index(hw, cur_op.dclk);
	cur_ddr_op = &ddr_opt[op_index];
	if ((cur_op.ddr_clk_src != cur_ddr_op->ddr_clk_src) ||
	    (cur_op.dclk != cur_ddr_op->dclk)) {
		WARN(1, "Boot DDR PP is not supported!\n");
		if (ddr->flags & MMP_DDR_HWDFC_FEAT) {
			__ddr_hwdfc_seq(hw, 0);
			__ddr_hwdfc_seq(hw, op_index);
		} else
			asr1803_ddr_fc_seq(hw, &cur_op, cur_ddr_op);
	}

	if (ddr->flags & MMP_DDR_HWDFC_FEAT) {
		ddr_is_hwdfc = true;

		val = __raw_readl(APMU_MC_HW_SLP_TYPE(apmu_base));
		/* enable tbl based FC */
		val &= ~(0x1 << ddr->params->ddr_offset->tbl_enable_shift);
		__raw_writel(val, APMU_MC_HW_SLP_TYPE(apmu_base));
		/*
		 * HW thinks the default DFL is 0, so make sure HW gets
		 * the correct DFL by first changing it to 0, then
		 * changing it to the current DFL
		 */
		asr1803_ddr_fc_seq(hw, &cur_op, &ddr_opt[0]);
		__ddr_hwdfc_seq(hw, 0);
		__ddr_hwdfc_seq(hw, op_index);
		/*
		 * Fill dvc level in DFC_LEVEL; this will not trigger a
		 * dvc level change since the default level is 0 for all
		 * DFC_LEVEL regs
		 */
		for (i = 0; i < ddr_opt_size; i++) {
			cop = &ddr_opt[i];
			volt_level = get_ddr_volt_level(ddr,
					cop->dclk * MHZ_TO_KHZ);
			value = __raw_readl(DFC_LEVEL(apmu_base, i));
			value &= ~(MASK(offset->volt_level_width) <<
				offset->volt_level_shift);
			value |= (volt_level &
				MASK(offset->volt_level_width)) <<
				offset->volt_level_shift;
			__raw_writel(value, DFC_LEVEL(apmu_base, i));
		}
	}

	/* hw->clk->rate = ddr_opt[op_index].dclk * MHZ; */
	clk_set_rate(hw->clk, ddr_opt[op_index].dclk * MHZ);
#ifdef CONFIG_ASR_CLK_DCSTAT
	if (ddr->params->dcstat_support) {
		idx = 0;
		for (i = 0; i < ddr_opt_size; i++) {
			cop = &ddr_opt[i];
			op[idx++] = cop->dclk * MHZ;
		}
		clk_register_dcstat(hw->clk, op, idx);
	}
#endif

	clk_dclk = hw->clk;
#ifdef CONFIG_DDR_DEVFREQ
	__init_ddr_devfreq_table(hw);
#endif
	hwlock_addr = ioremap(DDR_FC_RIPC_ADDR, 0x10);
	if (!hwlock_addr) {
		pr_err("Cannot map RIPC IOMEM\n");
		return;
	}
	get_fc_ripc_lock();
	/* write initial value into AP_DDR_ROP */
	__raw_writel((0x1 << op_index),
		     MPMU_REG(ddr->params->mpmu_base, AP_DDR_ROP));
	put_fc_ripc_lock();
}

static long clk_ddr_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *parent_rate)
{
	unsigned int index;
	struct clk_ddr *ddr = to_clk_ddr(hw);
	struct ddr_opt *ddr_opt;
	unsigned int ddr_opt_size;

	ddr_opt = ddr->params->ddr_opt;
	ddr_opt_size = ddr->params->ddr_opt_size;

	rate /= MHZ;

	if (unlikely(rate > ddr_opt[ddr_opt_size - 1].dclk))
		return ddr_opt[ddr_opt_size - 1].dclk * MHZ;

	for (index = 0; index < ddr_opt_size; index++)
		if (ddr_opt[index].dclk >= rate)
			break;

	return ddr_opt[index].dclk * MHZ;
}

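/*
 * Poll DFC_STATUS until the current frequency level reaches the
 * expected level and any in-flight DFC is no longer the AP's own
 * request; returns true if that never happens within ~1ms (i.e. the
 * AP DFC appears stuck in progress).
 */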
static inline bool check_hwdfc_inpro(void __iomem *apmu_base,
				     unsigned int expected_lvl)
{
	union dfc_status status;
	int max_delay = 200;

	while (max_delay) {
		status.v = __raw_readl(DFC_STATUS(apmu_base));
		if ((expected_lvl <= status.b.cfl) &&
		    ((!status.b.dfc_status) ||
		     (status.b.dfc_status &&
		      (status.b.dfc_cause != AP_ACTIVE_DFC))))
			return false;
		udelay(5);
		max_delay--;
	}
	return true;
}

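/*
 * Core HWDFC sequence: wait out any CP/MSA-initiated DFC, clear a stale
 * AP done interrupt, write the target frequency level plus the request
 * bit into DFC_AP, then wait for the hardware to report completion.
 */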
static int __ddr_hwdfc_seq(struct clk_hw *hw, unsigned int level)
{
	unsigned int dfc_ap;
	struct clk_ddr *ddr = to_clk_ddr(hw);
	struct dfc_ap_reg_offset *offset = ddr->params->dfc_ap_reg_offset;
	void __iomem *apmu_base = ddr->params->apmu_base;
	bool inpro = false;
	union dfc_status status;
	int max_delay = 100;

	/* wait until a DFC triggered by CP/MSA is done */
	status.v = __raw_readl(DFC_STATUS(apmu_base));
	while (max_delay && status.b.dfc_status) {
		udelay(10);
		max_delay--;
		status.v = __raw_readl(DFC_STATUS(apmu_base));
	}
	if (unlikely(status.b.dfc_status)) {
		WARN(1, "AP cannot start HWDFC as DFC is in progress!\n");
		pr_err("DFCAP %x, DFCSTATUS %x\n",
		       __raw_readl(DFC_AP(apmu_base)),
		       __raw_readl(DFC_STATUS(apmu_base)));
		return -EAGAIN;
	}
	/* Check if AP ISR is set; if set, clear it */
	prefc_check_isr(apmu_base);

	/* trigger AP HWDFC */
	dfc_ap = __raw_readl(DFC_AP(apmu_base));
	dfc_ap &= ~(MASK(offset->freq_level_width) <<
		offset->freq_level_shift);
	dfc_ap |= (level & MASK(offset->freq_level_width)) <<
		offset->freq_level_shift;
	dfc_ap |= 1 << offset->dfc_req_shift;
	__raw_writel(dfc_ap, DFC_AP(apmu_base));

	/* Check dfc status and done */
	inpro = check_hwdfc_inpro(apmu_base, level);
	if (likely(!inpro)) {
		/* wait for dfc_status to become 0 */
		wait_for_hwdfc_done(apmu_base);
	} else {
		WARN(1, "HW-DFC failed! expect LV %d\n", level);
		pr_err("DFCAP %x, DFCSTATUS %x, PLLSEL %x\n",
		       __raw_readl(DFC_AP(apmu_base)),
		       __raw_readl(DFC_STATUS(apmu_base)),
		       __raw_readl(APMU_PLL_SEL_STATUS(apmu_base)));
	}
	return 0;
}

static int ddr_hwdfc_seq(struct clk_hw *hw, struct ddr_opt *cop,
			 struct ddr_opt *top)
{
	int ret = 0;

	trace_pxa_ddr_clk_chg(CLK_CHG_ENTRY, cop->dclk, top->dclk);
	ret = __ddr_hwdfc_seq(hw, top->ddr_freq_level);
	trace_pxa_ddr_clk_chg(CLK_CHG_EXIT, cop->dclk, top->dclk);
	return ret;
}

static int set_hwdfc_freq(struct clk_hw *hw, struct ddr_opt *old,
			  struct ddr_opt *new)
{
	unsigned long flags;
	int ret = 0;

	pr_debug("DDR set_freq start: old %u, new %u\n",
		 old->dclk, new->dclk);

	clk_prepare_enable(new->ddr_parent);
	local_irq_save(flags);
	ret = ddr_hwdfc_seq(hw, old, new);
	if (unlikely(ret == -EAGAIN)) {
		/* still stay at old freq and src */
		local_irq_restore(flags);
		clk_disable_unprepare(new->ddr_parent);
		goto out;
	}
	local_irq_restore(flags);
	clk_disable_unprepare(old->ddr_parent);

	pr_debug("DDR set_freq end: old %u, new %u\n",
		 old->dclk, new->dclk);
out:
	return ret;
}

static void set_ddr_tbl_index(struct clk_hw *hw, unsigned int index)
{
	unsigned int regval;
	struct clk_ddr *ddr = to_clk_ddr(hw);
	void __iomem *apmu_base = ddr->params->apmu_base;
	struct ddr_reg_offset *offset = ddr->params->ddr_offset;

	index &= MASK(offset->tbl_index_width);
	regval = __raw_readl(APMU_MC_HW_SLP_TYPE(apmu_base));
	/* clear ddr tbl index */
	regval &= ~(MASK(offset->tbl_index_width) <<
		offset->tbl_index_shift);
	/* set ddr tbl index */
	regval |= (index << offset->tbl_index_shift);

	__raw_writel(regval, APMU_MC_HW_SLP_TYPE(apmu_base));
}

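/*
 * Software DDR frequency-change sequence: program the MC table index
 * and dclk divider, then set the FC request bit in APMU_MC_HW_SLP_TYPE
 * and spin until the hardware clears it.
 */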
static void asr1803_ddr_fc_seq(struct clk_hw *hw, struct ddr_opt *cop,
			       struct ddr_opt *top)
{
	unsigned int regval = 0;
	struct clk_ddr *ddr = to_clk_ddr(hw);
	void __iomem *apmu_base = ddr->params->apmu_base;
	union pmua_ckphy_fc_ctrl ckphy_fc_ctrl;
	int timeout = 200000;

	trace_pxa_ddr_clk_chg(CLK_CHG_ENTRY, cop->dclk, top->dclk);
	/* 0.2) Check if AP ISR is set; if set, clear it */
	prefc_check_isr(apmu_base);

	ckphy_fc_ctrl.v = __raw_readl(APMU_CKPHY_FC_CTRL(apmu_base));
	regval = __raw_readl(APMU_MC_HW_SLP_TYPE(apmu_base));
	/* 2) issue DDR FC */
	if ((cop->ddr_clk_src != top->ddr_clk_src) ||
	    (cop->dclk != top->dclk)) {
		/* 2.2) enable tbl based FC and set DDR tbl num */
		set_ddr_tbl_index(hw, top->ddr_tbl_index);

		/* 2.3) select div for dclk */
		ckphy_fc_ctrl.b.dpll_div = top->dclk_div;

		regval = __raw_readl(APMU_MC_HW_SLP_TYPE(apmu_base));
#ifdef CONFIG_CPU_ASR1903
		if (cpu_is_asr1903() && (!cpu_is_asr1903_z1())) {
			regval &= ~ASR1903_A0P_DCLK_SRC_MASK;
			regval |= ((top->dclk_div >> 2) << ASR1903_A0P_DCLK_SRC_DIV_SHIFT);
			regval |= (top->ddr_clk_sel << ASR1903_A0P_DCLK_SRC_SEL_SHIFT);
		}
#endif
		/* 2.4) set ddr FC req bit */
		regval |= APMU_DDR_CLK_FC_REQ;
	}

	__raw_writel(ckphy_fc_ctrl.v, APMU_CKPHY_FC_CTRL(apmu_base));
	/* 3) set div and FC req bit to trigger DDR FC */
	pr_debug("DDR FC APMU_CKPHY_FC_CTRL[%x]\n", ckphy_fc_ctrl.v);
	pr_debug("DDR FC APMU_MC_HW_SLP_TYPE[%x]\n", regval);
	dmb();
	__raw_writel(regval, APMU_MC_HW_SLP_TYPE(apmu_base));

	while ((APMU_DDR_CLK_FC_REQ & __raw_readl(APMU_MC_HW_SLP_TYPE(apmu_base)))
	       && timeout)
		timeout--;

	if (timeout <= 0) {
		pr_err("APMU_MC_HW_SLP_TYPE %x, fc_type DDR_FC\n",
		       __raw_readl(APMU_MC_HW_SLP_TYPE(apmu_base)));
		WARN(1, "DDR frequency change timeout!\n");
	}

	trace_pxa_ddr_clk_chg(CLK_CHG_EXIT, cop->dclk, top->dclk);
}

static int asr1803_set_ddr_freq(struct clk_hw *hw, struct ddr_opt *old,
				struct ddr_opt *new)
{
	struct ddr_opt cop;
	struct clk *ddr_old_parent;
	void __iomem *apmu_base;
	int ret = 0;
	struct ddr_opt *ddr_opt;
	struct clk_ddr *ddr;
	unsigned long flags;

	ddr = to_clk_ddr(hw);
	ddr_opt = ddr->params->ddr_opt;
	apmu_base = ddr->params->apmu_base;

	pr_debug("DDR set_freq start: old %u, new %u\n",
		 old->dclk, new->dclk);

	cop = *old;
	get_cur_ddr_op(hw, &cop);
	if (unlikely((cop.ddr_clk_src != old->ddr_clk_src) ||
		     (cop.dclk != old->dclk))) {
		pr_err(" dsrc dclk\n");
		pr_err("OLD %d %d\n", old->ddr_clk_src, old->dclk);
		pr_err("CUR %d %d\n", cop.ddr_clk_src, cop.dclk);
		pr_err("NEW %d %d\n", new->ddr_clk_src, new->dclk);
		dump_stack();
	}

	ddr_old_parent = cop.ddr_parent;
	clk_prepare_enable(new->ddr_parent);

	/* Run the sequence with irqs disabled to shorten the FC window */
	local_irq_save(flags);
	asr1803_ddr_fc_seq(hw, &cop, new);
	local_irq_restore(flags);

	cop = *new;
	get_cur_ddr_op(hw, &cop);
	if (unlikely((cop.ddr_clk_src != new->ddr_clk_src) ||
		     (cop.dclk != new->dclk))) {
		clk_disable_unprepare(new->ddr_parent);
		pr_err("DDR: unsuccessful frequency change!\n");
		pr_err(" dsrc dclk\n");
		pr_err("CUR %d %d\n", cop.ddr_clk_src, cop.dclk);
		pr_err("NEW %d %d\n", new->ddr_clk_src, new->dclk);
		pr_err("APMU_CKPHY_FC_CTRL %x, APMU_MC_HW_SLP_TYPE %x\n",
		       __raw_readl(APMU_CKPHY_FC_CTRL(apmu_base)),
		       __raw_readl(APMU_MC_HW_SLP_TYPE(apmu_base)));
		ret = -EAGAIN;
		goto out;
	}

	clk_disable_unprepare(ddr_old_parent);
	pr_debug("DDR set_freq end: old %u, new %u\n",
		 old->dclk, new->dclk);
out:
	return ret;
}

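/*
 * RIPC-protected variant of the DDR frequency change.  The AP and CP
 * each publish their requested operating point as a bit mask in
 * AP_DDR_ROP/CP_DDR_ROP inside the MPMU; while holding the RIPC lock
 * the AP raises its target to the higher of its own request and the
 * CP's current one, so an AP downscale never undercuts the CP.
 */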
static int asr1803_ripc_set_ddr_freq(struct clk_hw *hw, struct ddr_opt *old,
				     struct ddr_opt *new)
{
	struct ddr_opt cop;
	struct clk *ddr_old_parent;
	void __iomem *apmu_base;
	int ret = 0;
	struct ddr_opt *ddr_opt;
	struct clk_ddr *ddr;
	unsigned long flags;
	u32 cp_ddr_rop, ap_ddr_rop, value;
	u32 target_ddr_op, cp_ddr_cop, ap_ddr_cop;

	ddr = to_clk_ddr(hw);
	ddr_opt = ddr->params->ddr_opt;
	apmu_base = ddr->params->apmu_base;

	ap_ddr_cop = ddr_rate2_op_index(hw, old->dclk);
	cop = *old;

	/* Take the lock with irqs disabled to shorten the AP hold time */
	local_irq_save(flags);
	ret = get_fc_ripc_lock();
	if (ret) {
		local_irq_restore(flags);
		return ret;
	}
	get_cur_ddr_op(hw, &cop);
	pr_debug("DDR set_freq start: old %u, new %u\n",
		 old->dclk, new->dclk);

	ap_ddr_rop = ddr_rate2_op_index(hw, new->dclk);

	/* get current cp op */
	cp_ddr_rop = __raw_readl(MPMU_REG(ddr->params->mpmu_base, CP_DDR_ROP));
	/* check cur op from high to low */
	if (cp_ddr_rop & (0x1 << 2))
		cp_ddr_cop = 2;
	else if (cp_ddr_rop & (0x1 << 1))
		cp_ddr_cop = 1;
	else
		cp_ddr_cop = 0;

	if ((ap_ddr_rop == ap_ddr_cop) ||
	    (ap_ddr_cop <= cp_ddr_cop && ap_ddr_rop <= cp_ddr_cop))
		goto out;

	target_ddr_op = max(ap_ddr_rop, cp_ddr_cop);
	if (target_ddr_op >= ddr->params->ddr_opt_size) {
		pr_warn("ddr target_opt out of range: %u\n", target_ddr_op);
		target_ddr_op = ddr->params->ddr_opt_size - 1;
	}

	/* update new ddr opt */
	if (ddr_opt[target_ddr_op].dclk > new->dclk)
		new = &ddr_opt[target_ddr_op];

	ddr_old_parent = cop.ddr_parent;
	clk_prepare_enable(new->ddr_parent);
	asr1803_ddr_fc_seq(hw, &cop, new);

	cop = *new;
	get_cur_ddr_op(hw, &cop);
	target_ddr_op = ddr_rate2_op_index(hw, cop.dclk);

	if (unlikely((cop.ddr_clk_src != new->ddr_clk_src) ||
		     (cop.dclk != new->dclk))) {
		clk_disable_unprepare(new->ddr_parent);
		pr_err("DDR: unsuccessful frequency change!\n");
		pr_err(" dsrc dclk\n");
		pr_err("CUR %d %d\n", cop.ddr_clk_src, cop.dclk);
		pr_err("NEW %d %d\n", new->ddr_clk_src, new->dclk);
		pr_err("APMU_CKPHY_FC_CTRL %x, APMU_MC_HW_SLP_TYPE %x\n",
		       __raw_readl(APMU_CKPHY_FC_CTRL(apmu_base)),
		       __raw_readl(APMU_MC_HW_SLP_TYPE(apmu_base)));
		ret = -EAGAIN;
		goto out;
	}

	clk_disable_unprepare(ddr_old_parent);
	pr_debug("DDR set_freq end: old %u, new %u\n",
		 old->dclk, new->dclk);

out:
	/* publish the AP request into AP_DDR_ROP */
	ap_ddr_rop = (0x1 << ap_ddr_rop);
	__raw_writel(ap_ddr_rop, MPMU_REG(ddr->params->mpmu_base, AP_DDR_ROP));
	value = __raw_readl(MPMU_REG(ddr->params->mpmu_base, AP_DDR_ROP));
	if (value != ap_ddr_rop)
		pr_err("AP_DDR_ROP write failure: target 0x%x, final value 0x%x\n",
		       ap_ddr_rop, value);
	put_fc_ripc_lock();
	local_irq_restore(flags);
	return ret;
}

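/*
 * Clocks whose rate must track dclk (e.g. the AXI clock) register here
 * with a dclk-to-combined-clock relation table; every DDR frequency
 * change replays the tables via trigger_bind2ddr_clk_rate().
 */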
int register_clk_bind2ddr(struct clk *clk, unsigned long max_freq,
			  struct ddr_combclk_relation *relationtbl,
			  unsigned int num_relationtbl)
{
	struct ddr_combined_clk *comclk;

	/* search the list for an existing registration of this clk */
	list_for_each_entry(comclk, &ddr_combined_clk_list, node)
		if (comclk->clk == clk)
			return 0;

	/* clk wasn't in the list, allocate a new entry */
	comclk = kzalloc(sizeof(struct ddr_combined_clk), GFP_KERNEL);
	if (!comclk)
		return -ENOMEM;

	comclk->clk = clk;
	comclk->maxrate = max_freq;
	comclk->relationtbl = relationtbl;
	comclk->num_relationtbl = num_relationtbl;
	list_add(&comclk->node, &ddr_combined_clk_list);
	return 0;
}

static int trigger_bind2ddr_clk_rate(unsigned long ddr_rate)
{
	struct ddr_combined_clk *comclk;
	unsigned long tgt, cur;
	int ret = 0, i = 0;

	list_for_each_entry(comclk, &ddr_combined_clk_list, node) {
		if (!comclk->relationtbl)
			continue;
		i = 0;
		while (i < comclk->num_relationtbl - 1) {
			if ((ddr_rate >= comclk->relationtbl[i].dclk_rate) &&
			    (ddr_rate < comclk->relationtbl[i + 1].dclk_rate))
				break;
			i++;
		}
		tgt = min(comclk->relationtbl[i].combclk_rate, comclk->maxrate);
		pr_debug("%s Start rate change to %lu\n",
			 __clk_get_name(comclk->clk), tgt);
		ret = clk_set_rate(comclk->clk, tgt);
		if (ret) {
			pr_info("%s failed to change clk %s rate\n",
				__func__, __clk_get_name(comclk->clk));
			continue;
		}
		cur = clk_get_rate(comclk->clk);
		if (cur != tgt) {
			pr_info("clk %s: cur %lu, tgt %lu\n",
				__clk_get_name(comclk->clk), cur, tgt);
			WARN_ON(1);
		}
	}

	return ret;
}

static int clk_ddr_setrate(struct clk_hw *hw, unsigned long rate,
			   unsigned long parent_rate)
{
	struct ddr_opt *md_new, *md_old;
	unsigned int index;
	struct ddr_opt *ddr_opt;
	struct clk_ddr *ddr = to_clk_ddr(hw);
	int ret = 0;

	ddr_opt = ddr->params->ddr_opt;

	rate /= MHZ;
	index = ddr_rate2_op_index(hw, rate);
	md_new = &ddr_opt[index];
	if (md_new == cur_ddr_op)
		goto out;

	acquire_fc_mutex();
	md_old = cur_ddr_op;

	get_ddr_fc_spinlock();
	if (likely(ddr->flags & MMP_DDR_HWDFC_FEAT))
		ret = set_hwdfc_freq(hw, md_old, md_new);
	else if (likely(ddr->flags & MMP_DDR_RIPC_LOCK_FC))
		ret = asr1803_ripc_set_ddr_freq(hw, md_old, md_new);
	else
		ret = asr1803_set_ddr_freq(hw, md_old, md_new);
	put_ddr_fc_spinlock();
	release_fc_mutex();
	if (ret)
		goto err;

	cur_ddr_op = md_new;
	clk_hw_reparent(hw, __clk_get_hw(md_new->ddr_parent));

out:
	trigger_bind2ddr_clk_rate(rate * MHZ);
err:
	return ret;
}

static unsigned long clk_ddr_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	struct clk_ddr *ddr = to_clk_ddr(hw);
	void __iomem *apmu_base = ddr->params->apmu_base;
	union dfc_status dfc_status;
	union dfc_ap dfc_ap;
	struct ddr_opt *ddr_opt = ddr->params->ddr_opt;
	u32 dfc_lvl;

	if (ddr->flags & MMP_DDR_HWDFC_FEAT) {
		if (ddr->flags & MMP_DDR_RATE_AP_ONLY) {
			dfc_ap.v = __raw_readl(DFC_AP(apmu_base));
			return ddr_opt[dfc_ap.b.fl].dclk * MHZ;
		} else {
			dfc_status.v = __raw_readl(DFC_STATUS(apmu_base));
			dfc_lvl = dfc_status.b.cfl;
			if (dfc_lvl >= ddr->params->ddr_opt_size) {
				/* fall back to the AP-requested level */
				dfc_ap.v = __raw_readl(DFC_AP(apmu_base));
				dfc_lvl = dfc_ap.b.fl;
			}
			return ddr_opt[dfc_lvl].dclk * MHZ;
		}
	} else {
		if (cur_ddr_op)
			return cur_ddr_op->dclk * MHZ;
		else
			pr_err("%s: cur_ddr_op NULL\n", __func__);
	}

	return 0;
}

static u8 clk_ddr_get_parent(struct clk_hw *hw)
{
	struct clk *parent, *clk;
	u32 src_sel;
	struct clk_ddr *ddr;
	void __iomem *apmu_base;
	struct parents_table *parent_table;
	int parent_table_size;
	u8 i = 0;

	clk = hw->clk;
	ddr = to_clk_ddr(hw);
	apmu_base = ddr->params->apmu_base;
	parent_table = ddr->params->parent_table;
	parent_table_size = ddr->params->parent_table_size;

	/* there is only one DDR clock source for 1802s */
	src_sel = 0;
	for (i = 0; i < parent_table_size; i++) {
		if (parent_table[i].hw_sel_val == src_sel)
			break;
	}
	if (i == parent_table_size) {
		pr_err("%s: Cannot find parent for ddr!\n", __func__);
		BUG_ON(1);
	}
	parent = clk_get_sys(NULL, parent_table[i].parent_name);
	WARN_ON(IS_ERR(parent));
	if (!IS_ERR(parent)) {
		for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
			if (!strcmp(clk_hw_get_name(clk_hw_get_parent_by_index(hw, i)),
				    __clk_get_name(parent)))
				break;
		}
	}
	return i;
}

/* Interface used to get ddr op num */
unsigned int get_ddr_op_num(void)
{
	struct clk_ddr *ddr;
	struct clk *clk = __clk_lookup("ddr");

	if (!clk) {
		WARN_ON(1);
		return 0;
	}
	ddr = to_clk_ddr(__clk_get_hw(clk));
	return ddr->params->ddr_opt_size;
}

/* Interface used to get ddr available rate, unit kHz */
unsigned int get_ddr_op_rate(unsigned int index)
{
	struct ddr_opt *ddr_opt;
	struct clk_ddr *ddr;
	struct clk *clk = __clk_lookup("ddr");

	if (!clk) {
		WARN_ON(1);
		return 0;
	}
	ddr = to_clk_ddr(__clk_get_hw(clk));
	if (index >= ddr->params->ddr_opt_size) {
		pr_err("%s index out of range!\n", __func__);
		return -EINVAL;
	}

	ddr_opt = ddr->params->ddr_opt;
	return ddr_opt[index].dclk * MHZ_TO_KHZ;
}

struct clk_ops ddr_clk_ops = {
	.init = clk_ddr_init,
	.round_rate = clk_ddr_round_rate,
	.set_rate = clk_ddr_setrate,
	.recalc_rate = clk_ddr_recalc_rate,
	.get_parent = clk_ddr_get_parent,
};

struct clk *mmp_clk_register_ddr(const char *name, const char **parent_name,
		u8 num_parents, unsigned long flags, u32 ddr_flags,
		spinlock_t *lock, struct ddr_params *params)
{
	struct clk_ddr *ddr;
	struct clk *clk;
	struct clk_init_data init;

	ddr = kzalloc(sizeof(*ddr), GFP_KERNEL);
	if (!ddr)
		return NULL;

	init.name = name;
	init.ops = &ddr_clk_ops;
	init.flags = flags;
	init.parent_names = parent_name;
	init.num_parents = num_parents;

	ddr->flags = ddr_flags;
	ddr->lock = lock;
	ddr->params = params;
	ddr->hw.init = &init;

	clk = clk_register(NULL, &ddr->hw);
	if (IS_ERR(clk))
		kfree(ddr);

	return clk;
}

static DEFINE_SPINLOCK(fc_seq_lock);

struct ddr_reg_offset asr1803_ddr_reg_off = {
	.tbl_enable_shift = 7,
	.tbl_index_shift = 5,
	.tbl_index_width = 2,
};

struct parents_table asr1803_ddr_parent_table[] = {
	{
		.parent_name = "dpll_1066",
		.hw_sel_val = 0x0,
	},
};

static const char *asr1803_ddr_parent[] = {"dpll_1066",};

static struct ddr_opt asr1803_lpddr533_oparray[] = {
	{
		.dclk = 266,
		.ddr_tbl_index = 0,
		.ddr_clk_sel = 0x0,
		.ddr_freq_level = 0,
	},
	{
		.dclk = 533,
		.ddr_tbl_index = 1,
		.ddr_clk_sel = 0x0,
		.ddr_freq_level = 1,
	},
};

#ifdef CONFIG_CPU_ASR1903
struct parents_table asr1903_a0p_ddr_parent_table[] = {
	{
		.parent_name = "dpll_1066",
		.hw_sel_val = 0x0,
	},
};

static struct ddr_opt asr1903_lpddr533_oparray[] = {
	{
		.dclk = 266,
		.ddr_tbl_index = 0,
		.ddr_clk_sel = 0x0,
		.ddr_freq_level = 0,
	},
	{
		.dclk = 533,
		.ddr_tbl_index = 1,
		.ddr_clk_sel = 0x0,
		.ddr_freq_level = 1,
	},
};
#endif

static struct dfc_level_reg_offset dfc_level_reg_off = {
	.dclksel_shift = 11,
	.dclksel_width = 3,
	.ddr_clk_div_shift = 14,
	.ddr_clk_div_width = 2,
	.mc_table_num_shift = 4,
	.mc_table_num_width = 2,
	.volt_level_shift = 0,
	.volt_level_width = 4,
};

static struct dfc_ap_reg_offset dfc_ap_reg_off = {
	.dfc_req_shift = 0,
	.freq_level_shift = 1,
	.freq_level_width = 3,
};

struct ddr_params ddr_params = {
	.dfc_level_reg_offset = &dfc_level_reg_off,
	.dfc_ap_reg_offset = &dfc_ap_reg_off,
	.ddr_offset = &asr1803_ddr_reg_off,
	.parent_table = asr1803_ddr_parent_table,
	.parent_table_size = ARRAY_SIZE(asr1803_ddr_parent_table),
	.ddr_opt = asr1803_lpddr533_oparray,
	.ddr_opt_size = ARRAY_SIZE(asr1803_lpddr533_oparray),
#ifdef CONFIG_ASR_CLK_DCSTAT
	.dcstat_support = true,
#endif
};

struct ddr_combclk_relation aclk_dclk_relationtbl_1903_1828[] = {
	{.dclk_rate = 266000000, .combclk_rate = 208000000},
	{.dclk_rate = 533000000, .combclk_rate = 312000000},
};

struct ddr_combclk_relation aclk_dclk_relationtbl_1803[] = {
	{.dclk_rate = 266000000, .combclk_rate = 208000000},
	{.dclk_rate = 533000000, .combclk_rate = 208000000},
};

static struct ddr_dfc_info ddrdfcinfo;

static void find_ddr_level(struct ddr_opt *ddr_op_array, unsigned int size)
{
	int i;

	ddrdfcinfo.ddr_idle = 0;
	/*
	 * Pick the lowest OP at or above 266MHz as "active" and the
	 * lowest at or above 400MHz as "high".
	 */
	for (i = 0; i < size; i++) {
		if (ddrdfcinfo.ddr_active == 0) {
			if (ddr_op_array[i].dclk >= 266)
				ddrdfcinfo.ddr_active = i;
		}
		if (ddrdfcinfo.ddr_high == 0) {
			if (ddr_op_array[i].dclk >= 400)
				ddrdfcinfo.ddr_high = i;
		}
		if (ddrdfcinfo.ddr_active && ddrdfcinfo.ddr_high)
			break;
	}
}

static void init_ddr_dfc_info(void)
{
	memset(&ddrdfcinfo, 0, sizeof(ddrdfcinfo));
	find_ddr_level(asr1803_lpddr533_oparray,
		       ARRAY_SIZE(asr1803_lpddr533_oparray));
	fillddrdfcinfo(&ddrdfcinfo);
}

void __init asr1803_ddrc_init(struct asr1803_clk_unit *asr_unit)
{
	struct mmp_clk_unit *unit = &asr_unit->unit;
	struct clk *clk = NULL;
	struct clk *combclk = NULL;
	int combclk_maxfreq;

	ddr_params.apmu_base = asr_unit->apmu_base;
	ddr_params.mpmu_base = asr_unit->mpmu_base;
	ddr_params.dmcu_base = asr_unit->ddrc_base;

	if (cpu_is_asr1806() || cpu_is_asr1903()) {
		ddr_params.hwdfc_freq_table = freqs_cmb[ddr_cmbindex()];
		ddr_params.hwdfc_table_size = MAX_PMIC_LEVEL * sizeof(freqs_cmb[ddr_cmbindex()][0]);
#ifdef CONFIG_CPU_ASR1903
		if (cpu_is_asr1903() && (!cpu_is_asr1903_z1())) {
			ddr_params.parent_table = asr1903_a0p_ddr_parent_table;
			ddr_params.parent_table_size = ARRAY_SIZE(asr1903_a0p_ddr_parent_table);
			ddr_params.ddr_opt = asr1903_lpddr533_oparray;
			ddr_params.ddr_opt_size = ARRAY_SIZE(asr1903_lpddr533_oparray);
		}
#endif
	}

	init_ddr_dfc_info();

	asr_clk_parents_lookup(ddr_params.parent_table,
			       ddr_params.parent_table_size);
	if (cpu_is_asr1806() || cpu_is_asr1903()) {
		clk = mmp_clk_register_ddr("ddr", asr1803_ddr_parent,
			ARRAY_SIZE(asr1803_ddr_parent),
			CLK_GET_RATE_NOCACHE, MMP_DDR_HWDFC_FEAT, &fc_seq_lock,
			&ddr_params);
	} else {
		clk = mmp_clk_register_ddr("ddr", asr1803_ddr_parent,
			ARRAY_SIZE(asr1803_ddr_parent),
			CLK_GET_RATE_NOCACHE, MMP_DDR_RIPC_LOCK_FC, &fc_seq_lock,
			&ddr_params);
	}

	if (IS_ERR_OR_NULL(clk)) {
		pr_err("%s: failed to register top clk ddr!\n", __func__);
		return;
	}

	pr_info("DDR boot up @%luHz\n", clk_get_rate(clk));

	mmp_clk_add(unit, ASR1803_CLK_DDR, clk);

	clk_prepare_enable(clk);

	combclk = __clk_lookup("axi");
	if (!combclk) {
		pr_err("%s: combclk is not registered yet\n", __func__);
		return;
	}

	get_axi_max_freq(&combclk_maxfreq);

	if (cpu_is_asr1903() || cpu_is_asr1828())
		register_clk_bind2ddr(combclk, combclk_maxfreq,
				      aclk_dclk_relationtbl_1903_1828,
				      ARRAY_SIZE(aclk_dclk_relationtbl_1903_1828));
	else
		register_clk_bind2ddr(combclk, combclk_maxfreq,
				      aclk_dclk_relationtbl_1803,
				      ARRAY_SIZE(aclk_dclk_relationtbl_1803));
}

void asr18xx_dump_ddr_regs(void)
{
	void __iomem *reg_base;

	if (ddr_params.apmu_base) {
		reg_base = ddr_params.apmu_base;
		if (ddr_is_hwdfc) {
			pr_emerg("DFCAP %x, DFCSTATUS %x\n",
				 readl(DFC_AP(reg_base)), readl(DFC_STATUS(reg_base)));
		} else {
			pr_emerg("DPHY_CTRL 0x%08x SLP_TYPE 0x%08x\n",
				 readl(APMU_CKPHY_FC_CTRL(reg_base)),
				 readl(APMU_MC_HW_SLP_TYPE(reg_base)));
		}
	}
}