blob: ebfd2492a1a00043e660d034fb526da1aab4c97b [file] [log] [blame]
rjw1f884582022-01-06 17:20:42 +08001/*
2 * Copyright (C) 2018 MediaTek Inc.
3 *
4 * This program is free software: you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <asm/cpu.h>
15#include "met_kernel_symbol.h"
16#include "cpu_pmu.h"
17
18/*******************************
19 * ARM v8 operations *
20 *******************************/
/*
 * Per-CPU PMCR: config reg
 * Bit layout of PMCR_EL0 (Performance Monitors Control Register).
 */
#define ARMV8_PMCR_E (1 << 0) /* Enable all counters */
#define ARMV8_PMCR_P (1 << 1) /* Reset all counters */
#define ARMV8_PMCR_C (1 << 2) /* Cycle counter reset */
#define ARMV8_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV8_PMCR_X (1 << 4) /* Export to ETM */
#define ARMV8_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug */
#define ARMV8_PMCR_N_SHIFT 11 /* Number of counters supported */
#define ARMV8_PMCR_N_MASK 0x1f /* N field width after shifting down */
#define ARMV8_PMCR_MASK 0x3f /* Mask for writable bits */

/*
 * PMOVSR: counters overflow flag status reg
 */
#define ARMV8_OVSR_MASK 0xffffffff /* Mask for writable bits */
#define ARMV8_OVERFLOWED_MASK ARMV8_OVSR_MASK
39
/*
 * Select event counter @idx via PMSELR_EL0 so that subsequent
 * PMXEVTYPER/PMXEVCNTR accesses target that counter.  The isb()
 * makes the selection visible before any following counter access.
 */
static inline void armv8_pmu_counter_select(unsigned int idx)
{
	asm volatile ("msr pmselr_el0, %0"::"r" (idx));
	isb();
}
45
/*
 * Program event number @type into counter @idx: select the counter,
 * then write its event type register (PMXEVTYPER_EL0).
 */
static inline void armv8_pmu_type_select(unsigned int idx, unsigned int type)
{
	armv8_pmu_counter_select(idx);
	asm volatile ("msr pmxevtyper_el0, %0"::"r" (type));
}
51
/*
 * Read the current value of counter @idx.
 * idx 31 is the dedicated cycle counter (PMCCNTR_EL0); any other
 * index reads an event counter through PMSELR/PMXEVCNTR.
 * Note: only the low 32 bits of the cycle counter are returned.
 */
static inline unsigned int armv8_pmu_read_count(unsigned int idx)
{
	unsigned int value;

	if (idx == 31) {
		asm volatile ("mrs %0, pmccntr_el0":"=r" (value));
	} else {
		armv8_pmu_counter_select(idx);
		asm volatile ("mrs %0, pmxevcntr_el0":"=r" (value));
	}
	return value;
}
64
65static inline void armv8_pmu_enable_count(unsigned int idx)
66{
67 asm volatile ("msr pmcntenset_el0, %0"::"r" (1 << idx));
68}
69
70static inline void armv8_pmu_disable_count(unsigned int idx)
71{
72 asm volatile ("msr pmcntenclr_el0, %0"::"r" (1 << idx));
73}
74
75static inline void armv8_pmu_enable_intr(unsigned int idx)
76{
77 asm volatile ("msr pmintenset_el1, %0"::"r" (1 << idx));
78}
79
80static inline void armv8_pmu_disable_intr(unsigned int idx)
81{
82 asm volatile ("msr pmintenclr_el1, %0"::"r" (1 << idx));
83 isb();
84 asm volatile ("msr pmovsclr_el0, %0"::"r" (1 << idx));
85 isb();
86}
87
88static inline unsigned int armv8_pmu_overflow(void)
89{
90 unsigned int val;
91
92 asm volatile ("mrs %0, pmovsclr_el0":"=r" (val)); /* read */
93 val &= ARMV8_OVSR_MASK;
94 asm volatile ("mrs %0, pmovsclr_el0"::"r" (val));
95 return val;
96}
97
/*
 * Return the current value of PMCR_EL0 (PMU control register).
 */
static inline unsigned int armv8_pmu_control_read(void)
{
	unsigned int val;

	asm volatile ("mrs %0, pmcr_el0":"=r" (val));
	return val;
}
105
/*
 * Write @val to PMCR_EL0, restricted to the writable control bits
 * (ARMV8_PMCR_MASK).  The isb() before the write follows the
 * original ordering — presumably to complete prior PMU accesses
 * before reprogramming the control register.
 */
static inline void armv8_pmu_control_write(u32 val)
{
	val &= ARMV8_PMCR_MASK;
	isb();
	asm volatile ("msr pmcr_el0, %0"::"r" (val));
}
112
/*
 * Put the PMU into a known idle state: reset the cycle counter and
 * all event counters (PMCR C|P), mask each counter's interrupt,
 * disable counting, and clear any pending overflow flags.
 *
 * @generic_counters: number of generic event counters to tear down.
 * Hardware index 31 addresses the dedicated cycle counter.
 */
static void armv8_pmu_hw_reset_all(int generic_counters)
{
	int i;

	armv8_pmu_control_write(ARMV8_PMCR_C | ARMV8_PMCR_P);
	/* generic counter */
	for (i = 0; i < generic_counters; i++) {
		armv8_pmu_disable_intr(i);
		armv8_pmu_disable_count(i);
	}
	/* cycle counter */
	armv8_pmu_disable_intr(31);
	armv8_pmu_disable_count(31);
	armv8_pmu_overflow(); /* clear overflow */
}
128
129/***********************************
130 * MET ARM v8 operations *
131 ***********************************/
/*
 * MIDR_EL1 primary part numbers (bits [15:4]) of the Arm cores
 * recognized by this driver; compared against in init_pmus().
 */
enum ARM_TYPE {
	CORTEX_A53 = 0xD03,
	CORTEX_A35 = 0xD04,
	CORTEX_A55 = 0xD05,
	CORTEX_A57 = 0xD07,
	CORTEX_A72 = 0xD08,
	CORTEX_A73 = 0xD09,
	CORTEX_A75 = 0xD0A,
	CHIP_UNKNOWN = 0xFFF
};
142
/* Maps a core type to the number of PMU counter slots it provides. */
struct chip_pmu {
	enum ARM_TYPE type;		/* MIDR part number of the core */
	unsigned int event_count;	/* event counters + cycle counter */
};
147
/*
 * Supported cores: each provides 6 generic event counters plus the
 * dedicated cycle counter (hence "6+1").
 */
static struct chip_pmu chips[] = {
	{CORTEX_A35, 6+1},
	{CORTEX_A53, 6+1},
	{CORTEX_A55, 6+1},
	{CORTEX_A57, 6+1},
	{CORTEX_A72, 6+1},
	{CORTEX_A73, 6+1},
	{CORTEX_A75, 6+1},
};
157
158static int armv8_pmu_hw_check_event(struct met_pmu *pmu, int idx, int event)
159{
160 int i;
161
162 /* Check if event is duplicate */
163 for (i = 0; i < idx; i++) {
164 if (pmu[i].event == event)
165 break;
166 }
167 if (i < idx) {
168 /* pr_debug("++++++ found duplicate event 0x%02x i=%d\n", event, i); */
169 return -1;
170 }
171
172 return 0;
173}
174
/*
 * Program and start the PMU for one sampling session.
 *
 * @pmu:   per-slot configuration; slots 0..count-2 are generic event
 *         counters, slot count-1 is the cycle counter.
 * @count: total number of slots including the cycle counter.
 *
 * Only slots marked MODE_POLLING are armed.  The cycle counter is
 * addressed with hardware index 31.
 */
static void armv8_pmu_hw_start(struct met_pmu *pmu, int count)
{
	int i;
	int generic = count - 1;

	armv8_pmu_hw_reset_all(generic);
	for (i = 0; i < generic; i++) {
		if (pmu[i].mode == MODE_POLLING) {
			armv8_pmu_type_select(i, pmu[i].event);
			armv8_pmu_enable_count(i);
		}
	}
	if (pmu[count - 1].mode == MODE_POLLING) { /* cycle counter */
		armv8_pmu_enable_count(31);
	}
	/* PMCR_EL0.E: globally enable all armed counters. */
	armv8_pmu_control_write(ARMV8_PMCR_E);
}
192
/*
 * Stop a sampling session by resetting and disabling every counter.
 * @count includes the cycle-counter slot, so the last slot is the
 * cycle counter and the rest are generic event counters.
 */
static void armv8_pmu_hw_stop(int count)
{
	armv8_pmu_hw_reset_all(count - 1);
}
199
/*
 * Sample every polled counter into @pmu_value — generic counters
 * first, then the cycle counter — and return how many values were
 * written.  Afterwards the counters are reset and re-enabled in a
 * single PMCR write so the next poll measures a fresh interval.
 */
static unsigned int armv8_pmu_hw_polling(struct met_pmu *pmu, int count, unsigned int *pmu_value)
{
	int i, cnt = 0;
	int generic = count - 1;

	for (i = 0; i < generic; i++) {
		if (pmu[i].mode == MODE_POLLING) {
			pmu_value[cnt] = armv8_pmu_read_count(i);
			cnt++;
		}
	}
	if (pmu[count - 1].mode == MODE_POLLING) {
		pmu_value[cnt] = armv8_pmu_read_count(31);
		cnt++;
	}
	armv8_pmu_control_write(ARMV8_PMCR_C | ARMV8_PMCR_P | ARMV8_PMCR_E);

	return cnt;
}
219
/* Per-CPU counter-slot storage handed out through armv8_pmu.pmu[]. */
static struct met_pmu pmus[MXNR_CPU][MXNR_PMU_EVENTS];

/* Hardware-operations table exported by cpu_pmu_hw_init(). */
struct cpu_pmu_hw armv8_pmu = {
	.name = "armv8_pmu",
	.check_event = armv8_pmu_hw_check_event,
	.start = armv8_pmu_hw_start,
	.stop = armv8_pmu_hw_stop,
	.polling = armv8_pmu_hw_polling,
};
229
/*
 * Identify each possible CPU's core type from MIDR_EL1 and record
 * its PMU event-slot count in armv8_pmu.event_count[].
 * (reg_midr & 0xffff) >> 4 extracts the primary part number,
 * MIDR bits [15:4], which is what enum ARM_TYPE encodes.
 *
 * NOTE(review): cpuinfo is dereferenced without a failure check —
 * assumes met_get_cpuinfo_symbol() always sets it for a possible
 * CPU; TODO confirm against that helper's contract.  CPUs whose
 * part number matches no chips[] entry keep their previous
 * event_count.
 */
static void init_pmus(void)
{
	int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm64 *cpuinfo;
		if (cpu >= MXNR_CPU)
			continue;
		met_get_cpuinfo_symbol(cpu, &cpuinfo);
		/* PR_BOOTMSG("CPU[%d]: reg_midr = %x\n", cpu, cpuinfo->reg_midr); */
		for (i = 0; i < ARRAY_SIZE(chips); i++) {
			if (chips[i].type == (cpuinfo->reg_midr & 0xffff) >> 4) {
				armv8_pmu.event_count[cpu] = chips[i].event_count;
				break;
			}
		}
	}
}
249
250struct cpu_pmu_hw *cpu_pmu_hw_init(void)
251{
252 int cpu;
253
254 init_pmus();
255 for (cpu = 0; cpu < MXNR_CPU; cpu++)
256 armv8_pmu.pmu[cpu] = pmus[cpu];
257
258 return &armv8_pmu;
259}