rjw | 1f88458 | 2022-01-06 17:20:42 +0800 | [diff] [blame^] | 1 | /* |
| 2 | * Copyright (C) 2018 MediaTek Inc. |
| 3 | * |
| 4 | * This program is free software: you can redistribute it and/or modify |
| 5 | * it under the terms of the GNU General Public License version 2 as |
| 6 | * published by the Free Software Foundation. |
| 7 | * |
| 8 | * This program is distributed in the hope that it will be useful, |
| 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 11 | * GNU General Public License for more details. |
| 12 | */ |
| 13 | |
| 14 | #include <linux/smp.h> |
| 15 | #include "interface.h" |
| 16 | #include "cpu_pmu.h" |
| 17 | #include "v8_pmu_name.h" |
| 18 | |
/*
 * PMCR_EL0: per-CPU performance monitors control register.
 * Bit layout below follows the ARMv8-A architecture definition.
 */
#define ARMV8_PMCR_E (1 << 0) /* Enable all counters */
#define ARMV8_PMCR_P (1 << 1) /* Reset all event counters */
#define ARMV8_PMCR_C (1 << 2) /* Reset the cycle counter */
#define ARMV8_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV8_PMCR_X (1 << 4) /* Export events to ETM */
#define ARMV8_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug */
#define ARMV8_PMCR_N_SHIFT 11 /* N, bits[15:11]: number of event counters */
#define ARMV8_PMCR_N_MASK 0x1f
#define ARMV8_PMCR_MASK 0x3f /* Mask for writable bits (E..DP) */

/*
 * PMOVSCLR_EL0: counter overflow flag status register (write-1-to-clear).
 */
#define ARMV8_OVSR_MASK 0xffffffff /* Mask for writable bits */
#define ARMV8_OVERFLOWED_MASK ARMV8_OVSR_MASK
| 37 | |
| 38 | |
/* MIDR_EL1 primary part numbers of the ARMv8 cores this driver knows. */
enum ARM_TYPE {
	CORTEX_A53 = 0xD03,
	CORTEX_A35 = 0xD04,
	CORTEX_A57 = 0xD07,
	CORTEX_A72 = 0xD08,
	CORTEX_A73 = 0xD09,
	CHIP_UNKNOWN = 0xFFF	/* sentinel: part number not in chips[] */
};
| 47 | |
/* Per-core PMU description: part id, event descriptor table, display name. */
struct chip_pmu {
	enum ARM_TYPE type;	/* MIDR primary part number */
	struct pmu_desc *desc;	/* event descriptor table */
	unsigned int count;	/* number of entries in desc */
	const char *cpu_name;	/* human-readable core name */
};
| 54 | |
/*
 * Supported cores.  Every entry reuses the Cortex-A53 event table.
 * NOTE(review): the A53 entry's display name reads "Cortex-A7L" — looks
 * like a typo for "Cortex-A53", but it is a runtime string; confirm with
 * consumers of cpu_name before changing it.
 */
static struct chip_pmu chips[] = {
	{CORTEX_A53, a53_pmu_desc, A53_PMU_DESC_COUNT, "Cortex-A7L"},
	{CORTEX_A35, a53_pmu_desc, A53_PMU_DESC_COUNT, "Cortex-A35"},
	{CORTEX_A57, a53_pmu_desc, A53_PMU_DESC_COUNT, "Cortex-A57"},
	{CORTEX_A72, a53_pmu_desc, A53_PMU_DESC_COUNT, "Cortex-A72"},
	{CORTEX_A73, a53_pmu_desc, A53_PMU_DESC_COUNT, "Cortex-A73"},
};
/* fallback description used when the running core is not in chips[] */
static struct chip_pmu chip_unknown = { CHIP_UNKNOWN, NULL, 0, "Unknown CPU" };

/* number of entries in chips[] */
#define CHIP_PMU_COUNT (sizeof(chips) / sizeof(struct chip_pmu))

/* description of the running core; bound by cpu_pmu_hw_init() */
static struct chip_pmu *chip;
| 67 | |
/*
 * Read the CPU primary part number (MIDR_EL1 bits[15:4]) so it can be
 * matched against enum ARM_TYPE in cpu_pmu_hw_init().
 */
static enum ARM_TYPE armv8_get_ic(void)
{
	unsigned int value;
	/* Read Main ID Register */
	asm("mrs %0, midr_el1":"=r"(value));

	value = (value & 0xffff) >> 4;	/* primary part number */
	return value;
}
| 77 | |
/* Select event counter idx (PMSELR_EL0) for subsequent PMX* accesses. */
static inline void armv8_pmu_counter_select(unsigned int idx)
{
	asm volatile ("msr pmselr_el0, %0"::"r" (idx));
	isb();	/* make the selection visible before any PMX* access */
}
| 83 | |
/* Program event number 'type' into the selected counter's PMXEVTYPER_EL0. */
static inline void armv8_pmu_type_select(unsigned int idx, unsigned int type)
{
	armv8_pmu_counter_select(idx);
	asm volatile ("msr pmxevtyper_el0, %0"::"r" (type));
}
| 89 | |
/*
 * Read the current value of counter idx.
 * idx == 31 denotes the dedicated cycle counter (PMCCNTR_EL0); any other
 * index selects a generic event counter via PMSELR/PMXEVCNTR.
 */
static inline unsigned int armv8_pmu_read_count(unsigned int idx)
{
	unsigned int value;

	if (idx == 31) {
		asm volatile ("mrs %0, pmccntr_el0":"=r" (value));
	} else {
		armv8_pmu_counter_select(idx);
		asm volatile ("mrs %0, pmxevcntr_el0":"=r" (value));
	}
	return value;
}
| 102 | |
/*
 * Write 'value' into counter idx.
 * idx == 31 denotes the cycle counter (PMCCNTR_EL0); any other index
 * selects a generic event counter via PMSELR/PMXEVCNTR.
 */
static inline void armv8_pmu_write_count(int idx, u32 value)
{
	if (idx == 31) {
		asm volatile ("msr pmccntr_el0, %0"::"r" (value));
	} else {
		armv8_pmu_counter_select(idx);
		asm volatile ("msr pmxevcntr_el0, %0"::"r" (value));
	}
}
| 112 | |
| 113 | static inline void armv8_pmu_enable_count(unsigned int idx) |
| 114 | { |
| 115 | asm volatile ("msr pmcntenset_el0, %0"::"r" (1 << idx)); |
| 116 | } |
| 117 | |
| 118 | static inline void armv8_pmu_disable_count(unsigned int idx) |
| 119 | { |
| 120 | asm volatile ("msr pmcntenclr_el0, %0"::"r" (1 << idx)); |
| 121 | } |
| 122 | |
| 123 | static inline void armv8_pmu_enable_intr(unsigned int idx) |
| 124 | { |
| 125 | asm volatile ("msr pmintenset_el1, %0"::"r" (1 << idx)); |
| 126 | } |
| 127 | |
| 128 | static inline void armv8_pmu_disable_intr(unsigned int idx) |
| 129 | { |
| 130 | asm volatile ("msr pmintenclr_el1, %0"::"r" (1 << idx)); |
| 131 | isb(); |
| 132 | asm volatile ("msr pmovsclr_el0, %0"::"r" (1 << idx)); |
| 133 | isb(); |
| 134 | } |
| 135 | |
| 136 | static inline unsigned int armv8_pmu_overflow(void) |
| 137 | { |
| 138 | unsigned int val; |
| 139 | |
| 140 | asm volatile ("mrs %0, pmovsclr_el0":"=r" (val)); /* read */ |
| 141 | val &= ARMV8_OVSR_MASK; |
| 142 | asm volatile ("mrs %0, pmovsclr_el0"::"r" (val)); |
| 143 | return val; |
| 144 | } |
| 145 | |
/* Read PMCR_EL0, the PMU control register. */
static inline unsigned int armv8_pmu_control_read(void)
{
	unsigned int val;

	asm volatile ("mrs %0, pmcr_el0":"=r" (val));
	return val;
}
| 153 | |
/*
 * Write PMCR_EL0, restricted to its writable bits (E/P/C/D/X/DP).
 * NOTE(review): the isb() precedes the write instead of following it —
 * confirm whether a trailing barrier was intended.
 */
static inline void armv8_pmu_control_write(u32 val)
{
	val &= ARMV8_PMCR_MASK;
	isb();
	asm volatile ("msr pmcr_el0, %0"::"r" (val));
}
| 160 | |
| 161 | static int armv8_pmu_hw_get_counters(void) |
| 162 | { |
| 163 | int count = armv8_pmu_control_read(); |
| 164 | /* N, bits[15:11] */ |
| 165 | count = ((count >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK); |
| 166 | return count; |
| 167 | } |
| 168 | |
| 169 | static void armv8_pmu_hw_reset_all(int generic_counters) |
| 170 | { |
| 171 | int i; |
| 172 | |
| 173 | armv8_pmu_control_write(ARMV8_PMCR_C | ARMV8_PMCR_P); |
| 174 | /* generic counter */ |
| 175 | for (i = 0; i < generic_counters; i++) { |
| 176 | armv8_pmu_disable_intr(i); |
| 177 | armv8_pmu_disable_count(i); |
| 178 | } |
| 179 | /* cycle counter */ |
| 180 | armv8_pmu_disable_intr(31); |
| 181 | armv8_pmu_disable_count(31); |
| 182 | armv8_pmu_overflow(); /* clear overflow */ |
| 183 | } |
| 184 | |
| 185 | static int armv8_pmu_hw_get_event_desc(int i, int event, char *event_desc) |
| 186 | { |
| 187 | if (event_desc == NULL) |
| 188 | return -1; |
| 189 | |
| 190 | for (i = 0; i < chip->count; i++) { |
| 191 | if (chip->desc[i].event == event) { |
| 192 | strncpy(event_desc, chip->desc[i].name, MXSIZE_PMU_DESC - 1); |
| 193 | break; |
| 194 | } |
| 195 | } |
| 196 | if (i == chip->count) |
| 197 | return -1; |
| 198 | |
| 199 | return 0; |
| 200 | } |
| 201 | |
| 202 | static int armv8_pmu_hw_check_event(struct met_pmu *pmu, int idx, int event) |
| 203 | { |
| 204 | int i; |
| 205 | |
| 206 | /* Check if event is duplicate */ |
| 207 | for (i = 0; i < idx; i++) { |
| 208 | if (pmu[i].event == event) |
| 209 | break; |
| 210 | } |
| 211 | if (i < idx) { |
| 212 | /* pr_debug("++++++ found duplicate event 0x%02x i=%d\n", event, i); */ |
| 213 | return -1; |
| 214 | } |
| 215 | |
| 216 | for (i = 0; i < chip->count; i++) { |
| 217 | if (chip->desc[i].event == event) |
| 218 | break; |
| 219 | } |
| 220 | |
| 221 | if (i == chip->count) |
| 222 | return -1; |
| 223 | |
| 224 | return 0; |
| 225 | } |
| 226 | |
| 227 | static void armv8_pmu_hw_start(struct met_pmu *pmu, int count) |
| 228 | { |
| 229 | int i; |
| 230 | int generic = count - 1; |
| 231 | |
| 232 | armv8_pmu_hw_reset_all(generic); |
| 233 | for (i = 0; i < generic; i++) { |
| 234 | if (pmu[i].mode == MODE_POLLING) { |
| 235 | armv8_pmu_type_select(i, pmu[i].event); |
| 236 | armv8_pmu_enable_count(i); |
| 237 | } |
| 238 | } |
| 239 | if (pmu[count - 1].mode == MODE_POLLING) { /* cycle counter */ |
| 240 | armv8_pmu_enable_count(31); |
| 241 | } |
| 242 | armv8_pmu_control_write(ARMV8_PMCR_E); |
| 243 | } |
| 244 | |
/* Stop and clear the PMU; count includes the cycle counter slot. */
static void armv8_pmu_hw_stop(int count)
{
	armv8_pmu_hw_reset_all(count - 1);
}
| 251 | |
| 252 | static unsigned int armv8_pmu_hw_polling(struct met_pmu *pmu, int count, unsigned int *pmu_value) |
| 253 | { |
| 254 | int i, cnt = 0; |
| 255 | int generic = count - 1; |
| 256 | |
| 257 | for (i = 0; i < generic; i++) { |
| 258 | if (pmu[i].mode == MODE_POLLING) { |
| 259 | pmu_value[cnt] = armv8_pmu_read_count(i); |
| 260 | cnt++; |
| 261 | } |
| 262 | } |
| 263 | if (pmu[count - 1].mode == MODE_POLLING) { |
| 264 | pmu_value[cnt] = armv8_pmu_read_count(31); |
| 265 | cnt++; |
| 266 | } |
| 267 | armv8_pmu_control_write(ARMV8_PMCR_C | ARMV8_PMCR_P | ARMV8_PMCR_E); |
| 268 | |
| 269 | return cnt; |
| 270 | } |
| 271 | |
| 272 | |
/* ARMv8 PMU backend; nr_cnt and cpu_name are filled in cpu_pmu_hw_init(). */
struct cpu_pmu_hw armv8_pmu = {
	.name = "armv8_pmu",
	.get_event_desc = armv8_pmu_hw_get_event_desc,
	.check_event = armv8_pmu_hw_check_event,
	.start = armv8_pmu_hw_start,
	.stop = armv8_pmu_hw_stop,
	.polling = armv8_pmu_hw_polling,
};
| 281 | |
| 282 | struct cpu_pmu_hw *cpu_pmu_hw_init(void) |
| 283 | { |
| 284 | int i; |
| 285 | enum ARM_TYPE type; |
| 286 | |
| 287 | type = armv8_get_ic(); |
| 288 | PR_BOOTMSG("CPU TYPE - v8: %x\n", (unsigned int)type); |
| 289 | for (i = 0; i < CHIP_PMU_COUNT; i++) { |
| 290 | if (chips[i].type == type) { |
| 291 | chip = &(chips[i]); |
| 292 | break; |
| 293 | } |
| 294 | } |
| 295 | if (i == CHIP_PMU_COUNT) { |
| 296 | chip = &chip_unknown; |
| 297 | return NULL; |
| 298 | } |
| 299 | |
| 300 | armv8_pmu.nr_cnt = armv8_pmu_hw_get_counters() + 1; |
| 301 | armv8_pmu.cpu_name = chip->cpu_name; |
| 302 | |
| 303 | return &armv8_pmu; |
| 304 | } |