// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>

static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);

#define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1

/**
 * kvm_pmu_idx_is_64bit - determine if select_idx is a 64bit counter
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
{
        return (select_idx == ARMV8_PMU_CYCLE_IDX &&
                __vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
}

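/*
 * Map a struct kvm_pmc back to its owning vcpu. pmc->idx is the counter's
 * position in the kvm_pmu::pmc[] array, so stepping back by idx yields
 * pmc[0] (e.g. for idx == 3, pmc - 3 == &pmu->pmc[0]); container_of() then
 * walks out through struct kvm_pmu and struct kvm_vcpu_arch to the vcpu.
 */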
static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu;
        struct kvm_vcpu_arch *vcpu_arch;

        pmc -= pmc->idx;
        pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
        vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
        return container_of(vcpu_arch, struct kvm_vcpu, arch);
}

/**
 * kvm_pmu_pmc_is_chained - determine if the pmc is chained
 * @pmc: The PMU counter pointer
 */
static bool kvm_pmu_pmc_is_chained(struct kvm_pmc *pmc)
{
        struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);

        return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
}
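
/*
 * Counters are paired for chaining: counters 2n and 2n+1 share bit n of
 * the 'chained' bitmap, hence the idx >> 1 above. For example, counters
 * 4 and 5 are chained iff bit 2 is set, and the even (low) counter of a
 * chained pair is the canonical one that carries the perf event.
 */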

/**
 * kvm_pmu_idx_is_high_counter - determine if select_idx is the high half of a counter pair
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_is_high_counter(u64 select_idx)
{
        return select_idx & 0x1;
}

/**
 * kvm_pmu_get_canonical_pmc - obtain the canonical pmc
 * @pmc: The PMU counter pointer
 *
 * When a pair of PMCs are chained together we use the low counter (canonical)
 * to hold the underlying perf event.
 */
static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)
{
        if (kvm_pmu_pmc_is_chained(pmc) &&
            kvm_pmu_idx_is_high_counter(pmc->idx))
                return pmc - 1;

        return pmc;
}

/**
 * kvm_pmu_idx_has_chain_evtype - determine if the event type is chain
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_has_chain_evtype(struct kvm_vcpu *vcpu, u64 select_idx)
{
        u64 eventsel, reg;

        select_idx |= 0x1;

        if (select_idx == ARMV8_PMU_CYCLE_IDX)
                return false;

        reg = PMEVTYPER0_EL0 + select_idx;
        eventsel = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_EVENT;

        return eventsel == ARMV8_PMUV3_PERFCTR_CHAIN;
}
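
/*
 * Note: the select_idx |= 0x1 above maps either half of a pair onto its
 * odd (high) index; the CHAIN event type that marks a chained pair is
 * looked up in the high counter's PMEVTYPERn_EL0.
 */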

/**
 * kvm_pmu_get_pair_counter_value - get the value of a (possibly chained) counter
 * @vcpu: The vcpu pointer
 * @pmc: The PMU counter pointer
 */
static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu,
                                          struct kvm_pmc *pmc)
{
        u64 counter, counter_high, reg, enabled, running;

        if (kvm_pmu_pmc_is_chained(pmc)) {
                pmc = kvm_pmu_get_canonical_pmc(pmc);
                reg = PMEVCNTR0_EL0 + pmc->idx;

                counter = __vcpu_sys_reg(vcpu, reg);
                counter_high = __vcpu_sys_reg(vcpu, reg + 1);

                counter = lower_32_bits(counter) | (counter_high << 32);
        } else {
                reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
                      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
                counter = __vcpu_sys_reg(vcpu, reg);
        }

        /*
         * The real counter value is equal to the value of the counter
         * register plus the value the perf event has counted.
         */
        if (pmc->perf_event)
                counter += perf_event_read_value(pmc->perf_event, &enabled,
                                                 &running);

        return counter;
}
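
/*
 * For a chained pair the two 32-bit halves are recombined above: e.g.
 * with PMEVCNTR<n>_EL0 == 0x00000005 and PMEVCNTR<n+1>_EL0 == 0x2, the
 * pair reads back as 0x200000005.
 */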

/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
        u64 counter;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc = &pmu->pmc[select_idx];

        counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

        if (kvm_pmu_pmc_is_chained(pmc) &&
            kvm_pmu_idx_is_high_counter(select_idx))
                counter = upper_32_bits(counter);
        else if (select_idx != ARMV8_PMU_CYCLE_IDX)
                counter = lower_32_bits(counter);

        return counter;
}

/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
        u64 reg;

        reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
              ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
        __vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);

        /* Recreate the perf event to reflect the updated sample_period */
        kvm_pmu_create_perf_event(vcpu, select_idx);
}
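
/*
 * kvm_pmu_set_counter_value() stores the delta (val - current value)
 * rather than val itself: the observed counter value is the saved
 * register plus the perf event's count, so adding the delta makes a
 * subsequent read return exactly val. E.g. if the counter currently
 * reads 100 and the guest writes 40, the saved register drops by 60.
 */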

/**
 * kvm_pmu_release_perf_event - remove the perf event
 * @pmc: The PMU counter pointer
 */
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
{
        pmc = kvm_pmu_get_canonical_pmc(pmc);
        if (pmc->perf_event) {
                perf_event_disable(pmc->perf_event);
                perf_event_release_kernel(pmc->perf_event);
                pmc->perf_event = NULL;
        }
}

/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @vcpu: The vcpu pointer
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, save its
 * current value back to the guest-visible register and release the perf
 * event here.
 */
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
        u64 counter, reg, val;

        pmc = kvm_pmu_get_canonical_pmc(pmc);
        if (!pmc->perf_event)
                return;

        counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

        if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
                reg = PMCCNTR_EL0;
                val = counter;
        } else {
                reg = PMEVCNTR0_EL0 + pmc->idx;
                val = lower_32_bits(counter);
        }

        __vcpu_sys_reg(vcpu, reg) = val;

        if (kvm_pmu_pmc_is_chained(pmc))
                __vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);

        kvm_pmu_release_perf_event(pmc);
}

/**
 * kvm_pmu_vcpu_init - assign pmu counter idx for cpu
 * @vcpu: The vcpu pointer
 *
 */
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
                pmu->pmc[i].idx = i;
}

/**
 * kvm_pmu_vcpu_reset - reset pmu state for cpu
 * @vcpu: The vcpu pointer
 *
 */
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
                kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);

        bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
}

/**
 * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
 * @vcpu: The vcpu pointer
 *
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
                kvm_pmu_release_perf_event(&pmu->pmc[i]);
}

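/*
 * kvm_pmu_valid_counter_mask - mask of counters implemented for this vcpu
 *
 * PMCR_EL0.N holds the number of event counters; the cycle counter
 * (ARMV8_PMU_CYCLE_IDX) is always included. For example, with N == 6 the
 * mask is GENMASK(5, 0) | BIT(ARMV8_PMU_CYCLE_IDX).
 */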
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
        u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

        val &= ARMV8_PMU_PMCR_N_MASK;
        if (val == 0)
                return BIT(ARMV8_PMU_CYCLE_IDX);
        else
                return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}

/**
 * kvm_pmu_enable_counter_mask - enable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENSET register
 *
 * Call perf_event_enable to start counting the perf event
 */
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc;

        if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
                return;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
                if (!(val & BIT(i)))
                        continue;

                pmc = &pmu->pmc[i];

                /*
                 * For high counters of chained events we must recreate the
                 * perf event with the long (64bit) attribute set.
                 */
                if (kvm_pmu_pmc_is_chained(pmc) &&
                    kvm_pmu_idx_is_high_counter(i)) {
                        kvm_pmu_create_perf_event(vcpu, i);
                        continue;
                }

                /* At this point, pmc must be the canonical counter */
                if (pmc->perf_event) {
                        perf_event_enable(pmc->perf_event);
                        if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
                                kvm_debug("fail to enable perf event\n");
                }
        }
}

/**
 * kvm_pmu_disable_counter_mask - disable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENCLR register
 *
 * Call perf_event_disable to stop counting the perf event
 */
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc;

        if (!val)
                return;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
                if (!(val & BIT(i)))
                        continue;

                pmc = &pmu->pmc[i];

                /*
                 * For high counters of chained events we must recreate the
                 * perf event with the long (64bit) attribute unset.
                 */
                if (kvm_pmu_pmc_is_chained(pmc) &&
                    kvm_pmu_idx_is_high_counter(i)) {
                        kvm_pmu_create_perf_event(vcpu, i);
                        continue;
                }

                /* At this point, pmc must be the canonical counter */
                if (pmc->perf_event)
                        perf_event_disable(pmc->perf_event);
        }
}

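/*
 * Return the set of counters currently asserting an overflow interrupt:
 * those whose overflow flag is set in PMOVSSET_EL0, whose interrupt is
 * enabled in PMINTENSET_EL1, and which are implemented, provided the PMU
 * as a whole is enabled (PMCR_EL0.E).
 */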
static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
        u64 reg = 0;

        if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
                reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
                reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
                reg &= kvm_pmu_valid_counter_mask(vcpu);
        }

        return reg;
}

static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        bool overflow;

        if (!kvm_arm_pmu_v3_ready(vcpu))
                return;

        overflow = !!kvm_pmu_overflow_status(vcpu);
        if (pmu->irq_level == overflow)
                return;

        pmu->irq_level = overflow;

        if (likely(irqchip_in_kernel(vcpu->kvm))) {
                int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
                                              pmu->irq_num, overflow, pmu);
                WARN_ON(ret);
        }
}

bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
        bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;

        if (likely(irqchip_in_kernel(vcpu->kvm)))
                return false;

        return pmu->irq_level != run_level;
}

/*
 * Reflect the PMU overflow interrupt output level into the kvm_run structure
 */
void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
{
        struct kvm_sync_regs *regs = &vcpu->run->s.regs;

        /* Populate the PMU bit for user space */
        regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
        if (vcpu->arch.pmu.irq_level)
                regs->device_irq_level |= KVM_ARM_DEV_PMU;
}

/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and inject
 * an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
        kvm_pmu_update_state(vcpu);
}

/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
        kvm_pmu_update_state(vcpu);
}

/*
 * When the perf event overflows, set the overflow status and inform the vcpu.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
                                  struct perf_sample_data *data,
                                  struct pt_regs *regs)
{
        struct kvm_pmc *pmc = perf_event->overflow_handler_context;
        struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
        struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
        int idx = pmc->idx;
        u64 period;

        cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);

        /*
         * Reset the sample period to the architectural limit,
         * i.e. the point where the counter overflows.
         */
        period = -(local64_read(&perf_event->count));

        if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
                period &= GENMASK(31, 0);
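        /*
         * Example: for a 32-bit counter whose perf count has reached
         * 0xfffffff0, period becomes 0x10, so the event fires again after
         * 16 more increments, i.e. at the next 32-bit wrap.
         */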

        local64_set(&perf_event->hw.period_left, 0);
        perf_event->attr.sample_period = period;
        perf_event->hw.sample_period = period;

        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);

        if (kvm_pmu_overflow_status(vcpu)) {
                kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
                kvm_vcpu_kick(vcpu);
        }

        cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
}

/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        int i;

        if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
                return;

        /* Weed out disabled counters */
        val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);

        for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
                u64 type, reg;

                if (!(val & BIT(i)))
                        continue;

                /* PMSWINC only applies to ... SW_INC! */
                type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
                type &= ARMV8_PMU_EVTYPE_EVENT;
                if (type != ARMV8_PMUV3_PERFCTR_SW_INCR)
                        continue;

                /* increment this SW_INC event counter */
                reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
                reg = lower_32_bits(reg);
                __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;

                if (reg) /* no overflow on the low part */
                        continue;

                if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) {
                        /* increment the high counter */
                        reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1;
                        reg = lower_32_bits(reg);
                        __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg;
                        if (!reg) /* mark overflow on the high counter */
                                __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1);
                } else {
                        /* mark overflow on low counter */
                        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
                }
        }
}

/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
        u64 mask;
        int i;

        mask = kvm_pmu_valid_counter_mask(vcpu);
        if (val & ARMV8_PMU_PMCR_E) {
                kvm_pmu_enable_counter_mask(vcpu,
                        __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
        } else {
                kvm_pmu_disable_counter_mask(vcpu, mask);
        }

        if (val & ARMV8_PMU_PMCR_C)
                kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

        if (val & ARMV8_PMU_PMCR_P) {
                for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++)
                        kvm_pmu_set_counter_value(vcpu, i, 0);
        }
}

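/*
 * A counter is enabled when the PMU as a whole is enabled (PMCR_EL0.E)
 * and the counter's own bit is set in PMCNTENSET_EL0.
 */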
static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
{
        return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
               (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
}

/**
 * kvm_pmu_create_perf_event - create a perf event for a counter
 * @vcpu: The vcpu pointer
 * @select_idx: The number of selected counter
 */
static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc;
        struct perf_event *event;
        struct perf_event_attr attr;
        u64 eventsel, counter, reg, data;

        /*
         * For chained counters the event type and filtering attributes are
         * obtained from the low/even counter. We also use this counter to
         * determine if the event is enabled/disabled.
         */
        pmc = kvm_pmu_get_canonical_pmc(&pmu->pmc[select_idx]);

        reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
              ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx;
        data = __vcpu_sys_reg(vcpu, reg);

        kvm_pmu_stop_counter(vcpu, pmc);
        eventsel = data & ARMV8_PMU_EVTYPE_EVENT;

        /* Software increment event doesn't need to be backed by a perf event */
        if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR &&
            pmc->idx != ARMV8_PMU_CYCLE_IDX)
                return;

        memset(&attr, 0, sizeof(struct perf_event_attr));
        attr.type = PERF_TYPE_RAW;
        attr.size = sizeof(attr);
        attr.pinned = 1;
        attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx);
        attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
        attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
        attr.exclude_hv = 1; /* Don't count EL2 events */
        attr.exclude_host = 1; /* Don't count host events */
        attr.config = (pmc->idx == ARMV8_PMU_CYCLE_IDX) ?
                ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel;

        counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

        if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
                /*
                 * The initial sample period (overflow count) of an event. For
                 * chained counters we only support overflow interrupts on the
                 * high counter.
                 */
                attr.sample_period = (-counter) & GENMASK(63, 0);
                if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
                        attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;

                event = perf_event_create_kernel_counter(&attr, -1, current,
                                                         kvm_pmu_perf_overflow,
                                                         pmc + 1);
        } else {
                /* The initial sample period (overflow count) of an event. */
                if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
                        attr.sample_period = (-counter) & GENMASK(63, 0);
                else
                        attr.sample_period = (-counter) & GENMASK(31, 0);

                event = perf_event_create_kernel_counter(&attr, -1, current,
                                                         kvm_pmu_perf_overflow, pmc);
        }

        if (IS_ERR(event)) {
                pr_err_once("kvm: pmu event creation failed %ld\n",
                            PTR_ERR(event));
                return;
        }

        pmc->perf_event = event;
}

/**
 * kvm_pmu_update_pmc_chained - update chained bitmap
 * @vcpu: The vcpu pointer
 * @select_idx: The number of selected counter
 *
 * Update the chained bitmap based on the event type written in the
 * typer register.
 */
static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc = &pmu->pmc[select_idx];

        if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
                /*
                 * During promotion from !chained to chained we must ensure
                 * the adjacent counter is stopped and its event destroyed
                 */
                if (!kvm_pmu_pmc_is_chained(pmc))
                        kvm_pmu_stop_counter(vcpu, pmc);

                set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
        } else {
                clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
        }
}

/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The number of selected counter
 *
 * When the OS accesses PMXEVTYPER_EL0, it wants to set a PMC to count an
 * event with the given hardware event number. Here we call the perf_event API
 * to emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
                                    u64 select_idx)
{
        u64 reg, event_type = data & ARMV8_PMU_EVTYPE_MASK;

        reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
              ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + select_idx;

        __vcpu_sys_reg(vcpu, reg) = event_type;

        kvm_pmu_update_pmc_chained(vcpu, select_idx);
        kvm_pmu_create_perf_event(vcpu, select_idx);
}

bool kvm_arm_support_pmu_v3(void)
{
        /*
         * Check if HW_PERF_EVENTS are supported by checking the number of
         * hardware performance counters. This ensures that a physical PMU
         * is present and that CONFIG_PERF_EVENTS is selected.
         */
        return (perf_num_counters() > 0);
}

int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
        if (!vcpu->arch.pmu.created)
                return 0;

        /*
         * A valid interrupt configuration for the PMU is either to have a
         * properly configured interrupt number and using an in-kernel
         * irqchip, or to not have an in-kernel GIC and not set an IRQ.
         */
        if (irqchip_in_kernel(vcpu->kvm)) {
                int irq = vcpu->arch.pmu.irq_num;
                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        return -EINVAL;

                /*
                 * If we are using an in-kernel vgic, at this point we know
                 * the vgic will be initialized, so we can check the PMU irq
                 * number against the dimensions of the vgic and make sure
                 * it's valid.
                 */
                if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
                        return -EINVAL;
        } else if (kvm_arm_pmu_irq_initialized(vcpu)) {
                return -EINVAL;
        }

        kvm_pmu_vcpu_reset(vcpu);
        vcpu->arch.pmu.ready = true;

        return 0;
}

static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
        if (!kvm_arm_support_pmu_v3())
                return -ENODEV;

        if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
                return -ENXIO;

        if (vcpu->arch.pmu.created)
                return -EBUSY;

        if (irqchip_in_kernel(vcpu->kvm)) {
                int ret;

                /*
                 * If using the PMU with an in-kernel virtual GIC
                 * implementation, we require the GIC to be already
                 * initialized when initializing the PMU.
                 */
                if (!vgic_initialized(vcpu->kvm))
                        return -ENODEV;

                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        return -ENXIO;

                ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
                                         &vcpu->arch.pmu);
                if (ret)
                        return ret;
        }

        vcpu->arch.pmu.created = true;
        return 0;
}

/*
 * For one VM the interrupt type must be the same for each vcpu.
 * As a PPI, the interrupt number is the same for all vcpus,
 * while as an SPI it must be a separate number per vcpu.
 */
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
        int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        continue;

                if (irq_is_ppi(irq)) {
                        if (vcpu->arch.pmu.irq_num != irq)
                                return false;
                } else {
                        if (vcpu->arch.pmu.irq_num == irq)
                                return false;
                }
        }

        return true;
}

int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case KVM_ARM_VCPU_PMU_V3_IRQ: {
                int __user *uaddr = (int __user *)(long)attr->addr;
                int irq;

                if (!irqchip_in_kernel(vcpu->kvm))
                        return -EINVAL;

                if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
                        return -ENODEV;

                if (get_user(irq, uaddr))
                        return -EFAULT;

                /* The PMU overflow interrupt can be a PPI or a valid SPI. */
                if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
                        return -EINVAL;

                if (!pmu_irq_is_valid(vcpu->kvm, irq))
                        return -EINVAL;

                if (kvm_arm_pmu_irq_initialized(vcpu))
                        return -EBUSY;

                kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
                vcpu->arch.pmu.irq_num = irq;
                return 0;
        }
        case KVM_ARM_VCPU_PMU_V3_INIT:
                return kvm_arm_pmu_v3_init(vcpu);
        }

        return -ENXIO;
}

int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case KVM_ARM_VCPU_PMU_V3_IRQ: {
                int __user *uaddr = (int __user *)(long)attr->addr;
                int irq;

                if (!irqchip_in_kernel(vcpu->kvm))
                        return -EINVAL;

                if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
                        return -ENODEV;

                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        return -ENXIO;

                irq = vcpu->arch.pmu.irq_num;
                return put_user(irq, uaddr);
        }
        }

        return -ENXIO;
}

int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case KVM_ARM_VCPU_PMU_V3_IRQ:
        case KVM_ARM_VCPU_PMU_V3_INIT:
                if (kvm_arm_support_pmu_v3() &&
                    test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
                        return 0;
        }

        return -ENXIO;
}