/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#include <linux/nospec.h>

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc) (&(pmc)->vcpu->arch.pmu)

/*
 * Retrieve the 4 control bits for fixed counter @idx out of
 * IA32_FIXED_CTR_CTRL: EN in bits 1:0, AnyThread in bit 2, PMI in bit 3.
 */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)

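/*
 * Worked example (illustrative value): with ctrl_reg == 0xb0,
 * fixed_ctrl_field(ctrl_reg, 1) == 0xb, i.e. fixed counter 1 is enabled for
 * both ring 0 and ring 3 (EN == 3) and raises a PMI on overflow.
 */

/* Maps a guest event select and unit mask to a perf event type (PERF_COUNT_HW_*). */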
struct kvm_event_hw_type_mapping {
        u8 eventsel;
        u8 unit_mask;
        unsigned event_type;
};

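/*
 * Vendor-specific PMU operations, dispatched through kvm_x86_ops->pmu_ops;
 * intel_pmu_ops and amd_pmu_ops at the end of this header provide the two
 * implementations.
 */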
struct kvm_pmu_ops {
        unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
                                    u8 unit_mask);
        unsigned (*find_fixed_event)(int idx);
        bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
        struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
        struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx);
        int (*is_valid_msr_idx)(struct kvm_vcpu *vcpu, unsigned idx);
        bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
        int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
        int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
        void (*refresh)(struct kvm_vcpu *vcpu);
        void (*init)(struct kvm_vcpu *vcpu);
        void (*reset)(struct kvm_vcpu *vcpu);
};

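/* Returns the all-ones mask matching the architectural width of @pmc. */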
static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);

        return pmu->counter_bitmask[pmc->type];
}

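/*
 * Fold the delta accumulated by the backing perf event into the cached
 * snapshot in pmc->counter, truncated to the counter's width.
 */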
static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
        u64 counter, enabled, running;

        counter = pmc->counter;
        if (pmc->perf_event)
                counter += perf_event_read_value(pmc->perf_event,
                                                 &enabled, &running);
        /* FIXME: Scaling needed? */
        return counter & pmc_bitmask(pmc);
}

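/*
 * Release the backing perf event, folding its final count into pmc->counter
 * first so the value is preserved for a later reprogram.
 */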
static inline void pmc_stop_counter(struct kvm_pmc *pmc)
{
        if (pmc->perf_event) {
                pmc->counter = pmc_read_counter(pmc);
                perf_event_release_kernel(pmc->perf_event);
                pmc->perf_event = NULL;
        }
}

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
        return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
        return pmc->type == KVM_PMC_FIXED;
}

static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
{
        return kvm_x86_ops->pmu_ops->pmc_is_enabled(pmc);
}

/*
 * Returns the general purpose PMC with the specified MSR. Note that it can be
 * used for both PERFCTRn and EVNTSELn; that is why it accepts @base as a
 * parameter to tell them apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
                                         u32 base)
{
        if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
                u32 index = array_index_nospec(msr - base,
                                               pmu->nr_arch_gp_counters);

                return &pmu->gp_counters[index];
        }

        return NULL;
}

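/*
 * Usage sketch (illustrative, mirroring how the Intel PMU code is expected
 * to call this): a guest access to a counter MSR resolves via
 * get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0), while the matching event select
 * resolves via get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0).
 */
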
/* Returns the fixed PMC with the specified MSR. */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
        int base = MSR_CORE_PERF_FIXED_CTR0;

        if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
                u32 index = array_index_nospec(msr - base,
                                               pmu->nr_arch_fixed_counters);

                return &pmu->fixed_counters[index];
        }

        return NULL;
}

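/*
 * Reprogram the backing perf event when the guest rewrites a counter's
 * event select or fixed control bits.
 */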
void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);

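/* PMU entry points called from the common x86 KVM code. */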
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);

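/* Per-vendor implementations of kvm_pmu_ops. */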
extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */