include/kvm/arm_pmu.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#ifndef __ASM_ARM_KVM_PMU_H
#define __ASM_ARM_KVM_PMU_H

#include <linux/perf_event.h>
#include <linux/perf/arm_pmuv3.h>

#define ARMV8_PMU_CYCLE_IDX             (ARMV8_PMU_MAX_COUNTERS - 1)
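/*
 * Note: the cycle counter always occupies the highest counter index.
 * Assuming ARMV8_PMU_MAX_COUNTERS is 32 (as defined in
 * <linux/perf/arm_pmuv3.h> for this kernel), ARMV8_PMU_CYCLE_IDX is 31,
 * matching the architectural slot PMCCNTR_EL0 shares with bit 31 of the
 * PMCNTEN* registers.
 */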

#if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM)

struct kvm_pmc {
        u8 idx; /* index into the pmu->pmc array */
        struct perf_event *perf_event;
};

struct kvm_pmu_events {
        u32 events_host;
        u32 events_guest;
};

struct kvm_pmu {
        struct irq_work overflow_work;
        struct kvm_pmu_events events;
        struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
        int irq_num;
        bool created;
        bool irq_level;
};
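
/*
 * Illustrative sketch, not part of the original header: struct kvm_pmu
 * is embedded in each vcpu (reachable as vcpu->arch.pmu, which the
 * kvm_arm_pmu_irq_initialized() macro below relies on), so iterating a
 * vcpu's counters looks roughly like:
 *
 *	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 *	int i;
 *
 *	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
 *		if (pmu->pmc[i].perf_event)
 *			...	counter i is backed by a host perf event
 */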

struct arm_pmu_entry {
        struct list_head entry;
        struct arm_pmu *arm_pmu;
};

DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

static __always_inline bool kvm_arm_support_pmu_v3(void)
{
        return static_branch_likely(&kvm_arm_pmu_available);
}
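
/*
 * Hedged usage sketch (the caller and error code here are illustrative,
 * not mandated by this header): setup paths typically bail out early
 * when the static key reports no host PMUv3 support, e.g.:
 *
 *	if (!kvm_arm_support_pmu_v3())
 *		return -ENODEV;
 */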

#define kvm_arm_pmu_irq_initialized(v)  ((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
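/*
 * Note on the check above: SGIs (INTIDs 0..15, i.e. below VGIC_NR_SGIS)
 * are never valid PMU overflow interrupts, so any irq_num in that range
 * doubles as the "not yet configured" marker; a configured PMU IRQ is a
 * PPI or SPI and therefore >= VGIC_NR_SGIS.
 */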
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
                                    u64 select_idx);
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);

struct kvm_pmu_events *kvm_get_pmu_events(void);
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_resync_el0(void);

#define kvm_vcpu_has_pmu(vcpu)                                  \
        (test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))

/*
 * Updates the vcpu's view of the PMU events for this CPU.
 * Must be called with interrupts disabled, before every vcpu run, to
 * ensure that an interrupt cannot fire and update the structure.
 */
#define kvm_pmu_update_vcpu_events(vcpu)                                \
        do {                                                            \
                if (!has_vhe() && kvm_vcpu_has_pmu(vcpu))               \
                        vcpu->arch.pmu.events = *kvm_get_pmu_events();  \
        } while (0)
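
/*
 * Hedged usage sketch: per the comment above, on the non-VHE entry path
 * this is expected to run with interrupts already disabled, roughly as
 * follows (the surrounding code is illustrative, not part of this
 * header):
 *
 *	local_irq_disable();
 *	kvm_pmu_update_vcpu_events(vcpu);
 *	...enter the guest...
 *	local_irq_enable();
 */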

/*
 * Evaluates to true when the vcpu emulates PMUv3p5 or a later revision
 * of the PMU architecture, and false otherwise.
 */
#define kvm_pmu_is_3p5(vcpu) ({                                         \
        u64 val = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);                \
        u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);        \
                                                                        \
        pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5;                          \
})
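
/*
 * Illustrative note: since the macro compares the vcpu's sanitised
 * ID_AA64DFR0_EL1.PMUVer field against PMUv3p5, callers can branch on
 * the emulated PMU revision, e.g.:
 *
 *	if (kvm_pmu_is_3p5(vcpu))
 *		...PMUv3p5 semantics, such as 64-bit event counters, apply...
 */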

u8 kvm_arm_pmu_get_pmuver_limit(void);

#else
struct kvm_pmu {
};

static inline bool kvm_arm_support_pmu_v3(void)
{
        return false;
}

#define kvm_arm_pmu_irq_initialized(v)  (false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
                                            u64 select_idx)
{
        return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
                                             u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
        return 0;
}
static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
        return false;
}
static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
                                                  u64 data, u64 select_idx) {}
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
                                          struct kvm_device_attr *attr)
{
        return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
                                          struct kvm_device_attr *attr)
{
        return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
                                          struct kvm_device_attr *attr)
{
        return -ENXIO;
}
static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
        return 0;
}
static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
        return 0;
}

#define kvm_vcpu_has_pmu(vcpu)          ({ false; })
#define kvm_pmu_is_3p5(vcpu)            ({ false; })
static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
{
        return 0;
}
static inline void kvm_vcpu_pmu_resync_el0(void) {}

#endif

#endif