arch/x86/kvm/vmx/evmcs.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_EVMCS_H
#define __KVM_X86_VMX_EVMCS_H

#include <linux/jump_label.h>

#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#include <asm/vmx.h>

#include "capabilities.h"
#include "vmcs.h"
#include "vmcs12.h"

struct vmcs_config;

DECLARE_STATIC_KEY_FALSE(enable_evmcs);

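/*
 * current_vmcs is the per-cpu pointer to the VMCS loaded on this CPU; when
 * KVM itself runs on Hyper-V with enlightened VMCS enabled, that page is
 * really a struct hv_enlightened_vmcs, which the cast below exposes to the
 * accessors in this file.
 */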
#define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs))

#define KVM_EVMCS_VERSION 1

/*
 * Enlightened VMCSv1 doesn't support these:
 *
 *      POSTED_INTR_NV                  = 0x00000002,
 *      GUEST_INTR_STATUS               = 0x00000810,
 *      APIC_ACCESS_ADDR                = 0x00002014,
 *      POSTED_INTR_DESC_ADDR           = 0x00002016,
 *      EOI_EXIT_BITMAP0                = 0x0000201c,
 *      EOI_EXIT_BITMAP1                = 0x0000201e,
 *      EOI_EXIT_BITMAP2                = 0x00002020,
 *      EOI_EXIT_BITMAP3                = 0x00002022,
 *      GUEST_PML_INDEX                 = 0x00000812,
 *      PML_ADDRESS                     = 0x0000200e,
 *      VM_FUNCTION_CONTROL             = 0x00002018,
 *      EPTP_LIST_ADDRESS               = 0x00002024,
 *      VMREAD_BITMAP                   = 0x00002026,
 *      VMWRITE_BITMAP                  = 0x00002028,
 *
 *      TSC_MULTIPLIER                  = 0x00002032,
 *      PLE_GAP                         = 0x00004020,
 *      PLE_WINDOW                      = 0x00004022,
 *      VMX_PREEMPTION_TIMER_VALUE      = 0x0000482E,
 *      GUEST_IA32_PERF_GLOBAL_CTRL     = 0x00002808,
 *      HOST_IA32_PERF_GLOBAL_CTRL      = 0x00002c04,
 *
 * Currently unsupported in KVM:
 *      GUEST_IA32_RTIT_CTL             = 0x00002814,
 */
#define EVMCS1_UNSUPPORTED_PINCTRL (PIN_BASED_POSTED_INTR | \
                                    PIN_BASED_VMX_PREEMPTION_TIMER)
#define EVMCS1_UNSUPPORTED_2NDEXEC                                      \
        (SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |                         \
         SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |                      \
         SECONDARY_EXEC_APIC_REGISTER_VIRT |                            \
         SECONDARY_EXEC_ENABLE_PML |                                    \
         SECONDARY_EXEC_ENABLE_VMFUNC |                                 \
         SECONDARY_EXEC_SHADOW_VMCS |                                   \
         SECONDARY_EXEC_TSC_SCALING |                                   \
         SECONDARY_EXEC_PAUSE_LOOP_EXITING)
#define EVMCS1_UNSUPPORTED_VMEXIT_CTRL (VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
#define EVMCS1_UNSUPPORTED_VMENTRY_CTRL (VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
#define EVMCS1_UNSUPPORTED_VMFUNC (VMX_VMFUNC_EPTP_SWITCHING)
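
/*
 * A rough sketch (not verbatim) of how these masks are consumed: before the
 * computed VMCS configuration is used together with eVMCS,
 * evmcs_sanitize_exec_ctrls() in evmcs.c strips the unsupported bits,
 * roughly:
 *
 *      vmcs_conf->pin_based_exec_ctrl &= ~EVMCS1_UNSUPPORTED_PINCTRL;
 *      vmcs_conf->cpu_based_2nd_exec_ctrl &= ~EVMCS1_UNSUPPORTED_2NDEXEC;
 *      vmcs_conf->vmexit_ctrl &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
 *      vmcs_conf->vmentry_ctrl &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
 *
 * (field names are those of struct vmcs_config in capabilities.h)
 */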

#if IS_ENABLED(CONFIG_HYPERV)

struct evmcs_field {
        u16 offset;
        u16 clean_field;
};

extern const struct evmcs_field vmcs_field_to_evmcs_1[];
extern const unsigned int nr_evmcs_1_fields;

#define ROL16(val, n) ((u16)(((u16)(val) << (n)) | ((u16)(val) >> (16 - (n)))))

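/*
 * vmcs_field_to_evmcs_1[] in evmcs.c is indexed by the same ROL16(field, 6)
 * rotation of the raw VMCS field encoding, so the rotate below recovers the
 * table slot directly; encodings that fall outside nr_evmcs_1_fields are
 * unknown to eVMCSv1 and are rejected with a one-time warning.
 */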
static __always_inline int get_evmcs_offset(unsigned long field,
                                            u16 *clean_field)
{
        unsigned int index = ROL16(field, 6);
        const struct evmcs_field *evmcs_field;

        if (unlikely(index >= nr_evmcs_1_fields)) {
                WARN_ONCE(1, "KVM: accessing unsupported EVMCS field %lx\n",
                          field);
                return -ENOENT;
        }

        evmcs_field = &vmcs_field_to_evmcs_1[index];

        if (clean_field)
                *clean_field = evmcs_field->clean_field;

        return evmcs_field->offset;
}

#undef ROL16

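/*
 * The write helpers below also clear the field's group bit in
 * hv_clean_fields so that Hyper-V reloads that part of the eVMCS on the
 * next VM entry; writes to fields unknown to eVMCSv1 are silently dropped
 * after get_evmcs_offset() has already warned.
 */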
static inline void evmcs_write64(unsigned long field, u64 value)
{
        u16 clean_field;
        int offset = get_evmcs_offset(field, &clean_field);

        if (offset < 0)
                return;

        *(u64 *)((char *)current_evmcs + offset) = value;

        current_evmcs->hv_clean_fields &= ~clean_field;
}

static inline void evmcs_write32(unsigned long field, u32 value)
{
        u16 clean_field;
        int offset = get_evmcs_offset(field, &clean_field);

        if (offset < 0)
                return;

        *(u32 *)((char *)current_evmcs + offset) = value;
        current_evmcs->hv_clean_fields &= ~clean_field;
}

static inline void evmcs_write16(unsigned long field, u16 value)
{
        u16 clean_field;
        int offset = get_evmcs_offset(field, &clean_field);

        if (offset < 0)
                return;

        *(u16 *)((char *)current_evmcs + offset) = value;
        current_evmcs->hv_clean_fields &= ~clean_field;
}

static inline u64 evmcs_read64(unsigned long field)
{
        int offset = get_evmcs_offset(field, NULL);

        if (offset < 0)
                return 0;

        return *(u64 *)((char *)current_evmcs + offset);
}

static inline u32 evmcs_read32(unsigned long field)
{
        int offset = get_evmcs_offset(field, NULL);

        if (offset < 0)
                return 0;

        return *(u32 *)((char *)current_evmcs + offset);
}

static inline u16 evmcs_read16(unsigned long field)
{
        int offset = get_evmcs_offset(field, NULL);

        if (offset < 0)
                return 0;

        return *(u16 *)((char *)current_evmcs + offset);
}

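/*
 * Called when the MSR bitmap is modified: if this eVMCS opted in to the MSR
 * bitmap enlightenment, the corresponding clean bit must be cleared so that
 * Hyper-V re-reads the bitmap on the next VM entry.
 */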
static inline void evmcs_touch_msr_bitmap(void)
{
        if (unlikely(!current_evmcs))
                return;

        if (current_evmcs->hv_enlightenments_control.msr_bitmap)
                current_evmcs->hv_clean_fields &=
                        ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
}

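/*
 * There is no hardware VMPTRLD for an enlightened VMCS: "loading" it means
 * publishing its physical address in the VP assist page and setting
 * enlighten_vmentry, after which Hyper-V uses that eVMCS on the next VM
 * entry.  If the eVMCS requested the nested TLB flush enlightenment, the
 * direct hypercall feature is enabled in the assist page as well.
 */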
static inline void evmcs_load(u64 phys_addr)
{
        struct hv_vp_assist_page *vp_ap =
                hv_get_vp_assist_page(smp_processor_id());

        if (current_evmcs->hv_enlightenments_control.nested_flush_hypercall)
                vp_ap->nested_control.features.directhypercall = 1;
        vp_ap->current_nested_vmcs = phys_addr;
        vp_ap->enlighten_vmentry = 1;
}

void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf);
#else /* !IS_ENABLED(CONFIG_HYPERV) */
static inline void evmcs_write64(unsigned long field, u64 value) {}
static inline void evmcs_write32(unsigned long field, u32 value) {}
static inline void evmcs_write16(unsigned long field, u16 value) {}
static inline u64 evmcs_read64(unsigned long field) { return 0; }
static inline u32 evmcs_read32(unsigned long field) { return 0; }
static inline u16 evmcs_read16(unsigned long field) { return 0; }
static inline void evmcs_load(u64 phys_addr) {}
static inline void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf) {}
static inline void evmcs_touch_msr_bitmap(void) {}
#endif /* IS_ENABLED(CONFIG_HYPERV) */

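/*
 * Outcome of the enlightened VMPTRLD path in nested.c, roughly:
 * EVMPTRLD_DISABLED - the vCPU is not using an enlightened VMCS;
 * EVMPTRLD_SUCCEEDED - the eVMCS GPA was found and mapped;
 * EVMPTRLD_VMFAIL - the eVMCS is unusable and the nested VM entry should
 *                   fail;
 * EVMPTRLD_ERROR - an internal error occurred while mapping the eVMCS.
 */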
enum nested_evmptrld_status {
        EVMPTRLD_DISABLED,
        EVMPTRLD_SUCCEEDED,
        EVMPTRLD_VMFAIL,
        EVMPTRLD_ERROR,
};

bool nested_enlightened_vmentry(struct kvm_vcpu *vcpu, u64 *evmcs_gpa);
uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu);
int nested_enable_evmcs(struct kvm_vcpu *vcpu,
                        uint16_t *vmcs_version);
void nested_evmcs_filter_control_msr(u32 msr_index, u64 *pdata);
int nested_evmcs_check_controls(struct vmcs12 *vmcs12);

#endif /* __KVM_X86_VMX_EVMCS_H */