drivers: hv: Mark percpu hvcall input arg page unencrypted in SEV-SNP enlightened...
drivers/hv/hv.c (platform/kernel/linux-starfive.git)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <linux/random.h>
#include <linux/clockchips.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
#include <linux/set_memory.h>
#include "hyperv_vmbus.h"

/* The one and only */
struct hv_context hv_context;

/*
 * hv_init - Main initialization routine.
 *
 * This routine must be called before any other routines in here are called
 */
int hv_init(void)
{
        hv_context.cpu_context = alloc_percpu(struct hv_per_cpu_context);
        if (!hv_context.cpu_context)
                return -ENOMEM;
        return 0;
}

/*
 * hv_post_message - Post a message using the hypervisor message IPC.
 *
 * This involves a hypercall.
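 *
 * A brief note based on the code below: callers pass a connection ID and a
 * payload of at most HV_MESSAGE_PAYLOAD_BYTE_COUNT bytes; the payload is
 * copied into the per-CPU hypercall input page before HVCALL_POST_MESSAGE
 * is issued.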
 */
int hv_post_message(union hv_connection_id connection_id,
                    enum hv_message_type message_type,
                    void *payload, size_t payload_size)
{
        struct hv_input_post_message *aligned_msg;
        unsigned long flags;
        u64 status;

        if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
                return -EMSGSIZE;

        local_irq_save(flags);

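        /*
         * hyperv_pcpu_input_arg is the per-CPU page used to pass hypercall
         * input to Hyper-V.  As the commit subject above notes, in an
         * SEV-SNP "enlightened" guest this page is marked unencrypted
         * (decrypted) when it is set up, so it can be shared with the
         * hypervisor.
         */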
        aligned_msg = *this_cpu_ptr(hyperv_pcpu_input_arg);
        aligned_msg->connectionid = connection_id;
        aligned_msg->reserved = 0;
        aligned_msg->message_type = message_type;
        aligned_msg->payload_size = payload_size;
        memcpy((void *)aligned_msg->payload, payload, payload_size);

        if (hv_isolation_type_snp())
                status = hv_ghcb_hypercall(HVCALL_POST_MESSAGE,
                                (void *)aligned_msg, NULL,
                                sizeof(*aligned_msg));
        else
                status = hv_do_hypercall(HVCALL_POST_MESSAGE,
                                aligned_msg, NULL);

        local_irq_restore(flags);

        return hv_result(status);
}

int hv_synic_alloc(void)
{
        int cpu, ret = -ENOMEM;
        struct hv_per_cpu_context *hv_cpu;

        /*
         * First, zero all per-cpu memory areas so hv_synic_free() can
         * detect what memory has been allocated and clean up properly
         * after any failures.
         */
        for_each_present_cpu(cpu) {
                hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
                memset(hv_cpu, 0, sizeof(*hv_cpu));
        }

        hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask),
                                         GFP_KERNEL);
        if (hv_context.hv_numa_map == NULL) {
                pr_err("Unable to allocate NUMA map\n");
                goto err;
        }

        for_each_present_cpu(cpu) {
                hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);

                tasklet_init(&hv_cpu->msg_dpc,
                             vmbus_on_msg_dpc, (unsigned long) hv_cpu);

                /*
                 * The SynIC message and event pages are allocated by the
                 * paravisor, so skip allocating them here.
                 */
                if (!hv_isolation_type_snp() && !hv_root_partition) {
                        hv_cpu->synic_message_page =
                                (void *)get_zeroed_page(GFP_ATOMIC);
                        if (hv_cpu->synic_message_page == NULL) {
                                pr_err("Unable to allocate SYNIC message page\n");
                                goto err;
                        }

                        hv_cpu->synic_event_page =
                                (void *)get_zeroed_page(GFP_ATOMIC);
                        if (hv_cpu->synic_event_page == NULL) {
                                pr_err("Unable to allocate SYNIC event page\n");
                                goto err;
                        }
                }

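                /*
                 * In a fully enlightened SEV-SNP guest (no paravisor), the
                 * pages allocated above must be shared with the hypervisor,
                 * so mark them decrypted before use.
                 */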
                if (hv_isolation_type_en_snp()) {
                        ret = set_memory_decrypted((unsigned long)
                                hv_cpu->synic_message_page, 1);
                        if (ret) {
                                pr_err("Failed to decrypt SYNIC msg page: %d\n", ret);
                                hv_cpu->synic_message_page = NULL;

                                /*
                                 * Free the event page here so that hv_synic_free()
                                 * won't later try to re-encrypt it.
                                 */
                                free_page((unsigned long)hv_cpu->synic_event_page);
                                hv_cpu->synic_event_page = NULL;
                                goto err;
                        }

                        ret = set_memory_decrypted((unsigned long)
                                hv_cpu->synic_event_page, 1);
                        if (ret) {
                                pr_err("Failed to decrypt SYNIC event page: %d\n", ret);
                                hv_cpu->synic_event_page = NULL;
                                goto err;
                        }

                        memset(hv_cpu->synic_message_page, 0, PAGE_SIZE);
                        memset(hv_cpu->synic_event_page, 0, PAGE_SIZE);
                }
        }

        return 0;

err:
        /*
         * Any memory allocations that succeeded will be freed when
         * the caller cleans up by calling hv_synic_free()
         */
        return ret;
}


void hv_synic_free(void)
{
        int cpu, ret;

        for_each_present_cpu(cpu) {
                struct hv_per_cpu_context *hv_cpu
                        = per_cpu_ptr(hv_context.cpu_context, cpu);

                /* It's better to leak the page if the encryption fails. */
                if (hv_isolation_type_en_snp()) {
                        if (hv_cpu->synic_message_page) {
                                ret = set_memory_encrypted((unsigned long)
                                        hv_cpu->synic_message_page, 1);
                                if (ret) {
                                        pr_err("Failed to encrypt SYNIC msg page: %d\n", ret);
                                        hv_cpu->synic_message_page = NULL;
                                }
                        }

                        if (hv_cpu->synic_event_page) {
                                ret = set_memory_encrypted((unsigned long)
                                        hv_cpu->synic_event_page, 1);
                                if (ret) {
                                        pr_err("Failed to encrypt SYNIC event page: %d\n", ret);
                                        hv_cpu->synic_event_page = NULL;
                                }
                        }
                }

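                /*
                 * free_page() is a no-op for a NULL (zero) address, so pages
                 * that were never allocated, or that were deliberately leaked
                 * above after a failed re-encryption, are handled safely.
                 */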
                free_page((unsigned long)hv_cpu->synic_event_page);
                free_page((unsigned long)hv_cpu->synic_message_page);
        }

        kfree(hv_context.hv_numa_map);
}

/*
 * hv_synic_init - Initialize the Synthetic Interrupt Controller.
 *
 * If it is already initialized by another entity (i.e., the x2v shim), we
 * need to retrieve the initialized message and event pages.  Otherwise, we
 * create and initialize the message and event pages.
 */
void hv_synic_enable_regs(unsigned int cpu)
{
        struct hv_per_cpu_context *hv_cpu
                = per_cpu_ptr(hv_context.cpu_context, cpu);
        union hv_synic_simp simp;
        union hv_synic_siefp siefp;
        union hv_synic_sint shared_sint;
        union hv_synic_scontrol sctrl;

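        /*
         * SIMP and SIEFP are the SynIC message-page and event-flags-page
         * registers; SCONTROL enables the SynIC as a whole, and the SINTx
         * registers configure the synthetic interrupt sources (VMBus uses
         * VMBUS_MESSAGE_SINT).
         */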
        /* Setup the Synic's message page */
        simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
        simp.simp_enabled = 1;

        if (hv_isolation_type_snp() || hv_root_partition) {
                /* Mask out vTOM bit. ioremap_cache() maps decrypted */
                u64 base = (simp.base_simp_gpa << HV_HYP_PAGE_SHIFT) &
                                ~ms_hyperv.shared_gpa_boundary;
                hv_cpu->synic_message_page
                        = (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
                if (!hv_cpu->synic_message_page)
                        pr_err("Failed to map SynIC message page.\n");
        } else {
                simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
                        >> HV_HYP_PAGE_SHIFT;
        }

        hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);

        /* Setup the Synic's event page */
        siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
        siefp.siefp_enabled = 1;

        if (hv_isolation_type_snp() || hv_root_partition) {
                /* Mask out vTOM bit. ioremap_cache() maps decrypted */
                u64 base = (siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT) &
                                ~ms_hyperv.shared_gpa_boundary;
                hv_cpu->synic_event_page
                        = (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
                if (!hv_cpu->synic_event_page)
                        pr_err("Failed to map SynIC event page.\n");
        } else {
                siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
                        >> HV_HYP_PAGE_SHIFT;
        }

        hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);

        /* Setup the shared SINT. */
        if (vmbus_irq != -1)
                enable_percpu_irq(vmbus_irq, 0);
        shared_sint.as_uint64 = hv_get_register(HV_REGISTER_SINT0 +
                                        VMBUS_MESSAGE_SINT);

        shared_sint.vector = vmbus_interrupt;
        shared_sint.masked = false;

        /*
         * On architectures where Hyper-V doesn't support AEOI (e.g., ARM64),
         * it doesn't provide a recommendation flag and AEOI must be disabled.
         */
#ifdef HV_DEPRECATING_AEOI_RECOMMENDED
        shared_sint.auto_eoi =
                        !(ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED);
#else
        shared_sint.auto_eoi = 0;
#endif
        hv_set_register(HV_REGISTER_SINT0 + VMBUS_MESSAGE_SINT,
                                shared_sint.as_uint64);

        /* Enable the global synic bit */
        sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL);
        sctrl.enable = 1;

        hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64);
}

int hv_synic_init(unsigned int cpu)
{
        hv_synic_enable_regs(cpu);

        hv_stimer_legacy_init(cpu, VMBUS_MESSAGE_SINT);

        return 0;
}

/*
 * hv_synic_cleanup - Cleanup routine for hv_synic_init().
 */
void hv_synic_disable_regs(unsigned int cpu)
{
        struct hv_per_cpu_context *hv_cpu
                = per_cpu_ptr(hv_context.cpu_context, cpu);
        union hv_synic_sint shared_sint;
        union hv_synic_simp simp;
        union hv_synic_siefp siefp;
        union hv_synic_scontrol sctrl;

        shared_sint.as_uint64 = hv_get_register(HV_REGISTER_SINT0 +
                                        VMBUS_MESSAGE_SINT);

        shared_sint.masked = 1;

        /* Need to correctly clean up in the case of SMP!!! */
        /* Disable the interrupt */
        hv_set_register(HV_REGISTER_SINT0 + VMBUS_MESSAGE_SINT,
                                shared_sint.as_uint64);

        simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
        /*
         * In an isolation VM, the SIMP and SIEFP pages are allocated by the
         * paravisor.  These pages will also be used by the kdump kernel, so
         * just reset the enable bit here and keep the page addresses.
         */
        simp.simp_enabled = 0;
        if (hv_isolation_type_snp() || hv_root_partition) {
                iounmap(hv_cpu->synic_message_page);
                hv_cpu->synic_message_page = NULL;
        } else {
                simp.base_simp_gpa = 0;
        }

        hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);

        siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
        siefp.siefp_enabled = 0;

        if (hv_isolation_type_snp() || hv_root_partition) {
                iounmap(hv_cpu->synic_event_page);
                hv_cpu->synic_event_page = NULL;
        } else {
                siefp.base_siefp_gpa = 0;
        }

        hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);

        /* Disable the global synic bit */
        sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL);
        sctrl.enable = 0;
        hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64);

        if (vmbus_irq != -1)
                disable_percpu_irq(vmbus_irq);
}

#define HV_MAX_TRIES 3
/*
 * Scan the event flags page of 'this' CPU looking for any bit that is set.  If we find one
 * bit set, then wait for a few milliseconds.  Repeat these steps for a maximum of 3 times.
 * Return 'true', if there is still any set bit after this operation; 'false', otherwise.
 *
 * If a bit is set, that means there is a pending channel interrupt.  The expectation is
 * that the normal interrupt handling mechanism will find and process the channel interrupt
 * "very soon", and in the process clear the bit.
 */
static bool hv_synic_event_pending(void)
{
        struct hv_per_cpu_context *hv_cpu = this_cpu_ptr(hv_context.cpu_context);
        union hv_synic_event_flags *event =
                (union hv_synic_event_flags *)hv_cpu->synic_event_page + VMBUS_MESSAGE_SINT;
        unsigned long *recv_int_page = event->flags; /* assumes VMBus version >= VERSION_WIN8 */
        bool pending;
        u32 relid;
        int tries = 0;

retry:
        pending = false;
        for_each_set_bit(relid, recv_int_page, HV_EVENT_FLAGS_COUNT) {
                /* Special case - VMBus channel protocol messages */
                if (relid == 0)
                        continue;
                pending = true;
                break;
        }
        if (pending && tries++ < HV_MAX_TRIES) {
                usleep_range(10000, 20000);
                goto retry;
        }
        return pending;
}

int hv_synic_cleanup(unsigned int cpu)
{
        struct vmbus_channel *channel, *sc;
        bool channel_found = false;

        if (vmbus_connection.conn_state != CONNECTED)
                goto always_cleanup;

        /*
         * Hyper-V does not provide a way to change the connect CPU once
         * it is set; we must prevent the connect CPU from going offline
         * while the VM is running normally. But in the panic or kexec()
         * path where the vmbus is already disconnected, the CPU must be
         * allowed to shut down.
         */
        if (cpu == VMBUS_CONNECT_CPU)
                return -EBUSY;

        /*
         * Search for channels which are bound to the CPU we're about to
         * clean up.  In case we find one and vmbus is still connected, we
         * fail; this will effectively prevent CPU offlining.
         *
         * TODO: Re-bind the channels to different CPUs.
         */
        mutex_lock(&vmbus_connection.channel_mutex);
        list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
                if (channel->target_cpu == cpu) {
                        channel_found = true;
                        break;
                }
                list_for_each_entry(sc, &channel->sc_list, sc_list) {
                        if (sc->target_cpu == cpu) {
                                channel_found = true;
                                break;
                        }
                }
                if (channel_found)
                        break;
        }
        mutex_unlock(&vmbus_connection.channel_mutex);

        if (channel_found)
                return -EBUSY;

        /*
         * channel_found == false means that any channels that were previously
         * assigned to the CPU have been reassigned elsewhere with a call to
         * vmbus_send_modifychannel().  Scan the event flags page looking for
         * bits that are set, waiting with a timeout for vmbus_chan_sched()
         * to process such bits.  If bits are still set after this operation
         * and VMBus is connected, fail the CPU offlining operation.
         */
        if (vmbus_proto_version >= VERSION_WIN10_V4_1 && hv_synic_event_pending())
                return -EBUSY;

always_cleanup:
        hv_stimer_legacy_cleanup(cpu);

        hv_synic_disable_regs(cpu);

        return 0;
}