// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <linux/random.h>
#include <linux/clockchips.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
#include <linux/set_memory.h>
#include "hyperv_vmbus.h"

/* The one and only */
struct hv_context hv_context;
/*
 * hv_init - Main initialization routine.
 *
 * This routine must be called before any other routines in here are called.
 */
int hv_init(void)
{
	hv_context.cpu_context = alloc_percpu(struct hv_per_cpu_context);
	if (!hv_context.cpu_context)
		return -ENOMEM;
	return 0;
}
/*
 * hv_post_message - Post a message using the hypervisor message IPC.
 *
 * This involves a hypercall.
 */
int hv_post_message(union hv_connection_id connection_id,
		    enum hv_message_type message_type,
		    void *payload, size_t payload_size)
{
	struct hv_input_post_message *aligned_msg;
	unsigned long flags;
	u64 status;

	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
		return -EMSGSIZE;

	local_irq_save(flags);
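	/*
	 * Build the message in the pre-allocated per-CPU hypercall input
	 * page. Interrupts stay disabled from here through the hypercall
	 * so this CPU's page cannot be reused underneath us.
	 */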
	aligned_msg = *this_cpu_ptr(hyperv_pcpu_input_arg);
	aligned_msg->connectionid = connection_id;
	aligned_msg->reserved = 0;
	aligned_msg->message_type = message_type;
	aligned_msg->payload_size = payload_size;
	memcpy((void *)aligned_msg->payload, payload, payload_size);

	if (hv_isolation_type_snp())
		status = hv_ghcb_hypercall(HVCALL_POST_MESSAGE,
				(void *)aligned_msg, NULL,
				sizeof(*aligned_msg));
	else
		status = hv_do_hypercall(HVCALL_POST_MESSAGE,
				aligned_msg, NULL);

	local_irq_restore(flags);
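	/* hv_result() extracts the low 16-bit status code from the hypercall return value. */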
	return hv_result(status);
}
int hv_synic_alloc(void)
{
	int cpu, ret = -ENOMEM;
	struct hv_per_cpu_context *hv_cpu;

	/*
	 * First, zero all per-cpu memory areas so hv_synic_free() can
	 * detect what memory has been allocated and clean up properly
	 * after any failures.
	 */
	for_each_present_cpu(cpu) {
		hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
		memset(hv_cpu, 0, sizeof(*hv_cpu));
	}

	hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask),
					 GFP_KERNEL);
	if (hv_context.hv_numa_map == NULL) {
		pr_err("Unable to allocate NUMA map\n");
		goto err;
	}

	for_each_present_cpu(cpu) {
		hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
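		/*
		 * The msg_dpc tasklet runs vmbus_on_msg_dpc() to handle
		 * host-to-guest VMBus messages in softirq context.
		 */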
		tasklet_init(&hv_cpu->msg_dpc,
			     vmbus_on_msg_dpc, (unsigned long) hv_cpu);

		/*
		 * SynIC message and event pages are allocated by the
		 * paravisor. Skip allocating these pages here.
		 */
		if (!hv_isolation_type_snp() && !hv_root_partition) {
			hv_cpu->synic_message_page =
				(void *)get_zeroed_page(GFP_ATOMIC);
			if (hv_cpu->synic_message_page == NULL) {
				pr_err("Unable to allocate SYNIC message page\n");
				goto err;
			}

			hv_cpu->synic_event_page =
				(void *)get_zeroed_page(GFP_ATOMIC);
			if (hv_cpu->synic_event_page == NULL) {
				pr_err("Unable to allocate SYNIC event page\n");
				goto err;
			}
		}

		if (hv_isolation_type_en_snp()) {
			ret = set_memory_decrypted((unsigned long)
				hv_cpu->synic_message_page, 1);
			if (ret) {
				pr_err("Failed to decrypt SYNIC msg page: %d\n", ret);
				hv_cpu->synic_message_page = NULL;

				/*
				 * Free the event page here so that hv_synic_free()
				 * won't later try to re-encrypt it.
				 */
				free_page((unsigned long)hv_cpu->synic_event_page);
				hv_cpu->synic_event_page = NULL;
				goto err;
			}

			ret = set_memory_decrypted((unsigned long)
				hv_cpu->synic_event_page, 1);
			if (ret) {
				pr_err("Failed to decrypt SYNIC event page: %d\n", ret);
				hv_cpu->synic_event_page = NULL;
				goto err;
			}
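			/*
			 * The page contents are indeterminate after the
			 * encryption state changes, so zero the pages
			 * before the hypervisor sees them.
			 */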
			memset(hv_cpu->synic_message_page, 0, PAGE_SIZE);
			memset(hv_cpu->synic_event_page, 0, PAGE_SIZE);
		}
	}

	return 0;

err:
	/*
	 * Any memory allocations that succeeded will be freed when
	 * the caller cleans up by calling hv_synic_free().
	 */
	return ret;
}
void hv_synic_free(void)
{
	int cpu, ret;

	for_each_present_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		/* It's better to leak the page if the encryption fails. */
		if (hv_isolation_type_en_snp()) {
			if (hv_cpu->synic_message_page) {
				ret = set_memory_encrypted((unsigned long)
					hv_cpu->synic_message_page, 1);
				if (ret) {
					pr_err("Failed to encrypt SYNIC msg page: %d\n", ret);
					hv_cpu->synic_message_page = NULL;
				}
			}

			if (hv_cpu->synic_event_page) {
				ret = set_memory_encrypted((unsigned long)
					hv_cpu->synic_event_page, 1);
				if (ret) {
					pr_err("Failed to encrypt SYNIC event page: %d\n", ret);
					hv_cpu->synic_event_page = NULL;
				}
			}
		}

		free_page((unsigned long)hv_cpu->synic_event_page);
		free_page((unsigned long)hv_cpu->synic_message_page);
	}

	kfree(hv_context.hv_numa_map);
}
/*
 * hv_synic_init - Initialize the Synthetic Interrupt Controller.
 *
 * If it is already initialized by another entity (i.e., x2v shim), we need to
 * retrieve the initialized message and event pages.  Otherwise, we create and
 * initialize the message and event pages.
 */
void hv_synic_enable_regs(unsigned int cpu)
{
	struct hv_per_cpu_context *hv_cpu
		= per_cpu_ptr(hv_context.cpu_context, cpu);
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_sint shared_sint;
	union hv_synic_scontrol sctrl;

	/* Setup the Synic's message page */
	simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
	simp.simp_enabled = 1;

	if (hv_isolation_type_snp() || hv_root_partition) {
		/* Mask out vTOM bit. ioremap_cache() maps decrypted */
		u64 base = (simp.base_simp_gpa << HV_HYP_PAGE_SHIFT) &
				~ms_hyperv.shared_gpa_boundary;
		hv_cpu->synic_message_page
			= (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
		if (!hv_cpu->synic_message_page)
			pr_err("Failed to map SYNIC message page.\n");
	} else {
		simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
			>> HV_HYP_PAGE_SHIFT;
	}

	hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);

	/* Setup the Synic's event page */
	siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
	siefp.siefp_enabled = 1;

	if (hv_isolation_type_snp() || hv_root_partition) {
		/* Mask out vTOM bit. ioremap_cache() maps decrypted */
		u64 base = (siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT) &
				~ms_hyperv.shared_gpa_boundary;
		hv_cpu->synic_event_page
			= (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
		if (!hv_cpu->synic_event_page)
			pr_err("Failed to map SYNIC event page.\n");
	} else {
		siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
			>> HV_HYP_PAGE_SHIFT;
	}

	hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);

	/* Setup the shared SINT. */
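	/*
	 * vmbus_irq is -1 when the VMBus interrupt arrives on a dedicated
	 * vector (x86) rather than as a Linux percpu IRQ (ARM64).
	 */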
	if (vmbus_irq != -1)
		enable_percpu_irq(vmbus_irq, 0);
	shared_sint.as_uint64 = hv_get_register(HV_REGISTER_SINT0 +
					VMBUS_MESSAGE_SINT);

	shared_sint.vector = vmbus_interrupt;
	shared_sint.masked = false;

	/*
	 * On architectures where Hyper-V doesn't support AEOI (e.g., ARM64),
	 * it doesn't provide a recommendation flag and AEOI must be disabled.
	 */
#ifdef HV_DEPRECATING_AEOI_RECOMMENDED
	shared_sint.auto_eoi =
			!(ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED);
#else
	shared_sint.auto_eoi = 0;
#endif
	hv_set_register(HV_REGISTER_SINT0 + VMBUS_MESSAGE_SINT,
				shared_sint.as_uint64);

	/* Enable the global synic bit */
	sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL);
	sctrl.enable = 1;

	hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64);
}
int hv_synic_init(unsigned int cpu)
{
	hv_synic_enable_regs(cpu);
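	/*
	 * Set up the legacy stimer path, which delivers timer expirations
	 * as messages on the VMBus SINT; this is expected to be a no-op
	 * when direct-mode synthetic timers are in use.
	 */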
	hv_stimer_legacy_init(cpu, VMBUS_MESSAGE_SINT);

	return 0;
}
/*
 * hv_synic_cleanup - Cleanup routine for hv_synic_init().
 */
void hv_synic_disable_regs(unsigned int cpu)
{
	struct hv_per_cpu_context *hv_cpu
		= per_cpu_ptr(hv_context.cpu_context, cpu);
	union hv_synic_sint shared_sint;
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_scontrol sctrl;

	shared_sint.as_uint64 = hv_get_register(HV_REGISTER_SINT0 +
					VMBUS_MESSAGE_SINT);

	shared_sint.masked = 1;

	/* Need to correctly clean up in the case of SMP!!! */
	/* Disable the interrupt */
	hv_set_register(HV_REGISTER_SINT0 + VMBUS_MESSAGE_SINT,
				shared_sint.as_uint64);

	simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
	/*
	 * In an Isolation VM, the SIM and SIEF pages are allocated by
	 * the paravisor. These pages will also be used by the kdump
	 * kernel, so just reset the enable bit here and keep the page
	 * addresses.
	 */
	simp.simp_enabled = 0;
	if (hv_isolation_type_snp() || hv_root_partition) {
		iounmap(hv_cpu->synic_message_page);
		hv_cpu->synic_message_page = NULL;
	} else {
		simp.base_simp_gpa = 0;
	}

	hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);

	siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
	siefp.siefp_enabled = 0;

	if (hv_isolation_type_snp() || hv_root_partition) {
		iounmap(hv_cpu->synic_event_page);
		hv_cpu->synic_event_page = NULL;
	} else {
		siefp.base_siefp_gpa = 0;
	}

	hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);

	/* Disable the global synic bit */
	sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL);
	sctrl.enable = 0;
	hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64);

	if (vmbus_irq != -1)
		disable_percpu_irq(vmbus_irq);
}
#define HV_MAX_TRIES 3
/*
 * Scan the event flags page of 'this' CPU looking for any bit that is set.  If we find one
 * bit set, then wait for a few milliseconds.  Repeat these steps for a maximum of 3 times.
 * Return 'true' if any bit is still set after this operation; 'false' otherwise.
 *
 * If a bit is set, that means there is a pending channel interrupt.  The expectation is
 * that the normal interrupt handling mechanism will find and process the channel interrupt
 * "very soon", and in the process clear the bit.
 */
static bool hv_synic_event_pending(void)
{
	struct hv_per_cpu_context *hv_cpu = this_cpu_ptr(hv_context.cpu_context);
	union hv_synic_event_flags *event =
		(union hv_synic_event_flags *)hv_cpu->synic_event_page + VMBUS_MESSAGE_SINT;
	unsigned long *recv_int_page = event->flags; /* assumes VMBus version >= VERSION_WIN8 */
	bool pending;
	u32 relid;
	int tries = 0;
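	/*
	 * relid 0 is reserved for VMBus protocol-level messages rather
	 * than a channel, so a set bit 0 does not count as a pending
	 * channel interrupt in the scan below.
	 */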
retry:
	pending = false;
	for_each_set_bit(relid, recv_int_page, HV_EVENT_FLAGS_COUNT) {
		/* Special case - VMBus channel protocol messages */
		if (relid == 0)
			continue;
		pending = true;
		break;
	}
	if (pending && tries++ < HV_MAX_TRIES) {
		usleep_range(10000, 20000);
		goto retry;
	}
	return pending;
}
int hv_synic_cleanup(unsigned int cpu)
{
	struct vmbus_channel *channel, *sc;
	bool channel_found = false;

	if (vmbus_connection.conn_state != CONNECTED)
		goto always_cleanup;

	/*
	 * Hyper-V does not provide a way to change the connect CPU once
	 * it is set; we must prevent the connect CPU from going offline
	 * while the VM is running normally. But in the panic or kexec()
	 * path where the vmbus is already disconnected, the CPU must be
	 * allowed to shut down.
	 */
	if (cpu == VMBUS_CONNECT_CPU)
		return -EBUSY;

	/*
	 * Search for channels which are bound to the CPU we're about to
	 * clean up.  If we find one and vmbus is still connected, we
	 * fail; this will effectively prevent CPU offlining.
	 *
	 * TODO: Re-bind the channels to different CPUs.
	 */
	mutex_lock(&vmbus_connection.channel_mutex);
	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (channel->target_cpu == cpu) {
			channel_found = true;
			break;
		}
		list_for_each_entry(sc, &channel->sc_list, sc_list) {
			if (sc->target_cpu == cpu) {
				channel_found = true;
				break;
			}
		}
		if (channel_found)
			break;
	}
	mutex_unlock(&vmbus_connection.channel_mutex);

	if (channel_found)
		return -EBUSY;

	/*
	 * channel_found == false means that any channels that were previously
	 * assigned to the CPU have been reassigned elsewhere with a call of
	 * vmbus_send_modifychannel().  Scan the event flags page looking for
	 * bits that are set and waiting with a timeout for vmbus_chan_sched()
	 * to process such bits.  If bits are still set after this operation
	 * and VMBus is connected, fail the CPU offlining operation.
	 */
	if (vmbus_proto_version >= VERSION_WIN10_V4_1 && hv_synic_event_pending())
		return -EBUSY;

always_cleanup:
	hv_stimer_legacy_cleanup(cpu);

	hv_synic_disable_regs(cpu);

	return 0;
}