x86/kvm: Add kexec support for SEV Live Migration.
[platform/kernel/linux-starfive.git] / arch/x86/kernel/kvm.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * KVM paravirt_ops implementation
4  *
5  * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
6  * Copyright IBM Corporation, 2007
7  *   Authors: Anthony Liguori <aliguori@us.ibm.com>
8  */
9
10 #define pr_fmt(fmt) "kvm-guest: " fmt
11
12 #include <linux/context_tracking.h>
13 #include <linux/init.h>
14 #include <linux/irq.h>
15 #include <linux/kernel.h>
16 #include <linux/kvm_para.h>
17 #include <linux/cpu.h>
18 #include <linux/mm.h>
19 #include <linux/highmem.h>
20 #include <linux/hardirq.h>
21 #include <linux/notifier.h>
22 #include <linux/reboot.h>
23 #include <linux/hash.h>
24 #include <linux/sched.h>
25 #include <linux/slab.h>
26 #include <linux/kprobes.h>
27 #include <linux/nmi.h>
28 #include <linux/swait.h>
29 #include <linux/syscore_ops.h>
30 #include <linux/efi.h>
31 #include <asm/timer.h>
32 #include <asm/cpu.h>
33 #include <asm/traps.h>
34 #include <asm/desc.h>
35 #include <asm/tlbflush.h>
36 #include <asm/apic.h>
37 #include <asm/apicdef.h>
38 #include <asm/hypervisor.h>
39 #include <asm/tlb.h>
40 #include <asm/cpuidle_haltpoll.h>
41 #include <asm/ptrace.h>
42 #include <asm/reboot.h>
43 #include <asm/svm.h>
44 #include <asm/e820/api.h>
45
46 DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
47
48 static int kvmapf = 1;
49
50 static int __init parse_no_kvmapf(char *arg)
51 {
52         kvmapf = 0;
53         return 0;
54 }
55
56 early_param("no-kvmapf", parse_no_kvmapf);
57
58 static int steal_acc = 1;
59 static int __init parse_no_stealacc(char *arg)
60 {
61         steal_acc = 0;
62         return 0;
63 }
64
65 early_param("no-steal-acc", parse_no_stealacc);
66
67 static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
68 DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
69 static int has_steal_clock = 0;
70
71 /*
72  * No need for any "IO delay" on KVM
73  */
74 static void kvm_io_delay(void)
75 {
76 }
77
78 #define KVM_TASK_SLEEP_HASHBITS 8
79 #define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)
80
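/*
 * Async #PF sleepers: a task that hits a "page not present" async fault is
 * parked on a wait node keyed by the token the host supplied.  The later
 * "page ready" notification looks the token up here and wakes the sleeper;
 * if the wakeup arrives before the task managed to queue itself, a dummy
 * node is left behind so the fault path knows not to sleep at all.
 */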
81 struct kvm_task_sleep_node {
82         struct hlist_node link;
83         struct swait_queue_head wq;
84         u32 token;
85         int cpu;
86 };
87
88 static struct kvm_task_sleep_head {
89         raw_spinlock_t lock;
90         struct hlist_head list;
91 } async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];
92
93 static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
94                                                   u32 token)
95 {
96         struct hlist_node *p;
97
98         hlist_for_each(p, &b->list) {
99                 struct kvm_task_sleep_node *n =
100                         hlist_entry(p, typeof(*n), link);
101                 if (n->token == token)
102                         return n;
103         }
104
105         return NULL;
106 }
107
108 static bool kvm_async_pf_queue_task(u32 token, struct kvm_task_sleep_node *n)
109 {
110         u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
111         struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
112         struct kvm_task_sleep_node *e;
113
114         raw_spin_lock(&b->lock);
115         e = _find_apf_task(b, token);
116         if (e) {
117                 /* dummy entry exists -> wake up was delivered ahead of PF */
118                 hlist_del(&e->link);
119                 raw_spin_unlock(&b->lock);
120                 kfree(e);
121                 return false;
122         }
123
124         n->token = token;
125         n->cpu = smp_processor_id();
126         init_swait_queue_head(&n->wq);
127         hlist_add_head(&n->link, &b->list);
128         raw_spin_unlock(&b->lock);
129         return true;
130 }
131
132 /*
133  * kvm_async_pf_task_wait_schedule - Wait for pagefault to be handled
134  * @token:      Token to identify the sleep node entry
135  *
136  * Invoked from the async pagefault handling code or from the VM exit page
137  * fault handler. In both cases RCU is watching.
138  */
139 void kvm_async_pf_task_wait_schedule(u32 token)
140 {
141         struct kvm_task_sleep_node n;
142         DECLARE_SWAITQUEUE(wait);
143
144         lockdep_assert_irqs_disabled();
145
146         if (!kvm_async_pf_queue_task(token, &n))
147                 return;
148
149         for (;;) {
150                 prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
151                 if (hlist_unhashed(&n.link))
152                         break;
153
154                 local_irq_enable();
155                 schedule();
156                 local_irq_disable();
157         }
158         finish_swait(&n.wq, &wait);
159 }
160 EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait_schedule);
161
162 static void apf_task_wake_one(struct kvm_task_sleep_node *n)
163 {
164         hlist_del_init(&n->link);
165         if (swq_has_sleeper(&n->wq))
166                 swake_up_one(&n->wq);
167 }
168
169 static void apf_task_wake_all(void)
170 {
171         int i;
172
173         for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
174                 struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
175                 struct kvm_task_sleep_node *n;
176                 struct hlist_node *p, *next;
177
178                 raw_spin_lock(&b->lock);
179                 hlist_for_each_safe(p, next, &b->list) {
180                         n = hlist_entry(p, typeof(*n), link);
181                         if (n->cpu == smp_processor_id())
182                                 apf_task_wake_one(n);
183                 }
184                 raw_spin_unlock(&b->lock);
185         }
186 }
187
188 void kvm_async_pf_task_wake(u32 token)
189 {
190         u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
191         struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
192         struct kvm_task_sleep_node *n;
193
194         if (token == ~0) {
195                 apf_task_wake_all();
196                 return;
197         }
198
199 again:
200         raw_spin_lock(&b->lock);
201         n = _find_apf_task(b, token);
202         if (!n) {
203                 /*
204                  * async PF was not yet handled.
205                  * Add dummy entry for the token.
206                  */
207                 n = kzalloc(sizeof(*n), GFP_ATOMIC);
208                 if (!n) {
209                         /*
210                          * Allocation failed! Busy wait while other cpu
211                          * handles async PF.
212                          */
213                         raw_spin_unlock(&b->lock);
214                         cpu_relax();
215                         goto again;
216                 }
217                 n->token = token;
218                 n->cpu = smp_processor_id();
219                 init_swait_queue_head(&n->wq);
220                 hlist_add_head(&n->link, &b->list);
221         } else {
222                 apf_task_wake_one(n);
223         }
224         raw_spin_unlock(&b->lock);
225         return;
226 }
227 EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
228
229 noinstr u32 kvm_read_and_reset_apf_flags(void)
230 {
231         u32 flags = 0;
232
233         if (__this_cpu_read(apf_reason.enabled)) {
234                 flags = __this_cpu_read(apf_reason.flags);
235                 __this_cpu_write(apf_reason.flags, 0);
236         }
237
238         return flags;
239 }
240 EXPORT_SYMBOL_GPL(kvm_read_and_reset_apf_flags);
241
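/*
 * Called from the page fault path: returns true if the fault was actually an
 * async #PF notification (apf_reason.flags was non-zero) and has been handled
 * here, false if it is an ordinary page fault that must be handled normally.
 */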
242 noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
243 {
244         u32 flags = kvm_read_and_reset_apf_flags();
245         irqentry_state_t state;
246
247         if (!flags)
248                 return false;
249
250         state = irqentry_enter(regs);
251         instrumentation_begin();
252
253         /*
254          * If the host managed to inject an async #PF into an interrupt
255          * disabled region, then die hard as this is not going to end well
256          * and the host side is seriously broken.
257          */
258         if (unlikely(!(regs->flags & X86_EFLAGS_IF)))
259                 panic("Host injected async #PF in interrupt disabled region\n");
260
261         if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
262                 if (unlikely(!(user_mode(regs))))
263                         panic("Host injected async #PF in kernel mode\n");
264                 /* Page is swapped out by the host. */
265                 kvm_async_pf_task_wait_schedule(token);
266         } else {
267                 WARN_ONCE(1, "Unexpected async PF flags: %x\n", flags);
268         }
269
270         instrumentation_end();
271         irqentry_exit(regs, state);
272         return true;
273 }
274
275 DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
276 {
277         struct pt_regs *old_regs = set_irq_regs(regs);
278         u32 token;
279
280         ack_APIC_irq();
281
282         inc_irq_stat(irq_hv_callback_count);
283
284         if (__this_cpu_read(apf_reason.enabled)) {
285                 token = __this_cpu_read(apf_reason.token);
286                 kvm_async_pf_task_wake(token);
287                 __this_cpu_write(apf_reason.token, 0);
288                 wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1);
289         }
290
291         set_irq_regs(old_regs);
292 }
293
294 static void __init paravirt_ops_setup(void)
295 {
296         pv_info.name = "KVM";
297
298         if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
299                 pv_ops.cpu.io_delay = kvm_io_delay;
300
301 #ifdef CONFIG_X86_IO_APIC
302         no_timer_check = 1;
303 #endif
304 }
305
306 static void kvm_register_steal_time(void)
307 {
308         int cpu = smp_processor_id();
309         struct kvm_steal_time *st = &per_cpu(steal_time, cpu);
310
311         if (!has_steal_clock)
312                 return;
313
314         wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
315         pr_info("stealtime: cpu %d, msr %llx\n", cpu,
316                 (unsigned long long) slow_virt_to_phys(st));
317 }
318
319 static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;
320
321 static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
322 {
323         /**
324          * This relies on __test_and_clear_bit to modify the memory
325          * in a way that is atomic with respect to the local CPU.
326          * The hypervisor only accesses this memory from the local CPU so
327          * there's no need for lock or memory barriers.
328          * An optimization barrier is implied in apic write.
329          */
330         if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
331                 return;
332         apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
333 }
334
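/*
 * Per-CPU paravirt feature setup: hand the host the (decrypted) apf_reason
 * buffer and HYPERVISOR_CALLBACK_VECTOR for async page faults, then register
 * the PV EOI word and the steal time area through their respective MSRs.
 */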
335 static void kvm_guest_cpu_init(void)
336 {
337         if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
338                 u64 pa;
339
340                 WARN_ON_ONCE(!static_branch_likely(&kvm_async_pf_enabled));
341
342                 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
343                 pa |= KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;
344
345                 if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
346                         pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
347
348                 wrmsrl(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR);
349
350                 wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
351                 __this_cpu_write(apf_reason.enabled, 1);
352                 pr_info("setup async PF for cpu %d\n", smp_processor_id());
353         }
354
355         if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
356                 unsigned long pa;
357
358                 /* Size alignment is implied but just to make it explicit. */
359                 BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
360                 __this_cpu_write(kvm_apic_eoi, 0);
361                 pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
362                         | KVM_MSR_ENABLED;
363                 wrmsrl(MSR_KVM_PV_EOI_EN, pa);
364         }
365
366         if (has_steal_clock)
367                 kvm_register_steal_time();
368 }
369
370 static void kvm_pv_disable_apf(void)
371 {
372         if (!__this_cpu_read(apf_reason.enabled))
373                 return;
374
375         wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
376         __this_cpu_write(apf_reason.enabled, 0);
377
378         pr_info("disable async PF for cpu %d\n", smp_processor_id());
379 }
380
381 static void kvm_disable_steal_time(void)
382 {
383         if (!has_steal_clock)
384                 return;
385
386         wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
387 }
388
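/*
 * The host increments steal_time.version before and after updating the
 * record, so an odd version, or one that changed between the two reads,
 * means the snapshot raced with an update and must be retried.
 */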
389 static u64 kvm_steal_clock(int cpu)
390 {
391         u64 steal;
392         struct kvm_steal_time *src;
393         int version;
394
395         src = &per_cpu(steal_time, cpu);
396         do {
397                 version = src->version;
398                 virt_rmb();
399                 steal = src->steal;
400                 virt_rmb();
401         } while ((version & 1) || (version != src->version));
402
403         return steal;
404 }
405
406 static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
407 {
408         early_set_memory_decrypted((unsigned long) ptr, size);
409 }
410
411 /*
412  * Iterate through all possible CPUs and map the memory region pointed
413  * by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
414  *
415  * Note: we iterate through all possible CPUs to ensure that CPUs
416  * hotplugged will have their per-cpu variable already mapped as
417  * decrypted.
418  */
419 static void __init sev_map_percpu_data(void)
420 {
421         int cpu;
422
423         if (!sev_active())
424                 return;
425
426         for_each_possible_cpu(cpu) {
427                 __set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
428                 __set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
429                 __set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
430         }
431 }
432
433 static void kvm_guest_cpu_offline(bool shutdown)
434 {
435         kvm_disable_steal_time();
436         if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
437                 wrmsrl(MSR_KVM_PV_EOI_EN, 0);
438         if (kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL))
439                 wrmsrl(MSR_KVM_MIGRATION_CONTROL, 0);
440         kvm_pv_disable_apf();
441         if (!shutdown)
442                 apf_task_wake_all();
443         kvmclock_disable();
444 }
445
446 static int kvm_cpu_online(unsigned int cpu)
447 {
448         unsigned long flags;
449
450         local_irq_save(flags);
451         kvm_guest_cpu_init();
452         local_irq_restore(flags);
453         return 0;
454 }
455
456 #ifdef CONFIG_SMP
457
458 static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
459
460 static bool pv_tlb_flush_supported(void)
461 {
462         return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
463                 !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
464                 kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
465 }
466
467 static bool pv_ipi_supported(void)
468 {
469         return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI);
470 }
471
472 static bool pv_sched_yield_supported(void)
473 {
474         return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
475                 !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
476                 kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
477 }
478
479 #define KVM_IPI_CLUSTER_SIZE    (2 * BITS_PER_LONG)
480
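/*
 * Send IPIs via the KVM_HC_SEND_IPI hypercall.  Destination APIC IDs are
 * encoded as a bitmap relative to 'min'; once an APIC ID no longer fits in
 * the KVM_IPI_CLUSTER_SIZE window, the accumulated bitmap is flushed with a
 * hypercall and a new window is started.
 */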
481 static void __send_ipi_mask(const struct cpumask *mask, int vector)
482 {
483         unsigned long flags;
484         int cpu, apic_id, icr;
485         int min = 0, max = 0;
486 #ifdef CONFIG_X86_64
487         __uint128_t ipi_bitmap = 0;
488 #else
489         u64 ipi_bitmap = 0;
490 #endif
491         long ret;
492
493         if (cpumask_empty(mask))
494                 return;
495
496         local_irq_save(flags);
497
498         switch (vector) {
499         default:
500                 icr = APIC_DM_FIXED | vector;
501                 break;
502         case NMI_VECTOR:
503                 icr = APIC_DM_NMI;
504                 break;
505         }
506
507         for_each_cpu(cpu, mask) {
508                 apic_id = per_cpu(x86_cpu_to_apicid, cpu);
509                 if (!ipi_bitmap) {
510                         min = max = apic_id;
511                 } else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
512                         ipi_bitmap <<= min - apic_id;
513                         min = apic_id;
514                 } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
515                         max = apic_id < max ? max : apic_id;
516                 } else {
517                         ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
518                                 (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
519                         WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld",
520                                   ret);
521                         min = max = apic_id;
522                         ipi_bitmap = 0;
523                 }
524                 __set_bit(apic_id - min, (unsigned long *)&ipi_bitmap);
525         }
526
527         if (ipi_bitmap) {
528                 ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
529                         (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
530                 WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld",
531                           ret);
532         }
533
534         local_irq_restore(flags);
535 }
536
537 static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
538 {
539         __send_ipi_mask(mask, vector);
540 }
541
542 static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
543 {
544         unsigned int this_cpu = smp_processor_id();
545         struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
546         const struct cpumask *local_mask;
547
548         cpumask_copy(new_mask, mask);
549         cpumask_clear_cpu(this_cpu, new_mask);
550         local_mask = new_mask;
551         __send_ipi_mask(local_mask, vector);
552 }
553
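/*
 * On EFI-booted SEV guests the firmware records whether live migration was
 * enabled in the "SevLiveMigrationEnabled" EFI variable.  Read it once the
 * EFI runtime is available and, if set, tell the host that this guest is
 * ready to be migrated by writing KVM_MIGRATION_READY to
 * MSR_KVM_MIGRATION_CONTROL.
 */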
554 static int __init setup_efi_kvm_sev_migration(void)
555 {
556         efi_char16_t efi_sev_live_migration_enabled[] = L"SevLiveMigrationEnabled";
557         efi_guid_t efi_variable_guid = AMD_SEV_MEM_ENCRYPT_GUID;
558         efi_status_t status;
559         unsigned long size;
560         bool enabled;
561
562         if (!sev_active() ||
563             !kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL))
564                 return 0;
565
566         if (!efi_enabled(EFI_BOOT))
567                 return 0;
568
569         if (!efi_enabled(EFI_RUNTIME_SERVICES)) {
570                 pr_info("%s: EFI runtime services are not enabled\n", __func__);
571                 return 0;
572         }
573
574         size = sizeof(enabled);
575
576         /* Get variable contents into buffer */
577         status = efi.get_variable(efi_sev_live_migration_enabled,
578                                   &efi_variable_guid, NULL, &size, &enabled);
579
580         if (status == EFI_NOT_FOUND) {
581                 pr_info("%s: EFI live migration variable not found\n", __func__);
582                 return 0;
583         }
584
585         if (status != EFI_SUCCESS) {
586                 pr_info("%s: EFI variable retrieval failed\n", __func__);
587                 return 0;
588         }
589
590         if (enabled == 0) {
591                 pr_info("%s: live migration disabled in EFI\n", __func__);
592                 return 0;
593         }
594
595         pr_info("%s: live migration enabled in EFI\n", __func__);
596         wrmsrl(MSR_KVM_MIGRATION_CONTROL, KVM_MIGRATION_READY);
597
598         return 1;
599 }
600
601 late_initcall(setup_efi_kvm_sev_migration);
602
603 /*
604  * Set the IPI entry points
605  */
606 static void kvm_setup_pv_ipi(void)
607 {
608         apic->send_IPI_mask = kvm_send_ipi_mask;
609         apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
610         pr_info("setup PV IPIs\n");
611 }
612
613 static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
614 {
615         int cpu;
616
617         native_send_call_func_ipi(mask);
618
619         /* Make sure other vCPUs get a chance to run if they need to. */
620         for_each_cpu(cpu, mask) {
621                 if (vcpu_is_preempted(cpu)) {
622                         kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
623                         break;
624                 }
625         }
626 }
627
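/*
 * PV remote TLB flush: instead of sending an IPI to a preempted vCPU, set
 * KVM_VCPU_FLUSH_TLB in its steal_time.preempted field and let the host
 * flush the TLB when that vCPU is scheduled in again; only currently
 * running vCPUs receive the flush IPI.
 */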
628 static void kvm_flush_tlb_multi(const struct cpumask *cpumask,
629                         const struct flush_tlb_info *info)
630 {
631         u8 state;
632         int cpu;
633         struct kvm_steal_time *src;
634         struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
635
636         cpumask_copy(flushmask, cpumask);
637         /*
638          * We have to call flush only on online vCPUs, and queue
639          * flush_on_enter for preempted vCPUs.
640          */
641         for_each_cpu(cpu, flushmask) {
642                 /*
643                  * The local vCPU is never preempted, so we do not explicitly
644                  * skip the check for the local vCPU - it will never be cleared
645                  * from flushmask.
646                  */
647                 src = &per_cpu(steal_time, cpu);
648                 state = READ_ONCE(src->preempted);
649                 if ((state & KVM_VCPU_PREEMPTED)) {
650                         if (try_cmpxchg(&src->preempted, &state,
651                                         state | KVM_VCPU_FLUSH_TLB))
652                                 __cpumask_clear_cpu(cpu, flushmask);
653                 }
654         }
655
656         native_flush_tlb_multi(flushmask, info);
657 }
658
659 static __init int kvm_alloc_cpumask(void)
660 {
661         int cpu;
662
663         if (!kvm_para_available() || nopv)
664                 return 0;
665
666         if (pv_tlb_flush_supported() || pv_ipi_supported())
667                 for_each_possible_cpu(cpu) {
668                         zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
669                                 GFP_KERNEL, cpu_to_node(cpu));
670                 }
671
672         return 0;
673 }
674 arch_initcall(kvm_alloc_cpumask);
675
676 static void __init kvm_smp_prepare_boot_cpu(void)
677 {
678         /*
679          * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
680          * shares the guest physical address with the hypervisor.
681          */
682         sev_map_percpu_data();
683
684         kvm_guest_cpu_init();
685         native_smp_prepare_boot_cpu();
686         kvm_spinlock_init();
687 }
688
689 static int kvm_cpu_down_prepare(unsigned int cpu)
690 {
691         unsigned long flags;
692
693         local_irq_save(flags);
694         kvm_guest_cpu_offline(false);
695         local_irq_restore(flags);
696         return 0;
697 }
698
699 #endif
700
701 static int kvm_suspend(void)
702 {
703         kvm_guest_cpu_offline(false);
704
705         return 0;
706 }
707
708 static void kvm_resume(void)
709 {
710         kvm_cpu_online(raw_smp_processor_id());
711 }
712
713 static struct syscore_ops kvm_syscore_ops = {
714         .suspend        = kvm_suspend,
715         .resume         = kvm_resume,
716 };
717
718 static void kvm_pv_guest_cpu_reboot(void *unused)
719 {
720         kvm_guest_cpu_offline(true);
721 }
722
723 static int kvm_pv_reboot_notify(struct notifier_block *nb,
724                                 unsigned long code, void *unused)
725 {
726         if (code == SYS_RESTART)
727                 on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
728         return NOTIFY_DONE;
729 }
730
731 static struct notifier_block kvm_pv_reboot_nb = {
732         .notifier_call = kvm_pv_reboot_notify,
733 };
734
735 /*
736  * After a PV feature is registered, the host will keep writing to the
737  * registered memory location. If the guest happens to shutdown, this memory
738  * won't be valid. In cases like kexec, where a new kernel is installed, this
739  * means a random memory location would keep being written to.
740  */
741 #ifdef CONFIG_KEXEC_CORE
742 static void kvm_crash_shutdown(struct pt_regs *regs)
743 {
744         kvm_guest_cpu_offline(true);
745         native_machine_crash_shutdown(regs);
746 }
747 #endif
748
749 static void __init kvm_guest_init(void)
750 {
751         int i;
752
753         paravirt_ops_setup();
754         register_reboot_notifier(&kvm_pv_reboot_nb);
755         for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
756                 raw_spin_lock_init(&async_pf_sleepers[i].lock);
757
758         if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
759                 has_steal_clock = 1;
760                 static_call_update(pv_steal_clock, kvm_steal_clock);
761         }
762
763         if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
764                 apic_set_eoi_write(kvm_guest_apic_eoi_write);
765
766         if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
767                 static_branch_enable(&kvm_async_pf_enabled);
768                 alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_kvm_asyncpf_interrupt);
769         }
770
771 #ifdef CONFIG_SMP
772         if (pv_tlb_flush_supported()) {
773                 pv_ops.mmu.flush_tlb_multi = kvm_flush_tlb_multi;
774                 pv_ops.mmu.tlb_remove_table = tlb_remove_table;
775                 pr_info("KVM setup pv remote TLB flush\n");
776         }
777
778         smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
779         if (pv_sched_yield_supported()) {
780                 smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
781                 pr_info("setup PV sched yield\n");
782         }
783         if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
784                                       kvm_cpu_online, kvm_cpu_down_prepare) < 0)
785                 pr_err("failed to install cpu hotplug callbacks\n");
786 #else
787         sev_map_percpu_data();
788         kvm_guest_cpu_init();
789 #endif
790
791 #ifdef CONFIG_KEXEC_CORE
792         machine_ops.crash_shutdown = kvm_crash_shutdown;
793 #endif
794
795         register_syscore_ops(&kvm_syscore_ops);
796
797         /*
798          * Hard lockup detection is enabled by default. Disable it, as guests
799          * can get false positives too easily, for example if the host is
800          * overcommitted.
801          */
802         hardlockup_detector_disable();
803 }
804
805 static noinline uint32_t __kvm_cpuid_base(void)
806 {
807         if (boot_cpu_data.cpuid_level < 0)
808                 return 0;       /* So we don't blow up on old processors */
809
810         if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
811                 return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);
812
813         return 0;
814 }
815
816 static inline uint32_t kvm_cpuid_base(void)
817 {
818         static int kvm_cpuid_base = -1;
819
820         if (kvm_cpuid_base == -1)
821                 kvm_cpuid_base = __kvm_cpuid_base();
822
823         return kvm_cpuid_base;
824 }
825
826 bool kvm_para_available(void)
827 {
828         return kvm_cpuid_base() != 0;
829 }
830 EXPORT_SYMBOL_GPL(kvm_para_available);
831
832 unsigned int kvm_arch_para_features(void)
833 {
834         return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
835 }
836
837 unsigned int kvm_arch_para_hints(void)
838 {
839         return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES);
840 }
841 EXPORT_SYMBOL_GPL(kvm_arch_para_hints);
842
843 static uint32_t __init kvm_detect(void)
844 {
845         return kvm_cpuid_base();
846 }
847
848 static void __init kvm_apic_init(void)
849 {
850 #ifdef CONFIG_SMP
851         if (pv_ipi_supported())
852                 kvm_setup_pv_ipi();
853 #endif
854 }
855
856 static bool __init kvm_msi_ext_dest_id(void)
857 {
858         return kvm_para_has_feature(KVM_FEATURE_MSI_EXT_DEST_ID);
859 }
860
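/*
 * Notify the host via KVM_HC_MAP_GPA_RANGE whenever the guest changes the
 * encryption (C-bit) status of a page range, so the host can keep its
 * shared pages list in sync for SEV live migration.
 */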
861 static void kvm_sev_hc_page_enc_status(unsigned long pfn, int npages, bool enc)
862 {
863         kvm_sev_hypercall3(KVM_HC_MAP_GPA_RANGE, pfn << PAGE_SHIFT, npages,
864                            KVM_MAP_GPA_RANGE_ENC_STAT(enc) | KVM_MAP_GPA_RANGE_PAGE_SZ_4K);
865 }
866
867 static void __init kvm_init_platform(void)
868 {
869         if (sev_active() &&
870             kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL)) {
871                 unsigned long nr_pages;
872                 int i;
873
874                 pv_ops.mmu.notify_page_enc_status_changed =
875                         kvm_sev_hc_page_enc_status;
876
877                 /*
878                  * Reset the host's shared pages list related to kernel
879                  * specific page encryption status settings before we load a
880                  * new kernel by kexec. Reset the page encryption status
881                  * during early boot intead of just before kexec to avoid SMP
882                  * during early boot instead of just before kexec to avoid SMP
883                  * NOTE: We cannot reset the complete shared pages list
884                  * here as we need to retain the UEFI/OVMF firmware
885                  * specific settings.
886                  */
887
888                 for (i = 0; i < e820_table->nr_entries; i++) {
889                         struct e820_entry *entry = &e820_table->entries[i];
890
891                         if (entry->type != E820_TYPE_RAM)
892                                 continue;
893
894                         nr_pages = DIV_ROUND_UP(entry->size, PAGE_SIZE);
895
896                         kvm_sev_hypercall3(KVM_HC_MAP_GPA_RANGE, entry->addr,
897                                        nr_pages,
898                                        KVM_MAP_GPA_RANGE_ENCRYPTED | KVM_MAP_GPA_RANGE_PAGE_SZ_4K);
899                 }
900
901                 /*
902                  * Ensure that _bss_decrypted section is marked as decrypted in the
903                  * shared pages list.
904                  */
905                 nr_pages = DIV_ROUND_UP(__end_bss_decrypted - __start_bss_decrypted,
906                                         PAGE_SIZE);
907                 early_set_mem_enc_dec_hypercall((unsigned long)__start_bss_decrypted,
908                                                 nr_pages, 0);
909
910                 /*
911                  * If not booted using EFI, enable Live migration support.
912                  */
913                 if (!efi_enabled(EFI_BOOT))
914                         wrmsrl(MSR_KVM_MIGRATION_CONTROL,
915                                KVM_MIGRATION_READY);
916         }
917         kvmclock_init();
918         x86_platform.apic_post_init = kvm_apic_init;
919 }
920
921 #if defined(CONFIG_AMD_MEM_ENCRYPT)
922 static void kvm_sev_es_hcall_prepare(struct ghcb *ghcb, struct pt_regs *regs)
923 {
924         /* RAX and CPL are already in the GHCB */
925         ghcb_set_rbx(ghcb, regs->bx);
926         ghcb_set_rcx(ghcb, regs->cx);
927         ghcb_set_rdx(ghcb, regs->dx);
928         ghcb_set_rsi(ghcb, regs->si);
929 }
930
931 static bool kvm_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs)
932 {
933         /* No checking of the return state needed */
934         return true;
935 }
936 #endif
937
938 const __initconst struct hypervisor_x86 x86_hyper_kvm = {
939         .name                           = "KVM",
940         .detect                         = kvm_detect,
941         .type                           = X86_HYPER_KVM,
942         .init.guest_late_init           = kvm_guest_init,
943         .init.x2apic_available          = kvm_para_available,
944         .init.msi_ext_dest_id           = kvm_msi_ext_dest_id,
945         .init.init_platform             = kvm_init_platform,
946 #if defined(CONFIG_AMD_MEM_ENCRYPT)
947         .runtime.sev_es_hcall_prepare   = kvm_sev_es_hcall_prepare,
948         .runtime.sev_es_hcall_finish    = kvm_sev_es_hcall_finish,
949 #endif
950 };
951
952 static __init int activate_jump_labels(void)
953 {
954         if (has_steal_clock) {
955                 static_key_slow_inc(&paravirt_steal_enabled);
956                 if (steal_acc)
957                         static_key_slow_inc(&paravirt_steal_rq_enabled);
958         }
959
960         return 0;
961 }
962 arch_initcall(activate_jump_labels);
963
964 #ifdef CONFIG_PARAVIRT_SPINLOCKS
965
966 /* Kick a cpu by its apicid. Used to wake up a halted vcpu */
967 static void kvm_kick_cpu(int cpu)
968 {
969         int apicid;
970         unsigned long flags = 0;
971
972         apicid = per_cpu(x86_cpu_to_apicid, cpu);
973         kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
974 }
975
976 #include <asm/qspinlock.h>
977
978 static void kvm_wait(u8 *ptr, u8 val)
979 {
980         if (in_nmi())
981                 return;
982
983         /*
984          * Halt until it's our turn and we are kicked. Note that we do a safe halt
985          * for the irq-enabled case to avoid hanging when the lock info is overwritten
986          * in the irq spinlock slowpath and no spurious interrupt occurs to save us.
987          */
988         if (irqs_disabled()) {
989                 if (READ_ONCE(*ptr) == val)
990                         halt();
991         } else {
992                 local_irq_disable();
993
994                 /* safe_halt() will enable IRQ */
995                 if (READ_ONCE(*ptr) == val)
996                         safe_halt();
997                 else
998                         local_irq_enable();
999         }
1000 }
1001
1002 #ifdef CONFIG_X86_32
1003 __visible bool __kvm_vcpu_is_preempted(long cpu)
1004 {
1005         struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
1006
1007         return !!(src->preempted & KVM_VCPU_PREEMPTED);
1008 }
1009 PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);
1010
1011 #else
1012
1013 #include <asm/asm-offsets.h>
1014
1015 extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);
1016
1017 /*
1018  * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
1019  * restoring to/from the stack.
1020  */
1021 asm(
1022 ".pushsection .text;"
1023 ".global __raw_callee_save___kvm_vcpu_is_preempted;"
1024 ".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
1025 "__raw_callee_save___kvm_vcpu_is_preempted:"
1026 "movq   __per_cpu_offset(,%rdi,8), %rax;"
1027 "cmpb   $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
1028 "setne  %al;"
1029 "ret;"
1030 ".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
1031 ".popsection");
1032
1033 #endif
1034
1035 /*
1036  * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
1037  */
1038 void __init kvm_spinlock_init(void)
1039 {
1040         /*
1041          * In case host doesn't support KVM_FEATURE_PV_UNHALT there is still an
1042          * advantage of keeping virt_spin_lock_key enabled: virt_spin_lock() is
1043          * preferred over native qspinlock when vCPU is preempted.
1044          */
1045         if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) {
1046                 pr_info("PV spinlocks disabled, no host support\n");
1047                 return;
1048         }
1049
1050         /*
1051          * Disable PV spinlocks and use native qspinlock when dedicated pCPUs
1052          * are available.
1053          */
1054         if (kvm_para_has_hint(KVM_HINTS_REALTIME)) {
1055                 pr_info("PV spinlocks disabled with KVM_HINTS_REALTIME hints\n");
1056                 goto out;
1057         }
1058
1059         if (num_possible_cpus() == 1) {
1060                 pr_info("PV spinlocks disabled, single CPU\n");
1061                 goto out;
1062         }
1063
1064         if (nopvspin) {
1065                 pr_info("PV spinlocks disabled, forced by \"nopvspin\" parameter\n");
1066                 goto out;
1067         }
1068
1069         pr_info("PV spinlocks enabled\n");
1070
1071         __pv_init_lock_hash();
1072         pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
1073         pv_ops.lock.queued_spin_unlock =
1074                 PV_CALLEE_SAVE(__pv_queued_spin_unlock);
1075         pv_ops.lock.wait = kvm_wait;
1076         pv_ops.lock.kick = kvm_kick_cpu;
1077
1078         if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
1079                 pv_ops.lock.vcpu_is_preempted =
1080                         PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
1081         }
1082         /*
1083          * When PV spinlocks are enabled they are preferred over
1084          * virt_spin_lock(), so virt_spin_lock_key's value is meaningless.
1085          * Just disable it anyway.
1086          */
1087 out:
1088         static_branch_disable(&virt_spin_lock_key);
1089 }
1090
1091 #endif  /* CONFIG_PARAVIRT_SPINLOCKS */
1092
1093 #ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
1094
1095 static void kvm_disable_host_haltpoll(void *i)
1096 {
1097         wrmsrl(MSR_KVM_POLL_CONTROL, 0);
1098 }
1099
1100 static void kvm_enable_host_haltpoll(void *i)
1101 {
1102         wrmsrl(MSR_KVM_POLL_CONTROL, 1);
1103 }
1104
1105 void arch_haltpoll_enable(unsigned int cpu)
1106 {
1107         if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) {
1108                 pr_err_once("host does not support poll control\n");
1109                 pr_err_once("host upgrade recommended\n");
1110                 return;
1111         }
1112
1113         /* Enabling guest halt poll disables host halt poll */
1114         smp_call_function_single(cpu, kvm_disable_host_haltpoll, NULL, 1);
1115 }
1116 EXPORT_SYMBOL_GPL(arch_haltpoll_enable);
1117
1118 void arch_haltpoll_disable(unsigned int cpu)
1119 {
1120         if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
1121                 return;
1122
1123         /* Disabling guest halt poll re-enables host halt poll */
1124         smp_call_function_single(cpu, kvm_enable_host_haltpoll, NULL, 1);
1125 }
1126 EXPORT_SYMBOL_GPL(arch_haltpoll_disable);
1127 #endif