2 * s390host.c -- hosting zSeries kernel virtual machines
4 * Copyright IBM Corp. 2008,2009
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
16 #include <linux/compiler.h>
17 #include <linux/err.h>
19 #include <linux/hrtimer.h>
20 #include <linux/init.h>
21 #include <linux/kvm.h>
22 #include <linux/kvm_host.h>
23 #include <linux/module.h>
24 #include <linux/slab.h>
25 #include <linux/timer.h>
26 #include <asm/asm-offsets.h>
27 #include <asm/lowcore.h>
28 #include <asm/pgtable.h>
30 #include <asm/system.h>
34 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
36 struct kvm_stats_debugfs_item debugfs_entries[] = {
37 { "userspace_handled", VCPU_STAT(exit_userspace) },
38 { "exit_null", VCPU_STAT(exit_null) },
39 { "exit_validity", VCPU_STAT(exit_validity) },
40 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
41 { "exit_external_request", VCPU_STAT(exit_external_request) },
42 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
43 { "exit_instruction", VCPU_STAT(exit_instruction) },
44 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
45 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
46 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
47 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
48 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
49 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
50 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
51 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
52 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
53 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
54 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
55 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
56 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
57 { "instruction_spx", VCPU_STAT(instruction_spx) },
58 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
59 { "instruction_stap", VCPU_STAT(instruction_stap) },
60 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
61 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
62 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
63 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
64 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
65 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
66 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
67 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
68 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
69 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
70 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
71 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
72 { "diagnose_44", VCPU_STAT(diagnose_44) },
76 static unsigned long long *facilities;
/* Section: not file related */

/* No hardware switch needed: SIE is always available on s390. */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}
111 /* Section: device related */
112 long kvm_arch_dev_ioctl(struct file *filp,
113 unsigned int ioctl, unsigned long arg)
115 if (ioctl == KVM_S390_ENABLE_SIE)
116 return s390_enable_sie();
120 int kvm_dev_ioctl_check_extension(long ext)
125 case KVM_CAP_S390_PSW:
/* Section: vm related */

/*
 * Get (and clear) the dirty memory log for a memory slot.
 * Not implemented on s390; report success with an empty log.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
144 long kvm_arch_vm_ioctl(struct file *filp,
145 unsigned int ioctl, unsigned long arg)
147 struct kvm *kvm = filp->private_data;
148 void __user *argp = (void __user *)arg;
152 case KVM_S390_INTERRUPT: {
153 struct kvm_s390_interrupt s390int;
156 if (copy_from_user(&s390int, argp, sizeof(s390int)))
158 r = kvm_s390_inject_vm(kvm, &s390int);
168 int kvm_arch_init_vm(struct kvm *kvm)
173 rc = s390_enable_sie();
177 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
181 sprintf(debug_name, "kvm-%u", current->pid);
183 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
187 spin_lock_init(&kvm->arch.float_int.lock);
188 INIT_LIST_HEAD(&kvm->arch.float_int.list);
190 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
191 VM_EVENT(kvm, 3, "%s", "vm created");
195 free_page((unsigned long)(kvm->arch.sca));
200 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
202 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
203 clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
204 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
205 (__u64) vcpu->arch.sie_block)
206 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
208 free_page((unsigned long)(vcpu->arch.sie_block));
209 kvm_vcpu_uninit(vcpu);
213 static void kvm_free_vcpus(struct kvm *kvm)
216 struct kvm_vcpu *vcpu;
218 kvm_for_each_vcpu(i, vcpu, kvm)
219 kvm_arch_vcpu_destroy(vcpu);
221 mutex_lock(&kvm->lock);
222 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
223 kvm->vcpus[i] = NULL;
225 atomic_set(&kvm->online_vcpus, 0);
226 mutex_unlock(&kvm->lock);
229 void kvm_arch_sync_events(struct kvm *kvm)
233 void kvm_arch_destroy_vm(struct kvm *kvm)
236 free_page((unsigned long)(kvm->arch.sca));
237 debug_unregister(kvm->arch.dbf);
/* Section: vcpu related */

/* Nothing architecture-specific to do at generic vcpu init/uninit time. */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}
251 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
253 save_fp_regs(&vcpu->arch.host_fpregs);
254 save_access_regs(vcpu->arch.host_acrs);
255 vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
256 restore_fp_regs(&vcpu->arch.guest_fpregs);
257 restore_access_regs(vcpu->arch.guest_acrs);
260 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
262 save_fp_regs(&vcpu->arch.guest_fpregs);
263 save_access_regs(vcpu->arch.guest_acrs);
264 restore_fp_regs(&vcpu->arch.host_fpregs);
265 restore_access_regs(vcpu->arch.host_acrs);
268 static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
270 /* this equals initial cpu reset in pop, but we don't switch to ESA */
271 vcpu->arch.sie_block->gpsw.mask = 0UL;
272 vcpu->arch.sie_block->gpsw.addr = 0UL;
273 vcpu->arch.sie_block->prefix = 0UL;
274 vcpu->arch.sie_block->ihcpu = 0xffff;
275 vcpu->arch.sie_block->cputm = 0UL;
276 vcpu->arch.sie_block->ckc = 0UL;
277 vcpu->arch.sie_block->todpr = 0;
278 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
279 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
280 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
281 vcpu->arch.guest_fpregs.fpc = 0;
282 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
283 vcpu->arch.sie_block->gbea = 1;
286 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
288 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
289 set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
290 vcpu->arch.sie_block->ecb = 6;
291 vcpu->arch.sie_block->eca = 0xC1002001U;
292 vcpu->arch.sie_block->fac = (int) (long) facilities;
293 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
294 tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
295 (unsigned long) vcpu);
296 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
297 get_cpu_id(&vcpu->arch.cpu_id);
298 vcpu->arch.cpu_id.version = 0xff;
302 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
305 struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
311 vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
312 get_zeroed_page(GFP_KERNEL);
314 if (!vcpu->arch.sie_block)
317 vcpu->arch.sie_block->icpua = id;
318 BUG_ON(!kvm->arch.sca);
319 if (!kvm->arch.sca->cpu[id].sda)
320 kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
321 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
322 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
323 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
325 spin_lock_init(&vcpu->arch.local_int.lock);
326 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
327 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
328 spin_lock(&kvm->arch.float_int.lock);
329 kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
330 init_waitqueue_head(&vcpu->arch.local_int.wq);
331 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
332 spin_unlock(&kvm->arch.float_int.lock);
334 rc = kvm_vcpu_init(vcpu, kvm, id);
336 goto out_free_sie_block;
337 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
338 vcpu->arch.sie_block);
342 free_page((unsigned long)(vcpu->arch.sie_block));
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

/* KVM_S390_INITIAL_RESET ioctl backend. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
362 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
364 memcpy(&vcpu->arch.guest_gprs, ®s->gprs, sizeof(regs->gprs));
368 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
370 memcpy(®s->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
374 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
375 struct kvm_sregs *sregs)
377 memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
378 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
382 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
383 struct kvm_sregs *sregs)
385 memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
386 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
390 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
392 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
393 vcpu->arch.guest_fpregs.fpc = fpu->fpc;
397 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
399 memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
400 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
404 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
408 if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
411 vcpu->run->psw_mask = psw.mask;
412 vcpu->run->psw_addr = psw.addr;
417 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
418 struct kvm_translation *tr)
420 return -EINVAL; /* not implemented yet */
423 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
424 struct kvm_guest_debug *dbg)
426 return -EINVAL; /* not implemented yet */
429 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
430 struct kvm_mp_state *mp_state)
432 return -EINVAL; /* not implemented yet */
435 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
436 struct kvm_mp_state *mp_state)
438 return -EINVAL; /* not implemented yet */
441 static void __vcpu_run(struct kvm_vcpu *vcpu)
443 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
448 if (test_thread_flag(TIF_MCCK_PENDING))
451 kvm_s390_deliver_pending_interrupts(vcpu);
453 vcpu->arch.sie_block->icptcode = 0;
457 VCPU_EVENT(vcpu, 6, "entering sie flags %x",
458 atomic_read(&vcpu->arch.sie_block->cpuflags));
459 if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
460 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
461 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
463 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
464 vcpu->arch.sie_block->icptcode);
469 memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
472 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
479 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
480 kvm_s390_vcpu_set_mem(vcpu);
482 /* verify, that memory has been registered */
483 if (!vcpu->arch.sie_block->gmslm) {
485 VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
489 if (vcpu->sigset_active)
490 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
492 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
494 BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
496 switch (kvm_run->exit_reason) {
497 case KVM_EXIT_S390_SIEIC:
498 case KVM_EXIT_UNKNOWN:
500 case KVM_EXIT_S390_RESET:
506 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
507 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
513 rc = kvm_handle_sie_intercept(vcpu);
514 } while (!signal_pending(current) && !rc);
516 if (rc == SIE_INTERCEPT_RERUNVCPU)
519 if (signal_pending(current) && !rc) {
520 kvm_run->exit_reason = KVM_EXIT_INTR;
524 if (rc == -EOPNOTSUPP) {
525 /* intercept cannot be handled in-kernel, prepare kvm-run */
526 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
527 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
528 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
529 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
533 if (rc == -EREMOTE) {
534 /* intercept was handled, but userspace support is needed
535 * kvm_run has been prepared by the handler */
539 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
540 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
542 if (vcpu->sigset_active)
543 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
545 vcpu->stat.exit_userspace++;
549 static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
550 unsigned long n, int prefix)
553 return copy_to_guest(vcpu, guestdest, from, n);
555 return copy_to_guest_absolute(vcpu, guestdest, from, n);
559 * store status at address
560 * we use have two special cases:
561 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
562 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
564 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
566 const unsigned char archmode = 1;
569 if (addr == KVM_S390_STORE_STATUS_NOADDR) {
570 if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
572 addr = SAVE_AREA_BASE;
574 } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
575 if (copy_to_guest(vcpu, 163ul, &archmode, 1))
577 addr = SAVE_AREA_BASE;
582 if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
583 vcpu->arch.guest_fpregs.fprs, 128, prefix))
586 if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
587 vcpu->arch.guest_gprs, 128, prefix))
590 if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
591 &vcpu->arch.sie_block->gpsw, 16, prefix))
594 if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
595 &vcpu->arch.sie_block->prefix, 4, prefix))
598 if (__guestcopy(vcpu,
599 addr + offsetof(struct save_area, fp_ctrl_reg),
600 &vcpu->arch.guest_fpregs.fpc, 4, prefix))
603 if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
604 &vcpu->arch.sie_block->todpr, 4, prefix))
607 if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
608 &vcpu->arch.sie_block->cputm, 8, prefix))
611 if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
612 &vcpu->arch.sie_block->ckc, 8, prefix))
615 if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
616 &vcpu->arch.guest_acrs, 64, prefix))
619 if (__guestcopy(vcpu,
620 addr + offsetof(struct save_area, ctrl_regs),
621 &vcpu->arch.sie_block->gcr, 128, prefix))
626 long kvm_arch_vcpu_ioctl(struct file *filp,
627 unsigned int ioctl, unsigned long arg)
629 struct kvm_vcpu *vcpu = filp->private_data;
630 void __user *argp = (void __user *)arg;
634 case KVM_S390_INTERRUPT: {
635 struct kvm_s390_interrupt s390int;
638 if (copy_from_user(&s390int, argp, sizeof(s390int)))
640 r = kvm_s390_inject_vcpu(vcpu, &s390int);
643 case KVM_S390_STORE_STATUS:
644 r = kvm_s390_vcpu_store_status(vcpu, arg);
646 case KVM_S390_SET_INITIAL_PSW: {
650 if (copy_from_user(&psw, argp, sizeof(psw)))
652 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
655 case KVM_S390_INITIAL_RESET:
656 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
664 /* Section: memory related */
665 int kvm_arch_prepare_memory_region(struct kvm *kvm,
666 struct kvm_memory_slot *memslot,
667 struct kvm_memory_slot old,
668 struct kvm_userspace_memory_region *mem,
671 /* A few sanity checks. We can have exactly one memory slot which has
672 to start at guest virtual zero and which has to be located at a
673 page boundary in userland and which has to end at a page boundary.
674 The memory in userland is ok to be fragmented into various different
675 vmas. It is okay to mmap() and munmap() stuff in this slot after
676 doing this call at any time */
681 if (mem->guest_phys_addr)
684 if (mem->userspace_addr & (PAGE_SIZE - 1))
687 if (mem->memory_size & (PAGE_SIZE - 1))
696 void kvm_arch_commit_memory_region(struct kvm *kvm,
697 struct kvm_userspace_memory_region *mem,
698 struct kvm_memory_slot old,
702 struct kvm_vcpu *vcpu;
704 /* request update of sie control block for all available vcpus */
705 kvm_for_each_vcpu(i, vcpu, kvm) {
706 if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
708 kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
712 void kvm_arch_flush_shadow(struct kvm *kvm)
716 static int __init kvm_s390_init(void)
719 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
724 * guests can ask for up to 255+1 double words, we need a full page
725 * to hold the maximum amount of facilities. On the other hand, we
726 * only set facilities that are known to work in KVM.
728 facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
733 memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
734 facilities[0] &= 0xff00fff3f47c0000ULL;
735 facilities[1] &= 0x201c000000000000ULL;
739 static void __exit kvm_s390_exit(void)
741 free_page((unsigned long) facilities);
745 module_init(kvm_s390_init);
746 module_exit(kvm_s390_exit);