xen: Port Xen bus driver from mini-os
[platform/kernel/u-boot.git] / drivers / xen / hypervisor.c
// SPDX-License-Identifier: MIT
2 /*
3  * hypervisor.c
4  *
5  * Communication to/from hypervisor.
6  *
7  * Copyright (c) 2002-2003, K A Fraser
8  * Copyright (c) 2005, Grzegorz Milos, gm281@cam.ac.uk,Intel Research Cambridge
9  * Copyright (c) 2020, EPAM Systems Inc.
10  */
11 #include <common.h>
12 #include <cpu_func.h>
13 #include <log.h>
14 #include <memalign.h>
15
16 #include <asm/io.h>
17 #include <asm/armv8/mmu.h>
18 #include <asm/xen/system.h>
19
20 #include <linux/bug.h>
21
22 #include <xen/hvm.h>
23 #include <xen/events.h>
24 #include <xen/xenbus.h>
25 #include <xen/interface/memory.h>
26
27 #define active_evtchns(cpu, sh, idx)    \
28         ((sh)->evtchn_pending[idx] &    \
29          ~(sh)->evtchn_mask[idx])
30
31 int in_callback;
32
33 /*
34  * Shared page for communicating with the hypervisor.
35  * Events flags go here, for example.
36  */
37 struct shared_info *HYPERVISOR_shared_info;
38
39 static const char *param_name(int op)
40 {
41 #define PARAM(x)[HVM_PARAM_##x] = #x
42         static const char *const names[] = {
43                 PARAM(CALLBACK_IRQ),
44                 PARAM(STORE_PFN),
45                 PARAM(STORE_EVTCHN),
46                 PARAM(PAE_ENABLED),
47                 PARAM(IOREQ_PFN),
48                 PARAM(VPT_ALIGN),
49                 PARAM(CONSOLE_PFN),
50                 PARAM(CONSOLE_EVTCHN),
51         };
52 #undef PARAM
53
54         if (op >= ARRAY_SIZE(names))
55                 return "unknown";
56
57         if (!names[op])
58                 return "reserved";
59
60         return names[op];
61 }
62
63 /**
64  * hvm_get_parameter_maintain_dcache - function to obtain a HVM
65  * parameter value.
66  * @idx: HVM parameter index
67  * @value: Value to fill in
68  *
69  * According to Xen on ARM ABI (xen/include/public/arch-arm.h):
70  * all memory which is shared with other entities in the system
71  * (including the hypervisor and other guests) must reside in memory
72  * which is mapped as Normal Inner Write-Back Outer Write-Back
73  * Inner-Shareable.
74  *
75  * Thus, page attributes must be equally set for all the entities
76  * working with that page.
77  *
78  * Before MMU setup the data cache is turned off, so it means that
79  * manual data cache maintenance is required, because of the
80  * difference of page attributes.
81  */
82 int hvm_get_parameter_maintain_dcache(int idx, uint64_t *value)
83 {
84         struct xen_hvm_param xhv;
85         int ret;
86
87         invalidate_dcache_range((unsigned long)&xhv,
88                                 (unsigned long)&xhv + sizeof(xhv));
89         xhv.domid = DOMID_SELF;
90         xhv.index = idx;
91         invalidate_dcache_range((unsigned long)&xhv,
92                                 (unsigned long)&xhv + sizeof(xhv));
93
94         ret = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
95         if (ret < 0) {
96                 pr_err("Cannot get hvm parameter %s (%d): %d!\n",
97                            param_name(idx), idx, ret);
98                 BUG();
99         }
100         invalidate_dcache_range((unsigned long)&xhv,
101                                 (unsigned long)&xhv + sizeof(xhv));
102
103         *value = xhv.value;
104
105         return ret;
106 }
107
108 int hvm_get_parameter(int idx, uint64_t *value)
109 {
110         struct xen_hvm_param xhv;
111         int ret;
112
113         xhv.domid = DOMID_SELF;
114         xhv.index = idx;
115         ret = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
116         if (ret < 0) {
117                 pr_err("Cannot get hvm parameter %s (%d): %d!\n",
118                            param_name(idx), idx, ret);
119                 BUG();
120         }
121
122         *value = xhv.value;
123
124         return ret;
125 }
126
127 struct shared_info *map_shared_info(void *p)
128 {
129         struct xen_add_to_physmap xatp;
130
131         HYPERVISOR_shared_info = (struct shared_info *)memalign(PAGE_SIZE,
132                                                                 PAGE_SIZE);
133         if (!HYPERVISOR_shared_info)
134                 BUG();
135
136         xatp.domid = DOMID_SELF;
137         xatp.idx = 0;
138         xatp.space = XENMAPSPACE_shared_info;
139         xatp.gpfn = virt_to_pfn(HYPERVISOR_shared_info);
140         if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp) != 0)
141                 BUG();
142
143         return HYPERVISOR_shared_info;
144 }
145
146 void do_hypervisor_callback(struct pt_regs *regs)
147 {
148         unsigned long l1, l2, l1i, l2i;
149         unsigned int port;
150         int cpu = 0;
151         struct shared_info *s = HYPERVISOR_shared_info;
152         struct vcpu_info *vcpu_info = &s->vcpu_info[cpu];
153
154         in_callback = 1;
155
156         vcpu_info->evtchn_upcall_pending = 0;
157         l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
158
159         while (l1 != 0) {
160                 l1i = __ffs(l1);
161                 l1 &= ~(1UL << l1i);
162
163                 while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
164                         l2i = __ffs(l2);
165                         l2 &= ~(1UL << l2i);
166
167                         port = (l1i * (sizeof(unsigned long) * 8)) + l2i;
168                         do_event(port, regs);
169                 }
170         }
171
172         in_callback = 0;
173 }
174
175 void force_evtchn_callback(void)
176 {
177 #ifdef XEN_HAVE_PV_UPCALL_MASK
178         int save;
179 #endif
180         struct vcpu_info *vcpu;
181
182         vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];
183 #ifdef XEN_HAVE_PV_UPCALL_MASK
184         save = vcpu->evtchn_upcall_mask;
185 #endif
186
187         while (vcpu->evtchn_upcall_pending) {
188 #ifdef XEN_HAVE_PV_UPCALL_MASK
189                 vcpu->evtchn_upcall_mask = 1;
190 #endif
191                 do_hypervisor_callback(NULL);
192 #ifdef XEN_HAVE_PV_UPCALL_MASK
193                 vcpu->evtchn_upcall_mask = save;
194 #endif
195         };
196 }
197
198 void mask_evtchn(uint32_t port)
199 {
200         struct shared_info *s = HYPERVISOR_shared_info;
201
202         synch_set_bit(port, &s->evtchn_mask[0]);
203 }
204
205 void unmask_evtchn(uint32_t port)
206 {
207         struct shared_info *s = HYPERVISOR_shared_info;
208         struct vcpu_info *vcpu_info = &s->vcpu_info[smp_processor_id()];
209
210         synch_clear_bit(port, &s->evtchn_mask[0]);
211
212         /*
213          * Just like a real IO-APIC we 'lose the interrupt edge' if the
214          * channel is masked.
215          */
216         if (synch_test_bit(port, &s->evtchn_pending[0]) &&
217             !synch_test_and_set_bit(port / (sizeof(unsigned long) * 8),
218                                     &vcpu_info->evtchn_pending_sel)) {
219                 vcpu_info->evtchn_upcall_pending = 1;
220 #ifdef XEN_HAVE_PV_UPCALL_MASK
221                 if (!vcpu_info->evtchn_upcall_mask)
222 #endif
223                         force_evtchn_callback();
224         }
225 }
226
227 void clear_evtchn(uint32_t port)
228 {
229         struct shared_info *s = HYPERVISOR_shared_info;
230
231         synch_clear_bit(port, &s->evtchn_pending[0]);
232 }
233
234 void xen_init(void)
235 {
236         debug("%s\n", __func__);
237
238         map_shared_info(NULL);
239         init_events();
240         init_xenbus();
241 }
242