/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <trace/events/kvm.h>

#include "trace.h"
#include "coproc.h"

/******************************************************************************
 * Co-processor emulation
 *****************************************************************************/

int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	/*
	 * We can get here if the host has been built without VFPv3 support,
	 * but the guest attempted a floating point operation.
	 */
	kvm_inject_undefined(vcpu);
	return 1;
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/* See note at ARM ARM B1.14.4 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			const struct coproc_params *p,
			const struct coproc_reg *r)
{
	u32 val;
	int cpu;

	cpu = get_cpu();

	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	cpumask_setall(&vcpu->arch.require_dcache_flush);
	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);

	/* If we were already preempted, take the long way around */
	if (cpu != vcpu->arch.last_pcpu) {
		flush_cache_all();
		goto done;
	}

	val = *vcpu_reg(vcpu, p->Rt1);

	switch (p->CRm) {
	case 6:			/* Upgrade DCISW to DCCISW, as per HCR.SWIO */
	case 14:		/* DCCISW */
		asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
		break;

	case 10:		/* DCCSW */
		asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
		break;
	}

done:
	put_cpu();

	return true;
}
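
/*
 * Worked example (illustrative): a guest "mcr p15, 0, r0, c7, c6, 2"
 * (DCISW, invalidate by set/way) traps here with CRm == 6 and is executed
 * on the host as DCCISW (c7, c14, 2), i.e. clean+invalidate, which is the
 * upgrade HCR.SWIO mandates.
 */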

/*
 * We could trap ID_DFR0 and tell the guest we don't support performance
 * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
 * NAKed, so it will read the PMCR anyway.
 *
 * Therefore we tell the guest we have 0 counters. Unfortunately, we
 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 * all PM registers, which doesn't crash the guest kernel at least.
 */
static bool pm_fake(struct kvm_vcpu *vcpu,
		    const struct coproc_params *p,
		    const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}
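
/*
 * Example (illustrative): a guest "mrc p15, 0, r0, c9, c12, 0" (PMCR read)
 * matches the access_pmcr entry below, so pm_fake() routes it to
 * read_zero() and r0 reads as 0; the matching mcr write is discarded by
 * ignore_write(). The guest thus sees a PMU with zero event counters.
 */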

#define access_pmcr pm_fake
#define access_pmcntenset pm_fake
#define access_pmcntenclr pm_fake
#define access_pmovsr pm_fake
#define access_pmselr pm_fake
#define access_pmceid0 pm_fake
#define access_pmceid1 pm_fake
#define access_pmccntr pm_fake
#define access_pmxevtyper pm_fake
#define access_pmxevcntr pm_fake
#define access_pmuserenr pm_fake
#define access_pmintenset pm_fake
#define access_pmintenclr pm_fake

/* Architected CP15 registers.
 * Important: Must be sorted ascending by CRn, CRm, Op1, Op2
 */
static const struct coproc_reg cp15_regs[] = {
	/* CSSELR: swapped by interrupt.S. */
	{ CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
			NULL, reset_unknown, c0_CSSELR },

	/* TTBR0/TTBR1: swapped by interrupt.S. */
	{ CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
	{ CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },

	/* TTBCR: swapped by interrupt.S. */
	{ CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_val, c2_TTBCR, 0x00000000 },

	/* DACR: swapped by interrupt.S. */
	{ CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32,
			NULL, reset_unknown, c3_DACR },

	/* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */
	{ CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32,
			NULL, reset_unknown, c5_DFSR },
	{ CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32,
			NULL, reset_unknown, c5_IFSR },
	{ CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32,
			NULL, reset_unknown, c5_ADFSR },
	{ CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32,
			NULL, reset_unknown, c5_AIFSR },

	/* DFAR/IFAR: swapped by interrupt.S. */
	{ CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32,
			NULL, reset_unknown, c6_DFAR },
	{ CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_unknown, c6_IFAR },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw},
	{ CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw},
	{ CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw},

	/*
	 * Dummy performance monitor implementation.
	 */
	{ CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr},

	/* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */
	{ CRn(10), CRm( 2), Op1( 0), Op2( 0), is32,
			NULL, reset_unknown, c10_PRRR},
	{ CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
			NULL, reset_unknown, c10_NMRR},

	/* VBAR: swapped by interrupt.S. */
	{ CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
			NULL, reset_val, c12_VBAR, 0x00000000 },

	/* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
	{ CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
			NULL, reset_val, c13_CID, 0x00000000 },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_unknown, c13_TID_URW },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 3), is32,
			NULL, reset_unknown, c13_TID_URO },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 4), is32,
			NULL, reset_unknown, c13_TID_PRIV },
};

/* Target specific emulation tables */
static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
{
	target_tables[table->target] = table;
}
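
/*
 * Usage note: per-target setup code (the Cortex-A15 support in
 * coproc_a15.c, for instance) is expected to call the function above once
 * at init time; emulate_cp15() then consults that table before falling
 * back to the generic cp15_regs table.
 */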

/* Get specific register table for this target. */
static const struct coproc_reg *get_target_table(unsigned target, size_t *num)
{
	struct kvm_coproc_target_table *table;

	table = target_tables[target];
	*num = table->num;
	return table->table;
}

static const struct coproc_reg *find_reg(const struct coproc_params *params,
					 const struct coproc_reg table[],
					 unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		const struct coproc_reg *r = &table[i];

		if (params->is_64bit != r->is_64)
			continue;
		if (params->CRn != r->CRn)
			continue;
		if (params->CRm != r->CRm)
			continue;
		if (params->Op1 != r->Op1)
			continue;
		if (params->Op2 != r->Op2)
			continue;

		return r;
	}
	return NULL;
}

static int emulate_cp15(struct kvm_vcpu *vcpu,
			const struct coproc_params *params)
{
	size_t num;
	const struct coproc_reg *table, *r;

	trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn,
				   params->CRm, params->Op2, params->is_write);

	table = get_target_table(vcpu->arch.target, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));

	if (likely(r)) {
		/* If we don't have an accessor, we should never get here! */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
			return 1;
		}
		/* If access function fails, it should complain. */
	} else {
		kvm_err("Unsupported guest CP15 access at: %08x\n",
			*vcpu_pc(vcpu));
		print_cp_instr(params);
	}
	kvm_inject_undefined(vcpu);
	return 1;
}

/**
 * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct coproc_params params;

	params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
	params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
	params.is_write = ((vcpu->arch.hsr & 1) == 0);
	params.is_64bit = true;

	params.Op1 = (vcpu->arch.hsr >> 16) & 0xf;
	params.Op2 = 0;
	params.Rt2 = (vcpu->arch.hsr >> 10) & 0xf;
	params.CRn = 0;

	return emulate_cp15(vcpu, &params);
}
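
/*
 * Example HSR decode (illustrative): a trapped
 * "mcrr p15, 0, r2, r3, c2" (64-bit TTBR0 write) decodes as Op1 = 0,
 * CRm = 2, Rt1 = 2, Rt2 = 3, with HSR bit 0 clear, so is_write == true.
 */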

static void reset_coproc_regs(struct kvm_vcpu *vcpu,
			      const struct coproc_reg *table, size_t num)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset)
			table[i].reset(vcpu, &table[i]);
}

/**
 * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct coproc_params params;

	params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
	params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
	params.is_write = ((vcpu->arch.hsr & 1) == 0);
	params.is_64bit = false;

	params.CRn = (vcpu->arch.hsr >> 10) & 0xf;
	params.Op1 = (vcpu->arch.hsr >> 14) & 0x7;
	params.Op2 = (vcpu->arch.hsr >> 17) & 0x7;
	params.Rt2 = 0;

	return emulate_cp15(vcpu, &params);
}
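
/*
 * Example HSR decode (illustrative): a trapped
 * "mcr p15, 0, r1, c9, c12, 0" (PMCR write) decodes as Op1 = 0, CRn = 9,
 * CRm = 12, Op2 = 0, Rt1 = 1, with HSR bit 0 clear, so is_write == true,
 * and emulate_cp15() dispatches it to the access_pmcr entry above.
 */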

/******************************************************************************
 * Userspace API
 *****************************************************************************/

static bool index_to_params(u64 id, struct coproc_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U32:
		/* Any unused index bits mean it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM_32_CRN_MASK
			   | KVM_REG_ARM_CRM_MASK
			   | KVM_REG_ARM_OPC1_MASK
			   | KVM_REG_ARM_32_OPC2_MASK))
			return false;

		params->is_64bit = false;
		params->CRn = ((id & KVM_REG_ARM_32_CRN_MASK)
			       >> KVM_REG_ARM_32_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
			       >> KVM_REG_ARM_CRM_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
			       >> KVM_REG_ARM_OPC1_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM_32_OPC2_MASK)
			       >> KVM_REG_ARM_32_OPC2_SHIFT);
		return true;
	case KVM_REG_SIZE_U64:
		/* Any unused index bits mean it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM_CRM_MASK
			   | KVM_REG_ARM_OPC1_MASK))
			return false;
		params->is_64bit = true;
		params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
			       >> KVM_REG_ARM_CRM_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
			       >> KVM_REG_ARM_OPC1_SHIFT);
		params->Op2 = 0;
		params->CRn = 0;
		return true;
	default:
		return false;
	}
}
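
/*
 * Example (illustrative): the 32-bit index for the guest's TTBCR
 * (CRn = 2, CRm = 0, Op1 = 0, Op2 = 2) is built as
 *   KVM_REG_ARM | KVM_REG_SIZE_U32 | (15 << KVM_REG_ARM_COPROC_SHIFT) |
 *   (2 << KVM_REG_ARM_32_CRN_SHIFT) | (2 << KVM_REG_ARM_32_OPC2_SHIFT),
 * and index_to_params() recovers exactly those field values from it.
 */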

/* Decode an index value, and find the cp15 coproc_reg entry. */
static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu,
						    u64 id)
{
	size_t num;
	const struct coproc_reg *table, *r;
	struct coproc_params params;

	/* We only do cp15 for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT != 15)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	table = get_target_table(vcpu->arch.target, &num);
	r = find_reg(&params, table, num);
	if (!r)
		r = find_reg(&params, cp15_regs, ARRAY_SIZE(cp15_regs));

	/* Not saved in the cp15 array? */
	if (r && !r->reg)
		r = NULL;

	return r;
}

/*
 * These are the invariant cp15 registers: we let the guest see the host
 * versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */
/* Unfortunately, there's no register-argument for mrc, so generate. */
#define FUNCTION_FOR32(crn, crm, op1, op2, name)			\
	static void get_##name(struct kvm_vcpu *v,			\
			       const struct coproc_reg *r)		\
	{								\
		u32 val;						\
									\
		asm volatile("mrc p15, " __stringify(op1)		\
			     ", %0, c" __stringify(crn)			\
			     ", c" __stringify(crm)			\
			     ", " __stringify(op2) "\n" : "=r" (val));	\
		((struct coproc_reg *)r)->val = val;			\
	}
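
/*
 * For instance, FUNCTION_FOR32(0, 0, 0, 0, MIDR) below expands to a
 * get_MIDR() helper that runs "mrc p15, 0, %0, c0, c0, 0" on the host
 * and stashes the result in the table entry's ->val.
 */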
FUNCTION_FOR32(0, 0, 0, 0, MIDR)
FUNCTION_FOR32(0, 0, 0, 1, CTR)
FUNCTION_FOR32(0, 0, 0, 2, TCMTR)
FUNCTION_FOR32(0, 0, 0, 3, TLBTR)
FUNCTION_FOR32(0, 0, 0, 6, REVIDR)
FUNCTION_FOR32(0, 1, 0, 0, ID_PFR0)
FUNCTION_FOR32(0, 1, 0, 1, ID_PFR1)
FUNCTION_FOR32(0, 1, 0, 2, ID_DFR0)
FUNCTION_FOR32(0, 1, 0, 3, ID_AFR0)
FUNCTION_FOR32(0, 1, 0, 4, ID_MMFR0)
FUNCTION_FOR32(0, 1, 0, 5, ID_MMFR1)
FUNCTION_FOR32(0, 1, 0, 6, ID_MMFR2)
FUNCTION_FOR32(0, 1, 0, 7, ID_MMFR3)
FUNCTION_FOR32(0, 2, 0, 0, ID_ISAR0)
FUNCTION_FOR32(0, 2, 0, 1, ID_ISAR1)
FUNCTION_FOR32(0, 2, 0, 2, ID_ISAR2)
FUNCTION_FOR32(0, 2, 0, 3, ID_ISAR3)
FUNCTION_FOR32(0, 2, 0, 4, ID_ISAR4)
FUNCTION_FOR32(0, 2, 0, 5, ID_ISAR5)
FUNCTION_FOR32(0, 0, 1, 1, CLIDR)
FUNCTION_FOR32(0, 0, 1, 7, AIDR)

/* ->val is filled in by kvm_coproc_table_init() */
static struct coproc_reg invariant_cp15[] = {
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 0), is32, NULL, get_MIDR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 1), is32, NULL, get_CTR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 2), is32, NULL, get_TCMTR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR },

	{ CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 3), is32, NULL, get_ID_AFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 4), is32, NULL, get_ID_MMFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 5), is32, NULL, get_ID_MMFR1 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 6), is32, NULL, get_ID_MMFR2 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 7), is32, NULL, get_ID_MMFR3 },

	{ CRn( 0), CRm( 2), Op1( 0), Op2( 0), is32, NULL, get_ID_ISAR0 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 1), is32, NULL, get_ID_ISAR1 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 2), is32, NULL, get_ID_ISAR2 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 },

	{ CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
	{ CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },
};

static int reg_from_user(void *val, const void __user *uaddr, u64 id)
{
	/* This Just Works because we are little endian. */
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int reg_to_user(void __user *uaddr, const void *val, u64 id)
{
	/* This Just Works because we are little endian. */
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}
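
/*
 * Note: KVM_REG_SIZE(id) is 4 for KVM_REG_SIZE_U32 indices and 8 for
 * KVM_REG_SIZE_U64 ones, so on a little-endian host the same u64 backing
 * store serves both cases: a 32-bit access simply copies the low word.
 */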

static int get_invariant_cp15(u64 id, void __user *uaddr)
{
	struct coproc_params params;
	const struct coproc_reg *r;

	if (!index_to_params(id, &params))
		return -ENOENT;

	r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}

static int set_invariant_cp15(u64 id, void __user *uaddr)
{
	struct coproc_params params;
	const struct coproc_reg *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	if (!index_to_params(id, &params))
		return -ENOENT;
	r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}

int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct coproc_reg *r;
	void __user *uaddr = (void __user *)(long)reg->addr;

	r = index_to_coproc_reg(vcpu, reg->id);
	if (!r)
		return get_invariant_cp15(reg->id, uaddr);

	/* Note: copies two regs if size is 64 bit. */
	return reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id);
}

int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct coproc_reg *r;
	void __user *uaddr = (void __user *)(long)reg->addr;

	r = index_to_coproc_reg(vcpu, reg->id);
	if (!r)
		return set_invariant_cp15(reg->id, uaddr);

	/* Note: copies two regs if size is 64 bit. */
	return reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id);
}

static u64 cp15_to_index(const struct coproc_reg *reg)
{
	u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT);
	if (reg->is_64) {
		val |= KVM_REG_SIZE_U64;
		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
		val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
	} else {
		val |= KVM_REG_SIZE_U32;
		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
		val |= (reg->Op2 << KVM_REG_ARM_32_OPC2_SHIFT);
		val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
		val |= (reg->CRn << KVM_REG_ARM_32_CRN_SHIFT);
	}
	return val;
}
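
/*
 * Example (illustrative): for the 64-bit TTBR0 entry (is64, Op1 = 0,
 * CRm = 2) this yields
 *   KVM_REG_ARM | KVM_REG_SIZE_U64 | (15 << KVM_REG_ARM_COPROC_SHIFT) |
 *   (2 << KVM_REG_ARM_CRM_SHIFT),
 * the inverse of the U64 case in index_to_params() above.
 */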

static bool copy_reg_to_user(const struct coproc_reg *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(cp15_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

/* Assumed ordered tables, see kvm_coproc_table_init. */
static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct coproc_reg *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, &num);
	end1 = i1 + num;
	i2 = cp15_regs;
	end2 = cp15_regs + ARRAY_SIZE(cp15_regs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		int cmp = cmp_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0) {
			/* Ignore registers we trap but don't save. */
			if (i1->reg) {
				if (!copy_reg_to_user(i1, &uind))
					return -EFAULT;
				total++;
			}
		} else {
			/* Ignore registers we trap but don't save. */
			if (i2->reg) {
				if (!copy_reg_to_user(i2, &uind))
					return -EFAULT;
				total++;
			}
		}

		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}
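
/*
 * Passing a NULL uind makes copy_reg_to_user() a no-op that still
 * returns true, so the same walk doubles as a counting pass in
 * kvm_arm_num_coproc_regs() below and as a copying pass in
 * kvm_arm_copy_coproc_indices().
 */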

unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_cp15)
		+ walk_cp15(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* First give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) {
		if (put_user(cp15_to_index(&invariant_cp15[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_cp15(vcpu, uindices);
	if (err > 0)
		err = 0;
	return err;
}

void kvm_coproc_table_init(void)
{
	unsigned int i;

	/* Make sure tables are unique and in order. */
	for (i = 1; i < ARRAY_SIZE(cp15_regs); i++)
		BUG_ON(cmp_reg(&cp15_regs[i-1], &cp15_regs[i]) >= 0);

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++)
		invariant_cp15[i].reset(NULL, &invariant_cp15[i]);
}

/**
 * kvm_reset_coprocs - sets cp15 registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct coproc_reg *table;

	/* Catch someone adding a register without putting in reset entry. */
	memset(vcpu->arch.cp15, 0x42, sizeof(vcpu->arch.cp15));

	/* Generic chip reset first (so target could override). */
	reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));

	table = get_target_table(vcpu->arch.target, &num);
	reset_coproc_regs(vcpu, table, num);

	for (num = 1; num < NR_CP15_REGS; num++)
		if (vcpu->arch.cp15[num] == 0x42424242)
			panic("Didn't reset vcpu->arch.cp15[%zi]", num);
}