2 /*---------------------------------------------------------------*/
4 /*--- This file (main_main.c) is ---*/
5 /*--- Copyright (C) OpenWorks LLP. All rights reserved. ---*/
7 /*---------------------------------------------------------------*/
10 This file is part of LibVEX, a library for dynamic binary
11 instrumentation and translation.
13 Copyright (C) 2004-2009 OpenWorks LLP. All rights reserved.
15 This library is made available under a dual licensing scheme.
17 If you link LibVEX against other code all of which is itself
18 licensed under the GNU General Public License, version 2 dated June
19 1991 ("GPL v2"), then you may use LibVEX under the terms of the GPL
20 v2, as appearing in the file LICENSE.GPL. If the file LICENSE.GPL
21 is missing, you can obtain a copy of the GPL v2 from the Free
22 Software Foundation Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 For any other uses of LibVEX, you must first obtain a commercial
26 license from OpenWorks LLP. Please contact info@open-works.co.uk
27 for information about commercial licensing.
29 This software is provided by OpenWorks LLP "as is" and any express
30 or implied warranties, including, but not limited to, the implied
31 warranties of merchantability and fitness for a particular purpose
32 are disclaimed. In no event shall OpenWorks LLP be liable for any
33 direct, indirect, incidental, special, exemplary, or consequential
34 damages (including, but not limited to, procurement of substitute
35 goods or services; loss of use, data, or profits; or business
36 interruption) however caused and on any theory of liability,
37 whether in contract, strict liability, or tort (including
38 negligence or otherwise) arising in any way out of the use of this
39 software, even if advised of the possibility of such damage.
41 Neither the names of the U.S. Department of Energy nor the
42 University of California nor the names of its contributors may be
43 used to endorse or promote products derived from this software
44 without prior written permission.
48 #include "libvex_emwarn.h"
49 #include "libvex_guest_x86.h"
50 #include "libvex_guest_amd64.h"
51 #include "libvex_guest_arm.h"
52 #include "libvex_guest_ppc32.h"
53 #include "libvex_guest_ppc64.h"
55 #include "main_globals.h"
56 #include "main_util.h"
57 #include "host_generic_regs.h"
60 #include "host_x86_defs.h"
61 #include "host_amd64_defs.h"
62 #include "host_ppc_defs.h"
64 #include "guest_generic_bb_to_IR.h"
65 #include "guest_x86_defs.h"
66 #include "guest_amd64_defs.h"
67 #include "guest_arm_defs.h"
68 #include "guest_ppc_defs.h"
71 /* This file contains the top level interface to the library. */
73 /* --------- fwds ... --------- */
75 static Bool are_valid_hwcaps ( VexArch arch, UInt hwcaps );
76 static HChar* show_hwcaps ( VexArch arch, UInt hwcaps );
79 /* --------- Initialise the library. --------- */
81 /* Exported to library client. */
/* Fill *vcon with the library's default translation/optimisation
   controls.  The ranges below are enforced later by the vassert()s in
   LibVEX_Init (iropt_level 0..2, unroll_thresh 0..400, max_insns
   1..100, chase_thresh < max_insns).
   NOTE(review): this excerpt is a numbered paste with lines missing
   (e.g. the function's braces); visible code is kept byte-identical. */
83 void LibVEX_default_VexControl ( /*OUT*/ VexControl* vcon )
85 vcon->iropt_verbosity = 0; /* no iropt tracing */
86 vcon->iropt_level = 2; /* highest level accepted by LibVEX_Init */
87 vcon->iropt_precise_memory_exns = False;
88 vcon->iropt_unroll_thresh = 120;
89 vcon->guest_max_insns = 60; /* guest insns per superblock */
90 vcon->guest_chase_thresh = 10; /* must stay < guest_max_insns */
94 /* Exported to library client. */
/* Library initialisation: record the client's callbacks and settings,
   sanity-check the supplied VexControl, and verify that the build's
   basic-type sizes match priv/libvex_basictypes.h.
   NOTE(review): this excerpt is a numbered paste; the function-name
   line, the 'Int debuglevel' parameter line and the closing braces are
   not visible here.  Visible code is kept byte-identical. */
97 /* failure exit function */
98 __attribute__ ((noreturn))
99 void (*failure_exit) ( void ),
100 /* logging output function */
101 void (*log_bytes) ( HChar*, Int nbytes ),
102 /* debug paranoia level */
104 /* Are we supporting valgrind checking? */
105 Bool valgrind_support,
107 /*READONLY*/VexControl* vcon
110 /* First off, do enough minimal setup so that the following
111 assertions can fail in a sane fashion, if need be. */
/* Install the exit/log hooks before any vassert so assertion failures
   can be reported through the client's own callbacks. */
112 vex_failure_exit = failure_exit;
113 vex_log_bytes = log_bytes;
115 /* Now it's safe to check parameters for sanity. */
116 vassert(!vex_initdone); /* double initialisation is an error */
117 vassert(failure_exit);
119 vassert(debuglevel >= 0);
/* Range-check every client-supplied VexControl field. */
121 vassert(vcon->iropt_verbosity >= 0);
122 vassert(vcon->iropt_level >= 0);
123 vassert(vcon->iropt_level <= 2);
124 vassert(vcon->iropt_unroll_thresh >= 0);
125 vassert(vcon->iropt_unroll_thresh <= 400);
126 vassert(vcon->guest_max_insns >= 1);
127 vassert(vcon->guest_max_insns <= 100);
128 vassert(vcon->guest_chase_thresh >= 0);
129 vassert(vcon->guest_chase_thresh < vcon->guest_max_insns);
131 /* Check that Vex has been built with sizes of basic types as
132 stated in priv/libvex_basictypes.h. Failure of any of these is
133 a serious configuration error and should be corrected
134 immediately. If any of these assertions fail you can fully
135 expect Vex not to work properly, if at all. */
137 vassert(1 == sizeof(UChar));
138 vassert(1 == sizeof(Char));
139 vassert(2 == sizeof(UShort));
140 vassert(2 == sizeof(Short));
141 vassert(4 == sizeof(UInt));
142 vassert(4 == sizeof(Int));
143 vassert(8 == sizeof(ULong));
144 vassert(8 == sizeof(Long));
145 vassert(4 == sizeof(Float));
146 vassert(8 == sizeof(Double));
147 vassert(1 == sizeof(Bool));
148 vassert(4 == sizeof(Addr32));
149 vassert(8 == sizeof(Addr64));
150 vassert(16 == sizeof(U128));
/* Host-word / pointer-size consistency: only 32- and 64-bit hosts
   are supported, and HWord must be pointer-sized. */
152 vassert(sizeof(void*) == 4 || sizeof(void*) == 8);
153 vassert(sizeof(void*) == sizeof(int*));
154 vassert(sizeof(void*) == sizeof(HWord));
156 vassert(VEX_HOST_WORDSIZE == sizeof(void*));
157 vassert(VEX_HOST_WORDSIZE == sizeof(HWord));
159 /* Really start up .. */
160 vex_debuglevel = debuglevel;
161 vex_valgrind_support = valgrind_support;
164 vexSetAllocMode ( VexAllocModeTEMP );
168 /* --------- Make a translation. --------- */
170 /* Exported to library client. */
/* Translate one guest superblock to host machine code.  Pipeline, as
   visible below: (1) bind per-host-arch backend function table,
   (2) bind per-guest-arch frontend table, (3) disassemble guest bytes
   to IR via bb_to_IR, (4) iropt cleanup, (5) client instrumentation
   hooks, (6) post-instrumentation cleanup, (7) tree-building,
   (8) instruction selection (iselSB), (9) register allocation,
   (10) emit bytes into vta->host_bytes.  Returns VexTransOK /
   VexTransAccessFail / VexTransOutputFull.
   NOTE(review): this excerpt is a numbered paste; case labels, braces
   and several statements (e.g. the declarations of irsb, vcode, rcode
   and mode64) are not visible here.  Visible code kept byte-identical. */
172 VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
174 /* This the bundle of functions we need to do the back-end stuff
175 (insn selection, reg-alloc, assembly) whilst being insulated
176 from the target instruction set. */
177 HReg* available_real_regs;
178 Int n_available_real_regs;
179 Bool (*isMove) ( HInstr*, HReg*, HReg* );
180 void (*getRegUsage) ( HRegUsage*, HInstr*, Bool );
181 void (*mapRegs) ( HRegRemap*, HInstr*, Bool );
182 HInstr* (*genSpill) ( HReg, Int, Bool );
183 HInstr* (*genReload) ( HReg, Int, Bool );
184 HInstr* (*directReload) ( HInstr*, HReg, Short );
185 void (*ppInstr) ( HInstr*, Bool );
186 void (*ppReg) ( HReg );
187 HInstrArray* (*iselSB) ( IRSB*, VexArch, VexArchInfo*,
189 Int (*emit) ( UChar*, Int, HInstr*, Bool, void* );
190 IRExpr* (*specHelper) ( HChar*, IRExpr** );
191 Bool (*preciseMemExnsFn) ( Int, Int );
193 DisOneInstrFn disInstrFn;
195 VexGuestLayout* guest_layout;
196 Bool host_is_bigendian = False;
200 Int i, j, k, out_used, guest_sizeB;
201 Int offB_TISTART, offB_TILEN;
202 UChar insn_bytes[32]; /* scratch buffer for one emitted host insn */
203 IRType guest_word_type;
204 IRType host_word_type;
/* Defensive defaults, so unsupported-arch paths leave sane values. */
208 available_real_regs = NULL;
209 n_available_real_regs = 0;
221 preciseMemExnsFn = NULL;
223 guest_word_type = Ity_INVALID;
224 host_word_type = Ity_INVALID;
229 vex_traceflags = vta->traceflags;
231 vassert(vex_initdone); /* LibVEX_Init must have been called */
232 vexSetAllocModeTEMP_and_clear();
233 vexAllocSanityCheck();
235 /* First off, check that the guest and host insn sets
/* Bind the host (backend) function table.  The casts erase the
   arch-specific instruction type (X86Instr etc.) down to the generic
   HInstr* expected by the register allocator and emitter. */
238 switch (vta->arch_host) {
242 getAllocableRegs_X86 ( &n_available_real_regs,
243 &available_real_regs );
244 isMove = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_X86Instr;
245 getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool)) getRegUsage_X86Instr;
246 mapRegs = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_X86Instr;
247 genSpill = (HInstr*(*)(HReg,Int, Bool)) genSpill_X86;
248 genReload = (HInstr*(*)(HReg,Int, Bool)) genReload_X86;
249 directReload = (HInstr*(*)(HInstr*,HReg,Short)) directReload_X86;
250 ppInstr = (void(*)(HInstr*, Bool)) ppX86Instr;
251 ppReg = (void(*)(HReg)) ppHRegX86;
253 emit = (Int(*)(UChar*,Int,HInstr*,Bool,void*)) emit_X86Instr;
254 host_is_bigendian = False;
255 host_word_type = Ity_I32;
256 vassert(are_valid_hwcaps(VexArchX86, vta->archinfo_host.hwcaps));
257 vassert(vta->dispatch != NULL); /* jump-to-dispatcher scheme */
262 getAllocableRegs_AMD64 ( &n_available_real_regs,
263 &available_real_regs );
264 isMove = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_AMD64Instr;
265 getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool)) getRegUsage_AMD64Instr;
266 mapRegs = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_AMD64Instr;
267 genSpill = (HInstr*(*)(HReg,Int, Bool)) genSpill_AMD64;
268 genReload = (HInstr*(*)(HReg,Int, Bool)) genReload_AMD64;
269 ppInstr = (void(*)(HInstr*, Bool)) ppAMD64Instr;
270 ppReg = (void(*)(HReg)) ppHRegAMD64;
271 iselSB = iselSB_AMD64;
272 emit = (Int(*)(UChar*,Int,HInstr*,Bool,void*)) emit_AMD64Instr;
273 host_is_bigendian = False;
274 host_word_type = Ity_I64;
275 vassert(are_valid_hwcaps(VexArchAMD64, vta->archinfo_host.hwcaps));
276 vassert(vta->dispatch != NULL); /* jump-to-dispatcher scheme */
/* PPC32/PPC64 share one backend, parameterised by mode64.
   NOTE(review): mode64's assignment is not visible in this excerpt. */
281 getAllocableRegs_PPC ( &n_available_real_regs,
282 &available_real_regs, mode64 );
283 isMove = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_PPCInstr;
284 getRegUsage = (void(*)(HRegUsage*,HInstr*,Bool)) getRegUsage_PPCInstr;
285 mapRegs = (void(*)(HRegRemap*,HInstr*,Bool)) mapRegs_PPCInstr;
286 genSpill = (HInstr*(*)(HReg,Int,Bool)) genSpill_PPC;
287 genReload = (HInstr*(*)(HReg,Int,Bool)) genReload_PPC;
288 ppInstr = (void(*)(HInstr*,Bool)) ppPPCInstr;
289 ppReg = (void(*)(HReg)) ppHRegPPC;
291 emit = (Int(*)(UChar*,Int,HInstr*,Bool,void*)) emit_PPCInstr;
292 host_is_bigendian = True;
293 host_word_type = Ity_I32;
294 vassert(are_valid_hwcaps(VexArchPPC32, vta->archinfo_host.hwcaps));
295 vassert(vta->dispatch == NULL); /* return-to-dispatcher scheme */
300 getAllocableRegs_PPC ( &n_available_real_regs,
301 &available_real_regs, mode64 );
302 isMove = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_PPCInstr;
303 getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool)) getRegUsage_PPCInstr;
304 mapRegs = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_PPCInstr;
305 genSpill = (HInstr*(*)(HReg,Int, Bool)) genSpill_PPC;
306 genReload = (HInstr*(*)(HReg,Int, Bool)) genReload_PPC;
307 ppInstr = (void(*)(HInstr*, Bool)) ppPPCInstr;
308 ppReg = (void(*)(HReg)) ppHRegPPC;
310 emit = (Int(*)(UChar*,Int,HInstr*,Bool,void*)) emit_PPCInstr;
311 host_is_bigendian = True;
312 host_word_type = Ity_I64;
313 vassert(are_valid_hwcaps(VexArchPPC64, vta->archinfo_host.hwcaps));
314 vassert(vta->dispatch == NULL); /* return-to-dispatcher scheme */
318 vpanic("LibVEX_Translate: unsupported target insn set");
/* Bind the guest (frontend) table: disassembler, spec helper,
   guest-state layout and the TISTART/TILEN offsets used for
   self-modifying-code checks. */
322 switch (vta->arch_guest) {
325 preciseMemExnsFn = guest_x86_state_requires_precise_mem_exns;
326 disInstrFn = disInstr_X86;
327 specHelper = guest_x86_spechelper;
328 guest_sizeB = sizeof(VexGuestX86State);
329 guest_word_type = Ity_I32;
330 guest_layout = &x86guest_layout;
331 offB_TISTART = offsetof(VexGuestX86State,guest_TISTART);
332 offB_TILEN = offsetof(VexGuestX86State,guest_TILEN);
333 vassert(are_valid_hwcaps(VexArchX86, vta->archinfo_guest.hwcaps));
334 vassert(0 == sizeof(VexGuestX86State) % 8);
335 vassert(sizeof( ((VexGuestX86State*)0)->guest_TISTART) == 4);
336 vassert(sizeof( ((VexGuestX86State*)0)->guest_TILEN ) == 4);
337 vassert(sizeof( ((VexGuestX86State*)0)->guest_NRADDR ) == 4);
341 preciseMemExnsFn = guest_amd64_state_requires_precise_mem_exns;
342 disInstrFn = disInstr_AMD64;
343 specHelper = guest_amd64_spechelper;
344 guest_sizeB = sizeof(VexGuestAMD64State);
345 guest_word_type = Ity_I64;
346 guest_layout = &amd64guest_layout;
347 offB_TISTART = offsetof(VexGuestAMD64State,guest_TISTART);
348 offB_TILEN = offsetof(VexGuestAMD64State,guest_TILEN);
349 vassert(are_valid_hwcaps(VexArchAMD64, vta->archinfo_guest.hwcaps));
350 vassert(0 == sizeof(VexGuestAMD64State) % 8);
351 vassert(sizeof( ((VexGuestAMD64State*)0)->guest_TISTART ) == 8);
352 vassert(sizeof( ((VexGuestAMD64State*)0)->guest_TILEN ) == 8);
353 vassert(sizeof( ((VexGuestAMD64State*)0)->guest_NRADDR ) == 8);
357 preciseMemExnsFn = guest_arm_state_requires_precise_mem_exns;
358 disInstrFn = NULL; /* HACK */
359 specHelper = guest_arm_spechelper;
360 guest_sizeB = sizeof(VexGuestARMState);
361 guest_word_type = Ity_I32;
362 guest_layout = &armGuest_layout;
363 offB_TISTART = 0; /* hack ... arm has bitrot */
364 offB_TILEN = 0; /* hack ... arm has bitrot */
365 vassert(are_valid_hwcaps(VexArchARM, vta->archinfo_guest.hwcaps));
369 preciseMemExnsFn = guest_ppc32_state_requires_precise_mem_exns;
370 disInstrFn = disInstr_PPC;
371 specHelper = guest_ppc32_spechelper;
372 guest_sizeB = sizeof(VexGuestPPC32State);
373 guest_word_type = Ity_I32;
374 guest_layout = &ppc32Guest_layout;
375 offB_TISTART = offsetof(VexGuestPPC32State,guest_TISTART);
376 offB_TILEN = offsetof(VexGuestPPC32State,guest_TILEN);
377 vassert(are_valid_hwcaps(VexArchPPC32, vta->archinfo_guest.hwcaps));
378 vassert(0 == sizeof(VexGuestPPC32State) % 8);
379 vassert(sizeof( ((VexGuestPPC32State*)0)->guest_TISTART ) == 4);
380 vassert(sizeof( ((VexGuestPPC32State*)0)->guest_TILEN ) == 4);
381 vassert(sizeof( ((VexGuestPPC32State*)0)->guest_NRADDR ) == 4);
385 preciseMemExnsFn = guest_ppc64_state_requires_precise_mem_exns;
386 disInstrFn = disInstr_PPC;
387 specHelper = guest_ppc64_spechelper;
388 guest_sizeB = sizeof(VexGuestPPC64State);
389 guest_word_type = Ity_I64;
390 guest_layout = &ppc64Guest_layout;
391 offB_TISTART = offsetof(VexGuestPPC64State,guest_TISTART);
392 offB_TILEN = offsetof(VexGuestPPC64State,guest_TILEN);
393 vassert(are_valid_hwcaps(VexArchPPC64, vta->archinfo_guest.hwcaps));
394 vassert(0 == sizeof(VexGuestPPC64State) % 16);
395 vassert(sizeof( ((VexGuestPPC64State*)0)->guest_TISTART ) == 8);
396 vassert(sizeof( ((VexGuestPPC64State*)0)->guest_TILEN ) == 8);
397 vassert(sizeof( ((VexGuestPPC64State*)0)->guest_NRADDR ) == 8);
398 vassert(sizeof( ((VexGuestPPC64State*)0)->guest_NRADDR_GPR2) == 8);
402 vpanic("LibVEX_Translate: unsupported guest insn set");
405 /* yet more sanity checks ... */
406 if (vta->arch_guest == vta->arch_host) {
407 /* doesn't necessarily have to be true, but if it isn't it means
408 we are simulating one flavour of an architecture a different
409 flavour of the same architecture, which is pretty strange. */
410 vassert(vta->archinfo_guest.hwcaps == vta->archinfo_host.hwcaps);
413 vexAllocSanityCheck();
415 if (vex_traceflags & VEX_TRACE_FE)
416 vex_printf("\n------------------------"
418 "------------------------\n\n");
/* Front end: disassemble guest bytes into an IR superblock.
   NOTE(review): several argument lines of this call are missing from
   the excerpt. */
420 irsb = bb_to_IR ( vta->guest_extents,
421 vta->callback_opaque,
424 vta->guest_bytes_addr,
428 &vta->archinfo_guest,
432 vta->preamble_function,
436 vexAllocSanityCheck();
439 /* Access failure. */
440 vexSetAllocModeTEMP_and_clear();
442 return VexTransAccessFail;
/* Front end may cover 1..3 discontiguous extents; the first must
   start at the requested address. */
445 vassert(vta->guest_extents->n_used >= 1 && vta->guest_extents->n_used <= 3);
446 vassert(vta->guest_extents->base[0] == vta->guest_bytes_addr);
447 for (i = 0; i < vta->guest_extents->n_used; i++) {
448 vassert(vta->guest_extents->len[i] < 10000); /* sanity */
451 /* If debugging, show the raw guest bytes for this bb. */
452 if (0 || (vex_traceflags & VEX_TRACE_FE)) {
453 if (vta->guest_extents->n_used > 1) {
454 vex_printf("can't show code due to extents > 1\n");
457 UChar* p = (UChar*)vta->guest_bytes;
458 UInt guest_bytes_read = (UInt)vta->guest_extents->len[0];
459 vex_printf(". 0 %llx %u\n.", vta->guest_bytes_addr, guest_bytes_read );
460 for (i = 0; i < guest_bytes_read; i++)
461 vex_printf(" %02x", (Int)p[i] );
466 /* Sanity check the initial IR. */
467 sanityCheckIRSB( irsb, "initial IR",
468 False/*can be non-flat*/, guest_word_type );
470 vexAllocSanityCheck();
472 /* Clean it up, hopefully a lot. */
473 irsb = do_iropt_BB ( irsb, specHelper, preciseMemExnsFn,
474 vta->guest_bytes_addr );
475 sanityCheckIRSB( irsb, "after initial iropt",
476 True/*must be flat*/, guest_word_type );
478 if (vex_traceflags & VEX_TRACE_OPT1) {
479 vex_printf("\n------------------------"
480 " After pre-instr IR optimisation "
481 "------------------------\n\n");
486 vexAllocSanityCheck();
488 /* Get the thing instrumented. */
/* Client-supplied instrumentation passes (e.g. a Valgrind tool);
   each may replace the IRSB wholesale. */
489 if (vta->instrument1)
490 irsb = vta->instrument1(vta->callback_opaque,
493 guest_word_type, host_word_type);
494 vexAllocSanityCheck();
496 if (vta->instrument2)
497 irsb = vta->instrument2(vta->callback_opaque,
500 guest_word_type, host_word_type);
502 if (vex_traceflags & VEX_TRACE_INST) {
503 vex_printf("\n------------------------"
504 " After instrumentation "
505 "------------------------\n\n");
/* Only re-check flatness if instrumentation actually ran. */
510 if (vta->instrument1 || vta->instrument2)
511 sanityCheckIRSB( irsb, "after instrumentation",
512 True/*must be flat*/, guest_word_type );
514 /* Do a post-instrumentation cleanup pass. */
515 if (vta->instrument1 || vta->instrument2) {
516 do_deadcode_BB( irsb );
517 irsb = cprop_BB( irsb );
518 do_deadcode_BB( irsb );
519 sanityCheckIRSB( irsb, "after post-instrumentation cleanup",
520 True/*must be flat*/, guest_word_type );
523 vexAllocSanityCheck();
525 if (vex_traceflags & VEX_TRACE_OPT2) {
526 vex_printf("\n------------------------"
527 " After post-instr IR optimisation "
528 "------------------------\n\n");
533 /* Turn it into virtual-registerised code. Build trees -- this
534 also throws away any dead bindings. */
535 ado_treebuild_BB( irsb );
537 if (vta->finaltidy) {
538 irsb = vta->finaltidy(irsb);
541 vexAllocSanityCheck();
543 if (vex_traceflags & VEX_TRACE_TREES) {
544 vex_printf("\n------------------------"
545 " After tree-building "
546 "------------------------\n\n");
/* Debug escape hatch: 'if (0)' is a compiled-out early exit. */
552 if (0) { *(vta->host_bytes_used) = 0; return VexTransOK; }
555 if (vex_traceflags & VEX_TRACE_VCODE)
556 vex_printf("\n------------------------"
557 " Instruction selection "
558 "------------------------\n");
/* Instruction selection: IR -> virtual-registerised host insns. */
560 vcode = iselSB ( irsb, vta->arch_host, &vta->archinfo_host,
561 &vta->abiinfo_both );
563 vexAllocSanityCheck();
565 if (vex_traceflags & VEX_TRACE_VCODE)
568 if (vex_traceflags & VEX_TRACE_VCODE) {
569 for (i = 0; i < vcode->arr_used; i++) {
570 vex_printf("%3d ", i);
571 ppInstr(vcode->arr[i], mode64);
577 /* Register allocate. */
578 rcode = doRegisterAllocation ( vcode, available_real_regs,
579 n_available_real_regs,
580 isMove, getRegUsage, mapRegs,
581 genSpill, genReload, directReload,
583 ppInstr, ppReg, mode64 );
585 vexAllocSanityCheck();
587 if (vex_traceflags & VEX_TRACE_RCODE) {
588 vex_printf("\n------------------------"
589 " Register-allocated code "
590 "------------------------\n\n");
591 for (i = 0; i < rcode->arr_used; i++) {
592 vex_printf("%3d ", i);
593 ppInstr(rcode->arr[i], mode64);
600 if (0) { *(vta->host_bytes_used) = 0; return VexTransOK; }
604 if (vex_traceflags & VEX_TRACE_ASM) {
605 vex_printf("\n------------------------"
607 "------------------------\n\n");
/* Assembly: emit each allocated insn into insn_bytes (max 32 bytes
   per insn) and copy into the caller's host_bytes buffer. */
610 out_used = 0; /* tracks along the host_bytes array */
611 for (i = 0; i < rcode->arr_used; i++) {
612 if (vex_traceflags & VEX_TRACE_ASM) {
613 ppInstr(rcode->arr[i], mode64);
616 j = (*emit)( insn_bytes, 32, rcode->arr[i], mode64, vta->dispatch );
617 if (vex_traceflags & VEX_TRACE_ASM) {
618 for (k = 0; k < j; k++)
619 if (insn_bytes[k] < 16)
620 vex_printf("0%x ", (UInt)insn_bytes[k]); /* pad to 2 hex digits */
622 vex_printf("%x ", (UInt)insn_bytes[k]);
/* Fail (rather than overflow) if the caller's buffer is too small. */
625 if (out_used + j > vta->host_bytes_size) {
626 vexSetAllocModeTEMP_and_clear();
628 return VexTransOutputFull;
630 for (k = 0; k < j; k++) {
631 vta->host_bytes[out_used] = insn_bytes[k];
634 vassert(out_used <= vta->host_bytes_size);
636 *(vta->host_bytes_used) = out_used;
638 vexAllocSanityCheck();
640 vexSetAllocModeTEMP_and_clear();
647 /* --------- Emulation warnings. --------- */
/* Map a VexEmWarn emulation-warning code to a human-readable string;
   panics on an unknown code.
   NOTE(review): the switch header and some case labels (e.g. for the
   %mxcsr.fz / %mxcsr.daz returns) are missing from this excerpt;
   visible code is kept byte-identical. */
649 HChar* LibVEX_EmWarn_string ( VexEmWarn ew )
654 case EmWarn_X86_x87exns:
655 return "Unmasking x87 FP exceptions";
656 case EmWarn_X86_x87precision:
657 return "Selection of non-80-bit x87 FP precision";
658 case EmWarn_X86_sseExns:
659 return "Unmasking SSE FP exceptions";
661 return "Setting %mxcsr.fz (SSE flush-underflows-to-zero mode)";
663 return "Setting %mxcsr.daz (SSE treat-denormals-as-zero mode)";
664 case EmWarn_X86_acFlag:
665 return "Setting %eflags.ac (setting noted but ignored)";
667 return "Unmasking PPC32/64 FP exceptions";
668 case EmWarn_PPC64_redir_overflow:
669 return "PPC64 function redirection stack overflow";
670 case EmWarn_PPC64_redir_underflow:
671 return "PPC64 function redirection stack underflow";
673 vpanic("LibVEX_EmWarn_string: unknown warning");
677 /* ------------------ Arch/HwCaps stuff. ------------------ */
/* Return a printable name for a VexArch value; never returns NULL
   (unknown values map to "VexArch???").
   NOTE(review): the switch header/braces are missing from this
   excerpt; visible code is kept byte-identical. */
679 const HChar* LibVEX_ppVexArch ( VexArch arch )
682 case VexArch_INVALID: return "INVALID";
683 case VexArchX86: return "X86";
684 case VexArchAMD64: return "AMD64";
685 case VexArchARM: return "ARM";
686 case VexArchPPC32: return "PPC32";
687 case VexArchPPC64: return "PPC64";
688 default: return "VexArch???";
/* Return a printable description of (arch, hwcaps); "INVALID" when
   show_hwcaps rejects the combination (returns NULL). */
692 const HChar* LibVEX_ppVexHwCaps ( VexArch arch, UInt hwcaps )
694 HChar* str = show_hwcaps(arch,hwcaps);
695 return str ? str : "INVALID"; /* show_hwcaps is NULL on bad combos */
699 /* Write default settings info *vai. */
/* NOTE(review): this excerpt shows only the ppc_cache_line_szB
   default; other field initialisations (e.g. hwcaps) and the braces
   are not visible here. */
700 void LibVEX_default_VexArchInfo ( /*OUT*/VexArchInfo* vai )
703 vai->ppc_cache_line_szB = 0; /* 0 = unknown/unset cache line size */
706 /* Write default settings info *vbi. */
/* All ABI quirks default to off/false/zero; clients opt in explicitly.
   NOTE(review): braces are missing from this excerpt; visible code is
   kept byte-identical. */
707 void LibVEX_default_VexAbiInfo ( /*OUT*/VexAbiInfo* vbi )
709 vbi->guest_stack_redzone_size = 0;
710 vbi->guest_amd64_assume_fs_is_zero = False;
711 vbi->guest_amd64_assume_gs_is_0x60 = False;
712 vbi->guest_ppc_zap_RZ_at_blr = False;
713 vbi->guest_ppc_zap_RZ_at_bl = NULL; /* function pointer, hence NULL */
714 vbi->guest_ppc_sc_continues_at_LR = False;
715 vbi->host_ppc_calls_use_fndescrs = False;
716 vbi->host_ppc32_regalign_int64_args = False;
720 /* Return a string showing the hwcaps in a nice way. The string will
721 be NULL for invalid combinations of flags, so these functions also
722 serve as a way to validate hwcaps values. */
/* Describe an x86 hwcaps word; only the listed monotonic SSE
   combinations are valid.  Per the file's convention, any other
   combination yields NULL (the fall-through return is not visible in
   this excerpt). */
724 static HChar* show_hwcaps_x86 ( UInt hwcaps )
726 /* Monotonic, SSE3 > SSE2 > SSE1 > baseline. */
729 if (hwcaps == VEX_HWCAPS_X86_SSE1)
731 if (hwcaps == (VEX_HWCAPS_X86_SSE1 | VEX_HWCAPS_X86_SSE2))
732 return "x86-sse1-sse2";
733 if (hwcaps == (VEX_HWCAPS_X86_SSE1
734 | VEX_HWCAPS_X86_SSE2 | VEX_HWCAPS_X86_SSE3))
735 return "x86-sse1-sse2-sse3";
/* Describe an amd64 hwcaps word.  SSE3 and CX16 are independent bits
   above the sse2 baseline.
   NOTE(review): the declaration of 'c' (presumably derived from
   hwcaps) and the final 'return NULL' are not visible in this
   excerpt — confirm against the full source. */
740 static HChar* show_hwcaps_amd64 ( UInt hwcaps )
742 /* SSE3 and CX16 are orthogonal and > baseline, although we really
743 don't expect to come across anything which can do SSE3 but can't
744 do CX16. Still, we can handle that case. */
745 const UInt SSE3 = VEX_HWCAPS_AMD64_SSE3;
746 const UInt CX16 = VEX_HWCAPS_AMD64_CX16;
748 if (c == 0) return "amd64-sse2";
749 if (c == SSE3) return "amd64-sse3";
750 if (c == CX16) return "amd64-sse2-cx16";
751 if (c == (SSE3|CX16)) return "amd64-sse3-cx16";
/* Describe a ppc32 hwcaps word: FP (F) and AltiVec (V) capabilities,
   with FX/GX only meaningful on top of F.
   NOTE(review): the declaration of 'c' and the final 'return NULL'
   are not visible in this excerpt — confirm against the full source. */
755 static HChar* show_hwcaps_ppc32 ( UInt hwcaps )
757 /* Monotonic with complications. Basically V > F > baseline,
758 but once you have F then you can have FX or GX too. */
759 const UInt F = VEX_HWCAPS_PPC32_F;
760 const UInt V = VEX_HWCAPS_PPC32_V;
761 const UInt FX = VEX_HWCAPS_PPC32_FX;
762 const UInt GX = VEX_HWCAPS_PPC32_GX;
764 if (c == 0) return "ppc32-int";
765 if (c == F) return "ppc32-int-flt";
766 if (c == (F|FX)) return "ppc32-int-flt-FX";
767 if (c == (F|GX)) return "ppc32-int-flt-GX";
768 if (c == (F|FX|GX)) return "ppc32-int-flt-FX-GX";
769 if (c == (F|V)) return "ppc32-int-flt-vmx";
770 if (c == (F|V|FX)) return "ppc32-int-flt-vmx-FX";
771 if (c == (F|V|GX)) return "ppc32-int-flt-vmx-GX";
772 if (c == (F|V|FX|GX)) return "ppc32-int-flt-vmx-FX-GX";
/* Describe a ppc64 hwcaps word: FP is baseline (hence "-flt" even for
   c == 0); V/FX/GX are optional extras.
   NOTE(review): the declaration of 'c' and the final 'return NULL'
   are not visible in this excerpt — confirm against the full source. */
776 static HChar* show_hwcaps_ppc64 ( UInt hwcaps )
778 /* Monotonic with complications. Basically V > baseline(==F),
779 but once you have F then you can have FX or GX too. */
780 const UInt V = VEX_HWCAPS_PPC64_V;
781 const UInt FX = VEX_HWCAPS_PPC64_FX;
782 const UInt GX = VEX_HWCAPS_PPC64_GX;
784 if (c == 0) return "ppc64-int-flt";
785 if (c == FX) return "ppc64-int-flt-FX";
786 if (c == GX) return "ppc64-int-flt-GX";
787 if (c == (FX|GX)) return "ppc64-int-flt-FX-GX";
788 if (c == V) return "ppc64-int-flt-vmx";
789 if (c == (V|FX)) return "ppc64-int-flt-vmx-FX";
790 if (c == (V|GX)) return "ppc64-int-flt-vmx-GX";
791 if (c == (V|FX|GX)) return "ppc64-int-flt-vmx-FX-GX";
/* Describe an ARM hwcaps word; only the baseline (0) is accepted.
   Per the file's convention the non-zero path returns NULL (that
   return is not visible in this excerpt). */
795 static HChar* show_hwcaps_arm ( UInt hwcaps )
797 if (hwcaps == 0) return "arm-baseline";
/* Dispatch to the per-arch hwcaps describer; NULL for an unknown arch
   or an invalid hwcaps combination (doubles as the validity check used
   by are_valid_hwcaps).
   NOTE(review): the switch header/braces are missing from this
   excerpt; visible code is kept byte-identical. */
802 static HChar* show_hwcaps ( VexArch arch, UInt hwcaps )
805 case VexArchX86: return show_hwcaps_x86(hwcaps);
806 case VexArchAMD64: return show_hwcaps_amd64(hwcaps);
807 case VexArchPPC32: return show_hwcaps_ppc32(hwcaps);
808 case VexArchPPC64: return show_hwcaps_ppc64(hwcaps);
809 case VexArchARM: return show_hwcaps_arm(hwcaps);
810 default: return NULL;
/* True iff (arch, hwcaps) is a combination show_hwcaps can name —
   i.e. the hwcaps word is valid for that architecture. */
814 static Bool are_valid_hwcaps ( VexArch arch, UInt hwcaps )
816 return show_hwcaps(arch,hwcaps) != NULL;
820 /*---------------------------------------------------------------*/
821 /*--- end main_main.c ---*/
822 /*---------------------------------------------------------------*/