1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- 2. The origin of this software must not be misrepresented; you must
- not claim that you wrote the original software. If you use this
- software in a product, an acknowledgment in the product
+ 2. The origin of this software must not be misrepresented; you must
+ not claim that you wrote the original software. If you use this
+ software in a product, an acknowledgment in the product
documentation would be appreciated but is not required.
3. Altered source versions must be plainly marked as such, and must
not be misrepresented as being the original software.
- 4. The name of the author may not be used to endorse or promote
- products derived from this software without specific prior written
+ 4. The name of the author may not be used to endorse or promote
+ products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
the terms of the GNU General Public License, version 2. See the
COPYING file in the source distribution for details.
- ----------------------------------------------------------------
+ ----------------------------------------------------------------
*/
/* This file is for inclusion into client (your!) code.
- You can use these macros to manipulate and query Valgrind's
+ You can use these macros to manipulate and query Valgrind's
execution inside your own programs.
The resulting executables will still run without Valgrind, just a
|| (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6))
*/
#define __VALGRIND_MAJOR__ 3
-#define __VALGRIND_MINOR__ 15
+#define __VALGRIND_MINOR__ 18
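/* Hedged usage sketch: client code can test these version macros to
   guard requests that only newer headers provide.  The 3.18 cutoff
   below is illustrative and simply matches this header's version. */
#if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__) \
    && (__VALGRIND_MAJOR__ > 3 \
        || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 18))
   /* Requests first shipped with the 3.18 header, such as
      VALGRIND_CLO_CHANGE further below, are available here. */
#endif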
#include <stdarg.h>
*/
#undef PLAT_x86_darwin
#undef PLAT_amd64_darwin
+#undef PLAT_x86_freebsd
+#undef PLAT_amd64_freebsd
#undef PLAT_x86_win32
#undef PLAT_amd64_win64
#undef PLAT_x86_linux
#undef PLAT_s390x_linux
#undef PLAT_mips32_linux
#undef PLAT_mips64_linux
+#undef PLAT_nanomips_linux
#undef PLAT_x86_solaris
#undef PLAT_amd64_solaris
# define PLAT_x86_darwin 1
#elif defined(__APPLE__) && defined(__x86_64__)
# define PLAT_amd64_darwin 1
-#elif (defined(__MINGW32__) && !defined(__MINGW64__)) \
+#elif defined(__FreeBSD__) && defined(__i386__)
+# define PLAT_x86_freebsd 1
+#elif defined(__FreeBSD__) && defined(__amd64__)
+# define PLAT_amd64_freebsd 1
+#elif (defined(__MINGW32__) && defined(__i386__)) \
|| defined(__CYGWIN32__) \
|| (defined(_WIN32) && defined(_M_IX86))
# define PLAT_x86_win32 1
-#elif defined(__MINGW64__) \
- || (defined(_WIN64) && defined(_M_X64))
+#elif (defined(__MINGW32__) && defined(__x86_64__)) \
+ || (defined(_WIN32) && defined(_M_X64))
+/* __MINGW32__ and _WIN32 are defined in 64 bit mode as well. */
# define PLAT_amd64_win64 1
#elif defined(__linux__) && defined(__i386__)
# define PLAT_x86_linux 1
# define PLAT_s390x_linux 1
#elif defined(__linux__) && defined(__mips__) && (__mips==64)
# define PLAT_mips64_linux 1
-#elif defined(__linux__) && defined(__mips__) && (__mips!=64)
+#elif defined(__linux__) && defined(__mips__) && (__mips==32)
# define PLAT_mips32_linux 1
+#elif defined(__linux__) && defined(__nanomips__)
+# define PLAT_nanomips_linux 1
#elif defined(__sun) && defined(__i386__)
# define PLAT_x86_solaris 1
#elif defined(__sun) && defined(__x86_64__)
this is executed not under Valgrind. Args are passed in a memory
block, and so there's no intrinsic limit to the number that could
be passed, but it's currently five.
-
- The macro args are:
+
+ The macro args are:
_zzq_rlval result lvalue
_zzq_default default value (result returned when running on real CPU)
_zzq_request request code
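/* Hedged client-side sketch of this request mechanism:
   RUNNING_ON_VALGRIND, defined later in this header on top of these
   macros with a default of 0, evaluates to 0 on the real CPU and
   non-zero under Valgrind.  The helper name is illustrative. */
static inline int example_running_under_valgrind(void)
{
   return RUNNING_ON_VALGRIND ? 1 : 0;
}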
#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \
|| (defined(PLAT_x86_win32) && defined(__GNUC__)) \
- || defined(PLAT_x86_solaris)
+ || defined(PLAT_x86_solaris) || defined(PLAT_x86_freebsd)
typedef
- struct {
+ struct {
unsigned int nraddr; /* where's the code? */
}
OrigFn;
#if defined(PLAT_x86_win32) && !defined(__GNUC__)
typedef
- struct {
+ struct {
unsigned int nraddr; /* where's the code? */
}
OrigFn;
#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \
|| defined(PLAT_amd64_solaris) \
+ || defined(PLAT_amd64_freebsd) \
|| (defined(PLAT_amd64_win64) && defined(__GNUC__))
typedef
- struct {
+ struct {
unsigned long int nraddr; /* where's the code? */
}
OrigFn;
#if defined(PLAT_ppc32_linux)
typedef
- struct {
+ struct {
unsigned int nraddr; /* where's the code? */
}
OrigFn;
#if defined(PLAT_ppc64be_linux)
typedef
- struct {
+ struct {
unsigned long int nraddr; /* where's the code? */
unsigned long int r2; /* what tocptr do we need? */
}
#if defined(PLAT_arm_linux)
typedef
- struct {
+ struct {
unsigned int nraddr; /* where's the code? */
}
OrigFn;
#if defined(PLAT_arm64_linux)
typedef
- struct {
+ struct {
unsigned long int nraddr; /* where's the code? */
}
OrigFn;
/* results = r3 */ \
"lgr %0, 3\n\t" \
: "=d" (_zzq_result) \
- : "a" (&_zzq_args[0]), "0" (_zzq_default) \
+ : "a" (&_zzq_args[0]), \
+ "0" ((unsigned long int)_zzq_default) \
: "cc", "2", "3", "memory" \
); \
_zzq_result; \
#if defined(PLAT_mips32_linux)
typedef
- struct {
+ struct {
unsigned int nraddr; /* where's the code? */
}
OrigFn;
"srl $0, $0, 29\n\t" \
"srl $0, $0, 3\n\t" \
"srl $0, $0, 19\n\t"
-
+
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
#endif /* PLAT_mips64_linux */
+#if defined(PLAT_nanomips_linux)
+
+typedef
+ struct {
+ unsigned int nraddr; /* where's the code? */
+ }
+ OrigFn;
+/*
+ 8000 c04d srl zero, zero, 13
+ 8000 c05d srl zero, zero, 29
+ 8000 c043 srl zero, zero, 3
+ 8000 c053 srl zero, zero, 19
+*/
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE "srl[32] $zero, $zero, 13 \n\t" \
+ "srl[32] $zero, $zero, 29 \n\t" \
+ "srl[32] $zero, $zero, 3 \n\t" \
+ "srl[32] $zero, $zero, 19 \n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ __extension__ \
+ ({ volatile unsigned int _zzq_args[6]; \
+ volatile unsigned int _zzq_result; \
+ _zzq_args[0] = (unsigned int)(_zzq_request); \
+ _zzq_args[1] = (unsigned int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned int)(_zzq_arg5); \
+ __asm__ volatile("move $a7, %1\n\t" /* default */ \
+ "move $t0, %2\n\t" /* ptr */ \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* $a7 = client_request( $t0 ) */ \
+ "or[32] $t0, $t0, $t0\n\t" \
+ "move %0, $a7\n\t" /* result */ \
+ : "=r" (_zzq_result) \
+ : "r" (_zzq_default), "r" (&_zzq_args[0]) \
+ : "$a7", "$t0", "memory"); \
+ _zzq_result; \
+ })
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ volatile unsigned long int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* $a7 = guest_NRADDR */ \
+ "or[32] $t1, $t1, $t1\n\t" \
+ "move %0, $a7" /*result*/ \
+ : "=r" (__addr) \
+ : \
+ : "$a7"); \
+ _zzq_orig->nraddr = __addr; \
+ }
+
+#define VALGRIND_CALL_NOREDIR_T9 \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* call-noredir $25 */ \
+ "or[32] $t2, $t2, $t2\n\t"
+
+#define VALGRIND_VEX_INJECT_IR() \
+ do { \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ "or[32] $t3, $t3, $t3\n\t" \
+ ); \
+ } while (0)
+
+#endif
/* Insert assembly code for other platforms here... */
#endif /* NVALGRIND */
/* ----------------- x86-{linux,darwin,solaris} ---------------- */
#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \
- || defined(PLAT_x86_solaris)
+ || defined(PLAT_x86_solaris) || defined(PLAT_x86_freebsd)
/* These regs are trashed by the hidden call. No need to mention eax
as gcc can already see that, plus causes gcc to bomb. */
/* ---------------- amd64-{linux,darwin,solaris} --------------- */
#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \
- || defined(PLAT_amd64_solaris)
+ || defined(PLAT_amd64_solaris) || defined(PLAT_amd64_freebsd)
/* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */
#define VALGRIND_RESTORE_STACK \
"mr 1,28\n\t"
-/* These CALL_FN_ macros assume that on ppc32-linux,
+/* These CALL_FN_ macros assume that on ppc32-linux,
sizeof(unsigned long) == 4. */
#define CALL_FN_W_v(lval, orig) \
r14 in s390_irgen_noredir (VEX/priv/guest_s390_irgen.c) to give the
function a proper return address. All others are ABI defined call
clobbers. */
-#define __CALLER_SAVED_REGS "0","1","2","3","4","5","14", \
- "f0","f1","f2","f3","f4","f5","f6","f7"
+#if defined(__VX__) || defined(__S390_VX__)
+#define __CALLER_SAVED_REGS "0", "1", "2", "3", "4", "5", "14", \
+ "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", \
+ "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", \
+ "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", \
+ "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
+#else
+#define __CALLER_SAVED_REGS "0", "1", "2", "3", "4", "5", "14", \
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7"
+#endif
-/* Nb: Although r11 is modified in the asm snippets below (inside
+/* Nb: Although r11 is modified in the asm snippets below (inside
VALGRIND_CFI_PROLOGUE) it is not listed in the clobber section, for
two reasons:
(1) r11 is restored in VALGRIND_CFI_EPILOGUE, so effectively it is not
"aghi 15,-160\n\t" \
"lg 1, 0(1)\n\t" /* target->r1 */ \
VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
"aghi 15,160\n\t" \
VALGRIND_CFI_EPILOGUE \
+ "lgr %0, 2\n\t" \
: /*out*/ "=d" (_res) \
: /*in*/ "d" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
"lg 2, 8(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
"aghi 15,160\n\t" \
VALGRIND_CFI_EPILOGUE \
+ "lgr %0, 2\n\t" \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
"lg 3,16(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
"aghi 15,160\n\t" \
VALGRIND_CFI_EPILOGUE \
+ "lgr %0, 2\n\t" \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
"lg 4,24(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
"aghi 15,160\n\t" \
VALGRIND_CFI_EPILOGUE \
+ "lgr %0, 2\n\t" \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
"lg 5,32(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
"aghi 15,160\n\t" \
VALGRIND_CFI_EPILOGUE \
+ "lgr %0, 2\n\t" \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
"lg 6,40(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
"aghi 15,160\n\t" \
VALGRIND_CFI_EPILOGUE \
+ "lgr %0, 2\n\t" \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
"mvc 160(8,15), 48(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
"aghi 15,168\n\t" \
VALGRIND_CFI_EPILOGUE \
+ "lgr %0, 2\n\t" \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
"mvc 168(8,15), 56(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
"aghi 15,176\n\t" \
VALGRIND_CFI_EPILOGUE \
+ "lgr %0, 2\n\t" \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
"mvc 176(8,15), 64(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
"aghi 15,184\n\t" \
VALGRIND_CFI_EPILOGUE \
+ "lgr %0, 2\n\t" \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
"mvc 184(8,15), 72(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
"aghi 15,192\n\t" \
VALGRIND_CFI_EPILOGUE \
+ "lgr %0, 2\n\t" \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
"mvc 192(8,15), 80(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
"aghi 15,200\n\t" \
VALGRIND_CFI_EPILOGUE \
+ "lgr %0, 2\n\t" \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
"mvc 200(8,15), 88(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
"aghi 15,208\n\t" \
VALGRIND_CFI_EPILOGUE \
+ "lgr %0, 2\n\t" \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
"mvc 208(8,15), 96(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
"aghi 15,216\n\t" \
VALGRIND_CFI_EPILOGUE \
+ "lgr %0, 2\n\t" \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
#endif /* PLAT_s390x_linux */
/* ------------------------- mips32-linux ----------------------- */
-
+
#if defined(PLAT_mips32_linux)
/* These regs are trashed by the hidden call. */
#endif /* PLAT_mips32_linux */
+/* ------------------------- nanomips-linux -------------------- */
+
+#if defined(PLAT_nanomips_linux)
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS "$t4", "$t5", "$a0", "$a1", "$a2",       \
+   "$a3", "$a4", "$a5", "$a6", "$a7", "$t0", "$t1", "$t2", "$t3",    \
+   "$t8", "$t9", "$at"
+
+/* These CALL_FN_ macros assume that on nanomips-linux,
+   sizeof(unsigned long) == 4. */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "lw $t9, 0(%1)\n\t" \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "move %0, $a0\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ __asm__ volatile( \
+ "lw $t9, 0(%1)\n\t" \
+ "lw $a0, 4(%1)\n\t" \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "move %0, $a0\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ __asm__ volatile( \
+ "lw $t9, 0(%1)\n\t" \
+ "lw $a0, 4(%1)\n\t" \
+ "lw $a1, 8(%1)\n\t" \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "move %0, $a0\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[4]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ __asm__ volatile( \
+ "lw $t9, 0(%1)\n\t" \
+ "lw $a0, 4(%1)\n\t" \
+ "lw $a1, 8(%1)\n\t" \
+ "lw $a2,12(%1)\n\t" \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "move %0, $a0\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[5]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ __asm__ volatile( \
+ "lw $t9, 0(%1)\n\t" \
+ "lw $a0, 4(%1)\n\t" \
+ "lw $a1, 8(%1)\n\t" \
+ "lw $a2,12(%1)\n\t" \
+ "lw $a3,16(%1)\n\t" \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "move %0, $a0\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[6]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ __asm__ volatile( \
+ "lw $t9, 0(%1)\n\t" \
+ "lw $a0, 4(%1)\n\t" \
+ "lw $a1, 8(%1)\n\t" \
+ "lw $a2,12(%1)\n\t" \
+ "lw $a3,16(%1)\n\t" \
+ "lw $a4,20(%1)\n\t" \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "move %0, $a0\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)  \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[7]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ __asm__ volatile( \
+ "lw $t9, 0(%1)\n\t" \
+ "lw $a0, 4(%1)\n\t" \
+ "lw $a1, 8(%1)\n\t" \
+ "lw $a2,12(%1)\n\t" \
+ "lw $a3,16(%1)\n\t" \
+ "lw $a4,20(%1)\n\t" \
+ "lw $a5,24(%1)\n\t" \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "move %0, $a0\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[8]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ __asm__ volatile( \
+ "lw $t9, 0(%1)\n\t" \
+ "lw $a0, 4(%1)\n\t" \
+ "lw $a1, 8(%1)\n\t" \
+ "lw $a2,12(%1)\n\t" \
+ "lw $a3,16(%1)\n\t" \
+ "lw $a4,20(%1)\n\t" \
+ "lw $a5,24(%1)\n\t" \
+ "lw $a6,28(%1)\n\t" \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "move %0, $a0\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[9]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ __asm__ volatile( \
+ "lw $t9, 0(%1)\n\t" \
+ "lw $a0, 4(%1)\n\t" \
+ "lw $a1, 8(%1)\n\t" \
+ "lw $a2,12(%1)\n\t" \
+ "lw $a3,16(%1)\n\t" \
+ "lw $a4,20(%1)\n\t" \
+ "lw $a5,24(%1)\n\t" \
+ "lw $a6,28(%1)\n\t" \
+ "lw $a7,32(%1)\n\t" \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "move %0, $a0\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[10]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ __asm__ volatile( \
+ "addiu $sp, $sp, -16 \n\t" \
+ "lw $t9,36(%1) \n\t" \
+ "sw $t9, 0($sp) \n\t" \
+ "lw $t9, 0(%1) \n\t" \
+ "lw $a0, 4(%1) \n\t" \
+ "lw $a1, 8(%1) \n\t" \
+ "lw $a2,12(%1) \n\t" \
+ "lw $a3,16(%1) \n\t" \
+ "lw $a4,20(%1) \n\t" \
+ "lw $a5,24(%1) \n\t" \
+ "lw $a6,28(%1) \n\t" \
+ "lw $a7,32(%1) \n\t" \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "move %0, $a0 \n\t" \
+ "addiu $sp, $sp, 16 \n\t" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[11]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ __asm__ volatile( \
+ "addiu $sp, $sp, -16 \n\t" \
+ "lw $t9,36(%1) \n\t" \
+ "sw $t9, 0($sp) \n\t" \
+ "lw $t9,40(%1) \n\t" \
+ "sw $t9, 4($sp) \n\t" \
+ "lw $t9, 0(%1) \n\t" \
+ "lw $a0, 4(%1) \n\t" \
+ "lw $a1, 8(%1) \n\t" \
+ "lw $a2,12(%1) \n\t" \
+ "lw $a3,16(%1) \n\t" \
+ "lw $a4,20(%1) \n\t" \
+ "lw $a5,24(%1) \n\t" \
+ "lw $a6,28(%1) \n\t" \
+ "lw $a7,32(%1) \n\t" \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "move %0, $a0 \n\t" \
+ "addiu $sp, $sp, 16 \n\t" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[12]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ __asm__ volatile( \
+ "addiu $sp, $sp, -16 \n\t" \
+ "lw $t9,36(%1) \n\t" \
+ "sw $t9, 0($sp) \n\t" \
+ "lw $t9,40(%1) \n\t" \
+ "sw $t9, 4($sp) \n\t" \
+ "lw $t9,44(%1) \n\t" \
+ "sw $t9, 8($sp) \n\t" \
+ "lw $t9, 0(%1) \n\t" \
+ "lw $a0, 4(%1) \n\t" \
+ "lw $a1, 8(%1) \n\t" \
+ "lw $a2,12(%1) \n\t" \
+ "lw $a3,16(%1) \n\t" \
+ "lw $a4,20(%1) \n\t" \
+ "lw $a5,24(%1) \n\t" \
+ "lw $a6,28(%1) \n\t" \
+ "lw $a7,32(%1) \n\t" \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "move %0, $a0 \n\t" \
+ "addiu $sp, $sp, 16 \n\t" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ _argvec[12] = (unsigned long)(arg12); \
+ __asm__ volatile( \
+ "addiu $sp, $sp, -16 \n\t" \
+ "lw $t9,36(%1) \n\t" \
+ "sw $t9, 0($sp) \n\t" \
+ "lw $t9,40(%1) \n\t" \
+ "sw $t9, 4($sp) \n\t" \
+ "lw $t9,44(%1) \n\t" \
+ "sw $t9, 8($sp) \n\t" \
+ "lw $t9,48(%1) \n\t" \
+ "sw $t9,12($sp) \n\t" \
+ "lw $t9, 0(%1) \n\t" \
+ "lw $a0, 4(%1) \n\t" \
+ "lw $a1, 8(%1) \n\t" \
+ "lw $a2,12(%1) \n\t" \
+ "lw $a3,16(%1) \n\t" \
+ "lw $a4,20(%1) \n\t" \
+ "lw $a5,24(%1) \n\t" \
+ "lw $a6,28(%1) \n\t" \
+ "lw $a7,32(%1) \n\t" \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "move %0, $a0 \n\t" \
+ "addiu $sp, $sp, 16 \n\t" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_nanomips_linux */
+
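/* Hedged sketch of how the CALL_FN_* family is used in client code:
   a function wrapper in the style described in the Valgrind manual.
   The wrapped function foo (living in the main executable, hence the
   NONE soname tag) and its int(int,int) signature are illustrative. */
int I_WRAP_SONAME_FN_ZU(NONE, foo)(int x, int y)
{
   int    result;
   OrigFn fn;
   VALGRIND_GET_ORIG_FN(fn);        /* fetch the address of the real foo   */
   CALL_FN_W_WW(result, fn, x, y);  /* call it without being re-redirected */
   return result;
}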
/* ------------------------- mips64-linux ------------------------- */
#if defined(PLAT_mips64_linux)
#define VG_IS_TOOL_USERREQ(a, b, v) \
(VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000))
-/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
+/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
This enum comprises an ABI exported by Valgrind to programs
which use client requests. DO NOT CHANGE THE NUMERIC VALUES OF THESE
ENTRIES, NOR DELETE ANY -- add new ones at the end of the most
command. */
VG_USERREQ__GDB_MONITOR_COMMAND = 0x1202,
+ /* Allows the client program to change a dynamic command line
+ option. */
+ VG_USERREQ__CLO_CHANGE = 0x1203,
+
/* These are useful and can be interpreted by any tool that
tracks malloc() et al, by using vg_replace_malloc.c. */
VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301,
_qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
VG_USERREQ__PRINTF_VALIST_BY_REF,
(unsigned long)format,
- (unsigned long)&vargs,
+ (unsigned long)&vargs,
0, 0, 0);
#endif
va_end(vargs);
_qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
(unsigned long)format,
- (unsigned long)&vargs,
+ (unsigned long)&vargs,
0, 0, 0);
#endif
va_end(vargs);
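/* Hedged usage sketch for the formatted-output requests around here:
   VALGRIND_PRINTF (and VALGRIND_PRINTF_BACKTRACE) write to the
   Valgrind log and return 0 when not running under Valgrind.  The
   message text is illustrative. */
static inline void example_log_phase(int phase)
{
   VALGRIND_PRINTF("entering phase %d\n", phase);
}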
/* These requests allow control to move from the simulated CPU to the
real CPU, calling an arbitrary function.
-
+
Note that the current ThreadId is inserted as the first argument.
So this call:
- It marks the block as being addressable and undefined (if 'is_zeroed' is
not set), or addressable and defined (if 'is_zeroed' is set). This
controls how accesses to the block by the program are handled.
-
+
'addr' is the start of the usable block (ie. after any
redzone), 'sizeB' is its size. 'rzB' is the redzone size if the allocator
can apply redzones -- these are blocks of padding at the start and end of
Valgrind will spot block overruns. `is_zeroed' indicates if the memory is
zeroed (or filled with another predictable value), as is the case for
calloc().
-
+
VALGRIND_MALLOCLIKE_BLOCK should be put immediately after the point where a
heap block -- that will be used by the client program -- is allocated.
It's best to put it at the outermost level of the allocator if possible;
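/* Hedged sketch of the MALLOCLIKE/FREELIKE pairing just described,
   for a toy arena allocator.  The arena, the lack of redzones
   (rzB == 0) and is_zeroed == 0 are illustrative choices; a real
   allocator would also recycle the memory released in arena_free. */
static char arena[1 << 16];                /* illustrative backing store */
static unsigned long arena_used = 0;

static void* arena_alloc(unsigned long szB)
{
   void* user = arena + arena_used;        /* no redzones in this toy */
   arena_used += szB;
   VALGRIND_MALLOCLIKE_BLOCK(user, szB, /*rzB*/0, /*is_zeroed*/0);
   return user;
}

static void arena_free(void* user)
{
   VALGRIND_FREELIKE_BLOCK(user, /*rzB*/0);
}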
/* Create a memory pool with some flags specifying extended behaviour.
When flags is zero, the behaviour is identical to VALGRIND_CREATE_MEMPOOL.
-
- The flag VALGRIND_MEMPOOL_METAPOOL specifies that the pieces of memory
+
+ The flag VALGRIND_MEMPOOL_METAPOOL specifies that the pieces of memory
associated with the pool using VALGRIND_MEMPOOL_ALLOC will be used
by the application as superblocks to dole out MALLOC_LIKE blocks using
VALGRIND_MALLOCLIKE_BLOCK. In other words, a meta pool is a "2 levels"
command, 0, 0, 0, 0)
+/* Change the value of a dynamic command line option.
+   Note that options which are unknown or not dynamically changeable
+   will cause a warning message to be output. */
+#define VALGRIND_CLO_CHANGE(option) \
+ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CLO_CHANGE, \
+ option, 0, 0, 0, 0)
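/* Hedged usage sketch for the request just defined.  The option
   string is a placeholder: it must name an option the running tool
   accepts dynamically, otherwise Valgrind only prints a warning. */
static inline void example_change_option(void)
{
   VALGRIND_CLO_CHANGE("--some-dynamic-option=value");  /* placeholder */
}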
+
+
#undef PLAT_x86_darwin
#undef PLAT_amd64_darwin
#undef PLAT_x86_win32
#undef PLAT_s390x_linux
#undef PLAT_mips32_linux
#undef PLAT_mips64_linux
+#undef PLAT_nanomips_linux
#undef PLAT_x86_solaris
#undef PLAT_amd64_solaris