Use MSVC compiler host architecture defines where applicable.
There are a couple of places where gcc/clang compiler host architecture
defines are in use, missing compilers like MSVC. This commit adds MSVC host
architecture defines in places where they make sense and can be applied.
NOTE: part of this already worked, since it falls back to TARGET_XXX
in a couple of places and the target currently matches the host architecture
on Windows. The MSVC host architecture defines are still added for completeness
and for a future where host and target architectures might differ.
#if defined(_M_IA64)
ProcRva = (DWORD)((DWORD_PTR)PLabel - (DWORD_PTR)DosHeader);
*(PLabel)++ = *ExportFixup->ProcAddress.PLabel;
-#elif defined(_M_AMD64)
+#elif defined(_M_X64)
ProcRva = (DWORD)((DWORD_PTR)Trampoline - (DWORD_PTR)DosHeader);
/* mov r11, ExportFixup->ProcAddress */
*(Trampoline)++ = 0x49;
#endif
#ifndef CONFIG_CPU
-#if defined(__i386__) || defined(TARGET_X86)
+#if defined(__i386__) || defined(_M_IX86) || defined(TARGET_X86)
#define CONFIG_CPU "x86"
#define CONFIG_WORDSIZE "32"
-#elif defined(__x86_64__) || defined(TARGET_AMD64)
+#elif defined(__x86_64__) || defined(_M_X64) || defined(TARGET_AMD64)
#define CONFIG_CPU "x86-64"
#define CONFIG_WORDSIZE "64"
#elif defined(sparc) || defined(__sparc__)
#endif /* DISABLE_LOGGING */
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
#if !defined(TARGET_ARM64) && !defined(__APPLE__)
#define emit_debug_info TRUE
#else
#define MIN_PAGES 16
-#if _WIN32 // These are the same.
+#if defined(_WIN32) && (defined(_M_IX86) || defined(_M_X64)) // These are the same.
#define MIN_ALIGN MEMORY_ALLOCATION_ALIGNMENT
#elif defined(__x86_64__)
/*
{
CodeChunk *chunk;
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
int fill_value = 0xcc; /* x86 break */
#else
int fill_value = 0x2a;
#include <ucontext.h>
#endif
-#if (defined(__i386__) && !defined(MONO_CROSS_COMPILE)) || (defined(TARGET_X86))
+#if ((defined(__i386__) || defined(_M_IX86)) && !defined(MONO_CROSS_COMPILE)) || (defined(TARGET_X86))
#include <mono/utils/mono-context.h>
#endif
}
-#elif (defined(__x86_64__) && !defined(MONO_CROSS_COMPILE)) || (defined(TARGET_AMD64)) /* defined(__i386__) */
+#elif ((defined(__x86_64__) || defined(_M_X64)) && !defined(MONO_CROSS_COMPILE)) || (defined(TARGET_AMD64))
#include <mono/utils/mono-context.h>
#define MONO_CONTEXT_GET_BP(ctx) ((gpointer)(gsize)((ctx)->wasm_bp))
#define MONO_CONTEXT_GET_SP(ctx) ((gpointer)(gsize)((ctx)->wasm_sp))
-#elif (defined(__i386__) && !defined(MONO_CROSS_COMPILE)) || (defined(TARGET_X86))
+#elif ((defined(__i386__) || defined(_M_IX86)) && !defined(MONO_CROSS_COMPILE)) || (defined(TARGET_X86))
/*HACK, move this to an eventual mono-signal.c*/
#if defined( __linux__) || defined(__sun) || defined(__APPLE__) || defined(__NetBSD__) || \
#define MONO_ARCH_HAS_MONO_CONTEXT 1
-#elif (defined(__x86_64__) && !defined(MONO_CROSS_COMPILE)) || (defined(TARGET_AMD64)) /* defined(__i386__) */
+#elif ((defined(__x86_64__) || defined(_M_X64)) && !defined(MONO_CROSS_COMPILE)) || (defined(TARGET_AMD64))
#include <mono/arch/amd64/amd64-codegen.h>
*/
//#define mono_compiler_barrier() asm volatile("": : :"memory")
-#ifdef TARGET_WASM
+#ifdef HOST_WASM
static inline void mono_memory_barrier (void)
{
#define LOAD_BARRIER mono_memory_read_barrier ()
#define STORE_BARRIER mono_memory_write_barrier ()
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
/*
Both x86 and amd64 follow the SPO memory model:
-Loads are not reordered with other loads