Based on CoreCLR changes.
CC @clamp03 @wscho77 @HJLeee @JongHeonChoi @t-mustafin @gbalykov
add_definitions(-DMIPS64)
add_definitions(-D_WIN64)
add_definitions(-DBIT64=1)
+elseif (CLR_CMAKE_HOST_ARCH_RISCV64)
+ add_definitions(-D_RISCV64_)
+ add_definitions(-DRISCV64)
+ add_definitions(-D_WIN64)
+ add_definitions(-DBIT64=1)
else ()
clr_unknown_arch()
endif ()
add_definitions(-DDBG_TARGET_64BIT=1)
add_definitions(-DDBG_TARGET_WIN64=1)
add_definitions(-DFEATURE_MULTIREG_RETURN)
+elseif (CLR_CMAKE_TARGET_ARCH_RISCV64)
+ add_definitions(-DDBG_TARGET_RISCV64_UNIX)
+ add_definitions(-D_TARGET_RISCV64_=1)
+ add_definitions(-D_TARGET_64BIT_=1)
+ add_definitions(-DDBG_TARGET_RISCV64=1)
+ add_definitions(-DDBG_TARGET_64BIT=1)
+ add_definitions(-DDBG_TARGET_WIN64=1)
+ add_definitions(-DFEATURE_MULTIREG_RETURN)
else ()
clr_unknown_arch()
endif (CLR_CMAKE_TARGET_ARCH_AMD64)
add_definitions(-D_TARGET_WIN64_=1)
add_definitions(-DDBG_TARGET_64BIT)
add_definitions(-DDBG_TARGET_WIN64=1)
+elseif(CLR_CMAKE_HOST_ARCH_RISCV64)
+ add_definitions(-DSOS_TARGET_RISCV64=1)
+ add_definitions(-D_TARGET_WIN64_=1)
+ add_definitions(-DDBG_TARGET_64BIT)
+ add_definitions(-DDBG_TARGET_WIN64=1)
endif()
add_definitions(-DSTRIKE)
DataTarget::GetPointerSize(
/* [out] */ ULONG32 *size)
{
-#if defined(SOS_TARGET_AMD64) || defined(SOS_TARGET_ARM64) || defined(SOS_TARGET_MIPS64)
+#if defined(SOS_TARGET_AMD64) || defined(SOS_TARGET_ARM64) || defined(SOS_TARGET_MIPS64) || defined(SOS_TARGET_RISCV64)
*size = 8;
#elif defined(SOS_TARGET_ARM) || defined(SOS_TARGET_X86)
*size = 4;
"DOTNET_ROOT_ARM";
#elif defined(HOST_ARM64)
"DOTNET_ROOT_ARM64";
+#elif defined(HOST_RISCV64)
+ "DOTNET_ROOT_RISCV64";
#else
"Error";
#error Hosting layer doesn't support target arch
"/etc/dotnet/install_location_arm";
#elif defined(HOST_ARM64)
"/etc/dotnet/install_location_arm64";
+#elif defined(HOST_RISCV64)
+ "/etc/dotnet/install_location_riscv64";
#else
"ERROR";
#error Hosting layer doesn't support target arch
add_definitions(-DDBG_TARGET_WIN64=1)
add_definitions(-DBIT64)
SET(REQUIRE_LLDBPLUGIN false)
+elseif(CLR_CMAKE_HOST_ARCH_RISCV64)
+ add_definitions(-D_TARGET_RISCV64_=1)
+ add_definitions(-DDBG_TARGET_64BIT=1)
+ add_definitions(-DDBG_TARGET_RISCV64=1)
+ add_definitions(-DDBG_TARGET_WIN64=1)
+ add_definitions(-DBIT64)
+ SET(REQUIRE_LLDBPLUGIN false)
endif()
if(NOT $ENV{LLVM_HOME} STREQUAL "")
const WCHAR* pHostArch = W("arm");
#elif defined(HOST_ARM64)
const WCHAR* pHostArch = W("arm64");
+#elif defined(HOST_RISCV64)
+ const WCHAR* pHostArch = W("riscv64");
#else
_ASSERTE(!"Unknown host arch");
return E_NOTIMPL;
e_machine = EM_AARCH64;
#elif defined(TARGET_LOONGARCH64)
e_machine = EM_LOONGARCH;
+#elif defined(TARGET_RISCV64)
+ e_machine = EM_RISCV;
#endif
e_flags = 0;
e_version = 1;
return m_IsVarArg;
}
-#if defined(TARGET_ARM) || defined(TARGET_ARM64)
+#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
bool GcInfoDecoder::HasTailCalls()
{
_ASSERTE( m_Flags & DECODE_HAS_TAILCALLS );
return m_HasTailCalls;
}
-#endif // TARGET_ARM || TARGET_ARM64
+#endif // TARGET_ARM || TARGET_ARM64 || TARGET_RISCV64
bool GcInfoDecoder::WantsReportOnlyLeaf()
{
#endif
+#ifdef TARGET_RISCV64
+#include "daccess.h"
+
+#define UNW_FLAG_NHANDLER 0x0 /* any handler */
+#define UNW_FLAG_EHANDLER 0x1 /* filter handler */
+#define UNW_FLAG_UHANDLER 0x2 /* unwind handler */
+
+// This function returns the RVA of the end of the function (exclusive, so one byte after the actual end)
+// using the unwind info on RISCV64. (see ExternalAPIs\Win9CoreSystem\inc\winnt.h)
+FORCEINLINE
+ULONG64
+RtlpGetFunctionEndAddress (
+ _In_ PT_RUNTIME_FUNCTION FunctionEntry,
+ _In_ ULONG64 ImageBase
+ )
+{
+ ULONG64 FunctionLength;
+
+ FunctionLength = FunctionEntry->UnwindData;
+ if ((FunctionLength & 3) != 0) {
+ FunctionLength = (FunctionLength >> 2) & 0x7ff;
+ } else {
+ memcpy(&FunctionLength, (void*)(ImageBase + FunctionLength), sizeof(UINT32));
+ FunctionLength &= 0x3ffff;
+ }
+
+ return FunctionEntry->BeginAddress + 4 * FunctionLength;
+}
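+
+// Worked example (illustrative, not from winnt.h): a packed entry whose
+// UnwindData is 0x41 has (0x41 & 3) != 0, so FunctionLength = (0x41 >> 2) & 0x7ff
+// = 16 instructions, and the end RVA is BeginAddress + 4 * 16 = BeginAddress + 64.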
+
+#define RUNTIME_FUNCTION__BeginAddress(FunctionEntry) ((FunctionEntry)->BeginAddress)
+#define RUNTIME_FUNCTION__SetBeginAddress(FunctionEntry,address) ((FunctionEntry)->BeginAddress = (address))
+
+#define RUNTIME_FUNCTION__EndAddress(FunctionEntry, ImageBase) (RtlpGetFunctionEndAddress(FunctionEntry, (ULONG64)(ImageBase)))
+
+#define RUNTIME_FUNCTION__SetUnwindInfoAddress(prf,address) do { (prf)->UnwindData = (address); } while (0)
+
+typedef struct _UNWIND_INFO {
+ // dummy
+} UNWIND_INFO, *PUNWIND_INFO;
+
+EXTERN_C
+NTSYSAPI
+PEXCEPTION_ROUTINE
+NTAPI
+RtlVirtualUnwind(
+ IN ULONG HandlerType,
+ IN ULONG64 ImageBase,
+ IN ULONG64 ControlPc,
+ IN PRUNTIME_FUNCTION FunctionEntry,
+ IN OUT PCONTEXT ContextRecord,
+ OUT PVOID *HandlerData,
+ OUT PULONG64 EstablisherFrame,
+ IN OUT PKNONVOLATILE_CONTEXT_POINTERS ContextPointers OPTIONAL
+ );
+
+#endif // TARGET_RISCV64
+
#endif // CLRNT_H_
#endif // TARGET_ARM64 && !HOST_ARM64
+#elif defined(HOST_AMD64) && defined(TARGET_RISCV64) // Host amd64 managing RISCV64 related code
+
+#ifndef CROSS_COMPILE
+#define CROSS_COMPILE
+#endif
+
+//
+// Specify the number of breakpoints and watchpoints that the OS
+// will track.
+//
+
+#define RISCV64_MAX_BREAKPOINTS 8
+#define RISCV64_MAX_WATCHPOINTS 2
+
+#define CONTEXT_UNWOUND_TO_CALL 0x20000000
+
+typedef struct DECLSPEC_ALIGN(16) _T_CONTEXT {
+
+ //
+ // Control flags.
+ //
+
+ /* +0x000 */ DWORD ContextFlags;
+
+ //
+ // Integer registers
+ //
+ DWORD64 R0;
+ DWORD64 Ra;
+ DWORD64 Sp;
+ DWORD64 Gp;
+ DWORD64 Tp;
+ DWORD64 T0;
+ DWORD64 T1;
+ DWORD64 T2;
+ DWORD64 Fp;
+ DWORD64 S1;
+ DWORD64 A0;
+ DWORD64 A1;
+ DWORD64 A2;
+ DWORD64 A3;
+ DWORD64 A4;
+ DWORD64 A5;
+ DWORD64 A6;
+ DWORD64 A7;
+ DWORD64 S2;
+ DWORD64 S3;
+ DWORD64 S4;
+ DWORD64 S5;
+ DWORD64 S6;
+ DWORD64 S7;
+ DWORD64 S8;
+ DWORD64 S9;
+ DWORD64 S10;
+ DWORD64 S11;
+ DWORD64 T3;
+ DWORD64 T4;
+ DWORD64 T5;
+ DWORD64 T6;
+ DWORD64 Pc;
+
+ //
+ // Floating Point Registers
+ //
+ //TODO-RISCV64: support the SIMD.
+ ULONGLONG F[32];
+ DWORD Fcsr;
+} T_CONTEXT, *PT_CONTEXT;
+
+// _IMAGE_RISCV64_RUNTIME_FUNCTION_ENTRY (see ExternalAPIs\Win9CoreSystem\inc\winnt.h)
+typedef struct _T_RUNTIME_FUNCTION {
+ DWORD BeginAddress;
+ union {
+ DWORD UnwindData;
+ struct {
+ DWORD Flag : 2;
+ DWORD FunctionLength : 11;
+ DWORD RegF : 3;
+ DWORD RegI : 4;
+ DWORD H : 1;
+ DWORD CR : 2;
+ DWORD FrameSize : 9;
+ } PackedUnwindData;
+ };
+} T_RUNTIME_FUNCTION, *PT_RUNTIME_FUNCTION;
+
+//
+// Define exception dispatch context structure.
+//
+
+typedef struct _T_DISPATCHER_CONTEXT {
+ DWORD64 ControlPc;
+ DWORD64 ImageBase;
+ PT_RUNTIME_FUNCTION FunctionEntry;
+ DWORD64 EstablisherFrame;
+ DWORD64 TargetPc;
+ PCONTEXT ContextRecord;
+ PEXCEPTION_ROUTINE LanguageHandler;
+ PVOID HandlerData;
+ PVOID HistoryTable;
+ DWORD ScopeIndex;
+ BOOLEAN ControlPcIsUnwound;
+ PBYTE NonVolatileRegisters;
+} T_DISPATCHER_CONTEXT, *PT_DISPATCHER_CONTEXT;
+
+//
+// Nonvolatile context pointer record.
+//
+
+typedef struct _T_KNONVOLATILE_CONTEXT_POINTERS {
+
+ PDWORD64 S1;
+ PDWORD64 S2;
+ PDWORD64 S3;
+ PDWORD64 S4;
+ PDWORD64 S5;
+ PDWORD64 S6;
+ PDWORD64 S7;
+ PDWORD64 S8;
+ PDWORD64 S9;
+ PDWORD64 S10;
+ PDWORD64 S11;
+ PDWORD64 Fp;
+ PDWORD64 Gp;
+ PDWORD64 Tp;
+ PDWORD64 Ra;
+
+ PDWORD64 F8;
+ PDWORD64 F9;
+ PDWORD64 F18;
+ PDWORD64 F19;
+ PDWORD64 F20;
+ PDWORD64 F21;
+ PDWORD64 F22;
+ PDWORD64 F23;
+ PDWORD64 F24;
+ PDWORD64 F25;
+ PDWORD64 F26;
+ PDWORD64 F27;
+} T_KNONVOLATILE_CONTEXT_POINTERS, *PT_KNONVOLATILE_CONTEXT_POINTERS;
+
#else
#define T_CONTEXT CONTEXT
#define DAC_CS_NATIVE_DATA_SIZE 96
#elif defined(TARGET_LINUX) && defined(TARGET_S390X)
#define DAC_CS_NATIVE_DATA_SIZE 96
+#elif defined(TARGET_LINUX) && defined(TARGET_RISCV64)
+#define DAC_CS_NATIVE_DATA_SIZE 96
#elif defined(TARGET_NETBSD) && defined(TARGET_AMD64)
#define DAC_CS_NATIVE_DATA_SIZE 96
#elif defined(TARGET_NETBSD) && defined(TARGET_ARM)
#define DTCONTEXT_IS_ARM
#elif defined (TARGET_ARM64)
#define DTCONTEXT_IS_ARM64
+#elif defined (TARGET_RISCV64)
+#define DTCONTEXT_IS_RISCV64
#endif
#if defined(DTCONTEXT_IS_X86)
} DT_CONTEXT;
+#elif defined(DTCONTEXT_IS_RISCV64)
+
+#define DT_CONTEXT_RISCV64 0x01000000L
+
+#define DT_CONTEXT_CONTROL (DT_CONTEXT_RISCV64 | 0x1L)
+#define DT_CONTEXT_INTEGER (DT_CONTEXT_RISCV64 | 0x2L)
+#define DT_CONTEXT_FLOATING_POINT (DT_CONTEXT_RISCV64 | 0x4L)
+#define DT_CONTEXT_DEBUG_REGISTERS (DT_CONTEXT_RISCV64 | 0x8L)
+
+#define DT_CONTEXT_FULL (DT_CONTEXT_CONTROL | DT_CONTEXT_INTEGER | DT_CONTEXT_FLOATING_POINT)
+#define DT_CONTEXT_ALL (DT_CONTEXT_CONTROL | DT_CONTEXT_INTEGER | DT_CONTEXT_FLOATING_POINT | DT_CONTEXT_DEBUG_REGISTERS)
+
+#define DT_RISCV64_MAX_BREAKPOINTS 8
+#define DT_RISCV64_MAX_WATCHPOINTS 2
+
+typedef struct DECLSPEC_ALIGN(16) {
+ //
+ // Control flags.
+ //
+
+ /* +0x000 */ DWORD ContextFlags;
+
+ //
+ // Integer registers
+ //
+ DWORD64 R0;
+ DWORD64 Ra;
+ DWORD64 Sp;
+ DWORD64 Gp;
+ DWORD64 Tp;
+ DWORD64 T0;
+ DWORD64 T1;
+ DWORD64 T2;
+ DWORD64 Fp;
+ DWORD64 S1;
+ DWORD64 A0;
+ DWORD64 A1;
+ DWORD64 A2;
+ DWORD64 A3;
+ DWORD64 A4;
+ DWORD64 A5;
+ DWORD64 A6;
+ DWORD64 A7;
+ DWORD64 S2;
+ DWORD64 S3;
+ DWORD64 S4;
+ DWORD64 S5;
+ DWORD64 S6;
+ DWORD64 S7;
+ DWORD64 S8;
+ DWORD64 S9;
+ DWORD64 S10;
+ DWORD64 S11;
+ DWORD64 T3;
+ DWORD64 T4;
+ DWORD64 T5;
+ DWORD64 T6;
+ DWORD64 Pc;
+
+ //
+ // Floating Point Registers
+ //
+ ULONGLONG F[32];
+ DWORD Fcsr;
+} DT_CONTEXT;
+
+static_assert(sizeof(DT_CONTEXT) == sizeof(T_CONTEXT), "DT_CONTEXT size must equal the T_CONTEXT size");
+
#else
#error Unsupported platform
#endif
DECODE_EDIT_AND_CONTINUE = 0x800,
DECODE_REVERSE_PINVOKE_VAR = 0x1000,
DECODE_RETURN_KIND = 0x2000,
-#if defined(TARGET_ARM) || defined(TARGET_ARM64)
+#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
DECODE_HAS_TAILCALLS = 0x4000,
-#endif // TARGET_ARM || TARGET_ARM64
+#endif // TARGET_ARM || TARGET_ARM64 || TARGET_RISCV64
};
GC_INFO_HAS_STACK_BASE_REGISTER = 0x40,
#ifdef TARGET_AMD64
GC_INFO_WANTS_REPORT_ONLY_LEAF = 0x80,
-#elif defined(TARGET_ARM) || defined(TARGET_ARM64)
+#elif defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
GC_INFO_HAS_TAILCALLS = 0x80,
#endif // TARGET_AMD64
GC_INFO_HAS_EDIT_AND_CONTINUE_PRESERVED_SLOTS = 0x100,
bool HasMethodTableGenericsInstContext();
bool GetIsVarArg();
bool WantsReportOnlyLeaf();
-#if defined(TARGET_ARM) || defined(TARGET_ARM64)
+#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
bool HasTailCalls();
-#endif // TARGET_ARM || TARGET_ARM64
+#endif // TARGET_ARM || TARGET_ARM64 || TARGET_RISCV64
ReturnKind GetReturnKind();
UINT32 GetCodeLength();
UINT32 GetStackBaseRegister();
bool m_GenericSecretParamIsMT;
#ifdef TARGET_AMD64
bool m_WantsReportOnlyLeaf;
-#elif defined(TARGET_ARM) || defined(TARGET_ARM64)
+#elif defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
bool m_HasTailCalls;
#endif // TARGET_AMD64
INT32 m_SecurityObjectStackSlot;
// 10 RT_ByRef
// 11 RT_Unset
-#elif defined(TARGET_AMD64) || defined(TARGET_ARM64)
+#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
// Slim Header:
#define LIVESTATE_RLE_RUN_ENCBASE 2
#define LIVESTATE_RLE_SKIP_ENCBASE 4
+#elif defined(TARGET_RISCV64)
+#ifndef TARGET_POINTER_SIZE
+#define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target
+#endif
+#define NUM_NORM_CODE_OFFSETS_PER_CHUNK (64)
+#define NUM_NORM_CODE_OFFSETS_PER_CHUNK_LOG2 (6)
+#define NORMALIZE_STACK_SLOT(x) ((x)>>3) // GC Pointers are 8-bytes aligned
+#define DENORMALIZE_STACK_SLOT(x) ((x)<<3)
+#define NORMALIZE_CODE_LENGTH(x) ((x)>>2) // All Instructions are 4 bytes long
+#define DENORMALIZE_CODE_LENGTH(x) ((x)<<2)
+#define NORMALIZE_STACK_BASE_REGISTER(x) ((x)^8) // Encode Frame pointer X8 as zero
+#define DENORMALIZE_STACK_BASE_REGISTER(x) ((x)^8)
+#define NORMALIZE_SIZE_OF_STACK_AREA(x) ((x)>>3)
+#define DENORMALIZE_SIZE_OF_STACK_AREA(x) ((x)<<3)
+#define CODE_OFFSETS_NEED_NORMALIZATION 0
+#define NORMALIZE_CODE_OFFSET(x) (x) // Instructions are 4 bytes long, but the safe-point
+#define DENORMALIZE_CODE_OFFSET(x) (x) // offsets are encoded with a -1 adjustment.
+#define NORMALIZE_REGISTER(x) (x)
+#define DENORMALIZE_REGISTER(x) (x)
+#define NORMALIZE_NUM_SAFE_POINTS(x) (x)
+#define DENORMALIZE_NUM_SAFE_POINTS(x) (x)
+#define NORMALIZE_NUM_INTERRUPTIBLE_RANGES(x) (x)
+#define DENORMALIZE_NUM_INTERRUPTIBLE_RANGES(x) (x)
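+
+// For example, the frame pointer x8 normalizes to 0 (8 ^ 8 == 0), and XOR is its
+// own inverse, so DENORMALIZE_STACK_BASE_REGISTER(NORMALIZE_STACK_BASE_REGISTER(x)) == x.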
+
+#define PSP_SYM_STACK_SLOT_ENCBASE 6
+#define GENERICS_INST_CONTEXT_STACK_SLOT_ENCBASE 6
+#define SECURITY_OBJECT_STACK_SLOT_ENCBASE 6
+#define GS_COOKIE_STACK_SLOT_ENCBASE 6
+#define CODE_LENGTH_ENCBASE 8
+#define SIZE_OF_RETURN_KIND_IN_SLIM_HEADER 2
+#define SIZE_OF_RETURN_KIND_IN_FAT_HEADER 4
+#define STACK_BASE_REGISTER_ENCBASE 2
+// FP (x8) is encoded as 0 by NORMALIZE_STACK_BASE_REGISTER above
+#define SIZE_OF_STACK_AREA_ENCBASE 3
+#define SIZE_OF_EDIT_AND_CONTINUE_PRESERVED_AREA_ENCBASE 4
+#define SIZE_OF_EDIT_AND_CONTINUE_FIXED_STACK_FRAME_ENCBASE 4
+#define REVERSE_PINVOKE_FRAME_ENCBASE 6
+#define NUM_REGISTERS_ENCBASE 3
+#define NUM_STACK_SLOTS_ENCBASE 2
+#define NUM_UNTRACKED_SLOTS_ENCBASE 1
+#define NORM_PROLOG_SIZE_ENCBASE 5
+#define NORM_EPILOG_SIZE_ENCBASE 3
+#define NORM_CODE_OFFSET_DELTA_ENCBASE 3
+#define INTERRUPTIBLE_RANGE_DELTA1_ENCBASE 6
+#define INTERRUPTIBLE_RANGE_DELTA2_ENCBASE 6
+#define REGISTER_ENCBASE 3
+#define REGISTER_DELTA_ENCBASE 2
+#define STACK_SLOT_ENCBASE 6
+#define STACK_SLOT_DELTA_ENCBASE 4
+#define NUM_SAFE_POINTS_ENCBASE 3
+#define NUM_INTERRUPTIBLE_RANGES_ENCBASE 1
+#define NUM_EH_CLAUSES_ENCBASE 2
+#define POINTER_SIZE_ENCBASE 3
+#define LIVESTATE_RLE_RUN_ENCBASE 2
+#define LIVESTATE_RLE_SKIP_ENCBASE 4
+
#else
#ifndef TARGET_X86
#define IMAGE_FILE_MACHINE_NATIVE IMAGE_FILE_MACHINE_ARM64
#elif defined(TARGET_S390X)
#define IMAGE_FILE_MACHINE_NATIVE IMAGE_FILE_MACHINE_UNKNOWN
+#elif defined(TARGET_RISCV64)
+#define IMAGE_FILE_MACHINE_NATIVE IMAGE_FILE_MACHINE_RISCV64
#else
#error "port me"
#endif
};
} Arm64VolatileContextPointer;
#endif //TARGET_ARM64
+
+#if defined(TARGET_RISCV64)
+typedef struct _RiscV64VolatileContextPointer
+{
+ PDWORD64 R0;
+ PDWORD64 A0;
+ PDWORD64 A1;
+ PDWORD64 A2;
+ PDWORD64 A3;
+ PDWORD64 A4;
+ PDWORD64 A5;
+ PDWORD64 A6;
+ PDWORD64 A7;
+ PDWORD64 T0;
+ PDWORD64 T1;
+ PDWORD64 T2;
+ PDWORD64 T3;
+ PDWORD64 T4;
+ PDWORD64 T5;
+ PDWORD64 T6;
+} RiscV64VolatileContextPointer;
+#endif
+
struct REGDISPLAY : public REGDISPLAY_BASE {
#ifdef TARGET_ARM64
Arm64VolatileContextPointer volatileCurrContextPointers;
#endif
+#ifdef TARGET_RISCV64
+ RiscV64VolatileContextPointer volatileCurrContextPointers;
+#endif
+
REGDISPLAY()
{
// Initialize
return (LPVOID)((TADDR)display->pCurrentContext->R0);
#elif defined(TARGET_X86)
return (LPVOID)display->pCurrentContext->Eax;
+#elif defined(TARGET_RISCV64)
+ return (LPVOID)display->pCurrentContext->A0;
#else
PORTABILITY_ASSERT("GetRegdisplayReturnValue NYI for this platform (Regdisp.h)");
return NULL;
{
*(&pCtxPtrs->Edi + i) = (&pCtx->Edi + i);
}
-#else // TARGET_X86
+#elif defined(TARGET_RISCV64) // TARGET_X86
+ *(&pCtxPtrs->S1) = &pCtx->S1;
+ *(&pCtxPtrs->S2) = &pCtx->S2;
+ *(&pCtxPtrs->S3) = &pCtx->S3;
+ *(&pCtxPtrs->S4) = &pCtx->S4;
+ *(&pCtxPtrs->S5) = &pCtx->S5;
+ *(&pCtxPtrs->S6) = &pCtx->S6;
+ *(&pCtxPtrs->S7) = &pCtx->S7;
+ *(&pCtxPtrs->S8) = &pCtx->S8;
+ *(&pCtxPtrs->S9) = &pCtx->S9;
+ *(&pCtxPtrs->S10) = &pCtx->S10;
+ *(&pCtxPtrs->S11) = &pCtx->S11;
+ *(&pCtxPtrs->Gp) = &pCtx->Gp;
+ *(&pCtxPtrs->Tp) = &pCtx->Tp;
+ *(&pCtxPtrs->Fp) = &pCtx->Fp;
+ *(&pCtxPtrs->Ra) = &pCtx->Ra;
+#else // TARGET_RISCV64
PORTABILITY_ASSERT("FillContextPointers");
#endif // _TARGET_???_ (ELSE)
}
// Fill volatile context pointers. They can be used by GC in the case of the leaf frame
for (int i=0; i < 18; i++)
pRD->volatileCurrContextPointers.X[i] = &pctx->X[i];
-#endif // TARGET_ARM64
+#elif defined(TARGET_RISCV64) // TARGET_ARM64
+ pRD->volatileCurrContextPointers.A0 = &pctx->A0;
+ pRD->volatileCurrContextPointers.A1 = &pctx->A1;
+ pRD->volatileCurrContextPointers.A2 = &pctx->A2;
+ pRD->volatileCurrContextPointers.A3 = &pctx->A3;
+ pRD->volatileCurrContextPointers.A4 = &pctx->A4;
+ pRD->volatileCurrContextPointers.A5 = &pctx->A5;
+ pRD->volatileCurrContextPointers.A6 = &pctx->A6;
+ pRD->volatileCurrContextPointers.A7 = &pctx->A7;
+ pRD->volatileCurrContextPointers.T0 = &pctx->T0;
+ pRD->volatileCurrContextPointers.T1 = &pctx->T1;
+ pRD->volatileCurrContextPointers.T2 = &pctx->T2;
+ pRD->volatileCurrContextPointers.T3 = &pctx->T3;
+ pRD->volatileCurrContextPointers.T4 = &pctx->T4;
+ pRD->volatileCurrContextPointers.T5 = &pctx->T5;
+ pRD->volatileCurrContextPointers.T6 = &pctx->T6;
+#endif // TARGET_RISCV64
#ifdef DEBUG_REGDISPLAY
pRD->_pThread = NULL;
#elif defined(TARGET_ARM64)
_ASSERTE(regNum < 31);
    return (size_t *)&regs->X0 + regNum;
+#elif defined(TARGET_RISCV64)
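+    // The 32 integer registers R0..T6 are contiguous DWORD64 fields in CONTEXT,
+    // so indexing off R0 yields the address of register x(regNum).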
+ _ASSERTE(regNum < 32);
+    return (size_t *)&regs->R0 + regNum;
#else
_ASSERTE(!"@TODO Port - getRegAddr (Regdisp.h)");
#endif
#if defined(TARGET_X86) || defined(TARGET_ARM)
#define USE_LAZY_PREFERRED_RANGE 0
-#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_S390X) || defined(TARGET_LOONGARCH64)
+#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_S390X) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
#if defined(HOST_UNIX)
// In PAL we have a mechanism that reserves memory on start up that is
#error The Volatile type is currently only defined for Visual C++ and GNU C++
#endif
-#if defined(__GNUC__) && !defined(HOST_X86) && !defined(HOST_AMD64) && !defined(HOST_ARM) && !defined(HOST_ARM64) && !defined(HOST_S390X)
-#error The Volatile type is currently only defined for GCC when targeting x86, AMD64, ARM, ARM64, or S390X CPUs
+#if defined(__GNUC__) && !defined(HOST_X86) && !defined(HOST_AMD64) && !defined(HOST_ARM) && !defined(HOST_ARM64) && !defined(HOST_RISCV64) && !defined(HOST_S390X)
+#error The Volatile type is currently only defined for GCC when targeting x86, AMD64, ARM, ARM64, RISCV64, or S390X CPUs
#endif
#if defined(__GNUC__)
// currently don't have a cheap way to determine the number of CPUs from this header file. Revisit this if it
// turns out to be a performance issue for the uni-proc case.
#define VOLATILE_MEMORY_BARRIER() MemoryBarrier()
+#elif defined(HOST_RISCV64)
+#define VOLATILE_MEMORY_BARRIER() asm volatile ("fence rw,rw" : : : "memory")
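+// "fence rw,rw" orders every prior load/store before every subsequent load/store,
+// giving the full two-way barrier VOLATILE_MEMORY_BARRIER promises.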
#else
//
// On VC++, reorderings at the compiler and machine level are prevented by the use of the
#define _M_ARM64 1
#elif defined(__s390x__) && !defined(_M_S390X)
#define _M_S390X 1
+#elif defined(__riscv) && (__riscv_xlen == 64) && !defined(_M_RISCV64)
+#define _M_RISCV64 1
#endif
#if defined(_M_IX86) && !defined(HOST_X86)
#define HOST_ARM64
#elif defined(_M_S390X) && !defined(HOST_S390X)
#define HOST_S390X
+#elif defined(_M_RISCV64) && !defined(HOST_RISCV64)
+#define HOST_RISCV64
#endif
#endif // !_MSC_VER
} KNONVOLATILE_CONTEXT_POINTERS, *PKNONVOLATILE_CONTEXT_POINTERS;
+#elif defined(HOST_RISCV64)
+
+// Please refer to src/coreclr/pal/src/arch/riscv64/asmconstants.h
+#define CONTEXT_RISCV64 0x01000000L
+
+#define CONTEXT_CONTROL (CONTEXT_RISCV64 | 0x1)
+#define CONTEXT_INTEGER (CONTEXT_RISCV64 | 0x2)
+#define CONTEXT_FLOATING_POINT (CONTEXT_RISCV64 | 0x4)
+#define CONTEXT_DEBUG_REGISTERS (CONTEXT_RISCV64 | 0x8)
+
+#define CONTEXT_FULL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT)
+
+#define CONTEXT_ALL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT | CONTEXT_DEBUG_REGISTERS)
+
+#define CONTEXT_EXCEPTION_ACTIVE 0x8000000
+#define CONTEXT_SERVICE_ACTIVE 0x10000000
+#define CONTEXT_EXCEPTION_REQUEST 0x40000000
+#define CONTEXT_EXCEPTION_REPORTING 0x80000000
+
+//
+// This flag is set by the unwinder if it has unwound to a call
+// site, and cleared whenever it unwinds through a trap frame.
+// It is used by language-specific exception handlers to help
+// differentiate exception scopes during dispatching.
+//
+
+#define CONTEXT_UNWOUND_TO_CALL 0x20000000
+
+// begin_ntoshvp
+
+//
+// Specify the number of breakpoints and watchpoints that the OS
+// will track. The RISC-V debug spec leaves the number of hardware triggers
+// implementation-defined; these limits mirror the ARM64 defaults.
+//
+
+#define RISCV64_MAX_BREAKPOINTS 8
+#define RISCV64_MAX_WATCHPOINTS 2
+
+typedef struct DECLSPEC_ALIGN(16) _CONTEXT {
+
+ //
+ // Control flags.
+ //
+
+ /* +0x000 */ DWORD ContextFlags;
+
+ //
+ // Integer registers.
+ //
+ DWORD64 R0;
+ DWORD64 Ra;
+ DWORD64 Sp;
+ DWORD64 Gp;
+ DWORD64 Tp;
+ DWORD64 T0;
+ DWORD64 T1;
+ DWORD64 T2;
+ DWORD64 Fp;
+ DWORD64 S1;
+ DWORD64 A0;
+ DWORD64 A1;
+ DWORD64 A2;
+ DWORD64 A3;
+ DWORD64 A4;
+ DWORD64 A5;
+ DWORD64 A6;
+ DWORD64 A7;
+ DWORD64 S2;
+ DWORD64 S3;
+ DWORD64 S4;
+ DWORD64 S5;
+ DWORD64 S6;
+ DWORD64 S7;
+ DWORD64 S8;
+ DWORD64 S9;
+ DWORD64 S10;
+ DWORD64 S11;
+ DWORD64 T3;
+ DWORD64 T4;
+ DWORD64 T5;
+ DWORD64 T6;
+ DWORD64 Pc;
+
+ //
+ // Floating Point Registers
+ //
+ // TODO-RISCV64: support the SIMD.
+ ULONGLONG F[32];
+ DWORD Fcsr;
+} CONTEXT, *PCONTEXT, *LPCONTEXT;
+
+//
+// Nonvolatile context pointer record.
+//
+
+typedef struct _KNONVOLATILE_CONTEXT_POINTERS {
+
+ PDWORD64 S1;
+ PDWORD64 S2;
+ PDWORD64 S3;
+ PDWORD64 S4;
+ PDWORD64 S5;
+ PDWORD64 S6;
+ PDWORD64 S7;
+ PDWORD64 S8;
+ PDWORD64 S9;
+ PDWORD64 S10;
+ PDWORD64 S11;
+ PDWORD64 Fp;
+ PDWORD64 Gp;
+ PDWORD64 Tp;
+ PDWORD64 Ra;
+
+ PDWORD64 F8;
+ PDWORD64 F9;
+ PDWORD64 F18;
+ PDWORD64 F19;
+ PDWORD64 F20;
+ PDWORD64 F21;
+ PDWORD64 F22;
+ PDWORD64 F23;
+ PDWORD64 F24;
+ PDWORD64 F25;
+ PDWORD64 F26;
+ PDWORD64 F27;
+} KNONVOLATILE_CONTEXT_POINTERS, *PKNONVOLATILE_CONTEXT_POINTERS;
+
#elif defined(HOST_S390X)
// There is no context for s390x defined in winnt.h,
#define PAL_CS_NATIVE_DATA_SIZE 56
#elif defined(__sun) && defined(__x86_64__)
#define PAL_CS_NATIVE_DATA_SIZE 48
+#elif defined(__linux__) && defined(__riscv) && __riscv_xlen == 64
+#define PAL_CS_NATIVE_DATA_SIZE 96
#else
#warning
#error PAL_CS_NATIVE_DATA_SIZE is not defined for this architecture
return qwMask != 0;
}
-FORCEINLINE void PAL_ArmInterlockedOperationBarrier()
+FORCEINLINE void PAL_InterlockedOperationBarrier()
{
-#ifdef HOST_ARM64
+#if defined(HOST_ARM64) || defined(HOST_RISCV64)
// On arm64, most of the __sync* functions generate a code sequence like:
// loop:
// ldaxr (load acquire exclusive)
// require the load to occur after the store. This memory barrier should be used following a call to a __sync* function to
// prevent that reordering. Code generated for arm32 includes a 'dmb' after 'cbnz', so no issue there at the moment.
__sync_synchronize();
-#endif // HOST_ARM64
+#endif
}
/*++
IN OUT LONG volatile *lpAddend)
{
LONG result = __sync_add_and_fetch(lpAddend, (LONG)1);
- PAL_ArmInterlockedOperationBarrier();
+ PAL_InterlockedOperationBarrier();
return result;
}
IN OUT LONGLONG volatile *lpAddend)
{
LONGLONG result = __sync_add_and_fetch(lpAddend, (LONGLONG)1);
- PAL_ArmInterlockedOperationBarrier();
+ PAL_InterlockedOperationBarrier();
return result;
}
IN OUT LONG volatile *lpAddend)
{
LONG result = __sync_sub_and_fetch(lpAddend, (LONG)1);
- PAL_ArmInterlockedOperationBarrier();
+ PAL_InterlockedOperationBarrier();
return result;
}
IN OUT LONGLONG volatile *lpAddend)
{
LONGLONG result = __sync_sub_and_fetch(lpAddend, (LONGLONG)1);
- PAL_ArmInterlockedOperationBarrier();
+ PAL_InterlockedOperationBarrier();
return result;
}
IN LONG Value)
{
LONG result = __atomic_exchange_n(Target, Value, __ATOMIC_ACQ_REL);
- PAL_ArmInterlockedOperationBarrier();
+ PAL_InterlockedOperationBarrier();
return result;
}
IN LONGLONG Value)
{
LONGLONG result = __atomic_exchange_n(Target, Value, __ATOMIC_ACQ_REL);
- PAL_ArmInterlockedOperationBarrier();
+ PAL_InterlockedOperationBarrier();
return result;
}
Destination, /* The pointer to a variable whose value is to be compared with. */
Comperand, /* The value to be compared */
Exchange /* The value to be stored */);
- PAL_ArmInterlockedOperationBarrier();
+ PAL_InterlockedOperationBarrier();
return result;
}
Destination, /* The pointer to a variable whose value is to be compared with. */
Comperand, /* The value to be compared */
Exchange /* The value to be stored */);
- PAL_ArmInterlockedOperationBarrier();
+ PAL_InterlockedOperationBarrier();
return result;
}
IN LONG Value)
{
LONG result = __sync_fetch_and_add(Addend, Value);
- PAL_ArmInterlockedOperationBarrier();
+ PAL_InterlockedOperationBarrier();
return result;
}
IN LONGLONG Value)
{
LONGLONG result = __sync_fetch_and_add(Addend, Value);
- PAL_ArmInterlockedOperationBarrier();
+ PAL_InterlockedOperationBarrier();
return result;
}
IN LONG Value)
{
LONG result = __sync_fetch_and_and(Destination, Value);
- PAL_ArmInterlockedOperationBarrier();
+ PAL_InterlockedOperationBarrier();
return result;
}
IN LONG Value)
{
LONG result = __sync_fetch_and_or(Destination, Value);
- PAL_ArmInterlockedOperationBarrier();
+ PAL_InterlockedOperationBarrier();
return result;
}
"nop");
#elif defined(HOST_ARM) || defined(HOST_ARM64)
__asm__ __volatile__( "yield");
+#elif defined(HOST_RISCV64)
+ // TODO-RISCV64-CQ: When Zihintpause is supported, replace with `pause` instruction.
+ __asm__ __volatile__(".word 0x0100000f");
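+    // 0x0100000f is the Zihintpause PAUSE encoding (a FENCE with pred=w, succ=0),
+    // which executes as a harmless hint on cores without the extension.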
#else
return;
#endif
#define LODWORD(_qw) ((ULONG)(_qw))
#if defined(MIDL_PASS) || defined(RC_INVOKED) || defined(_M_CEE_PURE) \
- || defined(_M_AMD64) || defined(__ARM_ARCH) || defined(_M_S390X)
+ || defined(_M_AMD64) || defined(__ARM_ARCH) || defined(_M_S390X) || defined(_M_RISCV64)
#ifndef UInt32x32To64
#define UInt32x32To64(a, b) ((unsigned __int64)((ULONG)(a)) * (unsigned __int64)((ULONG)(b)))
#define IMAGE_FILE_MACHINE_ARM64 0xAA64 // ARM64 Little-Endian
#define IMAGE_FILE_MACHINE_CEE 0xC0EE
#define IMAGE_FILE_MACHINE_LOONGARCH64 0x6264 // LOONGARCH64.
+#define IMAGE_FILE_MACHINE_RISCV64 0x5064 // RISCV64
//
// Directory format.
#define IMAGE_REL_LOONGARCH64_PC 0x0003
#define IMAGE_REL_LOONGARCH64_JIR 0x0004
+//
+// RISCV64 relocation types
+//
+#define IMAGE_REL_RISCV64_PC 0x0003
+#define IMAGE_REL_RISCV64_JALR 0x0004
+
//
// CEF relocation types.
//
DWORD Reserved;
} DISPATCHER_CONTEXT, *PDISPATCHER_CONTEXT;
-#elif defined(HOST_ARM64)
+#elif defined(HOST_ARM64) || defined(HOST_RISCV64)
typedef struct _DISPATCHER_CONTEXT {
ULONG64 ControlPc;
#include "unixasmmacrosarm.inc"
#elif defined(HOST_ARM64)
#include "unixasmmacrosarm64.inc"
+#elif defined(HOST_RISCV64)
+#include "unixasmmacrosriscv64.inc"
#elif defined(HOST_S390X)
#include "unixasmmacross390x.inc"
#endif
--- /dev/null
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+.macro NESTED_ENTRY Name, Section, Handler
+ LEAF_ENTRY \Name, \Section
+ .ifnc \Handler, NoHandler
+ .cfi_personality 0x1B, C_FUNC(\Handler) // 0x1B == DW_EH_PE_pcrel | DW_EH_PE_sdata4
+ .endif
+.endm
+
+.macro NESTED_END Name, Section
+ LEAF_END \Name, \Section
+.endm
+
+.macro PATCH_LABEL Name
+ .global C_FUNC(\Name)
+C_FUNC(\Name):
+.endm
+
+.macro LEAF_ENTRY Name, Section
+ .global C_FUNC(\Name)
+ .type \Name, %function
+C_FUNC(\Name):
+ .cfi_startproc
+.endm
+
+.macro LEAF_END Name, Section
+ .size \Name, .-\Name
+ .cfi_endproc
+.endm
+
+.macro LEAF_END_MARKED Name, Section
+C_FUNC(\Name\()_End):
+ .global C_FUNC(\Name\()_End)
+ LEAF_END \Name, \Section
+ // make sure this symbol gets its own address
+ nop
+.endm
+
+.macro PREPARE_EXTERNAL_VAR Name, HelperReg
+ lla \HelperReg, \Name
+.endm
+
+.macro PROLOG_STACK_ALLOC Size
+ addi sp, sp, -\Size
+ .cfi_adjust_cfa_offset \Size
+.endm
+
+.macro EPILOG_STACK_FREE Size
+ addi sp, sp, \Size
+ .cfi_adjust_cfa_offset -\Size
+.endm
+
+.macro EPILOG_STACK_RESTORE
+ ori sp, fp, 0
+ .cfi_restore sp
+.endm
+
+.macro PROLOG_SAVE_REG reg, ofs
+ sd \reg, \ofs(sp)
+ .cfi_rel_offset \reg, \ofs
+.endm
+
+.macro PROLOG_SAVE_REG_PAIR reg1, reg2, ofs, __def_cfa_save=0
+ sd \reg1, \ofs(sp)
+ sd \reg2, (\ofs+8)(sp)
+ .cfi_rel_offset \reg1, \ofs
+ .cfi_rel_offset \reg2, \ofs + 8
+ .if (\__def_cfa_save == 1)
+ addi fp, sp, 0
+ .cfi_def_cfa_register fp
+ .endif
+.endm
+
+.macro PROLOG_SAVE_REG_PAIR_INDEXED reg1, reg2, ssize, __def_cfa_save=1
+ addi sp, sp, -\ssize
+ .cfi_adjust_cfa_offset \ssize
+
+ sd \reg1, 0(sp)
+ sd \reg2, 8(sp)
+
+ .cfi_rel_offset \reg1, 0
+ .cfi_rel_offset \reg2, 8
+ .if (\__def_cfa_save == 1)
+ addi fp, sp, 0
+ .cfi_def_cfa_register fp
+ .endif
+.endm
+
+.macro EPILOG_RESTORE_REG reg, ofs
+ ld \reg, (\ofs)(sp)
+ .cfi_restore \reg
+.endm
+
+.macro EPILOG_RESTORE_REG_PAIR reg1, reg2, ofs
+ ld \reg2, (\ofs+8)(sp)
+ ld \reg1, (\ofs)(sp)
+ .cfi_restore \reg2
+ .cfi_restore \reg1
+.endm
+
+.macro EPILOG_RESTORE_REG_PAIR_INDEXED reg1, reg2, ssize
+ ld \reg2, 8(sp)
+ ld \reg1, 0(sp)
+ .cfi_restore \reg2
+ .cfi_restore \reg1
+
+ addi sp, sp, \ssize
+ .cfi_adjust_cfa_offset -\ssize
+.endm
+
+.macro EPILOG_RETURN
+ ret
+.endm
+
+.macro EMIT_BREAKPOINT
+ ebreak
+.endm
+
+.macro EPILOG_BRANCH Target
+ j \Target
+.endm
+
+.macro EPILOG_BRANCH_REG reg
+ jr \reg
+.endm
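+
+// Illustrative usage of the macros above (hypothetical stub, not part of this
+// patch): a nested function that saves fp/ra, does its work, and returns.
+//
+//   NESTED_ENTRY ExampleStub, _TEXT, NoHandler
+//       PROLOG_SAVE_REG_PAIR_INDEXED fp, ra, 16
+//       // ... body ...
+//       EPILOG_RESTORE_REG_PAIR_INDEXED fp, ra, 16
+//       EPILOG_RETURN
+//   NESTED_END ExampleStub, _TEXT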
+
+//-----------------------------------------------------------------------------
+// The following sets of SAVE_*_REGISTERS macros expect the memory to be reserved
+// and the base address to be passed in \reg
+//
+
+// Reserve 64 bytes of memory before calling SAVE_CALLEESAVED_REGISTERS
+.macro SAVE_CALLEESAVED_REGISTERS reg, ofs
+ PROLOG_SAVE_REG_PAIR s1, s2, \ofs + 16
+ PROLOG_SAVE_REG_PAIR s3, s4, \ofs + 32
+ PROLOG_SAVE_REG_PAIR s5, s6, \ofs + 48
+ PROLOG_SAVE_REG_PAIR s7, s8, \ofs + 64
+ PROLOG_SAVE_REG_PAIR s9, s10, \ofs + 80
+    PROLOG_SAVE_REG_PAIR s11, tp, \ofs + 96
+ PROLOG_SAVE_REG gp, \ofs + 112
+.endm
+
+// Reserve 64 bytes of memory before calling SAVE_ARGUMENT_REGISTERS
+.macro SAVE_ARGUMENT_REGISTERS reg, ofs
+ sd a0, (\ofs)(\reg)
+ sd a1, (\ofs + 8)(\reg)
+ sd a2, (\ofs + 16)(\reg)
+ sd a3, (\ofs + 24)(\reg)
+ sd a4, (\ofs + 32)(\reg)
+ sd a5, (\ofs + 40)(\reg)
+ sd a6, (\ofs + 48)(\reg)
+ sd a7, (\ofs + 56)(\reg)
+.endm
+
+// Reserve 64 bytes of memory before calling SAVE_FLOAT_ARGUMENT_REGISTERS
+.macro SAVE_FLOAT_ARGUMENT_REGISTERS reg, ofs
+ fsd fa0, (\ofs)(\reg)
+ fsd fa1, (\ofs + 8)(\reg)
+ fsd fa2, (\ofs + 16)(\reg)
+ fsd fa3, (\ofs + 24)(\reg)
+ fsd fa4, (\ofs + 32)(\reg)
+ fsd fa5, (\ofs + 40)(\reg)
+ fsd fa6, (\ofs + 48)(\reg)
+ fsd fa7, (\ofs + 56)(\reg)
+.endm
+
+// Reserve 64 bytes of memory before calling SAVE_FLOAT_CALLEESAVED_REGISTERS
+.macro SAVE_FLOAT_CALLEESAVED_REGISTERS reg, ofs
+// TODO RISCV NYI
+ sw ra, 0(zero)
+.endm
+
+.macro RESTORE_CALLEESAVED_REGISTERS reg, ofs
+    EPILOG_RESTORE_REG gp, \ofs + 112
+    EPILOG_RESTORE_REG_PAIR s11, tp, \ofs + 96
+ EPILOG_RESTORE_REG_PAIR s9, s10, \ofs + 80
+ EPILOG_RESTORE_REG_PAIR s7, s8, \ofs + 64
+ EPILOG_RESTORE_REG_PAIR s5, s6, \ofs + 48
+ EPILOG_RESTORE_REG_PAIR s3, s4, \ofs + 32
+ EPILOG_RESTORE_REG_PAIR s1, s2, \ofs + 16
+.endm
+
+.macro RESTORE_ARGUMENT_REGISTERS reg, ofs
+ ld a0, (\ofs)(\reg)
+ ld a1, (\ofs + 8)(\reg)
+ ld a2, (\ofs + 16)(\reg)
+ ld a3, (\ofs + 24)(\reg)
+ ld a4, (\ofs + 32)(\reg)
+ ld a5, (\ofs + 40)(\reg)
+ ld a6, (\ofs + 48)(\reg)
+ ld a7, (\ofs + 56)(\reg)
+.endm
+
+.macro RESTORE_FLOAT_ARGUMENT_REGISTERS reg, ofs
+ fld fa0, (\ofs)(\reg)
+ fld fa1, (\ofs + 8)(\reg)
+ fld fa2, (\ofs + 16)(\reg)
+ fld fa3, (\ofs + 24)(\reg)
+ fld fa4, (\ofs + 32)(\reg)
+ fld fa5, (\ofs + 40)(\reg)
+ fld fa6, (\ofs + 48)(\reg)
+ fld fa7, (\ofs + 56)(\reg)
+.endm
+
+.macro RESTORE_FLOAT_CALLEESAVED_REGISTERS reg, ofs
+// TODO RISCV NYI
+ sw ra, 0(zero)
+.endm
+
+//-----------------------------------------------------------------------------
+// Define the prolog for a TransitionBlock-based method. This macro should be called first in the method and
+// comprises the entire prolog. The locals must be 8-byte aligned.
+//
+// Save_argument_registers:
+// GPR_a7
+// GPR_a6
+// GPR_a5
+// GPR_a4
+// GPR_a3
+// GPR_a2
+// GPR_a1
+// GPR_a0
+//
+// General Registers:
+// GPR_tp
+// GPR_s8
+// GPR_s7
+// GPR_s6
+// GPR_s5
+// GPR_s4
+// GPR_s3
+// GPR_s2
+// GPR_s1
+// GPR_s0
+// GPR_ra
+// GPR_fp
+//
+// Floating Point:
+// FPR_f27 / fs11
+// FPR_f26 / fs10
+// FPR_f25 / fs9
+// FPR_f24 / fs8
+// FPR_f23 / fs7
+// FPR_f22 / fs6
+// FPR_f21 / fs5
+// FPR_f20 / fs4
+// FPR_f19 / fs3
+// FPR_f18 / fs2
+// FPR_f9 / fs1
+// FPR_f8 / fs0
+// Extra:
+//
+.macro PROLOG_WITH_TRANSITION_BLOCK extraParameters = 0, extraLocals = 0, SaveFPRegs = 1
+ __PWTB_SaveFPArgs = \SaveFPRegs
+
+ __PWTB_FloatArgumentRegisters = \extraLocals
+
+ .if ((__PWTB_FloatArgumentRegisters % 16) != 0)
+ __PWTB_FloatArgumentRegisters = __PWTB_FloatArgumentRegisters + 8
+ .endif
+
+ __PWTB_TransitionBlock = __PWTB_FloatArgumentRegisters
+
+ .if (__PWTB_SaveFPArgs == 1)
+ __PWTB_TransitionBlock = __PWTB_TransitionBlock + SIZEOF__FloatArgumentRegisters
+ .endif
+
+
+ __PWTB_CalleeSavedRegisters = __PWTB_TransitionBlock
+ __PWTB_ArgumentRegisters = __PWTB_TransitionBlock + 120
+
+    // Including fp, ra, s1-s11, tp, gp, and (a0-a7) arguments: (1+1+11+1+1)*8 + 8*8.
+ __PWTB_StackAlloc = __PWTB_TransitionBlock + 120 + 64
+ PROLOG_STACK_ALLOC __PWTB_StackAlloc
+ PROLOG_SAVE_REG_PAIR fp, ra, __PWTB_CalleeSavedRegisters, 1
+
+    // First, spill the argument registers.
+ SAVE_ARGUMENT_REGISTERS sp, __PWTB_ArgumentRegisters
+
+    // Then, spill the callee-saved registers (sp is x2).
+ SAVE_CALLEESAVED_REGISTERS sp, __PWTB_CalleeSavedRegisters
+
+    // Save the float argument registers fa0-fa7 (f10-f17).
+ .if (__PWTB_SaveFPArgs == 1)
+ SAVE_FLOAT_ARGUMENT_REGISTERS sp, __PWTB_FloatArgumentRegisters
+ .endif
+
+.endm
+
+.macro EPILOG_WITH_TRANSITION_BLOCK_RETURN
+// TODO RISCV NYI
+ sw ra, 0(zero)
+.endm
+
+
+//-----------------------------------------------------------------------------
+// Provides a matching epilog to PROLOG_WITH_TRANSITION_BLOCK and ends by preparing for tail-calling.
+// Since this is a tail call, the argument registers are also restored.
+//
+.macro EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+ .if (__PWTB_SaveFPArgs == 1)
+ RESTORE_FLOAT_ARGUMENT_REGISTERS sp, __PWTB_FloatArgumentRegisters
+ .endif
+
+ RESTORE_CALLEESAVED_REGISTERS sp, __PWTB_CalleeSavedRegisters
+
+ RESTORE_ARGUMENT_REGISTERS sp, __PWTB_ArgumentRegisters
+
+ EPILOG_RESTORE_REG_PAIR fp, ra, __PWTB_CalleeSavedRegisters
+
+ EPILOG_STACK_FREE __PWTB_StackAlloc
+.endm
+
+// ------------------------------------------------------------------
+// Macro to generate Redirection Stubs
+//
+// $reason : reason for redirection
+// Eg. GCThreadControl
+// NOTE: If you edit this macro, make sure you update GetCONTEXTFromRedirectedStubStackFrame.
+// This function is used by both the personality routine and the debugger to retrieve the original CONTEXT.
+.macro GenerateRedirectedHandledJITCaseStub reason
+// TODO RISCV NYI
+ sw ra, 0(zero)
+.endm
+
+//-----------------------------------------------------------------------------
+// Macro used to check (in debug builds only) whether the stack is 16-byte aligned (a requirement before calling
+// out into C++/OS code). Invoke this directly after your prolog (if the stack frame size is fixed) or directly
+// before a call (if you have a frame pointer and a dynamic stack). A breakpoint will be invoked if the stack
+// is misaligned.
+//
+.macro CHECK_STACK_ALIGNMENT
+
+#ifdef _DEBUG
+ andi t4, sp, 0xf
+ beq t4, zero, 0f
+ EMIT_BREAKPOINT
+0:
+#endif
+.endm
CORDB_PLATFORM_POSIX_AMD64 = ( CORDB_PLATFORM_WINDOWS_ARM64 + 1 ) ,
CORDB_PLATFORM_POSIX_X86 = ( CORDB_PLATFORM_POSIX_AMD64 + 1 ) ,
CORDB_PLATFORM_POSIX_ARM = ( CORDB_PLATFORM_POSIX_X86 + 1 ) ,
- CORDB_PLATFORM_POSIX_ARM64 = ( CORDB_PLATFORM_POSIX_ARM + 1 )
+ CORDB_PLATFORM_POSIX_ARM64 = ( CORDB_PLATFORM_POSIX_ARM + 1 ) ,
+ CORDB_PLATFORM_POSIX_LOONGARCH64 = ( CORDB_PLATFORM_POSIX_ARM64 + 1 ) ,
+ CORDB_PLATFORM_POSIX_RISCV64 = ( CORDB_PLATFORM_POSIX_LOONGARCH64 + 1 )
} CorDebugPlatform;
REGISTER_ARM64_V28 = ( REGISTER_ARM64_V27 + 1 ) ,
REGISTER_ARM64_V29 = ( REGISTER_ARM64_V28 + 1 ) ,
REGISTER_ARM64_V30 = ( REGISTER_ARM64_V29 + 1 ) ,
- REGISTER_ARM64_V31 = ( REGISTER_ARM64_V30 + 1 )
+ REGISTER_ARM64_V31 = ( REGISTER_ARM64_V30 + 1 ) ,
+ REGISTER_RISCV64_PC = 0,
+    REGISTER_RISCV64_RA = ( REGISTER_RISCV64_PC + 1 ),
+    REGISTER_RISCV64_SP = ( REGISTER_RISCV64_RA + 1 ),
+    REGISTER_RISCV64_GP = ( REGISTER_RISCV64_SP + 1 ),
+ REGISTER_RISCV64_TP = ( REGISTER_RISCV64_GP + 1 ),
+ REGISTER_RISCV64_T0 = ( REGISTER_RISCV64_TP + 1 ),
+ REGISTER_RISCV64_T1 = ( REGISTER_RISCV64_T0 + 1 ),
+ REGISTER_RISCV64_T2 = ( REGISTER_RISCV64_T1 + 1 ),
+ REGISTER_RISCV64_FP = ( REGISTER_RISCV64_T2 + 1 ),
+ REGISTER_RISCV64_S1 = ( REGISTER_RISCV64_FP + 1 ),
+ REGISTER_RISCV64_A0 = ( REGISTER_RISCV64_S1 + 1 ),
+ REGISTER_RISCV64_A1 = ( REGISTER_RISCV64_A0 + 1 ),
+ REGISTER_RISCV64_A2 = ( REGISTER_RISCV64_A1 + 1 ),
+ REGISTER_RISCV64_A3 = ( REGISTER_RISCV64_A2 + 1 ),
+ REGISTER_RISCV64_A4 = ( REGISTER_RISCV64_A3 + 1 ),
+ REGISTER_RISCV64_A5 = ( REGISTER_RISCV64_A4 + 1 ),
+ REGISTER_RISCV64_A6 = ( REGISTER_RISCV64_A5 + 1 ),
+ REGISTER_RISCV64_A7 = ( REGISTER_RISCV64_A6 + 1 ),
+ REGISTER_RISCV64_S2 = ( REGISTER_RISCV64_A7 + 1 ),
+ REGISTER_RISCV64_S3 = ( REGISTER_RISCV64_S2 + 1 ),
+ REGISTER_RISCV64_S4 = ( REGISTER_RISCV64_S3 + 1 ),
+ REGISTER_RISCV64_S5 = ( REGISTER_RISCV64_S4 + 1 ),
+ REGISTER_RISCV64_S6 = ( REGISTER_RISCV64_S5 + 1 ),
+ REGISTER_RISCV64_S7 = ( REGISTER_RISCV64_S6 + 1 ),
+ REGISTER_RISCV64_S8 = ( REGISTER_RISCV64_S7 + 1 ),
+ REGISTER_RISCV64_S9 = ( REGISTER_RISCV64_S8 + 1 ),
+ REGISTER_RISCV64_S10 = ( REGISTER_RISCV64_S9 + 1 ),
+ REGISTER_RISCV64_S11 = ( REGISTER_RISCV64_S10 + 1 ),
+ REGISTER_RISCV64_T3 = ( REGISTER_RISCV64_S11 + 1 ),
+ REGISTER_RISCV64_T4 = ( REGISTER_RISCV64_T3 + 1 ),
+ REGISTER_RISCV64_T5 = ( REGISTER_RISCV64_T4 + 1 ),
+ REGISTER_RISCV64_T6 = ( REGISTER_RISCV64_T5 + 1 ),
+ REGISTER_RISCV64_F0 = ( REGISTER_RISCV64_T6 + 1 ),
+ REGISTER_RISCV64_F1 = ( REGISTER_RISCV64_F0 + 1 ),
+ REGISTER_RISCV64_F2 = ( REGISTER_RISCV64_F1 + 1 ),
+ REGISTER_RISCV64_F3 = ( REGISTER_RISCV64_F2 + 1 ),
+ REGISTER_RISCV64_F4 = ( REGISTER_RISCV64_F3 + 1 ),
+ REGISTER_RISCV64_F5 = ( REGISTER_RISCV64_F4 + 1 ),
+ REGISTER_RISCV64_F6 = ( REGISTER_RISCV64_F5 + 1 ),
+ REGISTER_RISCV64_F7 = ( REGISTER_RISCV64_F6 + 1 ),
+ REGISTER_RISCV64_F8 = ( REGISTER_RISCV64_F7 + 1 ),
+ REGISTER_RISCV64_F9 = ( REGISTER_RISCV64_F8 + 1 ),
+ REGISTER_RISCV64_F10 = ( REGISTER_RISCV64_F9 + 1 ),
+ REGISTER_RISCV64_F11 = ( REGISTER_RISCV64_F10 + 1 ),
+ REGISTER_RISCV64_F12 = ( REGISTER_RISCV64_F11 + 1 ),
+ REGISTER_RISCV64_F13 = ( REGISTER_RISCV64_F12 + 1 ),
+ REGISTER_RISCV64_F14 = ( REGISTER_RISCV64_F13 + 1 ),
+ REGISTER_RISCV64_F15 = ( REGISTER_RISCV64_F14 + 1 ),
+ REGISTER_RISCV64_F16 = ( REGISTER_RISCV64_F15 + 1 ),
+ REGISTER_RISCV64_F17 = ( REGISTER_RISCV64_F16 + 1 ),
+ REGISTER_RISCV64_F18 = ( REGISTER_RISCV64_F17 + 1 ),
+ REGISTER_RISCV64_F19 = ( REGISTER_RISCV64_F18 + 1 ),
+ REGISTER_RISCV64_F20 = ( REGISTER_RISCV64_F19 + 1 ),
+ REGISTER_RISCV64_F21 = ( REGISTER_RISCV64_F20 + 1 ),
+ REGISTER_RISCV64_F22 = ( REGISTER_RISCV64_F21 + 1 ),
+ REGISTER_RISCV64_F23 = ( REGISTER_RISCV64_F22 + 1 ),
+ REGISTER_RISCV64_F24 = ( REGISTER_RISCV64_F23 + 1 ),
+ REGISTER_RISCV64_F25 = ( REGISTER_RISCV64_F24 + 1 ),
+ REGISTER_RISCV64_F26 = ( REGISTER_RISCV64_F25 + 1 ),
+ REGISTER_RISCV64_F27 = ( REGISTER_RISCV64_F26 + 1 ),
+ REGISTER_RISCV64_F28 = ( REGISTER_RISCV64_F27 + 1 ),
+ REGISTER_RISCV64_F29 = ( REGISTER_RISCV64_F28 + 1 ),
+ REGISTER_RISCV64_F30 = ( REGISTER_RISCV64_F29 + 1 ),
+ REGISTER_RISCV64_F31 = ( REGISTER_RISCV64_F30 + 1 ),
+    REGISTER_RISCV64_X0 = ( REGISTER_RISCV64_F31 + 1 ), // TODO-RISCV64-CQ: Add X0 for use in debugging. Need to check.
} CorDebugRegister;
set(PAL_ARCH_SOURCES_DIR i386)
elseif(CLR_CMAKE_HOST_ARCH_S390X)
set(PAL_ARCH_SOURCES_DIR s390x)
+elseif(CLR_CMAKE_HOST_ARCH_RISCV64)
+ set(PAL_ARCH_SOURCES_DIR riscv64)
endif()
if(CLR_CMAKE_TARGET_OSX)
--- /dev/null
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+#ifndef __PAL_RISCV64_ASMCONSTANTS_H__
+#define __PAL_RISCV64_ASMCONSTANTS_H__
+
+// https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/2d865a2964fe06bfc569ab00c74e152b582ed764/riscv-dwarf.adoc
+// https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/2d865a2964fe06bfc569ab00c74e152b582ed764/riscv-cc.adoc
+
+#define CONTEXT_RISCV64 0x01000000L
+
+#define CONTEXT_CONTROL_BIT (0)
+#define CONTEXT_INTEGER_BIT (1)
+#define CONTEXT_FLOATING_POINT_BIT (2)
+#define CONTEXT_DEBUG_REGISTERS_BIT (3)
+
+#define CONTEXT_CONTROL (CONTEXT_RISCV64 | (1L << CONTEXT_CONTROL_BIT))
+#define CONTEXT_INTEGER (CONTEXT_RISCV64 | (1 << CONTEXT_INTEGER_BIT))
+#define CONTEXT_FLOATING_POINT (CONTEXT_RISCV64 | (1 << CONTEXT_FLOATING_POINT_BIT))
+#define CONTEXT_DEBUG_REGISTERS (CONTEXT_RISCV64 | (1 << CONTEXT_DEBUG_REGISTERS_BIT))
+
+#define CONTEXT_FULL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT)
+
+#define SIZEOF_RISCV64_GPR 8
+#define SIZEOF_RISCV64_FPR 8
+
+#define CONTEXT_ContextFlags 0
+#define CONTEXT_X0 CONTEXT_ContextFlags + SIZEOF_RISCV64_GPR // hardwired zero
+#define CONTEXT_X1 CONTEXT_X0 + SIZEOF_RISCV64_GPR
+#define CONTEXT_X2 CONTEXT_X1 + SIZEOF_RISCV64_GPR
+#define CONTEXT_X3 CONTEXT_X2 + SIZEOF_RISCV64_GPR
+#define CONTEXT_X4 CONTEXT_X3 + SIZEOF_RISCV64_GPR
+#define CONTEXT_X5 CONTEXT_X4 + SIZEOF_RISCV64_GPR
+#define CONTEXT_X6 CONTEXT_X5 + SIZEOF_RISCV64_GPR
+#define CONTEXT_X7 CONTEXT_X6 + SIZEOF_RISCV64_GPR
+#define CONTEXT_X8 CONTEXT_X7 + SIZEOF_RISCV64_GPR
+#define CONTEXT_X9 CONTEXT_X8 + SIZEOF_RISCV64_GPR
+#define CONTEXT_X10 CONTEXT_X9 + SIZEOF_RISCV64_GPR
+#define CONTEXT_X11 CONTEXT_X10 + SIZEOF_RISCV64_GPR
+#define CONTEXT_X12 CONTEXT_X11 + SIZEOF_RISCV64_GPR
+#define CONTEXT_X13 CONTEXT_X12 + SIZEOF_RISCV64_GPR
+#define CONTEXT_X14 CONTEXT_X13 + SIZEOF_RISCV64_GPR
+#define CONTEXT_X15 CONTEXT_X14 + SIZEOF_RISCV64_GPR
+#define CONTEXT_X16 CONTEXT_X15 + SIZEOF_RISCV64_GPR
+#define CONTEXT_X17 CONTEXT_X16 + SIZEOF_RISCV64_GPR
+#define CONTEXT_X18 CONTEXT_X17 + SIZEOF_RISCV64_GPR
+#define CONTEXT_X19 CONTEXT_X18 + SIZEOF_RISCV64_GPR
+#define CONTEXT_X20 CONTEXT_X19 + SIZEOF_RISCV64_GPR
+#define CONTEXT_X21 CONTEXT_X20 + SIZEOF_RISCV64_GPR
+#define CONTEXT_X22 CONTEXT_X21 + SIZEOF_RISCV64_GPR
+#define CONTEXT_X23 CONTEXT_X22 + SIZEOF_RISCV64_GPR
+#define CONTEXT_X24 CONTEXT_X23 + SIZEOF_RISCV64_GPR
+#define CONTEXT_X25 CONTEXT_X24 + SIZEOF_RISCV64_GPR
+#define CONTEXT_X26 CONTEXT_X25 + SIZEOF_RISCV64_GPR
+#define CONTEXT_X27 CONTEXT_X26 + SIZEOF_RISCV64_GPR
+#define CONTEXT_X28 CONTEXT_X27 + SIZEOF_RISCV64_GPR
+#define CONTEXT_X29 CONTEXT_X28 + SIZEOF_RISCV64_GPR
+#define CONTEXT_X30 CONTEXT_X29 + SIZEOF_RISCV64_GPR
+#define CONTEXT_X31 CONTEXT_X30 + SIZEOF_RISCV64_GPR
+
+#define CONTEXT_Pc CONTEXT_X31 + SIZEOF_RISCV64_GPR
+#define CONTEXT_FPU_OFFSET CONTEXT_Pc + SIZEOF_RISCV64_GPR
+#define CONTEXT_Ra CONTEXT_X1
+#define CONTEXT_Sp CONTEXT_X2
+#define CONTEXT_Gp CONTEXT_X3
+#define CONTEXT_Tp CONTEXT_X4
+#define CONTEXT_Fp CONTEXT_X8
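+
+// Note: per the RISC-V psABI, x8 is both the frame pointer (fp) and s0, so
+// CONTEXT_Fp and CONTEXT_S0 alias the same slot.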
+
+#define CONTEXT_S0 CONTEXT_X8
+#define CONTEXT_S1 CONTEXT_X9
+#define CONTEXT_S2 CONTEXT_X18
+#define CONTEXT_S3 CONTEXT_X19
+#define CONTEXT_S4 CONTEXT_X20
+#define CONTEXT_S5 CONTEXT_X21
+#define CONTEXT_S6 CONTEXT_X22
+#define CONTEXT_S7 CONTEXT_X23
+#define CONTEXT_S8 CONTEXT_X24
+#define CONTEXT_S9 CONTEXT_X25
+#define CONTEXT_S10 CONTEXT_X26
+#define CONTEXT_S11 CONTEXT_X27
+
+#define CONTEXT_A0 CONTEXT_X10
+#define CONTEXT_A1 CONTEXT_X11
+#define CONTEXT_A2 CONTEXT_X12
+#define CONTEXT_A3 CONTEXT_X13
+#define CONTEXT_A4 CONTEXT_X14
+#define CONTEXT_A5 CONTEXT_X15
+#define CONTEXT_A6 CONTEXT_X16
+#define CONTEXT_A7 CONTEXT_X17
+
+#define CONTEXT_T0 CONTEXT_X5
+#define CONTEXT_T1 CONTEXT_X6
+#define CONTEXT_T2 CONTEXT_X7
+#define CONTEXT_T3 CONTEXT_X28
+#define CONTEXT_T4 CONTEXT_X29
+#define CONTEXT_T5 CONTEXT_X30
+#define CONTEXT_T6 CONTEXT_X31
+
+#define CONTEXT_F0 0
+#define CONTEXT_F1 CONTEXT_F0 + SIZEOF_RISCV64_FPR
+#define CONTEXT_F2 CONTEXT_F1 + SIZEOF_RISCV64_FPR
+#define CONTEXT_F3 CONTEXT_F2 + SIZEOF_RISCV64_FPR
+#define CONTEXT_F4 CONTEXT_F3 + SIZEOF_RISCV64_FPR
+#define CONTEXT_F5 CONTEXT_F4 + SIZEOF_RISCV64_FPR
+#define CONTEXT_F6 CONTEXT_F5 + SIZEOF_RISCV64_FPR
+#define CONTEXT_F7 CONTEXT_F6 + SIZEOF_RISCV64_FPR
+#define CONTEXT_F8 CONTEXT_F7 + SIZEOF_RISCV64_FPR
+#define CONTEXT_F9 CONTEXT_F8 + SIZEOF_RISCV64_FPR
+#define CONTEXT_F10 CONTEXT_F9 + SIZEOF_RISCV64_FPR
+#define CONTEXT_F11 CONTEXT_F10 + SIZEOF_RISCV64_FPR
+#define CONTEXT_F12 CONTEXT_F11 + SIZEOF_RISCV64_FPR
+#define CONTEXT_F13 CONTEXT_F12 + SIZEOF_RISCV64_FPR
+#define CONTEXT_F14 CONTEXT_F13 + SIZEOF_RISCV64_FPR
+#define CONTEXT_F15 CONTEXT_F14 + SIZEOF_RISCV64_FPR
+#define CONTEXT_F16 CONTEXT_F15 + SIZEOF_RISCV64_FPR
+#define CONTEXT_F17 CONTEXT_F16 + SIZEOF_RISCV64_FPR
+#define CONTEXT_F18 CONTEXT_F17 + SIZEOF_RISCV64_FPR
+#define CONTEXT_F19 CONTEXT_F18 + SIZEOF_RISCV64_FPR
+#define CONTEXT_F20 CONTEXT_F19 + SIZEOF_RISCV64_FPR
+#define CONTEXT_F21 CONTEXT_F20 + SIZEOF_RISCV64_FPR
+#define CONTEXT_F22 CONTEXT_F21 + SIZEOF_RISCV64_FPR
+#define CONTEXT_F23 CONTEXT_F22 + SIZEOF_RISCV64_FPR
+#define CONTEXT_F24 CONTEXT_F23 + SIZEOF_RISCV64_FPR
+#define CONTEXT_F25 CONTEXT_F24 + SIZEOF_RISCV64_FPR
+#define CONTEXT_F26 CONTEXT_F25 + SIZEOF_RISCV64_FPR
+#define CONTEXT_F27 CONTEXT_F26 + SIZEOF_RISCV64_FPR
+#define CONTEXT_F28 CONTEXT_F27 + SIZEOF_RISCV64_FPR
+#define CONTEXT_F29 CONTEXT_F28 + SIZEOF_RISCV64_FPR
+#define CONTEXT_F30 CONTEXT_F29 + SIZEOF_RISCV64_FPR
+#define CONTEXT_F31 CONTEXT_F30 + SIZEOF_RISCV64_FPR
+
+#define CONTEXT_FLOAT_CONTROL_OFFSET CONTEXT_F31 + SIZEOF_RISCV64_FPR
+#define CONTEXT_Size ((CONTEXT_FPU_OFFSET + 8 + 8 + 0xf) & ~0xf)
+
+#endif
--- /dev/null
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+//
+// Implementation of _CONTEXT_CaptureContext for the RISCV64 platform.
+// This function is processor dependent. It is used by exception handling,
+// and always applies to the current thread.
+//
+
+#include "unixasmmacros.inc"
+#include "asmconstants.h"
+
+// Incoming:
+// a0: Context*
+// a1: Exception*
+//
+LEAF_ENTRY RtlRestoreContext, _TEXT
+#ifdef HAS_ASAN
+#pragma error("TODO-RISCV64-CQ: unimplemented on RISCV64 yet")
+#endif
+
+ mv t4, a0
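+    // Stash the Context* in t4 so a0 itself can be restored below; t4 is the
+    // last register reloaded from the context.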
+ lw t1, CONTEXT_ContextFlags(t4)
+ andi t1, t1, 0x1 << CONTEXT_FLOATING_POINT_BIT
+ beqz t1, LOCAL_LABEL(No_Restore_CONTEXT_FLOATING_POINT)
+
+ //64-bits FPR.
+ addi t0, t4, CONTEXT_FPU_OFFSET
+
+ fld f0, (CONTEXT_F0)(t0)
+ fld f1, (CONTEXT_F1)(t0)
+ fld f2, (CONTEXT_F2)(t0)
+ fld f3, (CONTEXT_F3)(t0)
+ fld f4, (CONTEXT_F4)(t0)
+ fld f5, (CONTEXT_F5)(t0)
+ fld f6, (CONTEXT_F6)(t0)
+ fld f7, (CONTEXT_F7)(t0)
+ fld f8, (CONTEXT_F8)(t0)
+ fld f9, (CONTEXT_F9)(t0)
+ fld f10, (CONTEXT_F10)(t0)
+ fld f11, (CONTEXT_F11)(t0)
+ fld f12, (CONTEXT_F12)(t0)
+ fld f13, (CONTEXT_F13)(t0)
+ fld f14, (CONTEXT_F14)(t0)
+ fld f15, (CONTEXT_F15)(t0)
+ fld f16, (CONTEXT_F16)(t0)
+ fld f17, (CONTEXT_F17)(t0)
+ fld f18, (CONTEXT_F18)(t0)
+ fld f19, (CONTEXT_F19)(t0)
+ fld f20, (CONTEXT_F20)(t0)
+ fld f21, (CONTEXT_F21)(t0)
+ fld f22, (CONTEXT_F22)(t0)
+ fld f23, (CONTEXT_F23)(t0)
+ fld f24, (CONTEXT_F24)(t0)
+ fld f25, (CONTEXT_F25)(t0)
+ fld f26, (CONTEXT_F26)(t0)
+ fld f27, (CONTEXT_F27)(t0)
+ fld f28, (CONTEXT_F28)(t0)
+ fld f29, (CONTEXT_F29)(t0)
+ fld f30, (CONTEXT_F30)(t0)
+ fld f31, (CONTEXT_F31)(t0)
+
+ lw t1, (CONTEXT_FLOAT_CONTROL_OFFSET)(t0)
+ fscsr x0, t1
+
+LOCAL_LABEL(No_Restore_CONTEXT_FLOATING_POINT):
+
+ lw t1, CONTEXT_ContextFlags(t4)
+ andi t1, t1, 0x1 << CONTEXT_INTEGER_BIT
+ beqz t1, LOCAL_LABEL(No_Restore_CONTEXT_INTEGER)
+
+ ld tp, (CONTEXT_Tp)(a0)
+ ld gp, (CONTEXT_Gp)(a0)
+ ld a1, (CONTEXT_A1)(a0)
+ ld a2, (CONTEXT_A2)(a0)
+ ld a3, (CONTEXT_A3)(a0)
+ ld a4, (CONTEXT_A4)(a0)
+ ld a5, (CONTEXT_A5)(a0)
+ ld a6, (CONTEXT_A6)(a0)
+ ld a7, (CONTEXT_A7)(a0)
+ ld t0, (CONTEXT_T0)(a0)
+ ld t1, (CONTEXT_T1)(a0)
+ ld t2, (CONTEXT_T2)(a0)
+ ld t3, (CONTEXT_T3)(a0)
+ ld t5, (CONTEXT_T5)(a0)
+ ld t6, (CONTEXT_T6)(a0)
+
+ ld s1, (CONTEXT_S1)(a0)
+ ld s2, (CONTEXT_S2)(a0)
+ ld s3, (CONTEXT_S3)(a0)
+ ld s4, (CONTEXT_S4)(a0)
+ ld s5, (CONTEXT_S5)(a0)
+ ld s6, (CONTEXT_S6)(a0)
+ ld s7, (CONTEXT_S7)(a0)
+ ld s8, (CONTEXT_S8)(a0)
+ ld s9, (CONTEXT_S9)(a0)
+ ld s10, (CONTEXT_S10)(a0)
+ ld s11, (CONTEXT_S11)(a0)
+
+ ld a0, (CONTEXT_A0)(a0)
+
+LOCAL_LABEL(No_Restore_CONTEXT_INTEGER):
+
+ lw t1, CONTEXT_ContextFlags(t4)
+ andi t1, t1, 0x1 << CONTEXT_CONTROL_BIT
+ beqz t1, LOCAL_LABEL(No_Restore_CONTEXT_CONTROL)
+
+ ld ra, (CONTEXT_Ra)(t4)
+ ld fp, (CONTEXT_Fp)(t4)
+ ld sp, (CONTEXT_Sp)(t4)
+ ld t1, (CONTEXT_Pc)(t4) // Since we cannot control $pc directly, we're going to corrupt t1
+ ld t4, (CONTEXT_T4)(t4)
+ jr t1
+
+LOCAL_LABEL(No_Restore_CONTEXT_CONTROL):
+ ld t4, (CONTEXT_T4)(t4)
+ ret
+LEAF_END RtlRestoreContext, _TEXT
+
+// Incoming:
+// a0: Context*
+
+LEAF_ENTRY RtlCaptureContext, _TEXT
+ PROLOG_STACK_ALLOC 16
+ sd t1, 0(sp)
+ li t1, CONTEXT_FULL
+ sw t1, CONTEXT_ContextFlags(a0)
+ ld t1, 0(sp)
+ EPILOG_STACK_FREE 16
+ tail CONTEXT_CaptureContext
+LEAF_END RtlCaptureContext, _TEXT
+
+// Incoming:
+// a0: Context*
+//
+
+LEAF_ENTRY CONTEXT_CaptureContext, _TEXT
+ PROLOG_STACK_ALLOC 24
+ sd t0, 0(sp)
+ sd t1, 8(sp)
+ sd t3, 16(sp)
+
+ lw t1, CONTEXT_ContextFlags(a0)
+ li t0, CONTEXT_CONTROL
+ and t3, t1, t0
+ bne t3, t0, LOCAL_LABEL(Done_CONTEXT_CONTROL)
+
+ addi t0, sp, 24
+ sd fp, CONTEXT_Fp(a0)
+ sd t0, CONTEXT_Sp(a0)
+ sd ra, CONTEXT_Ra(a0)
+ sd ra, CONTEXT_Pc(a0)
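+    // ra is stored as the captured Pc as well, so the captured context resumes
+    // at the call site.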
+
+LOCAL_LABEL(Done_CONTEXT_CONTROL):
+
+ li t0, CONTEXT_INTEGER
+ and t3, t1, t0
+ bne t3, t0, LOCAL_LABEL(Done_CONTEXT_INTEGER)
+
+ ld t0, 0(sp)
+ ld t1, 8(sp)
+ ld t3, 16(sp)
+
+ sd tp, (CONTEXT_Tp)(a0)
+ sd gp, (CONTEXT_Gp)(a0)
+ sd a0, (CONTEXT_A0)(a0)
+ sd a1, (CONTEXT_A1)(a0)
+ sd a2, (CONTEXT_A2)(a0)
+ sd a3, (CONTEXT_A3)(a0)
+ sd a4, (CONTEXT_A4)(a0)
+ sd a5, (CONTEXT_A5)(a0)
+ sd a6, (CONTEXT_A6)(a0)
+ sd a7, (CONTEXT_A7)(a0)
+ sd t0, (CONTEXT_T0)(a0)
+ sd t1, (CONTEXT_T1)(a0)
+ sd t2, (CONTEXT_T2)(a0)
+ sd t3, (CONTEXT_T3)(a0)
+ sd t4, (CONTEXT_T4)(a0)
+ sd t5, (CONTEXT_T5)(a0)
+ sd t6, (CONTEXT_T6)(a0)
+
+ sd s1, (CONTEXT_S1)(a0)
+ sd s2, (CONTEXT_S2)(a0)
+ sd s3, (CONTEXT_S3)(a0)
+ sd s4, (CONTEXT_S4)(a0)
+ sd s5, (CONTEXT_S5)(a0)
+ sd s6, (CONTEXT_S6)(a0)
+ sd s7, (CONTEXT_S7)(a0)
+ sd s8, (CONTEXT_S8)(a0)
+ sd s9, (CONTEXT_S9)(a0)
+ sd s10, (CONTEXT_S10)(a0)
+ sd s11, (CONTEXT_S11)(a0)
+
+LOCAL_LABEL(Done_CONTEXT_INTEGER):
+ lw t1, CONTEXT_ContextFlags(a0)
+
+ li t0, CONTEXT_FLOATING_POINT
+ and t3, t1, t0
+ bne t3, t0, LOCAL_LABEL(Done_CONTEXT_FLOATING_POINT)
+
+ addi a0, a0, CONTEXT_FPU_OFFSET
+
+ fsd f0, (CONTEXT_F0)(a0)
+ fsd f1, (CONTEXT_F1)(a0)
+ fsd f2, (CONTEXT_F2)(a0)
+ fsd f3, (CONTEXT_F3)(a0)
+ fsd f4, (CONTEXT_F4)(a0)
+ fsd f5, (CONTEXT_F5)(a0)
+ fsd f6, (CONTEXT_F6)(a0)
+ fsd f7, (CONTEXT_F7)(a0)
+ fsd f8, (CONTEXT_F8)(a0)
+ fsd f9, (CONTEXT_F9)(a0)
+ fsd f10, (CONTEXT_F10)(a0)
+ fsd f11, (CONTEXT_F11)(a0)
+ fsd f12, (CONTEXT_F12)(a0)
+ fsd f13, (CONTEXT_F13)(a0)
+ fsd f14, (CONTEXT_F14)(a0)
+ fsd f15, (CONTEXT_F15)(a0)
+ fsd f16, (CONTEXT_F16)(a0)
+ fsd f17, (CONTEXT_F17)(a0)
+ fsd f18, (CONTEXT_F18)(a0)
+ fsd f19, (CONTEXT_F19)(a0)
+ fsd f20, (CONTEXT_F20)(a0)
+ fsd f21, (CONTEXT_F21)(a0)
+ fsd f22, (CONTEXT_F22)(a0)
+ fsd f23, (CONTEXT_F23)(a0)
+ fsd f24, (CONTEXT_F24)(a0)
+ fsd f25, (CONTEXT_F25)(a0)
+ fsd f26, (CONTEXT_F26)(a0)
+ fsd f27, (CONTEXT_F27)(a0)
+ fsd f28, (CONTEXT_F28)(a0)
+ fsd f29, (CONTEXT_F29)(a0)
+ fsd f30, (CONTEXT_F30)(a0)
+ fsd f31, (CONTEXT_F31)(a0)
+
+ frcsr t0
+ sd t0, (CONTEXT_FLOAT_CONTROL_OFFSET)(a0)
+
+LOCAL_LABEL(Done_CONTEXT_FLOATING_POINT):
+
+ EPILOG_STACK_FREE 24
+ ret
+LEAF_END CONTEXT_CaptureContext, _TEXT
--- /dev/null
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+#include "unixasmmacros.inc"
+
+LEAF_ENTRY DBG_DebugBreak, _TEXT
+ EMIT_BREAKPOINT
+LEAF_END_MARKED DBG_DebugBreak, _TEXT
--- /dev/null
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+/*++
+
+
+
+Module Name:
+
+ processor.cpp
+
+Abstract:
+
+    Implementation of processor related functions for the RISCV64
+ platform. These functions are processor dependent.
+
+
+
+--*/
+
+#include "pal/palinternal.h"
#endif // __APPLE__
}
+#ifndef TARGET_RISCV64
// There is no API to get the total virtual address space size on
// Unix, so we use a constant value representing 128TB, which is
// the approximate size of total user virtual address space on
// the currently supported Unix systems.
- static const UINT64 _128TB = (1ull << 47);
- lpBuffer->ullTotalVirtual = _128TB;
+ static const UINT64 VMSize = (1ull << 47);
+#else // TARGET_RISCV64
+    // For the RISC-V Linux kernel, the SV39 virtual memory limit is 256GB.
+ static const UINT64 VMSize = (1ull << 38);
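+    // (1ull << 38 = 256GB: user space is the lower half of the 39-bit SV39 range.)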
+#endif // TARGET_RISCV64
+ lpBuffer->ullTotalVirtual = VMSize;
lpBuffer->ullAvailVirtual = lpBuffer->ullAvailPhys;
LOGEXIT("GlobalMemoryStatusEx returns %d\n", fRetVal);