_snprintf_s(szRegName, ARRAY_SIZE(szRegName), sizeof(szRegName), "r%u", regnum);
return szRegName;
+#elif defined(TARGET_LOONGARCH64)
+ switch (regnum)
+ {
+ case 0: return "r0";
+ case 1: return "ra";
+ case 2: return "tp";
+ case 3: return "sp";
+ case 4: return "a0";
+ case 5: return "a1";
+ case 6: return "a2";
+ case 7: return "a3";
+ case 8: return "a4";
+ case 9: return "a5";
+ case 10: return "a6";
+ case 11: return "a7";
+ case 12: return "t0";
+ case 13: return "t1";
+ case 14: return "t2";
+ case 15: return "t3";
+ case 16: return "t4";
+ case 17: return "t5";
+ case 18: return "t6";
+ case 19: return "t7";
+ case 20: return "t8";
+ case 21: return "x0";
+ case 22: return "fp";
+ case 23: return "s0";
+ case 24: return "s1";
+ case 25: return "s2";
+ case 26: return "s3";
+ case 27: return "s4";
+ case 28: return "s5";
+ case 29: return "s6";
+ case 30: return "s7";
+ case 31: return "s8";
+ case 32: return "pc";
+ }
+
+ return "???";
#elif defined(TARGET_RISCV64)
switch (regnum)
{
| DECODE_GC_LIFETIMES
| DECODE_PROLOG_LENGTH
| DECODE_RETURN_KIND
-#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
+#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64) || defined(TARGET_LOONGARCH64)
| DECODE_HAS_TAILCALLS
#endif
),
#ifdef TARGET_AMD64
gcPrintf("Wants Report Only Leaf: %u\n", hdrdecoder.WantsReportOnlyLeaf());
-#elif defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
+#elif defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64) || defined(TARGET_LOONGARCH64)
gcPrintf("Has tailcalls: %u\n", hdrdecoder.HasTailCalls());
#endif // TARGET_AMD64
#ifdef FIXED_STACK_PARAMETER_SCRATCH_AREA
int hasStackBaseRegister = headerFlags & GC_INFO_HAS_STACK_BASE_REGISTER;
#ifdef TARGET_AMD64
m_WantsReportOnlyLeaf = ((headerFlags & GC_INFO_WANTS_REPORT_ONLY_LEAF) != 0);
-#elif defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
+#elif defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
m_HasTailCalls = ((headerFlags & GC_INFO_HAS_TAILCALLS) != 0);
#endif // TARGET_AMD64
int hasSizeOfEditAndContinuePreservedArea = headerFlags & GC_INFO_HAS_EDIT_AND_CONTINUE_PRESERVED_SLOTS;
if(m_NumSafePoints == 0)
return false;
-#if defined(TARGET_AMD64) || defined(TARGET_ARM) || defined(TARGET_ARM64)|| defined(TARGET_RISCV64)
+#if defined(TARGET_AMD64) || defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
// Safepoints are encoded with a -1 adjustment
codeOffset--;
#endif
const UINT32 numBitsPerOffset = CeilOfLog2(NORMALIZE_CODE_OFFSET(m_CodeLength));
UINT32 result = m_NumSafePoints;
-#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
+#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
// Safepoints are encoded with a -1 adjustment
// but normalizing them masks off the low order bit
// Thus only bother looking if the address is odd
UINT32 normOffset = (UINT32)m_Reader.Read(numBitsPerOffset);
UINT32 offset = DENORMALIZE_CODE_OFFSET(normOffset) + 2;
-#if defined(TARGET_AMD64) || defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
+#if defined(TARGET_AMD64) || defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
// Safepoints are encoded with a -1 adjustment
offset--;
#endif
return m_IsVarArg;
}
-#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
+#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
bool GcInfoDecoder::HasTailCalls()
{
_ASSERTE( m_Flags & DECODE_HAS_TAILCALLS );
return m_HasTailCalls;
}
-#endif // TARGET_ARM || TARGET_ARM64 || TARGET_RISCV64
+#endif // TARGET_ARM || TARGET_ARM64 || TARGET_LOONGARCH64 || TARGET_RISCV64
bool GcInfoDecoder::WantsReportOnlyLeaf()
{
}
#endif // TARGET_UNIX && !FEATURE_REDHAWK
+#elif defined(TARGET_LOONGARCH64)
+
+#if defined(TARGET_UNIX) && !defined(FEATURE_REDHAWK)
+OBJECTREF* GcInfoDecoder::GetCapturedRegister(
+ int regNum,
+ PREGDISPLAY pRD
+ )
+{
+ _ASSERTE(regNum >= 1 && regNum <= 31);
+
+ // The fields of CONTEXT are in the same order as
+ // the processor encoding numbers.
+
+ DWORD64 *pR0 = &pRD->pCurrentContext->R0;
+
+ return (OBJECTREF*)(pR0 + regNum);
+}
+#endif // TARGET_UNIX && !FEATURE_REDHAWK
+
+OBJECTREF* GcInfoDecoder::GetRegisterSlot(
+ int regNum,
+ PREGDISPLAY pRD
+ )
+{
+ _ASSERTE((regNum == 1) || (regNum >= 4 && regNum <= 31));
+
+#ifdef FEATURE_REDHAWK
+ PTR_UIntNative* ppReg = &pRD->pR0;
+
+ return (OBJECTREF*)*(ppReg + regNum);
+#else
+ if(regNum == 1)
+ {
+ return (OBJECTREF*) pRD->pCurrentContextPointers->Ra;
+ }
+ else if (regNum < 22)
+ {
+ return (OBJECTREF*)*(DWORD64**)(&pRD->volatileCurrContextPointers.A0 + (regNum - 4));
+ }
+ else if(regNum == 22)
+ {
+ return (OBJECTREF*) pRD->pCurrentContextPointers->Fp;
+ }
+ return (OBJECTREF*)*(DWORD64**)(&pRD->pCurrentContextPointers->S0 + (regNum-23));
+#endif
+}
+
+bool GcInfoDecoder::IsScratchRegister(int regNum, PREGDISPLAY pRD)
+{
+ _ASSERTE(regNum >= 0 && regNum <= 31);
+
+ return (regNum <= 21 && ((regNum >= 4) || (regNum == 1)));
+}
+
+bool GcInfoDecoder::IsScratchStackSlot(INT32 spOffset, GcStackSlotBase spBase, PREGDISPLAY pRD)
+{
+#ifdef FIXED_STACK_PARAMETER_SCRATCH_AREA
+ _ASSERTE( m_Flags & DECODE_GC_LIFETIMES );
+
+ TADDR pSlot = (TADDR) GetStackSlot(spOffset, spBase, pRD);
+ _ASSERTE(pSlot >= pRD->SP);
+
+ return (pSlot < pRD->SP + m_SizeOfStackOutgoingAndScratchArea);
+#else
+ return FALSE;
+#endif
+}
+
+void GcInfoDecoder::ReportRegisterToGC(
+ int regNum,
+ unsigned gcFlags,
+ PREGDISPLAY pRD,
+ unsigned flags,
+ GCEnumCallback pCallBack,
+ void * hCallBack)
+{
+ GCINFODECODER_CONTRACT;
+
+ _ASSERTE(regNum > 0 && regNum <= 31);
+
+ LOG((LF_GCROOTS, LL_INFO1000, "Reporting " FMT_REG, regNum ));
+
+ OBJECTREF* pObjRef = GetRegisterSlot( regNum, pRD );
+#if defined(TARGET_UNIX) && !defined(FEATURE_REDHAWK) && !defined(SOS_TARGET_LOONGARCH64)
+    // On PAL, we don't always have the context pointers available due to
+    // a limitation of the unwinding library. In that case, the context
+    // pointers for some nonvolatile registers are NULL.
+    // When that happens, we let pObjRef point to the captured register
+    // value in the context and pin the object itself.
+ if (pObjRef == NULL)
+ {
+ // Report a pinned object to GC only in the promotion phase when the
+ // GC is scanning roots.
+ GCCONTEXT* pGCCtx = (GCCONTEXT*)(hCallBack);
+ if (!pGCCtx->sc->promotion)
+ {
+ return;
+ }
+
+ pObjRef = GetCapturedRegister(regNum, pRD);
+
+ gcFlags |= GC_CALL_PINNED;
+ }
+#endif // TARGET_UNIX && !FEATURE_REDHAWK && !SOS_TARGET_LOONGARCH64
+
+#ifdef _DEBUG
+ if(IsScratchRegister(regNum, pRD))
+ {
+ // Scratch registers cannot be reported for non-leaf frames
+ _ASSERTE(flags & ActiveStackFrame);
+ }
+
+ LOG((LF_GCROOTS, LL_INFO1000, /* Part Two */
+ "at" FMT_ADDR "as ", DBG_ADDR(pObjRef) ));
+
+ VALIDATE_ROOT((gcFlags & GC_CALL_INTERIOR), hCallBack, pObjRef);
+
+ LOG_PIPTR(pObjRef, gcFlags, hCallBack);
+#endif //_DEBUG
+
+ gcFlags |= CHECK_APP_DOMAIN;
+
+ pCallBack(hCallBack, pObjRef, gcFlags DAC_ARG(DacSlotLocation(regNum, 0, false)));
+}
+
#elif defined(TARGET_RISCV64)
#if defined(TARGET_UNIX) && !defined(FEATURE_REDHAWK)
int esp = 13;
#elif defined(TARGET_ARM64)
int esp = 31;
+#elif defined(TARGET_LOONGARCH64)
+ int esp = 3;
#elif defined(TARGET_RISCV64)
int esp = 2;
#endif
REG(Lr, Lr),
{ FIELD_OFFSET(T_CONTEXT, Sp) },
#undef REG
+#elif defined(TARGET_LOONGARCH64)
+#undef REG
+#define REG(reg, field) { FIELD_OFFSET(T_KNONVOLATILE_CONTEXT_POINTERS, field) }
+#define vREG(reg, field) { FIELD_OFFSET(LoongArch64VolatileContextPointer, field) }
+ vREG(zero, R0),
+ REG(ra, Ra),
+ REG(tp, Tp),
+ { FIELD_OFFSET(T_CONTEXT, Sp) },
+ vREG(a0, A0),
+ vREG(a1, A1),
+ vREG(a2, A2),
+ vREG(a3, A3),
+ vREG(a4, A4),
+ vREG(a5, A5),
+ vREG(a6, A6),
+ vREG(a7, A7),
+ vREG(t0, T0),
+ vREG(t1, T1),
+ vREG(t2, T2),
+ vREG(t3, T3),
+ vREG(t4, T4),
+ vREG(t5, T5),
+ vREG(t6, T6),
+ vREG(t7, T7),
+ vREG(t8, T8),
+ vREG(x0, X0),
+ REG(fp, Fp),
+ REG(s0, S0),
+ REG(s1, S1),
+ REG(s2, S2),
+ REG(s3, S3),
+ REG(s4, S4),
+ REG(s5, S5),
+ REG(s6, S6),
+ REG(s7, S7),
+ REG(s8, S8),
+#undef vREG
+#undef REG
#elif defined(TARGET_RISCV64)
#undef REG
#define REG(reg, field) { FIELD_OFFSET(T_KNONVOLATILE_CONTEXT_POINTERS, field) }
#elif defined(TARGET_ARM)
iSPRegister = (FIELD_OFFSET(T_CONTEXT, Sp) - FIELD_OFFSET(T_CONTEXT, R0)) / sizeof(ULONG);
UINT iBFRegister = m_StackBaseRegister;
+#elif defined(TARGET_LOONGARCH64)
+ iSPRegister = (FIELD_OFFSET(T_CONTEXT, Sp) - FIELD_OFFSET(T_CONTEXT, R0)) / sizeof(ULONGLONG);
#elif defined(TARGET_RISCV64)
iSPRegister = (FIELD_OFFSET(T_CONTEXT, Sp) - FIELD_OFFSET(T_CONTEXT, R0)) / sizeof(ULONGLONG);
#endif
-#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
+#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64) || defined(TARGET_LOONGARCH64)
BYTE* pContext = (BYTE*)&(pRD->volatileCurrContextPointers);
#else
BYTE* pContext = (BYTE*)pRD->pCurrentContext;
{
break;
}
+#elif defined(TARGET_LOONGARCH64)
+ bool isVolatile = (iReg == 0 || (iReg >= 4 && iReg <= 21));
+ if (ctx == 0)
+ {
+ if (!isVolatile)
+ {
+ continue;
+ }
+ }
+ else if (isVolatile) // skip volatile registers for second context
+ {
+ continue;
+ }
#elif defined(TARGET_RISCV64)
bool isVolatile = (iReg == 0 || (iReg >= 5 && iReg <= 7) || (iReg >= 10 && iReg <= 17) || iReg >= 28);
if (ctx == 0)
pReg = *(SIZE_T**)((BYTE*)pRD->pCurrentContextPointers + rgRegisters[iEncodedReg].cbContextOffset);
}
-#elif defined(TARGET_ARM64) || defined(TARGET_RISCV64)
+#elif defined(TARGET_ARM64) || defined(TARGET_RISCV64) || defined(TARGET_LOONGARCH64)
pReg = *(SIZE_T**)(pContext + rgRegisters[iReg].cbContextOffset);
if (iEncodedReg == iSPRegister)
{
GcStackSlotBase base;
if (iSPRegister == iEncodedReg)
{
-#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
+#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64) || defined(TARGET_LOONGARCH64)
base = GC_SP_REL;
#else
if (0 == ctx)
base = GC_SP_REL;
else
base = GC_CALLER_SP_REL;
-#endif //defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
+#endif //defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64) || defined(TARGET_LOONGARCH64)
}
else
{
}
}
-#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
+#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64) || defined(TARGET_LOONGARCH64)
pContext = (BYTE*)pRD->pCurrentContextPointers;
#else
pContext = (BYTE*)pRD->pCallerContext;
{
*(ppVolatileReg+iReg) = &regdisp.pCurrentContext->X0 + iReg;
}
+#elif defined(TARGET_LOONGARCH64)
+ FILL_REGS(pCurrentContext->R0, 33);
+ FILL_REGS(pCallerContext->R0, 33);
+
+ regdisp.pCurrentContextPointers = &regdisp.ctxPtrsOne;
+ regdisp.pCallerContextPointers = &regdisp.ctxPtrsTwo;
+
+ ULONG64 **ppCurrentReg = &regdisp.pCurrentContextPointers->S0;
+ ULONG64 **ppCallerReg = &regdisp.pCallerContextPointers->S0;
+
+ // Set S0-S8
+ for (iReg = 0; iReg < 9; iReg++)
+ {
+ *(ppCurrentReg + iReg) = &regdisp.pCurrentContext->S0 + iReg;
+ *(ppCallerReg + iReg) = &regdisp.pCallerContext->S0 + iReg;
+ }
+
+ // Set Ra, Tp, Fp
+ regdisp.pCurrentContextPointers->Ra = &regdisp.pCurrentContext->Ra;
+ regdisp.pCallerContextPointers->Ra = &regdisp.pCallerContext->Ra;
+ regdisp.pCurrentContextPointers->Tp = &regdisp.pCurrentContext->Tp;
+ regdisp.pCallerContextPointers->Tp = &regdisp.pCallerContext->Tp;
+ regdisp.pCurrentContextPointers->Fp = &regdisp.pCurrentContext->Fp;
+ regdisp.pCallerContextPointers->Fp = &regdisp.pCallerContext->Fp;
+
+ ULONG64 **ppVolatileReg = &regdisp.volatileCurrContextPointers.A0;
+ for (iReg = 0; iReg < 18; iReg++)
+ {
+ *(ppVolatileReg+iReg) = &regdisp.pCurrentContext->A0 + iReg;
+ }
+ regdisp.volatileCurrContextPointers.R0 = &regdisp.pCurrentContext->R0;
#elif defined(TARGET_RISCV64)
FILL_REGS(pCurrentContext->R0, 33);
FILL_REGS(pCallerContext->R0, 33);
regdisp.volatileCurrContextPointers.T5 = &regdisp.pCurrentContext->T5;
regdisp.volatileCurrContextPointers.T6 = &regdisp.pCurrentContext->T6;
#else
-PORTABILITY_ASSERT("GcInfoDumper::EnumerateStateChanges is not implemented on this platform.")
+PORTABILITY_ASSERT("GcInfoDumper::EnumerateStateChanges is not implemented on this platform.");
#endif
#undef FILL_REGS
(GcInfoDecoderFlags)( DECODE_SECURITY_OBJECT
| DECODE_CODE_LENGTH
| DECODE_VARARG
-#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
+#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
| DECODE_HAS_TAILCALLS
-#endif // TARGET_ARM || TARGET_ARM64 || TARGET_RISCV64
+#endif // TARGET_ARM || TARGET_ARM64 || TARGET_LOONGARCH64 || TARGET_RISCV64
| DECODE_INTERRUPTIBILITY),
offset);
#ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
UINT32 safePointOffset = offset;
-#if defined(TARGET_AMD64) || defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
+#if defined(TARGET_AMD64) || defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64) || defined(TARGET_LOONGARCH64)
safePointOffset++;
#endif
if(safePointDecoder.IsSafePoint(safePointOffset))
#endif
+#ifdef TARGET_LOONGARCH64
+#include "daccess.h"
+
+#define UNW_FLAG_NHANDLER 0x0 /* any handler */
+#define UNW_FLAG_EHANDLER 0x1 /* filter handler */
+#define UNW_FLAG_UHANDLER 0x2 /* unwind handler */
+
+// This function returns the RVA of the end of the function (exclusive, so one byte after the actual end)
+// using the unwind info on LOONGARCH64. (see ExternalAPIs\Win9CoreSystem\inc\winnt.h)
+FORCEINLINE
+ULONG64
+RtlpGetFunctionEndAddress (
+ _In_ PT_RUNTIME_FUNCTION FunctionEntry,
+ _In_ ULONG64 ImageBase
+ )
+{
+ ULONG64 FunctionLength = FunctionEntry->UnwindData;
+
+ if ((FunctionLength & 3) != 0) {
+ FunctionLength = (FunctionLength >> 2) & 0x7ff;
+ } else {
+ FunctionLength = *(PTR_ULONG64)(ImageBase + FunctionLength) & 0x3ffff;
+ }
+
+ return FunctionEntry->BeginAddress + 4 * FunctionLength;
+}
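+
+// Illustrative decode (a sketch with made-up values, not taken from real unwind data):
+// for a packed entry with UnwindData = 0x805, the flag bits are 0x805 & 3 == 1, so
+// FunctionLength = (0x805 >> 2) & 0x7ff = 0x201 instructions and the end address is
+// BeginAddress + 4 * 0x201. For a non-packed entry (low two bits zero), UnwindData is
+// the RVA of an unwind-info record whose first word carries FunctionLength in its low 18 bits.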
+
+#define RUNTIME_FUNCTION__BeginAddress(FunctionEntry) ((FunctionEntry)->BeginAddress)
+#define RUNTIME_FUNCTION__SetBeginAddress(FunctionEntry,address) ((FunctionEntry)->BeginAddress = (address))
+
+#define RUNTIME_FUNCTION__EndAddress(FunctionEntry, ImageBase) (RtlpGetFunctionEndAddress(FunctionEntry, (ULONG64)(ImageBase)))
+
+#define RUNTIME_FUNCTION__SetUnwindInfoAddress(prf,address) do { (prf)->UnwindData = (address); } while (0)
+
+typedef struct _UNWIND_INFO {
+ // dummy
+} UNWIND_INFO, *PUNWIND_INFO;
+
+EXTERN_C
+NTSYSAPI
+PEXCEPTION_ROUTINE
+NTAPI
+RtlVirtualUnwind(
+ IN ULONG HandlerType,
+ IN ULONG64 ImageBase,
+ IN ULONG64 ControlPc,
+ IN PRUNTIME_FUNCTION FunctionEntry,
+ IN OUT PCONTEXT ContextRecord,
+ OUT PVOID *HandlerData,
+ OUT PULONG64 EstablisherFrame,
+ IN OUT PKNONVOLATILE_CONTEXT_POINTERS ContextPointers OPTIONAL
+ );
+
+#endif // TARGET_LOONGARCH64
+
#ifdef TARGET_RISCV64
#include "daccess.h"
#endif // TARGET_ARM64 && !HOST_ARM64
+#elif defined(HOST_AMD64) && defined(TARGET_LOONGARCH64) // Host amd64 managing LOONGARCH64 related code
+
+#ifndef CROSS_COMPILE
+#define CROSS_COMPILE
+#endif
+
+//
+// Specify the number of breakpoints and watchpoints that the OS
+// will track. Architecturally, LOONGARCH64 supports up to 16. In practice,
+// however, almost no one implements more than 4 of each.
+//
+
+#define LOONGARCH64_MAX_BREAKPOINTS 8
+#define LOONGARCH64_MAX_WATCHPOINTS 2
+
+#define CONTEXT_UNWOUND_TO_CALL 0x20000000
+
+typedef struct DECLSPEC_ALIGN(16) _T_CONTEXT {
+
+ //
+ // Control flags.
+ //
+
+ /* +0x000 */ DWORD ContextFlags;
+
+ //
+ // Integer registers
+ //
+ DWORD64 R0;
+ DWORD64 Ra;
+ DWORD64 Tp;
+ DWORD64 Sp;
+ DWORD64 A0;
+ DWORD64 A1;
+ DWORD64 A2;
+ DWORD64 A3;
+ DWORD64 A4;
+ DWORD64 A5;
+ DWORD64 A6;
+ DWORD64 A7;
+ DWORD64 T0;
+ DWORD64 T1;
+ DWORD64 T2;
+ DWORD64 T3;
+ DWORD64 T4;
+ DWORD64 T5;
+ DWORD64 T6;
+ DWORD64 T7;
+ DWORD64 T8;
+ DWORD64 X0;
+ DWORD64 Fp;
+ DWORD64 S0;
+ DWORD64 S1;
+ DWORD64 S2;
+ DWORD64 S3;
+ DWORD64 S4;
+ DWORD64 S5;
+ DWORD64 S6;
+ DWORD64 S7;
+ DWORD64 S8;
+ DWORD64 Pc;
+
+ //
+ // Floating Point Registers: FPR64/LSX/LASX.
+ //
+ ULONGLONG F[4*32];
+ DWORD64 Fcc;
+ DWORD Fcsr;
+} T_CONTEXT, *PT_CONTEXT;
+
+// _IMAGE_LOONGARCH64_RUNTIME_FUNCTION_ENTRY (see ExternalAPIs\Win9CoreSystem\inc\winnt.h)
+typedef struct _T_RUNTIME_FUNCTION {
+ DWORD BeginAddress;
+ union {
+ DWORD UnwindData;
+ struct {
+ DWORD Flag : 2;
+ DWORD FunctionLength : 11;
+ DWORD RegF : 3;
+ DWORD RegI : 4;
+ DWORD H : 1;
+ DWORD CR : 2;
+ DWORD FrameSize : 9;
+ } PackedUnwindData;
+ };
+} T_RUNTIME_FUNCTION, *PT_RUNTIME_FUNCTION;
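+
+// Note: the packed fields above occupy 2 + 11 + 3 + 4 + 1 + 2 + 9 = 32 bits, exactly
+// one DWORD, so PackedUnwindData overlays UnwindData without padding.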
+
+//
+// Define exception dispatch context structure.
+//
+
+typedef struct _T_DISPATCHER_CONTEXT {
+ DWORD64 ControlPc;
+ DWORD64 ImageBase;
+ PT_RUNTIME_FUNCTION FunctionEntry;
+ DWORD64 EstablisherFrame;
+ DWORD64 TargetPc;
+ PCONTEXT ContextRecord;
+ PEXCEPTION_ROUTINE LanguageHandler;
+ PVOID HandlerData;
+ PVOID HistoryTable;
+ DWORD ScopeIndex;
+ BOOLEAN ControlPcIsUnwound;
+ PBYTE NonVolatileRegisters;
+} T_DISPATCHER_CONTEXT, *PT_DISPATCHER_CONTEXT;
+
+//
+// Nonvolatile context pointer record.
+//
+
+typedef struct _T_KNONVOLATILE_CONTEXT_POINTERS {
+
+ PDWORD64 S0;
+ PDWORD64 S1;
+ PDWORD64 S2;
+ PDWORD64 S3;
+ PDWORD64 S4;
+ PDWORD64 S5;
+ PDWORD64 S6;
+ PDWORD64 S7;
+ PDWORD64 S8;
+ PDWORD64 Fp;
+ PDWORD64 Tp;
+ PDWORD64 Ra;
+
+ PDWORD64 F24;
+ PDWORD64 F25;
+ PDWORD64 F26;
+ PDWORD64 F27;
+ PDWORD64 F28;
+ PDWORD64 F29;
+ PDWORD64 F30;
+ PDWORD64 F31;
+} T_KNONVOLATILE_CONTEXT_POINTERS, *PT_KNONVOLATILE_CONTEXT_POINTERS;
+
#elif defined(HOST_AMD64) && defined(TARGET_RISCV64) // Host amd64 managing RISCV64 related code
#ifndef CROSS_COMPILE
#define DAC_CS_NATIVE_DATA_SIZE 80
#elif defined(TARGET_LINUX) && defined(TARGET_ARM64)
#define DAC_CS_NATIVE_DATA_SIZE 116
+#elif defined(TARGET_LINUX) && defined(TARGET_LOONGARCH64)
+#define DAC_CS_NATIVE_DATA_SIZE 96
#elif defined(TARGET_LINUX) && defined(TARGET_X86)
#define DAC_CS_NATIVE_DATA_SIZE 76
#elif defined(TARGET_LINUX) && defined(TARGET_AMD64)
#define DTCONTEXT_IS_ARM
#elif defined (TARGET_ARM64)
#define DTCONTEXT_IS_ARM64
+#elif defined (TARGET_LOONGARCH64)
+#define DTCONTEXT_IS_LOONGARCH64
#elif defined (TARGET_RISCV64)
#define DTCONTEXT_IS_RISCV64
#endif
} DT_CONTEXT;
+#elif defined(DTCONTEXT_IS_LOONGARCH64)
+
+#define DT_CONTEXT_LOONGARCH64 0x00800000L
+
+#define DT_CONTEXT_CONTROL (DT_CONTEXT_LOONGARCH64 | 0x1L)
+#define DT_CONTEXT_INTEGER (DT_CONTEXT_LOONGARCH64 | 0x2L)
+#define DT_CONTEXT_FLOATING_POINT (DT_CONTEXT_LOONGARCH64 | 0x4L)
+#define DT_CONTEXT_DEBUG_REGISTERS (DT_CONTEXT_LOONGARCH64 | 0x8L)
+
+#define DT_CONTEXT_FULL (DT_CONTEXT_CONTROL | DT_CONTEXT_INTEGER | DT_CONTEXT_FLOATING_POINT)
+#define DT_CONTEXT_ALL (DT_CONTEXT_CONTROL | DT_CONTEXT_INTEGER | DT_CONTEXT_FLOATING_POINT | DT_CONTEXT_DEBUG_REGISTERS)
+
+#define DT_LOONGARCH64_MAX_BREAKPOINTS 8
+#define DT_LOONGARCH64_MAX_WATCHPOINTS 2
+
+typedef struct DECLSPEC_ALIGN(16) {
+
+ //
+ // Control flags.
+ //
+
+ /* +0x000 */ DWORD ContextFlags;
+
+ //
+ // Integer registers.
+ //
+ DWORD64 R0;
+ DWORD64 Ra;
+ DWORD64 Tp;
+ DWORD64 Sp;
+ DWORD64 A0;//DWORD64 V0;
+ DWORD64 A1;//DWORD64 V1;
+ DWORD64 A2;
+ DWORD64 A3;
+ DWORD64 A4;
+ DWORD64 A5;
+ DWORD64 A6;
+ DWORD64 A7;
+ DWORD64 T0;
+ DWORD64 T1;
+ DWORD64 T2;
+ DWORD64 T3;
+ DWORD64 T4;
+ DWORD64 T5;
+ DWORD64 T6;
+ DWORD64 T7;
+ DWORD64 T8;
+ DWORD64 X0;
+ DWORD64 Fp;
+ DWORD64 S0;
+ DWORD64 S1;
+ DWORD64 S2;
+ DWORD64 S3;
+ DWORD64 S4;
+ DWORD64 S5;
+ DWORD64 S6;
+ DWORD64 S7;
+ DWORD64 S8;
+ DWORD64 Pc;
+
+ //
+ // Floating Point Registers: FPR64/LSX/LASX.
+ //
+ ULONGLONG F[4*32];
+ DWORD64 Fcc;
+ DWORD Fcsr;
+} DT_CONTEXT;
+
#elif defined(DTCONTEXT_IS_RISCV64)
#define DT_CONTEXT_RISCV64 0x01000000L
return (TADDR)context->Sp;
#elif defined(TARGET_ARM64)
return (TADDR)context->Sp;
+#elif defined(TARGET_LOONGARCH64)
+ return (TADDR)context->Sp;
#elif defined(TARGET_RISCV64)
return (TADDR)context->Sp;
#else
return (PCODE)context->Pc;
#elif defined(TARGET_ARM64)
return (PCODE)context->Pc;
+#elif defined(TARGET_LOONGARCH64)
+ return (PCODE)context->Pc;
#elif defined(TARGET_RISCV64)
return (PCODE)context->Pc;
#else
DECODE_EDIT_AND_CONTINUE = 0x800,
DECODE_REVERSE_PINVOKE_VAR = 0x1000,
DECODE_RETURN_KIND = 0x2000,
-#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
+#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
DECODE_HAS_TAILCALLS = 0x4000,
-#endif // TARGET_ARM || TARGET_ARM64 || TARGET_RISCV64
+#endif // TARGET_ARM || TARGET_ARM64 || TARGET_LOONGARCH64 || TARGET_RISCV64
};
enum GcInfoHeaderFlags
GC_INFO_HAS_STACK_BASE_REGISTER = 0x40,
#ifdef TARGET_AMD64
GC_INFO_WANTS_REPORT_ONLY_LEAF = 0x80,
-#elif defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
+#elif defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
GC_INFO_HAS_TAILCALLS = 0x80,
#endif // TARGET_AMD64
GC_INFO_HAS_EDIT_AND_CONTINUE_PRESERVED_SLOTS = 0x100,
bool HasMethodTableGenericsInstContext();
bool GetIsVarArg();
bool WantsReportOnlyLeaf();
-#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
+#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
bool HasTailCalls();
-#endif // TARGET_ARM || TARGET_ARM64 || TARGET_RISCV64
+#endif // TARGET_ARM || TARGET_ARM64 || TARGET_LOONGARCH64 || TARGET_RISCV64
ReturnKind GetReturnKind();
UINT32 GetCodeLength();
UINT32 GetStackBaseRegister();
bool m_GenericSecretParamIsMT;
#ifdef TARGET_AMD64
bool m_WantsReportOnlyLeaf;
-#elif defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
+#elif defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
bool m_HasTailCalls;
#endif // TARGET_AMD64
INT32 m_SecurityObjectStackSlot;
// 10 RT_ByRef
// 11 RT_Unset
-#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
+#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
// Slim Header:
#define LIVESTATE_RLE_RUN_ENCBASE 2
#define LIVESTATE_RLE_SKIP_ENCBASE 4
+#elif defined(TARGET_LOONGARCH64)
+#ifndef TARGET_POINTER_SIZE
+#define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target
+#endif
+#define NUM_NORM_CODE_OFFSETS_PER_CHUNK (64)
+#define NUM_NORM_CODE_OFFSETS_PER_CHUNK_LOG2 (6)
+#define NORMALIZE_STACK_SLOT(x) ((x)>>3) // GC Pointers are 8-bytes aligned
+#define DENORMALIZE_STACK_SLOT(x) ((x)<<3)
+#define NORMALIZE_CODE_LENGTH(x) ((x)>>2) // All Instructions are 4 bytes long
+#define DENORMALIZE_CODE_LENGTH(x) ((x)<<2)
+#define NORMALIZE_STACK_BASE_REGISTER(x) ((x) == 22 ? 0 : 1) // Encode Frame pointer fp=$22 as zero
+#define DENORMALIZE_STACK_BASE_REGISTER(x) ((x) == 0 ? 22 : 3)
+#define NORMALIZE_SIZE_OF_STACK_AREA(x) ((x)>>3)
+#define DENORMALIZE_SIZE_OF_STACK_AREA(x) ((x)<<3)
+#define CODE_OFFSETS_NEED_NORMALIZATION 0
+#define NORMALIZE_CODE_OFFSET(x) (x) // Instructions are 4 bytes long, but the safe-point
+#define DENORMALIZE_CODE_OFFSET(x) (x) // offsets are encoded with a -1 adjustment.
+#define NORMALIZE_REGISTER(x) (x)
+#define DENORMALIZE_REGISTER(x) (x)
+#define NORMALIZE_NUM_SAFE_POINTS(x) (x)
+#define DENORMALIZE_NUM_SAFE_POINTS(x) (x)
+#define NORMALIZE_NUM_INTERRUPTIBLE_RANGES(x) (x)
+#define DENORMALIZE_NUM_INTERRUPTIBLE_RANGES(x) (x)
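+
+// Worked examples (illustrative only): a stack slot at offset 0x28 normalizes to
+// 0x28 >> 3 = 5 and round-trips via 5 << 3 = 0x28; the frame pointer fp ($r22)
+// normalizes to 0, while any other base register encodes as 1 and denormalizes to sp ($r3).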
+
+#define PSP_SYM_STACK_SLOT_ENCBASE 6
+#define GENERICS_INST_CONTEXT_STACK_SLOT_ENCBASE 6
+#define SECURITY_OBJECT_STACK_SLOT_ENCBASE 6
+#define GS_COOKIE_STACK_SLOT_ENCBASE 6
+#define CODE_LENGTH_ENCBASE 8
+#define SIZE_OF_RETURN_KIND_IN_SLIM_HEADER 2
+#define SIZE_OF_RETURN_KIND_IN_FAT_HEADER 4
+// FP/SP encoded as 0 or 1.
+#define STACK_BASE_REGISTER_ENCBASE 2
+#define SIZE_OF_STACK_AREA_ENCBASE 3
+#define SIZE_OF_EDIT_AND_CONTINUE_PRESERVED_AREA_ENCBASE 4
+#define REVERSE_PINVOKE_FRAME_ENCBASE 6
+#define NUM_REGISTERS_ENCBASE 3
+#define NUM_STACK_SLOTS_ENCBASE 2
+#define NUM_UNTRACKED_SLOTS_ENCBASE 1
+#define NORM_PROLOG_SIZE_ENCBASE 5
+#define NORM_EPILOG_SIZE_ENCBASE 3
+#define NORM_CODE_OFFSET_DELTA_ENCBASE 3
+#define INTERRUPTIBLE_RANGE_DELTA1_ENCBASE 6
+#define INTERRUPTIBLE_RANGE_DELTA2_ENCBASE 6
+#define REGISTER_ENCBASE 3
+#define REGISTER_DELTA_ENCBASE 2
+#define STACK_SLOT_ENCBASE 6
+#define STACK_SLOT_DELTA_ENCBASE 4
+#define NUM_SAFE_POINTS_ENCBASE 3
+#define NUM_INTERRUPTIBLE_RANGES_ENCBASE 1
+#define NUM_EH_CLAUSES_ENCBASE 2
+#define POINTER_SIZE_ENCBASE 3
+#define LIVESTATE_RLE_RUN_ENCBASE 2
+#define LIVESTATE_RLE_SKIP_ENCBASE 4
+
#elif defined(TARGET_RISCV64)
#ifndef TARGET_POINTER_SIZE
#define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target
EM_NORC = 218, // Nanoradio Optimized RISC
EM_CSR_KALIMBA = 219, // CSR Kalimba architecture family
EM_AMDGPU = 224, // AMD GPU architecture
+ EM_LOONGARCH = 258, // LoongArch processor
// A request has been made to the maintainer of the official registry for
// such numbers for an official value for WebAssembly. As soon as one is
SHT_MIPS_OPTIONS = 0x7000000d, // General options
SHT_MIPS_ABIFLAGS = 0x7000002a, // ABI information.
+ SHT_LOONGARCH_REGINFO = 0x70000006, // Register usage information
+ SHT_LOONGARCH_OPTIONS = 0x7000000d, // General options
+ SHT_LOONGARCH_DWARF = 0x7000001e, // DWARF debugging section.
+ SHT_LOONGARCH_ABIFLAGS = 0x7000002a, // ABI information.
+
SHT_HIPROC = 0x7fffffff, // Highest processor arch-specific type.
SHT_LOUSER = 0x80000000, // Lowest type reserved for applications.
SHT_HIUSER = 0xffffffff // Highest type reserved for applications.
// Section data is string data by default.
SHF_MIPS_STRING = 0x80000000,
+ // Linker must retain only one copy.
+ SHF_LOONGARCH_NODUPES = 0x01000000,
+
+ // Linker must generate implicit hidden weak names.
+ SHF_LOONGARCH_NAMES = 0x02000000,
+
+ // Section data local to process.
+ SHF_LOONGARCH_LOCAL = 0x04000000,
+
+ // Do not strip this section.
+ SHF_LOONGARCH_NOSTRIP = 0x08000000,
+
+ // Section must be part of global data area.
+ SHF_LOONGARCH_GPREL = 0x10000000,
+
+ // This section should be merged.
+ SHF_LOONGARCH_MERGE = 0x20000000,
+
+ // Address size to be inferred from section entry size.
+ SHF_LOONGARCH_ADDR = 0x40000000,
+
+ // Section data is string data by default.
+ SHF_LOONGARCH_STRING = 0x80000000,
+
SHF_AMDGPU_HSA_GLOBAL = 0x00100000,
SHF_AMDGPU_HSA_READONLY = 0x00200000,
SHF_AMDGPU_HSA_CODE = 0x00400000,
PT_MIPS_OPTIONS = 0x70000002, // Options segment.
PT_MIPS_ABIFLAGS = 0x70000003, // Abiflags segment.
+ // LOONGARCH program header types.
+ PT_LOONGARCH_REGINFO = 0x70000000, // Register usage information.
+ PT_LOONGARCH_RTPROC = 0x70000001, // Runtime procedure table.
+ PT_LOONGARCH_OPTIONS = 0x70000002, // Options segment.
+ PT_LOONGARCH_ABIFLAGS = 0x70000003, // Abiflags segment.
+
// AMDGPU program header types.
PT_AMDGPU_HSA_LOAD_GLOBAL_PROGRAM = 0x60000000,
PT_AMDGPU_HSA_LOAD_GLOBAL_AGENT = 0x60000001,
#define IMAGE_FILE_MACHINE_NATIVE IMAGE_FILE_MACHINE_ARMNT
#elif defined(TARGET_ARM64)
#define IMAGE_FILE_MACHINE_NATIVE IMAGE_FILE_MACHINE_ARM64
+#elif defined(TARGET_LOONGARCH64)
+#define IMAGE_FILE_MACHINE_NATIVE IMAGE_FILE_MACHINE_LOONGARCH64
#elif defined(TARGET_S390X)
#define IMAGE_FILE_MACHINE_NATIVE IMAGE_FILE_MACHINE_UNKNOWN
#elif defined(TARGET_RISCV64)
} Arm64VolatileContextPointer;
#endif //TARGET_ARM64
+#if defined(TARGET_LOONGARCH64)
+typedef struct _LoongArch64VolatileContextPointer
+{
+ PDWORD64 R0;
+ PDWORD64 A0;
+ PDWORD64 A1;
+ PDWORD64 A2;
+ PDWORD64 A3;
+ PDWORD64 A4;
+ PDWORD64 A5;
+ PDWORD64 A6;
+ PDWORD64 A7;
+ PDWORD64 T0;
+ PDWORD64 T1;
+ PDWORD64 T2;
+ PDWORD64 T3;
+ PDWORD64 T4;
+ PDWORD64 T5;
+ PDWORD64 T6;
+ PDWORD64 T7;
+ PDWORD64 T8;
+ PDWORD64 X0;
+} LoongArch64VolatileContextPointer;
+#endif
+
#if defined(TARGET_RISCV64)
typedef struct _RiscV64VolatileContextPointer
{
Arm64VolatileContextPointer volatileCurrContextPointers;
#endif
+#ifdef TARGET_LOONGARCH64
+ LoongArch64VolatileContextPointer volatileCurrContextPointers;
+#endif
+
#ifdef TARGET_RISCV64
RiscV64VolatileContextPointer volatileCurrContextPointers;
#endif
_ASSERTE(GetRegdisplaySP(display) == GetSP(display->pCurrentContext));
return GetRegdisplaySP(display);
-#elif defined(TARGET_ARM64) || defined(TARGET_RISCV64)
+#elif defined(TARGET_ARM64) || defined(TARGET_RISCV64) || defined(TARGET_LOONGARCH64)
_ASSERTE(display->IsCallerContextValid);
return GetSP(display->pCallerContext);
return (LPVOID)((TADDR)display->pCurrentContext->R0);
#elif defined(TARGET_X86)
return (LPVOID)display->pCurrentContext->Eax;
+#elif defined(TARGET_LOONGARCH64)
+ return (LPVOID)display->pCurrentContext->A0;
#elif defined(TARGET_RISCV64)
return (LPVOID)display->pCurrentContext->A0;
#else
{
*(&pCtxPtrs->X19 + i) = (&pCtx->X19 + i);
}
-#elif defined(TARGET_ARM) // TARGET_ARM64
+#elif defined(TARGET_LOONGARCH64) // TARGET_ARM64
+ *(&pCtxPtrs->S0) = &pCtx->S0;
+ *(&pCtxPtrs->S1) = &pCtx->S1;
+ *(&pCtxPtrs->S2) = &pCtx->S2;
+ *(&pCtxPtrs->S3) = &pCtx->S3;
+ *(&pCtxPtrs->S4) = &pCtx->S4;
+ *(&pCtxPtrs->S5) = &pCtx->S5;
+ *(&pCtxPtrs->S6) = &pCtx->S6;
+ *(&pCtxPtrs->S7) = &pCtx->S7;
+ *(&pCtxPtrs->S8) = &pCtx->S8;
+ *(&pCtxPtrs->Tp) = &pCtx->Tp;
+ *(&pCtxPtrs->Fp) = &pCtx->Fp;
+ *(&pCtxPtrs->Ra) = &pCtx->Ra;
+#elif defined(TARGET_ARM) // TARGET_LOONGARCH64
// Copy over the nonvolatile integer registers (R4-R11)
for (int i = 0; i < 8; i++)
{
// Fill volatile context pointers. They can be used by GC in the case of the leaf frame
for (int i=0; i < 18; i++)
pRD->volatileCurrContextPointers.X[i] = &pctx->X[i];
-#elif defined(TARGET_RISCV64) // TARGET_ARM64
+#elif defined(TARGET_LOONGARCH64) // TARGET_ARM64
+ pRD->volatileCurrContextPointers.A0 = &pctx->A0;
+ pRD->volatileCurrContextPointers.A1 = &pctx->A1;
+ pRD->volatileCurrContextPointers.A2 = &pctx->A2;
+ pRD->volatileCurrContextPointers.A3 = &pctx->A3;
+ pRD->volatileCurrContextPointers.A4 = &pctx->A4;
+ pRD->volatileCurrContextPointers.A5 = &pctx->A5;
+ pRD->volatileCurrContextPointers.A6 = &pctx->A6;
+ pRD->volatileCurrContextPointers.A7 = &pctx->A7;
+ pRD->volatileCurrContextPointers.T0 = &pctx->T0;
+ pRD->volatileCurrContextPointers.T1 = &pctx->T1;
+ pRD->volatileCurrContextPointers.T2 = &pctx->T2;
+ pRD->volatileCurrContextPointers.T3 = &pctx->T3;
+ pRD->volatileCurrContextPointers.T4 = &pctx->T4;
+ pRD->volatileCurrContextPointers.T5 = &pctx->T5;
+ pRD->volatileCurrContextPointers.T6 = &pctx->T6;
+ pRD->volatileCurrContextPointers.T7 = &pctx->T7;
+ pRD->volatileCurrContextPointers.T8 = &pctx->T8;
+ pRD->volatileCurrContextPointers.X0 = &pctx->X0;
+#elif defined(TARGET_RISCV64) // TARGET_LOONGARCH64
pRD->volatileCurrContextPointers.A0 = &pctx->A0;
pRD->volatileCurrContextPointers.A1 = &pctx->A1;
pRD->volatileCurrContextPointers.A2 = &pctx->A2;
#elif defined(TARGET_ARM64)
_ASSERTE(regNum < 31);
return (size_t *)®s->X0 + regNum;
+#elif defined(TARGET_LOONGARCH64)
+ _ASSERTE(regNum < 32);
+ return (size_t *)®s->R0 + regNum;
#elif defined(TARGET_RISCV64)
_ASSERTE(regNum < 32);
return (size_t *)®s->R0 + regNum;
#error The Volatile type is currently only defined for Visual C++ and GNU C++
#endif
-#if defined(__GNUC__) && !defined(HOST_X86) && !defined(HOST_AMD64) && !defined(HOST_ARM) && !defined(HOST_ARM64) && !defined(HOST_RISCV64) && !defined(HOST_S390X)
-#error The Volatile type is currently only defined for GCC when targeting x86, AMD64, ARM, ARM64, RISCV64, or S390X CPUs
+#if defined(__GNUC__) && !defined(HOST_X86) && !defined(HOST_AMD64) && !defined(HOST_ARM) && !defined(HOST_ARM64) && !defined(HOST_LOONGARCH64) && !defined(HOST_RISCV64) && !defined(HOST_S390X)
+#error The Volatile type is currently only defined for GCC when targeting x86, AMD64, ARM, ARM64, LOONGARCH64, RISCV64, or S390X CPUs
#endif
#if defined(__GNUC__)
#elif defined(HOST_ARM) || defined(HOST_ARM64)
// This is functionally equivalent to the MemoryBarrier() macro used on ARM on Windows.
#define VOLATILE_MEMORY_BARRIER() asm volatile ("dmb ish" : : : "memory")
+#elif defined(HOST_LOONGARCH64)
+#define VOLATILE_MEMORY_BARRIER() asm volatile ("dbar 0 " : : : "memory")
#else
//
// For GCC, we prevent reordering by the compiler by inserting the following after a volatile
#define _M_ARM64 1
#elif defined(__s390x__) && !defined(_M_S390X)
#define _M_S390X 1
+#elif defined(__loongarch64) && !defined(_M_LOONGARCH64)
+#define _M_LOONGARCH64 1
#elif defined(__riscv) && (__riscv_xlen == 64) && !defined(_M_RISCV64)
#define _M_RISCV64 1
#endif
#define HOST_ARM64
#elif defined(_M_S390X) && !defined(HOST_S390X)
#define HOST_S390X
+#elif defined(_M_LOONGARCH64) && !defined(HOST_LOONGARCH64)
+#define HOST_LOONGARCH64
#elif defined(_M_RISCV64) && !defined(HOST_RISCV64)
#define HOST_RISCV64
#endif
} KNONVOLATILE_CONTEXT_POINTERS, *PKNONVOLATILE_CONTEXT_POINTERS;
+#elif defined(HOST_LOONGARCH64)
+
+// Please refer to src/coreclr/pal/src/arch/loongarch64/asmconstants.h
+#define CONTEXT_LOONGARCH64 0x00800000
+
+#define CONTEXT_CONTROL (CONTEXT_LOONGARCH64 | 0x1)
+#define CONTEXT_INTEGER (CONTEXT_LOONGARCH64 | 0x2)
+#define CONTEXT_FLOATING_POINT (CONTEXT_LOONGARCH64 | 0x4)
+#define CONTEXT_DEBUG_REGISTERS (CONTEXT_LOONGARCH64 | 0x8)
+#define CONTEXT_FULL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT)
+
+#define CONTEXT_ALL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT | CONTEXT_DEBUG_REGISTERS)
+
+#define CONTEXT_EXCEPTION_ACTIVE 0x8000000
+#define CONTEXT_SERVICE_ACTIVE 0x10000000
+#define CONTEXT_EXCEPTION_REQUEST 0x40000000
+#define CONTEXT_EXCEPTION_REPORTING 0x80000000
+
+//
+// This flag is set by the unwinder if it has unwound to a call
+// site, and cleared whenever it unwinds through a trap frame.
+// It is used by language-specific exception handlers to help
+// differentiate exception scopes during dispatching.
+//
+
+#define CONTEXT_UNWOUND_TO_CALL 0x20000000
+
+// begin_ntoshvp
+
+//
+// Specify the number of breakpoints and watchpoints that the OS
+// will track. Architecturally, LOONGARCH64 supports up to 16. In practice,
+// however, almost no one implements more than 4 of each.
+//
+
+#define LOONGARCH64_MAX_BREAKPOINTS 8
+#define LOONGARCH64_MAX_WATCHPOINTS 2
+
+typedef struct DECLSPEC_ALIGN(16) _CONTEXT {
+
+ //
+ // Control flags.
+ //
+
+ /* +0x000 */ DWORD ContextFlags;
+
+ //
+ // Integer registers.
+ //
+ DWORD64 R0;
+ DWORD64 Ra;
+ DWORD64 Tp;
+ DWORD64 Sp;
+ DWORD64 A0;//DWORD64 V0;
+ DWORD64 A1;//DWORD64 V1;
+ DWORD64 A2;
+ DWORD64 A3;
+ DWORD64 A4;
+ DWORD64 A5;
+ DWORD64 A6;
+ DWORD64 A7;
+ DWORD64 T0;
+ DWORD64 T1;
+ DWORD64 T2;
+ DWORD64 T3;
+ DWORD64 T4;
+ DWORD64 T5;
+ DWORD64 T6;
+ DWORD64 T7;
+ DWORD64 T8;
+ DWORD64 X0;
+ DWORD64 Fp;
+ DWORD64 S0;
+ DWORD64 S1;
+ DWORD64 S2;
+ DWORD64 S3;
+ DWORD64 S4;
+ DWORD64 S5;
+ DWORD64 S6;
+ DWORD64 S7;
+ DWORD64 S8;
+ DWORD64 Pc;
+
+ //
+ // Floating Point Registers: FPR64/LSX/LASX.
+ //
+ ULONGLONG F[4*32];
+ DWORD64 Fcc;
+ DWORD Fcsr;
+} CONTEXT, *PCONTEXT, *LPCONTEXT;
+
+//
+// Nonvolatile context pointer record.
+//
+
+typedef struct _KNONVOLATILE_CONTEXT_POINTERS {
+
+ PDWORD64 S0;
+ PDWORD64 S1;
+ PDWORD64 S2;
+ PDWORD64 S3;
+ PDWORD64 S4;
+ PDWORD64 S5;
+ PDWORD64 S6;
+ PDWORD64 S7;
+ PDWORD64 S8;
+ PDWORD64 Fp;
+ PDWORD64 Tp;
+ PDWORD64 Ra;
+
+ PDWORD64 F24;
+ PDWORD64 F25;
+ PDWORD64 F26;
+ PDWORD64 F27;
+ PDWORD64 F28;
+ PDWORD64 F29;
+ PDWORD64 F30;
+ PDWORD64 F31;
+} KNONVOLATILE_CONTEXT_POINTERS, *PKNONVOLATILE_CONTEXT_POINTERS;
+
#elif defined(HOST_RISCV64)
// Please refer to src/coreclr/pal/src/arch/riscv64/asmconstants.h
#define PAL_CS_NATIVE_DATA_SIZE 56
#elif defined(__sun) && defined(__x86_64__)
#define PAL_CS_NATIVE_DATA_SIZE 48
+#elif defined(__linux__) && defined(__loongarch64)
+#define PAL_CS_NATIVE_DATA_SIZE 96
#elif defined(__linux__) && defined(__riscv) && __riscv_xlen == 64
#define PAL_CS_NATIVE_DATA_SIZE 96
#else
FORCEINLINE void PAL_InterlockedOperationBarrier()
{
-#if defined(HOST_ARM64) || defined(HOST_RISCV64)
+#if defined(HOST_ARM64) || defined(HOST_LOONGARCH64) || defined(HOST_RISCV64)
// On arm64, most of the __sync* functions generate a code sequence like:
// loop:
// ldaxr (load acquire exclusive)
"nop");
#elif defined(HOST_ARM) || defined(HOST_ARM64)
__asm__ __volatile__( "yield");
+#elif defined(HOST_LOONGARCH64)
+ __asm__ volatile( "dbar 0; \n");
#elif defined(HOST_RISCV64)
// TODO-RISCV64-CQ: When Zihintpause is supported, replace with `pause` instruction.
__asm__ __volatile__(".word 0x0100000f");
#define LODWORD(_qw) ((ULONG)(_qw))
#if defined(MIDL_PASS) || defined(RC_INVOKED) || defined(_M_CEE_PURE) \
- || defined(_M_AMD64) || defined(__ARM_ARCH) || defined(_M_S390X) || defined(_M_RISCV64)
+ || defined(_M_AMD64) || defined(__ARM_ARCH) || defined(_M_S390X) || defined(_M_RISCV64) || defined(_M_LOONGARCH64)
#ifndef UInt32x32To64
#define UInt32x32To64(a, b) ((unsigned __int64)((ULONG)(a)) * (unsigned __int64)((ULONG)(b)))
DWORD Reserved;
} DISPATCHER_CONTEXT, *PDISPATCHER_CONTEXT;
-#elif defined(HOST_ARM64) || defined(HOST_RISCV64)
+#elif defined(HOST_ARM64) || defined(HOST_LOONGARCH64) || defined(HOST_RISCV64)
typedef struct _DISPATCHER_CONTEXT {
ULONG64 ControlPc;
#include "unixasmmacrosarm.inc"
#elif defined(HOST_ARM64)
#include "unixasmmacrosarm64.inc"
+#elif defined(HOST_LOONGARCH64)
+#include "unixasmmacrosloongarch64.inc"
#elif defined(HOST_RISCV64)
#include "unixasmmacrosriscv64.inc"
#elif defined(HOST_S390X)
--- /dev/null
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+.macro NESTED_ENTRY Name, Section, Handler
+ LEAF_ENTRY \Name, \Section
+ .ifnc \Handler, NoHandler
+ .cfi_personality 0x1c, C_FUNC(\Handler) // 0x1c == DW_EH_PE_pcrel | DW_EH_PE_sdata8
+ .endif
+.endm
+
+.macro NESTED_END Name, Section
+ LEAF_END \Name, \Section
+.endm
+
+.macro PATCH_LABEL Name
+ .global C_FUNC(\Name)
+C_FUNC(\Name):
+.endm
+
+.macro LEAF_ENTRY Name, Section
+ .global C_FUNC(\Name)
+ .type \Name, %function
+C_FUNC(\Name):
+ .cfi_startproc
+.endm
+
+.macro LEAF_END Name, Section
+ .size \Name, .-\Name
+ .cfi_endproc
+.endm
+
+.macro LEAF_END_MARKED Name, Section
+C_FUNC(\Name\()_End):
+ .global C_FUNC(\Name\()_End)
+ LEAF_END \Name, \Section
+ // make sure this symbol gets its own address
+ nop
+.endm
+
+.macro PREPARE_EXTERNAL_VAR Name, HelperReg
+ la.local \HelperReg, \Name
+.endm
+
+.macro PROLOG_STACK_ALLOC Size
+ addi.d $sp, $sp, -\Size
+ //.cfi_adjust_cfa_offset \Size
+ .cfi_def_cfa 3,\Size
+.endm
+
+.macro EPILOG_STACK_FREE Size
+ addi.d $sp, $sp, \Size
+ //.cfi_adjust_cfa_offset -\Size
+ .cfi_def_cfa 3,-\Size
+.endm
+
+.macro EPILOG_STACK_RESTORE
+ ori $sp, $fp, 0
+ .cfi_restore 3
+.endm
+
+//// NOTE: reg must be a GPR register number (not a register name).
+.macro PROLOG_SAVE_REG reg, ofs
+ st.d $r\reg, $sp, \ofs
+ .cfi_rel_offset \reg, \ofs
+.endm
+
+//// NOTE: reg1 and reg2 must be GPR register numbers (not register names).
+.macro PROLOG_SAVE_REG_PAIR reg1, reg2, ofs, __def_cfa_save=0
+//#ifdef FEATURE_LOONGSONISA
+// //NOTE:The offset of gssq/gslq must be 16-bytes aligned.
+// // here ofs must be 16-bytes aligned.
+// gssq \reg2, \reg1, \ofs(sp)
+//#else
+ st.d $r\reg1, $sp, \ofs
+ st.d $r\reg2, $sp, \ofs+8
+//#endif
+
+ .cfi_rel_offset \reg1, \ofs
+ .cfi_rel_offset \reg2, \ofs + 8
+ .if (\__def_cfa_save == 1)
+ ori $fp, $sp, 0
+ .cfi_def_cfa_register 22
+ .endif
+.endm
+
+//// NOTE: reg1 and reg2 must be GPR register numbers (not register names).
+.macro PROLOG_SAVE_REG_PAIR_INDEXED reg1, reg2, ssize, __def_cfa_save=1
+ addi.d $sp, $sp, -\ssize
+ //.cfi_adjust_cfa_offset \ssize
+ .cfi_def_cfa 3,\ssize
+
+ st.d $r\reg1, $sp, 0
+ st.d $r\reg2, $sp, 8
+
+ .cfi_rel_offset \reg1, 0
+ .cfi_rel_offset \reg2, 8
+ .if (\__def_cfa_save == 1)
+ ori $fp, $sp, 0
+ .cfi_def_cfa_register 22
+ .endif
+.endm
+
+.macro EPILOG_RESTORE_REG reg, ofs
+ ld.d $r\reg, $sp, \ofs
+ .cfi_restore \reg
+ .cfi_def_cfa_register 3
+.endm
+
+.macro EPILOG_RESTORE_REG_PAIR reg1, reg2, ofs
+//#ifdef FEATURE_LOONGSONISA
+// gslq \reg2, \reg1, \ofs(sp)
+//#else
+ ld.d $r\reg2, $sp, \ofs+8
+ ld.d $r\reg1, $sp, \ofs
+//#endif
+ .cfi_restore \reg2
+ .cfi_restore \reg1
+.endm
+
+.macro EPILOG_RESTORE_REG_PAIR_INDEXED reg1, reg2, ssize
+//#ifdef FEATURE_LOONGSONISA
+// gslq \reg2, \reg1, 0(sp)
+//#else
+ ld.d $r\reg2, $sp, 8
+ ld.d $r\reg1, $sp, 0
+//#endif
+ .cfi_restore \reg2
+ .cfi_restore \reg1
+
+ addi.d $sp, $sp, \ssize
+ //.cfi_adjust_cfa_offset -\ssize
+ .cfi_def_cfa 3,-\ssize
+.endm
+
+.macro EPILOG_RETURN
+ jirl $r0, $ra, 0
+.endm
+
+.macro EMIT_BREAKPOINT
+ break 0
+.endm
+
+.macro EPILOG_BRANCH Target
+ b \Target
+.endm
+
+.macro EPILOG_BRANCH_REG reg
+ jirl $r0, \reg, 0
+.endm
+
+//-----------------------------------------------------------------------------
+// The following SAVE_*_REGISTERS macros expect the memory to already be reserved and
+// the base address to be passed in $reg.
+//
+
+// The caller must reserve the frame space first; this macro writes s0-s8 and tp at \ofs+16 .. \ofs+95.
+.macro SAVE_CALLEESAVED_REGISTERS reg, ofs
+
+ PROLOG_SAVE_REG_PAIR 23, 24, \ofs + 16
+ PROLOG_SAVE_REG_PAIR 25, 26, \ofs + 32
+ PROLOG_SAVE_REG_PAIR 27, 28, \ofs + 48
+ PROLOG_SAVE_REG_PAIR 29, 30, \ofs + 64
+ PROLOG_SAVE_REG_PAIR 31, 2, \ofs + 80
+
+.endm
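+
+// Illustrative (hypothetical) pairing of these macros; the frame size and offsets below
+// are made up for the sketch and are not taken from any real stub:
+//   PROLOG_STACK_ALLOC 112
+//   PROLOG_SAVE_REG_PAIR 22, 1, 0, 1           // fp/ra at the base of the save area
+//   SAVE_CALLEESAVED_REGISTERS 3, 0            // s0-s8 and tp land at offsets 16..88
+//   ...
+//   RESTORE_CALLEESAVED_REGISTERS 3, 0
+//   EPILOG_RESTORE_REG_PAIR 22, 1, 0
+//   EPILOG_STACK_FREE 112
+//   EPILOG_RETURN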
+
+// Reserve 64 bytes of memory before calling SAVE_ARGUMENT_REGISTERS
+.macro SAVE_ARGUMENT_REGISTERS reg, ofs
+
+//#ifdef FEATURE_LOONGSONISA
+// //NOTE:The offset of gssq/gslq must be 16-bytes aligned.
+// // here ofs must be 16-bytes aligned.
+// gssq a1, a0, \ofs(\reg)
+// gssq a3, a2, \ofs+16(\reg)
+// gssq a5, a4, \ofs+32(\reg)
+// gssq a7, a6, \ofs+48(\reg)
+//#else
+ st.d $a0, \reg, \ofs
+ st.d $a1, \reg, \ofs+8
+ st.d $a2, \reg, \ofs+16
+ st.d $a3, \reg, \ofs+24
+ st.d $a4, \reg, \ofs+32
+ st.d $a5, \reg, \ofs+40
+ st.d $a6, \reg, \ofs+48
+ st.d $a7, \reg, \ofs+56
+//#endif
+
+.endm
+
+// Reserve 64 bytes of memory before calling SAVE_FLOAT_ARGUMENT_REGISTERS
+.macro SAVE_FLOAT_ARGUMENT_REGISTERS reg, ofs
+
+//#ifdef FEATURE_LOONGSONISA
+// //NOTE:The offset of gssqc1/gslqc1 must be 16-bytes aligned.
+// // here ofs must be 16-bytes aligned.
+// gssqc1 $f13, $f12, \ofs(\reg)
+// gssqc1 $f15, $f14, \ofs+16(\reg)
+// gssqc1 $f17, $f16, \ofs+32(\reg)
+// gssqc1 $f19, $f18, \ofs+48(\reg)
+//#else
+ fst.d $f0, \reg, \ofs
+ fst.d $f1, \reg, \ofs+8
+ fst.d $f2, \reg, \ofs+16
+ fst.d $f3, \reg, \ofs+24
+ fst.d $f4, \reg, \ofs+32
+ fst.d $f5, \reg, \ofs+40
+ fst.d $f6, \reg, \ofs+48
+ fst.d $f7, \reg, \ofs+56
+//#endif
+
+.endm
+
+// Reserve 64 bytes of memory before calling SAVE_FLOAT_CALLEESAVED_REGISTERS
+.macro SAVE_FLOAT_CALLEESAVED_REGISTERS reg, ofs
+
+//#ifdef FEATURE_LOONGSONISA
+// //NOTE:The offset of gssqc1/gslqc1 must be 16-bytes aligned.
+// // here ofs must be 16-bytes aligned.
+// gssqc1 $f25, $f24, \ofs(\reg)
+// gssqc1 $f27, $f26, \ofs+16(\reg)
+// gssqc1 $f29, $f28, \ofs+32(\reg)
+// gssqc1 $f31, $f30, \ofs+48(\reg)
+//#else
+ fst.d $f24, \reg, \ofs
+ fst.d $f25, \reg, \ofs+8
+ fst.d $f26, \reg, \ofs+16
+ fst.d $f27, \reg, \ofs+24
+ fst.d $f28, \reg, \ofs+32
+ fst.d $f29, \reg, \ofs+40
+ fst.d $f30, \reg, \ofs+48
+ fst.d $f31, \reg, \ofs+56
+//#endif
+
+.endm
+
+.macro RESTORE_CALLEESAVED_REGISTERS reg, ofs
+
+ EPILOG_RESTORE_REG_PAIR 31, 2, \ofs + 80
+
+ EPILOG_RESTORE_REG_PAIR 29, 30, \ofs + 64
+ EPILOG_RESTORE_REG_PAIR 27, 28, \ofs + 48
+ EPILOG_RESTORE_REG_PAIR 25, 26, \ofs + 32
+ EPILOG_RESTORE_REG_PAIR 23, 24, \ofs + 16
+.endm
+
+.macro RESTORE_ARGUMENT_REGISTERS reg, ofs
+
+//#ifdef FEATURE_LOONGSONISA
+// //NOTE:The offset of gssq/gslq must be 16-bytes aligned.
+// // here ofs must be 16-bytes aligned.
+// gslq a7, a6, \ofs+48(\reg)
+// gslq a5, a4, \ofs+32(\reg)
+// gslq a3, a2, \ofs+16(\reg)
+// gslq a1, a0, \ofs(\reg)
+//#else
+ ld.d $a7, \reg, \ofs+56
+ ld.d $a6, \reg, \ofs+48
+ ld.d $a5, \reg, \ofs+40
+ ld.d $a4, \reg, \ofs+32
+ ld.d $a3, \reg, \ofs+24
+ ld.d $a2, \reg, \ofs+16
+ ld.d $a1, \reg, \ofs+8
+ ld.d $a0, \reg, \ofs
+//#endif
+
+.endm
+
+.macro RESTORE_FLOAT_ARGUMENT_REGISTERS reg, ofs
+
+//#ifdef FEATURE_LOONGSONISA
+// gslqc1 $f19, $f18, \ofs+48(\reg)
+// gslqc1 $f17, $f16, \ofs+32(\reg)
+// gslqc1 $f15, $f14, \ofs+16(\reg)
+// gslqc1 $f13, $f12, \ofs(\reg)
+//#else
+ fld.d $f7, \reg, \ofs+56
+ fld.d $f6, \reg, \ofs+48
+ fld.d $f5, \reg, \ofs+40
+ fld.d $f4, \reg, \ofs+32
+ fld.d $f3, \reg, \ofs+24
+ fld.d $f2, \reg, \ofs+16
+ fld.d $f1, \reg, \ofs+8
+ fld.d $f0, \reg, \ofs
+//#endif
+
+.endm
+
+.macro RESTORE_FLOAT_CALLEESAVED_REGISTERS reg, ofs
+
+//#ifdef FEATURE_LOONGSONISA
+// //NOTE:The offset of gssqc1/gslqc1 must be 16-bytes aligned.
+// // here ofs must be 16-bytes aligned.
+// gslqc1 $f25, $f24, \ofs(\reg)
+// gslqc1 $f27, $f26, \ofs+16(\reg)
+// gslqc1 $f29, $f28, \ofs+32(\reg)
+// gslqc1 $f31, $f30, \ofs+48(\reg)
+//#else
+ fld.d $f24, $r\reg, \ofs
+ fld.d $f25, $r\reg, \ofs+8
+ fld.d $f26, $r\reg, \ofs+16
+ fld.d $f27, $r\reg, \ofs+24
+ fld.d $f28, $r\reg, \ofs+32
+ fld.d $f29, $r\reg, \ofs+40
+ fld.d $f30, $r\reg, \ofs+48
+ fld.d $f31, $r\reg, \ofs+56
+//#endif
+
+.endm
+
+//-----------------------------------------------------------------------------
+// Define the prolog for a TransitionBlock-based method. This macro should be called first in the method and
+// comprises the entire prolog. The locals must be 8-byte aligned.
+//
+// Save_argument_registers:
+// GPR_a7
+// GPR_a6
+// GPR_a5
+// GPR_a4
+// GPR_a3
+// GPR_a2
+// GPR_a1
+// GPR_a0
+//
+// General Registers:
+// GPR_tp
+// GPR_s8
+// GPR_s7
+// GPR_s6
+// GPR_s5
+// GPR_s4
+// GPR_s3
+// GPR_s2
+// GPR_s1
+// GPR_s0
+// GPR_ra
+// GPR_fp
+//
+// Float Point:
+// FPR_f31 / fs7
+// FPR_f30 / fs6
+// FPR_f29 / fs5
+// FPR_f28 / fs4
+// FPR_f27 / fs3
+// FPR_f26 / fs2
+// FPR_f25 / fs1
+// FPR_f24 / fs0
+// Extra:
+//
+.macro PROLOG_WITH_TRANSITION_BLOCK extraParameters = 0, extraLocals = 0, SaveFPRegs = 1
+
+ __PWTB_SaveFPArgs = \SaveFPRegs
+
+ __PWTB_FloatArgumentRegisters = \extraLocals
+
+ .if ((__PWTB_FloatArgumentRegisters % 16) != 0)
+ __PWTB_FloatArgumentRegisters = __PWTB_FloatArgumentRegisters + 8
+ .endif
+
+ __PWTB_TransitionBlock = __PWTB_FloatArgumentRegisters
+
+ .if (__PWTB_SaveFPArgs == 1)
+ __PWTB_TransitionBlock = __PWTB_TransitionBlock + SIZEOF__FloatArgumentRegisters
+ .endif
+
+
+ __PWTB_CalleeSavedRegisters = __PWTB_TransitionBlock
+ __PWTB_ArgumentRegisters = __PWTB_TransitionBlock + 96
+
+ // Includes fp, ra, s0-s8, tp, and the argument registers a0-a7: (1+1+9+1)*8 + 8*8 bytes.
+ __PWTB_StackAlloc = __PWTB_TransitionBlock + 96 + 64
+ PROLOG_STACK_ALLOC __PWTB_StackAlloc
+ // $fp,$ra
+ PROLOG_SAVE_REG_PAIR 22, 1, __PWTB_CalleeSavedRegisters, 1
+
+ // First, Spill argument registers.
+ SAVE_ARGUMENT_REGISTERS $sp, __PWTB_ArgumentRegisters
+
+ // Then, Spill callee saved registers. $sp=$r3.
+ SAVE_CALLEESAVED_REGISTERS 3, __PWTB_CalleeSavedRegisters
+
+ // Spill the floating point argument registers (f0-f7).
+ .if (__PWTB_SaveFPArgs == 1)
+ SAVE_FLOAT_ARGUMENT_REGISTERS $sp, __PWTB_FloatArgumentRegisters
+ .endif
+
+.endm
+
+.macro EPILOG_WITH_TRANSITION_BLOCK_RETURN
+
+ RESTORE_CALLEESAVED_REGISTERS 3, __PWTB_CalleeSavedRegisters
+
+ EPILOG_RESTORE_REG_PAIR 22, 1, __PWTB_CalleeSavedRegisters
+
+ EPILOG_STACK_FREE __PWTB_StackAlloc
+
+ jirl $r0, $ra, 0
+
+.endm
+
+
+//-----------------------------------------------------------------------------
+// Provides a matching epilog to PROLOG_WITH_TRANSITION_BLOCK and ends by preparing for tail-calling.
+// Since this is a tail call, the argument registers are restored as well.
+//
+.macro EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+
+ .if (__PWTB_SaveFPArgs == 1)
+ RESTORE_FLOAT_ARGUMENT_REGISTERS $sp, __PWTB_FloatArgumentRegisters
+ .endif
+
+ RESTORE_CALLEESAVED_REGISTERS 3, __PWTB_CalleeSavedRegisters
+
+ RESTORE_ARGUMENT_REGISTERS $sp, __PWTB_ArgumentRegisters
+
+ EPILOG_RESTORE_REG_PAIR 22, 1, __PWTB_CalleeSavedRegisters
+
+ EPILOG_STACK_FREE __PWTB_StackAlloc
+
+.endm
+
+// ------------------------------------------------------------------
+// Macro to generate Redirection Stubs
+//
+// $reason : reason for redirection
+// Eg. GCThreadControl
+// NOTE: If you edit this macro, make sure you update GetCONTEXTFromRedirectedStubStackFrame.
+// This function is used by both the personality routine and the debugger to retrieve the original CONTEXT.
+.macro GenerateRedirectedHandledJITCaseStub reason
+
+#if NOTYET
+ GBLS __RedirectionStubFuncName
+ GBLS __RedirectionStubEndFuncName
+ GBLS __RedirectionFuncName
+__RedirectionStubFuncName SETS "RedirectedHandledJITCaseFor":CC:"$reason":CC:"_Stub"
+__RedirectionStubEndFuncName SETS "RedirectedHandledJITCaseFor":CC:"$reason":CC:"_StubEnd"
+__RedirectionFuncName SETS "|?RedirectedHandledJITCaseFor":CC:"$reason":CC:"@Thread@@CAXXZ|"
+
+ IMPORT $__RedirectionFuncName
+
+ NESTED_ENTRY $__RedirectionStubFuncName
+ addi.d $sp, $sp, -32 // stack slot for CONTEXT * and padding
+ PROLOG_SAVE_REG_PAIR 22, 1, 16, 1
+
+ //REDIRECTSTUB_SP_OFFSET_CONTEXT is defined in asmconstants.h and is used in GetCONTEXTFromRedirectedStubStackFrame
+ //If CONTEXT is not saved at 0 offset from SP it must be changed as well.
+ ASSERT REDIRECTSTUB_SP_OFFSET_CONTEXT == 0
+
+ // Stack alignment. This check is necessary as this function can be
+ // entered before complete execution of the prolog of another function.
+ andi $t4, $fp, 0xf
+ sub.d $sp, $sp, $t4
+
+
+ //
+ // Save a copy of the redirect CONTEXT*.
+ // This is needed for the debugger to unwind the stack.
+ //
+ bl GetCurrentSavedRedirectContext
+
+ st.d $v0, $sp, 0
+
+ //
+ // Fetch the interrupted pc and save it as our return address.
+ //
+ ld.d $a1, $a0, CONTEXT_PC
+ st.d $a1, $fp, 8
+
+ //
+ // Call target, which will do whatever we needed to do in the context
+ // of the target thread, and will RtlRestoreContext when it is done.
+ //
+ bl $__RedirectionFuncName
+
+ EMIT_BREAKPOINT // Unreachable
+
+// Put a label here to tell the debugger where the end of this function is.
+$__RedirectionStubEndFuncName
+ EXPORT $__RedirectionStubEndFuncName
+
+ NESTED_END
+#else
+ EMIT_BREAKPOINT
+#endif
+.endm
+
+//-----------------------------------------------------------------------------
+// Macro used to check (in debug builds only) whether the stack is 16-byte aligned (a requirement before calling
+// out into C++/OS code). Invoke this directly after your prolog (if the stack frame size is fixed) or directly
+// before a call (if you have a frame pointer and a dynamic stack). A breakpoint will be invoked if the stack
+// is misaligned.
+//
+.macro CHECK_STACK_ALIGNMENT
+
+#ifdef _DEBUG
+ andi $t4, $sp, 0xf
+ beq $t4, $r0, 0f
+ EMIT_BREAKPOINT
+0:
+#endif
+.endm
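+
+// Illustrative (hypothetical) use from a stub with a fixed-size frame; the helper name
+// below is made up:
+//   PROLOG_STACK_ALLOC 32
+//   CHECK_STACK_ALIGNMENT
+//   bl C_FUNC(SomeCxxHelper)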
REGISTER_ARM64_V29 = ( REGISTER_ARM64_V28 + 1 ) ,
REGISTER_ARM64_V30 = ( REGISTER_ARM64_V29 + 1 ) ,
REGISTER_ARM64_V31 = ( REGISTER_ARM64_V30 + 1 ) ,
+ REGISTER_LOONGARCH64_PC = 0,
+ REGISTER_LOONGARCH64_SP = ( REGISTER_LOONGARCH64_PC + 1 ) ,
+ REGISTER_LOONGARCH64_FP = ( REGISTER_LOONGARCH64_SP + 1 ) ,
+ REGISTER_LOONGARCH64_RA = ( REGISTER_LOONGARCH64_FP + 1 ) ,
+ REGISTER_LOONGARCH64_TP = ( REGISTER_LOONGARCH64_RA + 1 ) ,
+ REGISTER_LOONGARCH64_A0 = ( REGISTER_LOONGARCH64_TP + 1 ) ,
+ REGISTER_LOONGARCH64_A1 = ( REGISTER_LOONGARCH64_A0 + 1 ) ,
+ REGISTER_LOONGARCH64_A2 = ( REGISTER_LOONGARCH64_A1 + 1 ) ,
+ REGISTER_LOONGARCH64_A3 = ( REGISTER_LOONGARCH64_A2 + 1 ) ,
+ REGISTER_LOONGARCH64_A4 = ( REGISTER_LOONGARCH64_A3 + 1 ) ,
+ REGISTER_LOONGARCH64_A5 = ( REGISTER_LOONGARCH64_A4 + 1 ) ,
+ REGISTER_LOONGARCH64_A6 = ( REGISTER_LOONGARCH64_A5 + 1 ) ,
+ REGISTER_LOONGARCH64_A7 = ( REGISTER_LOONGARCH64_A6 + 1 ) ,
+ REGISTER_LOONGARCH64_T0 = ( REGISTER_LOONGARCH64_A7 + 1 ) ,
+ REGISTER_LOONGARCH64_T1 = ( REGISTER_LOONGARCH64_T0 + 1 ) ,
+ REGISTER_LOONGARCH64_T2 = ( REGISTER_LOONGARCH64_T1 + 1 ) ,
+ REGISTER_LOONGARCH64_T3 = ( REGISTER_LOONGARCH64_T2 + 1 ) ,
+ REGISTER_LOONGARCH64_T4 = ( REGISTER_LOONGARCH64_T3 + 1 ) ,
+ REGISTER_LOONGARCH64_T5 = ( REGISTER_LOONGARCH64_T4 + 1 ) ,
+ REGISTER_LOONGARCH64_T6 = ( REGISTER_LOONGARCH64_T5 + 1 ) ,
+ REGISTER_LOONGARCH64_T7 = ( REGISTER_LOONGARCH64_T6 + 1 ) ,
+ REGISTER_LOONGARCH64_T8 = ( REGISTER_LOONGARCH64_T7 + 1 ) ,
+ REGISTER_LOONGARCH64_X0 = ( REGISTER_LOONGARCH64_T8 + 1 ) ,
+ REGISTER_LOONGARCH64_S0 = ( REGISTER_LOONGARCH64_X0 + 1 ) ,
+ REGISTER_LOONGARCH64_S1 = ( REGISTER_LOONGARCH64_S0 + 1 ) ,
+ REGISTER_LOONGARCH64_S2 = ( REGISTER_LOONGARCH64_S1 + 1 ) ,
+ REGISTER_LOONGARCH64_S3 = ( REGISTER_LOONGARCH64_S2 + 1 ) ,
+ REGISTER_LOONGARCH64_S4 = ( REGISTER_LOONGARCH64_S3 + 1 ) ,
+ REGISTER_LOONGARCH64_S5 = ( REGISTER_LOONGARCH64_S4 + 1 ) ,
+ REGISTER_LOONGARCH64_S6 = ( REGISTER_LOONGARCH64_S5 + 1 ) ,
+ REGISTER_LOONGARCH64_S7 = ( REGISTER_LOONGARCH64_S6 + 1 ) ,
+ REGISTER_LOONGARCH64_S8 = ( REGISTER_LOONGARCH64_S7 + 1 ) ,
+ REGISTER_LOONGARCH64_F0 = ( REGISTER_LOONGARCH64_S8 + 1 ) ,
+ REGISTER_LOONGARCH64_F1 = ( REGISTER_LOONGARCH64_F0 + 1 ) ,
+ REGISTER_LOONGARCH64_F2 = ( REGISTER_LOONGARCH64_F1 + 1 ) ,
+ REGISTER_LOONGARCH64_F3 = ( REGISTER_LOONGARCH64_F2 + 1 ) ,
+ REGISTER_LOONGARCH64_F4 = ( REGISTER_LOONGARCH64_F3 + 1 ) ,
+ REGISTER_LOONGARCH64_F5 = ( REGISTER_LOONGARCH64_F4 + 1 ) ,
+ REGISTER_LOONGARCH64_F6 = ( REGISTER_LOONGARCH64_F5 + 1 ) ,
+ REGISTER_LOONGARCH64_F7 = ( REGISTER_LOONGARCH64_F6 + 1 ) ,
+ REGISTER_LOONGARCH64_F8 = ( REGISTER_LOONGARCH64_F7 + 1 ) ,
+ REGISTER_LOONGARCH64_F9 = ( REGISTER_LOONGARCH64_F8 + 1 ) ,
+ REGISTER_LOONGARCH64_F10 = ( REGISTER_LOONGARCH64_F9 + 1 ) ,
+ REGISTER_LOONGARCH64_F11 = ( REGISTER_LOONGARCH64_F10 + 1 ) ,
+ REGISTER_LOONGARCH64_F12 = ( REGISTER_LOONGARCH64_F11 + 1 ) ,
+ REGISTER_LOONGARCH64_F13 = ( REGISTER_LOONGARCH64_F12 + 1 ) ,
+ REGISTER_LOONGARCH64_F14 = ( REGISTER_LOONGARCH64_F13 + 1 ) ,
+ REGISTER_LOONGARCH64_F15 = ( REGISTER_LOONGARCH64_F14 + 1 ) ,
+ REGISTER_LOONGARCH64_F16 = ( REGISTER_LOONGARCH64_F15 + 1 ) ,
+ REGISTER_LOONGARCH64_F17 = ( REGISTER_LOONGARCH64_F16 + 1 ) ,
+ REGISTER_LOONGARCH64_F18 = ( REGISTER_LOONGARCH64_F17 + 1 ) ,
+ REGISTER_LOONGARCH64_F19 = ( REGISTER_LOONGARCH64_F18 + 1 ) ,
+ REGISTER_LOONGARCH64_F20 = ( REGISTER_LOONGARCH64_F19 + 1 ) ,
+ REGISTER_LOONGARCH64_F21 = ( REGISTER_LOONGARCH64_F20 + 1 ) ,
+ REGISTER_LOONGARCH64_F22 = ( REGISTER_LOONGARCH64_F21 + 1 ) ,
+ REGISTER_LOONGARCH64_F23 = ( REGISTER_LOONGARCH64_F22 + 1 ) ,
+ REGISTER_LOONGARCH64_F24 = ( REGISTER_LOONGARCH64_F23 + 1 ) ,
+ REGISTER_LOONGARCH64_F25 = ( REGISTER_LOONGARCH64_F24 + 1 ) ,
+ REGISTER_LOONGARCH64_F26 = ( REGISTER_LOONGARCH64_F25 + 1 ) ,
+ REGISTER_LOONGARCH64_F27 = ( REGISTER_LOONGARCH64_F26 + 1 ) ,
+ REGISTER_LOONGARCH64_F28 = ( REGISTER_LOONGARCH64_F27 + 1 ) ,
+ REGISTER_LOONGARCH64_F29 = ( REGISTER_LOONGARCH64_F28 + 1 ) ,
+ REGISTER_LOONGARCH64_F30 = ( REGISTER_LOONGARCH64_F29 + 1 ) ,
+ REGISTER_LOONGARCH64_F31 = ( REGISTER_LOONGARCH64_F30 + 1 ) ,
REGISTER_RISCV64_PC = 0,
REGISTER_RISCV64_SP = ( REGISTER_RISCV64_PC + 1 ),
REGISTER_RISCV64_FP = ( REGISTER_RISCV64_SP + 1 ),
add_definitions(-D__armv6__)
elseif(CLR_CMAKE_HOST_ARCH_ARM64)
set(PAL_ARCH_SOURCES_DIR arm64)
+elseif(CLR_CMAKE_HOST_ARCH_LOONGARCH64)
+ set(PAL_ARCH_SOURCES_DIR loongarch64)
elseif(CLR_CMAKE_HOST_ARCH_I386)
set(PAL_ARCH_SOURCES_DIR i386)
elseif(CLR_CMAKE_HOST_ARCH_S390X)
--- /dev/null
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+#ifndef __PAL_LOONGARCH64_ASMCONSTANTS_H__
+#define __PAL_LOONGARCH64_ASMCONSTANTS_H__
+
+// Please refer to src/coreclr/pal/inc/pal.h
+#define CONTEXT_LOONGARCH64 0x00800000
+
+#define CONTEXT_CONTROL_BIT (0)
+#define CONTEXT_INTEGER_BIT (1)
+#define CONTEXT_FLOATING_POINT_BIT (2)
+#define CONTEXT_DEBUG_REGISTERS_BIT (3)
+
+#define CONTEXT_CONTROL (CONTEXT_LOONGARCH64 | (1 << CONTEXT_CONTROL_BIT))
+#define CONTEXT_INTEGER (CONTEXT_LOONGARCH64 | (1 << CONTEXT_INTEGER_BIT))
+#define CONTEXT_FLOATING_POINT (CONTEXT_LOONGARCH64 | (1 << CONTEXT_FLOATING_POINT_BIT))
+#define CONTEXT_DEBUG_REGISTERS (CONTEXT_LOONGARCH64 | (1 << CONTEXT_DEBUG_REGISTERS_BIT))
+
+#define CONTEXT_FULL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT)
+
+#define SIZEOF_LOONGARCH_GPR 8
+#define SIZEOF_LOONGARCH_FPR 8
+
+#define CONTEXT_ContextFlags 0
+// Note: Here CONTEXT_ContextFlags+8 for memory algin.
+#define CONTEXT_R0 CONTEXT_ContextFlags+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_Ra CONTEXT_R0+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_Tp CONTEXT_Ra+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_Sp CONTEXT_Tp+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_A0 CONTEXT_Sp+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_A1 CONTEXT_A0+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_A2 CONTEXT_A1+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_A3 CONTEXT_A2+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_A4 CONTEXT_A3+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_A5 CONTEXT_A4+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_A6 CONTEXT_A5+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_A7 CONTEXT_A6+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_T0 CONTEXT_A7+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_T1 CONTEXT_T0+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_T2 CONTEXT_T1+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_T3 CONTEXT_T2+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_T4 CONTEXT_T3+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_T5 CONTEXT_T4+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_T6 CONTEXT_T5+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_T7 CONTEXT_T6+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_T8 CONTEXT_T7+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_R21 CONTEXT_T8+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_Fp CONTEXT_R21+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_S0 CONTEXT_Fp+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_S1 CONTEXT_S0+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_S2 CONTEXT_S1+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_S3 CONTEXT_S2+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_S4 CONTEXT_S3+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_S5 CONTEXT_S4+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_S6 CONTEXT_S5+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_S7 CONTEXT_S6+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_S8 CONTEXT_S7+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_Pc CONTEXT_S8+SIZEOF_LOONGARCH_GPR
+#define CONTEXT_FPU_OFFSET CONTEXT_Pc+SIZEOF_LOONGARCH_GPR
+
+#define CONTEXT_F0 0
+#define CONTEXT_F1 CONTEXT_F0+SIZEOF_LOONGARCH_FPR
+#define CONTEXT_F2 CONTEXT_F1+SIZEOF_LOONGARCH_FPR
+#define CONTEXT_F3 CONTEXT_F2+SIZEOF_LOONGARCH_FPR
+#define CONTEXT_F4 CONTEXT_F3+SIZEOF_LOONGARCH_FPR
+#define CONTEXT_F5 CONTEXT_F4+SIZEOF_LOONGARCH_FPR
+#define CONTEXT_F6 CONTEXT_F5+SIZEOF_LOONGARCH_FPR
+#define CONTEXT_F7 CONTEXT_F6+SIZEOF_LOONGARCH_FPR
+#define CONTEXT_F8 CONTEXT_F7+SIZEOF_LOONGARCH_FPR
+#define CONTEXT_F9 CONTEXT_F8+SIZEOF_LOONGARCH_FPR
+#define CONTEXT_F10 CONTEXT_F9+SIZEOF_LOONGARCH_FPR
+#define CONTEXT_F11 CONTEXT_F10+SIZEOF_LOONGARCH_FPR
+#define CONTEXT_F12 CONTEXT_F11+SIZEOF_LOONGARCH_FPR
+#define CONTEXT_F13 CONTEXT_F12+SIZEOF_LOONGARCH_FPR
+#define CONTEXT_F14 CONTEXT_F13+SIZEOF_LOONGARCH_FPR
+#define CONTEXT_F15 CONTEXT_F14+SIZEOF_LOONGARCH_FPR
+#define CONTEXT_F16 CONTEXT_F15+SIZEOF_LOONGARCH_FPR
+#define CONTEXT_F17 CONTEXT_F16+SIZEOF_LOONGARCH_FPR
+#define CONTEXT_F18 CONTEXT_F17+SIZEOF_LOONGARCH_FPR
+#define CONTEXT_F19 CONTEXT_F18+SIZEOF_LOONGARCH_FPR
+#define CONTEXT_F20 CONTEXT_F19+SIZEOF_LOONGARCH_FPR
+#define CONTEXT_F21 CONTEXT_F20+SIZEOF_LOONGARCH_FPR
+#define CONTEXT_F22 CONTEXT_F21+SIZEOF_LOONGARCH_FPR
+#define CONTEXT_F23 CONTEXT_F22+SIZEOF_LOONGARCH_FPR
+#define CONTEXT_F24 CONTEXT_F23+SIZEOF_LOONGARCH_FPR
+#define CONTEXT_F25 CONTEXT_F24+SIZEOF_LOONGARCH_FPR
+#define CONTEXT_F26 CONTEXT_F25+SIZEOF_LOONGARCH_FPR
+#define CONTEXT_F27 CONTEXT_F26+SIZEOF_LOONGARCH_FPR
+#define CONTEXT_F28 CONTEXT_F27+SIZEOF_LOONGARCH_FPR
+#define CONTEXT_F29 CONTEXT_F28+SIZEOF_LOONGARCH_FPR
+#define CONTEXT_F30 CONTEXT_F29+SIZEOF_LOONGARCH_FPR
+#define CONTEXT_F31 CONTEXT_F30+SIZEOF_LOONGARCH_FPR
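+// Each of the 32 FP registers is saved as a full 256-bit (32-byte) LASX value, so the
+// condition-flag/FCSR area starts 4*32*8 bytes past CONTEXT_FPU_OFFSET.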
+#define CONTEXT_FLOAT_CONTROL_OFFSET (CONTEXT_FPU_OFFSET + 4*32*8)
+
+#endif // __PAL_LOONGARCH64_ASMCONSTANTS_H__
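The two derived offsets above are easy to misread, so the following standalone sketch (illustration only, not part of the patch) recomputes them from the layout the macros imply: ContextFlags is padded to one 8-byte slot, 33 general-purpose slots follow, and each of the 32 floating-point registers is stored as a 32-byte LASX value.

#include <cstdio>

int main()
{
    const int gprSize  = 8;    // SIZEOF_LOONGARCH_GPR
    const int gprCount = 33;   // R0, Ra, Tp, Sp, A0-A7, T0-T8, R21, Fp, S0-S8, Pc
    const int lasxSize = 32;   // each FP register is stored as a 256-bit LASX value

    const int fpuOffset          = gprSize + gprCount * gprSize; // ContextFlags padded to one slot
    const int floatControlOffset = fpuOffset + 32 * lasxSize;    // same as 4*32*8 in the header

    std::printf("CONTEXT_FPU_OFFSET           = %d\n", fpuOffset);          // 272
    std::printf("CONTEXT_FLOAT_CONTROL_OFFSET = %d\n", floatControlOffset); // 1296
    return 0;
}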
--- /dev/null
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+//
+// Implementation of _CONTEXT_CaptureContext for the LOONGARCH64 platform.
+// This function is processor dependent. It is used by exception handling
+// and always applies to the current thread.
+//
+
+#include "unixasmmacros.inc"
+#include "asmconstants.h"
+
+// Incoming:
+// a0: Context*
+// a1: Exception*
+//
+LEAF_ENTRY RtlRestoreContext, _TEXT
+#ifdef HAS_ADDRESS_SANITIZER
+ ld.w $r21, $a0, CONTEXT_ContextFlags
+ andi $r21, $r21, (1 << CONTEXT_CONTROL_BIT)
+ beq $r21, $r0, LOCAL_LABEL(Restore_CONTEXT_FLOATING_POINT)
+
+ addi.d $sp, $sp, -16
+ st.d $a0, $sp, 0
+ st.d $a1, $sp, 8
+
+ bl __asan_handle_no_return
+
+ ld.d $a0, $sp, 0
+ ld.d $a1, $sp, 8
+ addi.d $sp, $sp, 16
+
+LOCAL_LABEL(Restore_CONTEXT_FLOATING_POINT):
+#endif
+
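+ // Keep the context pointer in $t4: $a0 itself is restored below as part of the
+ // integer state, but the control registers still need to be read from the context.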
+ ori $t4, $a0, 0
+ ld.w $r21, $a0, CONTEXT_ContextFlags
+ andi $t1, $r21, (1 << CONTEXT_FLOATING_POINT_BIT)
+ beqz $t1, LOCAL_LABEL(No_Restore_CONTEXT_FLOATING_POINT)
+
+ // Restore the full 256-bit SIMD (LASX) registers.
+ xvld $xr0, $a0, CONTEXT_FPU_OFFSET + 32*0
+ xvld $xr1, $a0, CONTEXT_FPU_OFFSET + 32*1
+ xvld $xr2, $a0, CONTEXT_FPU_OFFSET + 32*2
+ xvld $xr3, $a0, CONTEXT_FPU_OFFSET + 32*3
+ xvld $xr4, $a0, CONTEXT_FPU_OFFSET + 32*4
+ xvld $xr5, $a0, CONTEXT_FPU_OFFSET + 32*5
+ xvld $xr6, $a0, CONTEXT_FPU_OFFSET + 32*6
+ xvld $xr7, $a0, CONTEXT_FPU_OFFSET + 32*7
+ xvld $xr8, $a0, CONTEXT_FPU_OFFSET + 32*8
+ xvld $xr9, $a0, CONTEXT_FPU_OFFSET + 32*9
+ xvld $xr10, $a0, CONTEXT_FPU_OFFSET + 32*10
+ xvld $xr11, $a0, CONTEXT_FPU_OFFSET + 32*11
+ xvld $xr12, $a0, CONTEXT_FPU_OFFSET + 32*12
+ xvld $xr13, $a0, CONTEXT_FPU_OFFSET + 32*13
+ xvld $xr14, $a0, CONTEXT_FPU_OFFSET + 32*14
+ xvld $xr15, $a0, CONTEXT_FPU_OFFSET + 32*15
+ xvld $xr16, $a0, CONTEXT_FPU_OFFSET + 32*16
+ xvld $xr17, $a0, CONTEXT_FPU_OFFSET + 32*17
+ xvld $xr18, $a0, CONTEXT_FPU_OFFSET + 32*18
+ xvld $xr19, $a0, CONTEXT_FPU_OFFSET + 32*19
+ xvld $xr20, $a0, CONTEXT_FPU_OFFSET + 32*20
+ xvld $xr21, $a0, CONTEXT_FPU_OFFSET + 32*21
+ xvld $xr22, $a0, CONTEXT_FPU_OFFSET + 32*22
+ xvld $xr23, $a0, CONTEXT_FPU_OFFSET + 32*23
+ xvld $xr24, $a0, CONTEXT_FPU_OFFSET + 32*24
+ xvld $xr25, $a0, CONTEXT_FPU_OFFSET + 32*25
+ xvld $xr26, $a0, CONTEXT_FPU_OFFSET + 32*26
+ xvld $xr27, $a0, CONTEXT_FPU_OFFSET + 32*27
+ xvld $xr28, $a0, CONTEXT_FPU_OFFSET + 32*28
+ xvld $xr29, $a0, CONTEXT_FPU_OFFSET + 32*29
+ xvld $xr30, $a0, CONTEXT_FPU_OFFSET + 32*30
+ xvld $xr31, $a0, CONTEXT_FPU_OFFSET + 32*31
+
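+ // The eight condition-flag registers $fcc0-$fcc7 are packed one byte each into the
+ // 64-bit word at CONTEXT_FLOAT_CONTROL_OFFSET, followed by the 32-bit FCSR.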
+ ld.d $t1, $a0, CONTEXT_FLOAT_CONTROL_OFFSET
+ movgr2cf $fcc0, $t1
+ srli.d $t1, $t1, 8
+ movgr2cf $fcc1, $t1
+ srli.d $t1, $t1, 8
+ movgr2cf $fcc2, $t1
+ srli.d $t1, $t1, 8
+ movgr2cf $fcc3, $t1
+ srli.d $t1, $t1, 8
+ movgr2cf $fcc4, $t1
+ srli.d $t1, $t1, 8
+ movgr2cf $fcc5, $t1
+ srli.d $t1, $t1, 8
+ movgr2cf $fcc6, $t1
+ srli.d $t1, $t1, 8
+ movgr2cf $fcc7, $t1
+
+ ld.w $t1, $a0, CONTEXT_FLOAT_CONTROL_OFFSET + 8
+ movgr2fcsr $fcsr0, $t1
+
+LOCAL_LABEL(No_Restore_CONTEXT_FLOATING_POINT):
+
+ andi $t1, $r21, (1 << CONTEXT_INTEGER_BIT)
+ beqz $t1, LOCAL_LABEL(No_Restore_CONTEXT_INTEGER)
+
+ ld.d $tp, $a0, CONTEXT_Tp
+ ld.d $a1, $a0, CONTEXT_A1
+ ld.d $a2, $a0, CONTEXT_A2
+ ld.d $a3, $a0, CONTEXT_A3
+ ld.d $a4, $a0, CONTEXT_A4
+ ld.d $a5, $a0, CONTEXT_A5
+ ld.d $a6, $a0, CONTEXT_A6
+ ld.d $a7, $a0, CONTEXT_A7
+ ld.d $t0, $a0, CONTEXT_T0
+ ld.d $t1, $a0, CONTEXT_T1
+ ld.d $t2, $a0, CONTEXT_T2
+ ld.d $t3, $a0, CONTEXT_T3
+ ld.d $t5, $a0, CONTEXT_T5
+ ld.d $t6, $a0, CONTEXT_T6
+ ld.d $t7, $a0, CONTEXT_T7
+ ld.d $t8, $a0, CONTEXT_T8
+
+ ld.d $s0, $a0, CONTEXT_S0
+ ld.d $s1, $a0, CONTEXT_S1
+ ld.d $s2, $a0, CONTEXT_S2
+ ld.d $s3, $a0, CONTEXT_S3
+ ld.d $s4, $a0, CONTEXT_S4
+ ld.d $s5, $a0, CONTEXT_S5
+ ld.d $s6, $a0, CONTEXT_S6
+ ld.d $s7, $a0, CONTEXT_S7
+ ld.d $s8, $a0, CONTEXT_S8
+
+ ld.d $a0, $a0, CONTEXT_A0
+
+LOCAL_LABEL(No_Restore_CONTEXT_INTEGER):
+
+ andi $r21, $r21, (1 << CONTEXT_CONTROL_BIT)
+ beq $r21, $r0, LOCAL_LABEL(No_Restore_CONTEXT_CONTROL)
+
+ ld.d $ra, $t4, CONTEXT_Ra
+ ld.d $fp, $t4, CONTEXT_Fp
+ ld.d $r21, $t4, CONTEXT_Pc
+ ld.d $sp, $t4, CONTEXT_Sp
+ // NOTE: $r21 and $t4 have been trashed at this point, so they are not restored on this path.
+ jirl $r0, $r21, 0
+
+LOCAL_LABEL(No_Restore_CONTEXT_CONTROL):
+ ld.d $r21, $t4, CONTEXT_R21
+ ld.d $t4, $t4, CONTEXT_T4
+ jirl $r0, $ra, 0
+LEAF_END RtlRestoreContext, _TEXT
+
+// Incoming:
+// a0: Context*
+
+LEAF_ENTRY RtlCaptureContext, _TEXT
+ PROLOG_STACK_ALLOC 16
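+ // $r21 is clobbered while building CONTEXT_FULL, so spill and reload it around the
+ // store of ContextFlags; the actual capture is delegated to CONTEXT_CaptureContext.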
+ st.d $r21, $sp, 0
+ li.w $r21, CONTEXT_FULL
+ st.w $r21, $a0, CONTEXT_ContextFlags
+ ld.d $r21, $sp, 0
+ EPILOG_STACK_FREE 16
+ b C_FUNC(CONTEXT_CaptureContext)
+LEAF_END RtlCaptureContext, _TEXT
+
+// Incoming:
+// a0: Context*
+//
+LEAF_ENTRY CONTEXT_CaptureContext, _TEXT
+ PROLOG_STACK_ALLOC 32
+ st.d $t0, $sp, 0
+ st.d $t1, $sp, 8
+ st.d $t3, $sp, 16
+
+ ld.w $t1, $a0, CONTEXT_ContextFlags
+ andi $t3, $t1, (1 << CONTEXT_CONTROL_BIT)
+ beqz $t3, LOCAL_LABEL(Done_CONTEXT_CONTROL)
+
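+ // The caller's SP is the SP on entry plus the 32-byte prolog allocation, and the
+ // captured Pc is the return address, since the snapshot logically resumes in the caller.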
+ addi.d $t0, $sp, 32
+ st.d $fp, $a0, CONTEXT_Fp
+ st.d $t0, $a0, CONTEXT_Sp
+ st.d $ra, $a0, CONTEXT_Ra
+ st.d $ra, $a0, CONTEXT_Pc
+
+LOCAL_LABEL(Done_CONTEXT_CONTROL):
+
+ andi $t3, $t1, (1 << CONTEXT_INTEGER_BIT)
+ beqz $t3, LOCAL_LABEL(Done_CONTEXT_INTEGER)
+
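+ // Reload the caller's original $t0, $t1 and $t3 (spilled in the prolog) so that
+ // their real values, not the scratch values, are recorded in the context.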
+ ld.d $t0, $sp, 0
+ ld.d $t1, $sp, 8
+ ld.d $t3, $sp, 16
+
+ st.d $tp, $a0, CONTEXT_Tp
+ st.d $a0, $a0, CONTEXT_A0
+ st.d $a1, $a0, CONTEXT_A1
+ st.d $a2, $a0, CONTEXT_A2
+ st.d $a3, $a0, CONTEXT_A3
+ st.d $a4, $a0, CONTEXT_A4
+ st.d $a5, $a0, CONTEXT_A5
+ st.d $a6, $a0, CONTEXT_A6
+ st.d $a7, $a0, CONTEXT_A7
+ st.d $t0, $a0, CONTEXT_T0
+ st.d $t1, $a0, CONTEXT_T1
+ st.d $t2, $a0, CONTEXT_T2
+ st.d $t3, $a0, CONTEXT_T3
+ st.d $t4, $a0, CONTEXT_T4
+ st.d $t5, $a0, CONTEXT_T5
+ st.d $t6, $a0, CONTEXT_T6
+ st.d $t7, $a0, CONTEXT_T7
+ st.d $t8, $a0, CONTEXT_T8
+ st.d $r21, $a0, CONTEXT_R21
+ st.d $s0, $a0, CONTEXT_S0
+ st.d $s1, $a0, CONTEXT_S1
+ st.d $s2, $a0, CONTEXT_S2
+ st.d $s3, $a0, CONTEXT_S3
+ st.d $s4, $a0, CONTEXT_S4
+ st.d $s5, $a0, CONTEXT_S5
+ st.d $s6, $a0, CONTEXT_S6
+ st.d $s7, $a0, CONTEXT_S7
+ st.d $s8, $a0, CONTEXT_S8
+
+LOCAL_LABEL(Done_CONTEXT_INTEGER):
+ ld.w $t1, $a0, CONTEXT_ContextFlags
+
+ andi $t3, $t1, (1 << CONTEXT_FLOATING_POINT_BIT)
+ beqz $t3, LOCAL_LABEL(Done_CONTEXT_FLOATING_POINT)
+
+ // Save the full 256-bit SIMD (LASX) registers.
+ xvst $xr0 , $a0, CONTEXT_FPU_OFFSET + 32*0
+ xvst $xr1 , $a0, CONTEXT_FPU_OFFSET + 32*1
+ xvst $xr2 , $a0, CONTEXT_FPU_OFFSET + 32*2
+ xvst $xr3 , $a0, CONTEXT_FPU_OFFSET + 32*3
+ xvst $xr4 , $a0, CONTEXT_FPU_OFFSET + 32*4
+ xvst $xr5 , $a0, CONTEXT_FPU_OFFSET + 32*5
+ xvst $xr6 , $a0, CONTEXT_FPU_OFFSET + 32*6
+ xvst $xr7 , $a0, CONTEXT_FPU_OFFSET + 32*7
+ xvst $xr8 , $a0, CONTEXT_FPU_OFFSET + 32*8
+ xvst $xr9 , $a0, CONTEXT_FPU_OFFSET + 32*9
+ xvst $xr10, $a0, CONTEXT_FPU_OFFSET + 32*10
+ xvst $xr11, $a0, CONTEXT_FPU_OFFSET + 32*11
+ xvst $xr12, $a0, CONTEXT_FPU_OFFSET + 32*12
+ xvst $xr13, $a0, CONTEXT_FPU_OFFSET + 32*13
+ xvst $xr14, $a0, CONTEXT_FPU_OFFSET + 32*14
+ xvst $xr15, $a0, CONTEXT_FPU_OFFSET + 32*15
+ xvst $xr16, $a0, CONTEXT_FPU_OFFSET + 32*16
+ xvst $xr17, $a0, CONTEXT_FPU_OFFSET + 32*17
+ xvst $xr18, $a0, CONTEXT_FPU_OFFSET + 32*18
+ xvst $xr19, $a0, CONTEXT_FPU_OFFSET + 32*19
+ xvst $xr20, $a0, CONTEXT_FPU_OFFSET + 32*20
+ xvst $xr21, $a0, CONTEXT_FPU_OFFSET + 32*21
+ xvst $xr22, $a0, CONTEXT_FPU_OFFSET + 32*22
+ xvst $xr23, $a0, CONTEXT_FPU_OFFSET + 32*23
+ xvst $xr24, $a0, CONTEXT_FPU_OFFSET + 32*24
+ xvst $xr25, $a0, CONTEXT_FPU_OFFSET + 32*25
+ xvst $xr26, $a0, CONTEXT_FPU_OFFSET + 32*26
+ xvst $xr27, $a0, CONTEXT_FPU_OFFSET + 32*27
+ xvst $xr28, $a0, CONTEXT_FPU_OFFSET + 32*28
+ xvst $xr29, $a0, CONTEXT_FPU_OFFSET + 32*29
+ xvst $xr30, $a0, CONTEXT_FPU_OFFSET + 32*30
+ xvst $xr31, $a0, CONTEXT_FPU_OFFSET + 32*31
+
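+ // Pack $fcc0-$fcc7 one byte each into the word at CONTEXT_FLOAT_CONTROL_OFFSET,
+ // then store the 32-bit FCSR after them.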
+ ori $t0, $r0, 0
+ movcf2gr $t0, $fcc0
+ st.b $t0, $a0, CONTEXT_FLOAT_CONTROL_OFFSET
+ movcf2gr $t0, $fcc1
+ st.b $t0, $a0, CONTEXT_FLOAT_CONTROL_OFFSET + 1
+ movcf2gr $t0, $fcc2
+ st.b $t0, $a0, CONTEXT_FLOAT_CONTROL_OFFSET + 2
+ movcf2gr $t0, $fcc3
+ st.b $t0, $a0, CONTEXT_FLOAT_CONTROL_OFFSET + 3
+ movcf2gr $t0, $fcc4
+ st.b $t0, $a0, CONTEXT_FLOAT_CONTROL_OFFSET + 4
+ movcf2gr $t0, $fcc5
+ st.b $t0, $a0, CONTEXT_FLOAT_CONTROL_OFFSET + 5
+ movcf2gr $t0, $fcc6
+ st.b $t0, $a0, CONTEXT_FLOAT_CONTROL_OFFSET + 6
+ movcf2gr $t0, $fcc7
+ st.b $t0, $a0, CONTEXT_FLOAT_CONTROL_OFFSET + 7
+
+ movfcsr2gr $t0, $fcsr0
+ st.w $t0, $a0, CONTEXT_FLOAT_CONTROL_OFFSET + 8
+
+LOCAL_LABEL(Done_CONTEXT_FLOATING_POINT):
+
+ EPILOG_STACK_FREE 32
+ jirl $r0, $ra, 0
+LEAF_END CONTEXT_CaptureContext, _TEXT
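For orientation, a minimal caller-side sketch of how the two routines pair up (illustration only; it assumes the usual PAL declarations from pal.h and the conventional PCONTEXT-based signatures):

#include "pal.h"   // assumption: provides CONTEXT, RtlCaptureContext and RtlRestoreContext

void SampleRoundTrip()
{
    CONTEXT ctx;
    // RtlCaptureContext sets ContextFlags to CONTEXT_FULL itself and records the
    // caller's registers; the captured Pc is the instruction after this call.
    RtlCaptureContext(&ctx);

    // ... the snapshot can be inspected or edited here ...

    // RtlRestoreContext(&ctx, NULL) would resume execution at the captured Pc with
    // the saved register state, so control would not fall through to this point.
}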
--- /dev/null
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+#include "unixasmmacros.inc"
+LEAF_ENTRY DBG_DebugBreak, _TEXT
+ EMIT_BREAKPOINT
+LEAF_END_MARKED DBG_DebugBreak, _TEXT
--- /dev/null
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+/*++
+
+
+
+Module Name:
+
+ processor.cpp
+
+Abstract:
+
+ Implementation of processor-related functions for the LOONGARCH64
+ platform. These functions are processor dependent.
+
+
+
+--*/
+
+#include "pal/palinternal.h"