#undef REG
#elif defined(TARGET_RISCV64)
#undef REG
-#define REG(reg, field) { offsetof(RiscV64VolatileContextPointer, field) }
- REG(zero, R0),
- REG(a0, A0),
- REG(a1, A1),
- REG(a2, A2),
- REG(a3, A3),
- REG(a4, A4),
- REG(a5, A5),
- REG(a6, A6),
- REG(a7, A7),
- REG(t0, T0),
- REG(t1, T1),
- REG(t2, T2),
- REG(t3, T3),
- REG(t4, T4),
- REG(t5, T5),
- REG(t6, T6),
-#undef REG
-#define REG(reg, field) { offsetof(T_KNONVOLATILE_CONTEXT_POINTERS, field) }
+#define REG(reg, field) { FIELD_OFFSET(T_KNONVOLATILE_CONTEXT_POINTERS, field) }
+#define vREG(reg, field) { FIELD_OFFSET(RiscV64VolatileContextPointer, field) }
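+ // Entries are indexed by RISC-V integer register number (x0..x31): vREG
+ // fields resolve through RiscV64VolatileContextPointer, REG fields through
+ // T_KNONVOLATILE_CONTEXT_POINTERS. Sp has no slot in either pointer struct,
+ // so its offset is taken from T_CONTEXT and special-cased by the dumper.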
+ vREG(zero, R0),
+ REG(Ra, Ra),
+ { FIELD_OFFSET(T_CONTEXT, Sp) },
+ REG(Gp, Gp),
+ REG(Tp, Tp),
+ vREG(t0, T0),
+ vREG(t1, T1),
+ vREG(t2, T2),
+ REG(Fp, Fp),
REG(s1, S1),
+ vREG(a0, A0),
+ vREG(a1, A1),
+ vREG(a2, A2),
+ vREG(a3, A3),
+ vREG(a4, A4),
+ vREG(a5, A5),
+ vREG(a6, A6),
+ vREG(a7, A7),
REG(s2, S2),
REG(s3, S3),
REG(s4, S4),
 REG(s5, S5),
 REG(s6, S6),
 REG(s7, S7),
 REG(s8, S8),
 REG(s9, S9),
REG(s10, S10),
REG(s11, S11),
- REG(ra, Ra),
- REG(gp, Gp),
- REG(tp, Tp),
- REG(fp, Fp),
- { offsetof(T_CONTEXT, Sp) },
+ vREG(t3, T3),
+ vREG(t4, T4),
+ vREG(t5, T5),
+ vREG(t6, T6),
+#undef vREG
#undef REG
#else
PORTABILITY_ASSERT("GcInfoDumper::ReportPointerRecord is not implemented on this platform.")
iSPRegister = (FIELD_OFFSET(T_CONTEXT, Sp) - FIELD_OFFSET(T_CONTEXT, R0)) / sizeof(ULONG);
UINT iBFRegister = m_StackBaseRegister;
#elif defined(TARGET_RISCV64)
- assert(!"unimplemented on RISCV64 yet");
- iSPRegister = 0;
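+ // Index of Sp within the T_CONTEXT GPR array; RISC-V64 registers are
+ // 64-bit, hence ULONGLONG rather than the ULONG used by the 32-bit ARM
+ // variant above.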
+ iSPRegister = (FIELD_OFFSET(T_CONTEXT, Sp) - FIELD_OFFSET(T_CONTEXT, R0)) / sizeof(ULONGLONG);
#endif
-#if defined(TARGET_ARM) || defined(TARGET_ARM64)
+#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
BYTE* pContext = (BYTE*)&(pRD->volatileCurrContextPointers);
#else
BYTE* pContext = (BYTE*)pRD->pCurrentContext;
{
break;
}
+#elif defined(TARGET_RISCV64)
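+ // Volatile (caller-saved) GPRs on RISC-V64: zero (x0), t0-t2 (x5-x7),
+ // a0-a7 (x10-x17), and t3-t6 (x28-x31).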
+ bool isVolatile = (iReg == 0 || (iReg >= 5 && iReg <= 7) || (iReg >= 10 && iReg <= 17) || iReg >= 28);
+ if (ctx == 0)
+ {
+ if (!isVolatile)
+ {
+ continue;
+ }
+ }
+ else if (isVolatile) // skip volatile registers for second context
+ {
+ continue;
+ }
#endif
{
_ASSERTE(iReg < ARRAY_SIZE(rgRegisters));
pReg = *(SIZE_T**)((BYTE*)pRD->pCurrentContextPointers + rgRegisters[iEncodedReg].cbContextOffset);
}
-#elif defined(TARGET_ARM64)
+#elif defined(TARGET_ARM64) || defined(TARGET_RISCV64)
pReg = *(SIZE_T**)(pContext + rgRegisters[iReg].cbContextOffset);
if (iEncodedReg == iSPRegister)
{
GcStackSlotBase base;
if (iSPRegister == iEncodedReg)
{
-#if defined(TARGET_ARM) || defined(TARGET_ARM64)
+#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
base = GC_SP_REL;
#else
if (0 == ctx)
base = GC_SP_REL;
else
base = GC_CALLER_SP_REL;
-#endif //defined(TARGET_ARM) || defined(TARGET_ARM64)
+#endif //defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
}
else
{
}
}
-#if defined(TARGET_ARM) || defined(TARGET_ARM64)
+#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
pContext = (BYTE*)pRD->pCurrentContextPointers;
#else
pContext = (BYTE*)pRD->pCallerContext;
*(ppVolatileReg+iReg) = &regdisp.pCurrentContext->X0 + iReg;
}
#elif defined(TARGET_RISCV64)
-#pragma message("Unimplemented for RISCV64 yet.")
- assert(!"unimplemented on RISCV64 yet");
+ FILL_REGS(pCurrentContext->R0, 33);
+ FILL_REGS(pCallerContext->R0, 33);
+
+ regdisp.pCurrentContextPointers = &regdisp.ctxPtrsOne;
+ regdisp.pCallerContextPointers = &regdisp.ctxPtrsTwo;
+
+ // Set S1
+ regdisp.pCurrentContextPointers->S1 = &regdisp.pCurrentContext->S1;
+ regdisp.pCallerContextPointers ->S1 = &regdisp.pCallerContext ->S1;
+
+ ULONG64 **ppCurrentReg = &regdisp.pCurrentContextPointers->S2;
+ ULONG64 **ppCallerReg = &regdisp.pCallerContextPointers ->S2;
+ // Set S2-S11
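+ // Relies on S2..S11 being laid out contiguously in both T_CONTEXT and the
+ // context-pointer structs.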
+ for (iReg = 0; iReg < 10; iReg++)
+ {
+ *(ppCurrentReg + iReg) = &regdisp.pCurrentContext->S2 + iReg;
+ *(ppCallerReg + iReg) = &regdisp.pCallerContext ->S2 + iReg;
+ }
+
+ // Set Fp
+ regdisp.pCurrentContextPointers->Fp = &regdisp.pCurrentContext->Fp;
+ regdisp.pCallerContextPointers ->Fp = &regdisp.pCallerContext ->Fp;
+
+ // Set Gp
+ regdisp.pCurrentContextPointers->Gp = &regdisp.pCurrentContext->Gp;
+ regdisp.pCallerContextPointers ->Gp = &regdisp.pCallerContext ->Gp;
+
+ // Set Tp
+ regdisp.pCurrentContextPointers->Tp = &regdisp.pCurrentContext->Tp;
+ regdisp.pCallerContextPointers ->Tp = &regdisp.pCallerContext ->Tp;
+
+ // Set Ra
+ regdisp.pCurrentContextPointers->Ra = &regdisp.pCurrentContext->Ra;
+ regdisp.pCallerContextPointers ->Ra = &regdisp.pCallerContext ->Ra;
+
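+ // Volatile registers only get a "current" pointer set; they are not
+ // preserved across calls, so no caller-context values exist to report.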
+ regdisp.volatileCurrContextPointers.R0 = &regdisp.pCurrentContext->R0;
+ regdisp.volatileCurrContextPointers.A0 = &regdisp.pCurrentContext->A0;
+ regdisp.volatileCurrContextPointers.A1 = &regdisp.pCurrentContext->A1;
+ regdisp.volatileCurrContextPointers.A2 = &regdisp.pCurrentContext->A2;
+ regdisp.volatileCurrContextPointers.A3 = &regdisp.pCurrentContext->A3;
+ regdisp.volatileCurrContextPointers.A4 = &regdisp.pCurrentContext->A4;
+ regdisp.volatileCurrContextPointers.A5 = &regdisp.pCurrentContext->A5;
+ regdisp.volatileCurrContextPointers.A6 = &regdisp.pCurrentContext->A6;
+ regdisp.volatileCurrContextPointers.A7 = &regdisp.pCurrentContext->A7;
+ regdisp.volatileCurrContextPointers.T0 = &regdisp.pCurrentContext->T0;
+ regdisp.volatileCurrContextPointers.T1 = &regdisp.pCurrentContext->T1;
+ regdisp.volatileCurrContextPointers.T2 = &regdisp.pCurrentContext->T2;
+ regdisp.volatileCurrContextPointers.T3 = &regdisp.pCurrentContext->T3;
+ regdisp.volatileCurrContextPointers.T4 = &regdisp.pCurrentContext->T4;
+ regdisp.volatileCurrContextPointers.T5 = &regdisp.pCurrentContext->T5;
+ regdisp.volatileCurrContextPointers.T6 = &regdisp.pCurrentContext->T6;
#else
PORTABILITY_ASSERT("GcInfoDumper::EnumerateStateChanges is not implemented on this platform.")
#endif
(GcInfoDecoderFlags)( DECODE_SECURITY_OBJECT
| DECODE_CODE_LENGTH
| DECODE_VARARG
-#if defined(TARGET_ARM) || defined(TARGET_ARM64)
+#if defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
| DECODE_HAS_TAILCALLS
-#endif // TARGET_ARM || TARGET_ARM64
+#endif // TARGET_ARM || TARGET_ARM64 || TARGET_RISCV64
| DECODE_INTERRUPTIBILITY),
offset);
#ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
UINT32 safePointOffset = offset;
-#if defined(TARGET_AMD64) || defined(TARGET_ARM) || defined(TARGET_ARM64)
+#if defined(TARGET_AMD64) || defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_RISCV64)
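+ // Safepoints are encoded with a -1 adjustment on these targets; compensate
+ // before querying the decoder.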
safePointOffset++;
#endif
if(safePointDecoder.IsSafePoint(safePointOffset))