[RISC-V] Initial patch to fix RISCV64 interpreter (#94548)
[platform/upstream/dotnet/runtime.git] / src / coreclr / vm / riscv64 / stubs.cpp
1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 //
4 // File: stubs.cpp
5 //
6 // This file contains stub functions for unimplemented features needed to
7 // run on the RISCV64 platform.
8
9 #include "common.h"
10 #include "dllimportcallback.h"
11 #include "comdelegate.h"
12 #include "asmconstants.h"
13 #include "virtualcallstub.h"
14 #include "jitinterface.h"
15 #include "ecall.h"
16
17
18 #ifndef DACCESS_COMPILE
19 //-----------------------------------------------------------------------
20 // InstructionFormat for JAL/JALR (unconditional jump)
21 //-----------------------------------------------------------------------
22 class BranchInstructionFormat : public InstructionFormat
23 {
24     // Encoding of the VariationCode:
25     // bit(0) indicates whether this is a direct or an indirect jump.
26     // bit(1) indicates whether this is a branch with link -a.k.a call-
27
28     public:
29         enum VariationCodes
30         {
31             BIF_VAR_INDIRECT           = 0x00000001,
32             BIF_VAR_CALL               = 0x00000002,
33
34             BIF_VAR_JUMP               = 0x00000000,
35             BIF_VAR_INDIRECT_CALL      = 0x00000003
36         };
37     private:
38         BOOL IsIndirect(UINT variationCode)
39         {
40             return (variationCode & BIF_VAR_INDIRECT) != 0;
41         }
42         BOOL IsCall(UINT variationCode)
43         {
44             return (variationCode & BIF_VAR_CALL) != 0;
45         }
46
47
48     public:
49         BranchInstructionFormat() : InstructionFormat(InstructionFormat::k64)
50         {
51             LIMITED_METHOD_CONTRACT;
52         }
53
54         virtual UINT GetSizeOfInstruction(UINT refSize, UINT variationCode)
55         {
56             LIMITED_METHOD_CONTRACT;
57             _ASSERTE(refSize == InstructionFormat::k64);
58
59             if (IsIndirect(variationCode))
60                 return 16;
61             else
62                 return 12;
63         }
64
65         virtual UINT GetSizeOfData(UINT refSize, UINT variationCode)
66         {
67             WRAPPER_NO_CONTRACT;
68             return 8;
69         }
70
71
72         virtual UINT GetHotSpotOffset(UINT refsize, UINT variationCode)
73         {
74             WRAPPER_NO_CONTRACT;
75             return 0;
76         }
77
78         virtual BOOL CanReach(UINT refSize, UINT variationCode, BOOL fExternal, INT_PTR offset)
79         {
80             if (fExternal)
81             {
82                 // Note that the parameter 'offset' is not an offset but the target address itself (when fExternal is true)
83                 return (refSize == InstructionFormat::k64);
84             }
85             else
86             {
87                 return ((offset >= -0x80000000L && offset <= 0x7fffffff) || (refSize == InstructionFormat::k64));
88             }
89         }
90
91         virtual VOID EmitInstruction(UINT refSize, __int64 fixedUpReference, BYTE *pOutBufferRX, BYTE *pOutBufferRW, UINT variationCode, BYTE *pDataBuffer)
92         {
93             LIMITED_METHOD_CONTRACT;
94
95             if (IsIndirect(variationCode))
96             {
97                 _ASSERTE(((UINT_PTR)pDataBuffer & 7) == 0);
98
99                 __int64 dataOffset = pDataBuffer - pOutBufferRW;
100
101                 if ((dataOffset < -(0x80000000L)) || (dataOffset > 0x7fffffff))
102                     COMPlusThrow(kNotSupportedException);
103
104                 UINT16 imm12 = (UINT16)(0xFFF & dataOffset);
105                 // auipc  t1, dataOffset[31:12]
106                 // ld  t1, t1, dataOffset[11:0]
107                 // ld  t1, t1, 0
108                 // jalr  x0/1, t1,0
109
110                 *(DWORD*)pOutBufferRW = 0x00000317 | (((dataOffset + 0x800) >> 12) << 12); // auipc t1, dataOffset[31:12]
111                 *(DWORD*)(pOutBufferRW + 4) = 0x00033303 | (imm12 << 20); // ld  t1, t1, dataOffset[11:0]
112                 *(DWORD*)(pOutBufferRW + 8) = 0x00033303; // ld  t1, 0(t1)
113                 if (IsCall(variationCode))
114                 {
115                     *(DWORD*)(pOutBufferRW + 12) = 0x000300e7; // jalr  ra, t1, 0
116                 }
117                 else
118                 {
119                     *(DWORD*)(pOutBufferRW + 12) = 0x00030067 ;// jalr  x0, t1,0
120                 }
121
122                 *((__int64*)pDataBuffer) = fixedUpReference + (__int64)pOutBufferRX;
123             }
124             else
125             {
126                 _ASSERTE(((UINT_PTR)pDataBuffer & 7) == 0);
127
128                 __int64 dataOffset = pDataBuffer - pOutBufferRW;
129
130                 if ((dataOffset < -(0x80000000L)) || (dataOffset > 0x7fffffff))
131                     COMPlusThrow(kNotSupportedException);
132
133                 UINT16 imm12 = (UINT16)(0xFFF & dataOffset);
134                 // auipc  t1, dataOffset[31:12]
135                 // ld  t1, t1, dataOffset[11:0]
136                 // jalr  x0/1, t1,0
137
138                 *(DWORD*)pOutBufferRW = 0x00000317 | (((dataOffset + 0x800) >> 12) << 12);// auipc t1, dataOffset[31:12]
139                 *(DWORD*)(pOutBufferRW + 4) = 0x00033303 | (imm12 << 20); // ld  t1, t1, dataOffset[11:0]
140                 if (IsCall(variationCode))
141                 {
142                     *(DWORD*)(pOutBufferRW + 8) = 0x000300e7; // jalr  ra, t1, 0
143                 }
144                 else
145                 {
146                     *(DWORD*)(pOutBufferRW + 8) = 0x00030067 ;// jalr  x0, t1,0
147                 }
148
149                 if (!ClrSafeInt<__int64>::addition(fixedUpReference, (__int64)pOutBufferRX, fixedUpReference))
150                     COMPlusThrowArithmetic();
151                 *((__int64*)pDataBuffer) = fixedUpReference;
152             }
153         }
154 };
155
156 static BYTE gBranchIF[sizeof(BranchInstructionFormat)];
157
158 #endif
159
160 void ClearRegDisplayArgumentAndScratchRegisters(REGDISPLAY * pRD)
161 {
162     pRD->volatileCurrContextPointers.R0 = NULL;
163     pRD->volatileCurrContextPointers.A0 = NULL;
164     pRD->volatileCurrContextPointers.A1 = NULL;
165     pRD->volatileCurrContextPointers.A2 = NULL;
166     pRD->volatileCurrContextPointers.A3 = NULL;
167     pRD->volatileCurrContextPointers.A4 = NULL;
168     pRD->volatileCurrContextPointers.A5 = NULL;
169     pRD->volatileCurrContextPointers.A6 = NULL;
170     pRD->volatileCurrContextPointers.A7 = NULL;
171     pRD->volatileCurrContextPointers.T0 = NULL;
172     pRD->volatileCurrContextPointers.T1 = NULL;
173     pRD->volatileCurrContextPointers.T2 = NULL;
174     pRD->volatileCurrContextPointers.T3 = NULL;
175     pRD->volatileCurrContextPointers.T4 = NULL;
176     pRD->volatileCurrContextPointers.T5 = NULL;
177     pRD->volatileCurrContextPointers.T6 = NULL;
178 }
179
// Unwinds from the capture point recorded in 'baseState' until either
// managed code is reached or (when funCallDepth > 0) exactly funCallDepth
// frames have been popped, and stores the resulting pc/sp and callee-saved
// register state into 'unwoundstate'.
//
// Arguments:
//    baseState          - LazyMachState holding the captured ip/sp and
//                         callee-saved registers to start unwinding from
//    unwoundstate       - receives the unwound register state
//    threadId           - target thread id (used only by the DAC unwinder)
//    funCallDepth       - if positive, pop exactly this many frames instead
//                         of searching for managed code
//    hostCallPreference - whether IsManagedCode may yield to take its reader
//                         lock; if it may not and the lock is contended,
//                         unwoundstate->_isValid is set to false and we bail
void LazyMachState::unwindLazyState(LazyMachState* baseState,
                                    MachState* unwoundstate,
                                    DWORD threadId,
                                    int funCallDepth,
                                    HostCallPreference hostCallPreference)
{
    T_CONTEXT context;
    T_KNONVOLATILE_CONTEXT_POINTERS nonVolContextPtrs;

    context.ContextFlags = 0; // Read by PAL_VirtualUnwind.

    // Seed the unwind context (and mirror into the out-state) with the
    // callee-saved registers captured at frame setup.
    context.Fp = unwoundstate->captureCalleeSavedRegisters[0] = baseState->captureCalleeSavedRegisters[0];
    context.S1 = unwoundstate->captureCalleeSavedRegisters[1] = baseState->captureCalleeSavedRegisters[1];
    context.S2 = unwoundstate->captureCalleeSavedRegisters[2] = baseState->captureCalleeSavedRegisters[2];
    context.S3 = unwoundstate->captureCalleeSavedRegisters[3] = baseState->captureCalleeSavedRegisters[3];
    context.S4 = unwoundstate->captureCalleeSavedRegisters[4] = baseState->captureCalleeSavedRegisters[4];
    context.S5 = unwoundstate->captureCalleeSavedRegisters[5] = baseState->captureCalleeSavedRegisters[5];
    context.S6 = unwoundstate->captureCalleeSavedRegisters[6] = baseState->captureCalleeSavedRegisters[6];
    context.S7 = unwoundstate->captureCalleeSavedRegisters[7] = baseState->captureCalleeSavedRegisters[7];
    context.S8 = unwoundstate->captureCalleeSavedRegisters[8] = baseState->captureCalleeSavedRegisters[8];
    context.S9 = unwoundstate->captureCalleeSavedRegisters[9] = baseState->captureCalleeSavedRegisters[9];
    context.S10 = unwoundstate->captureCalleeSavedRegisters[10] = baseState->captureCalleeSavedRegisters[10];
    context.S11 = unwoundstate->captureCalleeSavedRegisters[11] = baseState->captureCalleeSavedRegisters[11];
    context.Gp = unwoundstate->captureCalleeSavedRegisters[12] = baseState->captureCalleeSavedRegisters[12];
    context.Tp = unwoundstate->captureCalleeSavedRegisters[13] = baseState->captureCalleeSavedRegisters[13];
    context.Ra = NULL; // Filled by the unwinder

    context.Sp = baseState->captureSp;
    context.Pc = baseState->captureIp;

#if !defined(DACCESS_COMPILE)
    // For DAC, if we get here, it means that the LazyMachState is uninitialized and we have to unwind it.
    // The API we use to unwind in DAC is StackWalk64(), which does not support the context pointers.
    //
    // Restore the integer registers to KNONVOLATILE_CONTEXT_POINTERS to be used for unwinding.
    nonVolContextPtrs.Fp = &unwoundstate->captureCalleeSavedRegisters[0];
    nonVolContextPtrs.S1 = &unwoundstate->captureCalleeSavedRegisters[1];
    nonVolContextPtrs.S2 = &unwoundstate->captureCalleeSavedRegisters[2];
    nonVolContextPtrs.S3 = &unwoundstate->captureCalleeSavedRegisters[3];
    nonVolContextPtrs.S4 = &unwoundstate->captureCalleeSavedRegisters[4];
    nonVolContextPtrs.S5 = &unwoundstate->captureCalleeSavedRegisters[5];
    nonVolContextPtrs.S6 = &unwoundstate->captureCalleeSavedRegisters[6];
    nonVolContextPtrs.S7 = &unwoundstate->captureCalleeSavedRegisters[7];
    nonVolContextPtrs.S8 = &unwoundstate->captureCalleeSavedRegisters[8];
    nonVolContextPtrs.S9 = &unwoundstate->captureCalleeSavedRegisters[9];
    nonVolContextPtrs.S10 = &unwoundstate->captureCalleeSavedRegisters[10];
    nonVolContextPtrs.S11 = &unwoundstate->captureCalleeSavedRegisters[11];
    nonVolContextPtrs.Gp = &unwoundstate->captureCalleeSavedRegisters[12];
    nonVolContextPtrs.Tp = &unwoundstate->captureCalleeSavedRegisters[13];
    nonVolContextPtrs.Ra = NULL; // Filled by the unwinder

#endif // DACCESS_COMPILE

    LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK    LazyMachState::unwindLazyState(ip:%p,sp:%p)\n", baseState->captureIp, baseState->captureSp));

    PCODE pvControlPc;

    // Unwind one frame per iteration until the stop condition below is hit.
    do {

#ifndef TARGET_UNIX
        pvControlPc = Thread::VirtualUnwindCallFrame(&context, &nonVolContextPtrs);
#else // !TARGET_UNIX
#ifdef DACCESS_COMPILE
        HRESULT hr = DacVirtualUnwind(threadId, &context, &nonVolContextPtrs);
        if (FAILED(hr))
        {
            DacError(hr);
        }
#else // DACCESS_COMPILE
        BOOL success = PAL_VirtualUnwind(&context, &nonVolContextPtrs);
        if (!success)
        {
            _ASSERTE(!"unwindLazyState: Unwinding failed");
            EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
        }
#endif // DACCESS_COMPILE
        pvControlPc = GetIP(&context);
#endif // !TARGET_UNIX

        if (funCallDepth > 0)
        {
            // Fixed-depth mode: stop after exactly funCallDepth frames.
            funCallDepth--;
            if (funCallDepth == 0)
                break;
        }
        else
        {
            // Determine  whether given IP resides in JITted code. (It returns nonzero in that case.)
            // Use it now to see if we've unwound to managed code yet.
            BOOL fFailedReaderLock = FALSE;
            BOOL fIsManagedCode = ExecutionManager::IsManagedCode(pvControlPc, hostCallPreference, &fFailedReaderLock);
            if (fFailedReaderLock)
            {
                // We don't know if we would have been able to find a JIT
                // manager, because we couldn't enter the reader lock without
                // yielding (and our caller doesn't want us to yield).  So abort
                // now.

                // Invalidate the lazyState we're returning, so the caller knows
                // we aborted before we could fully unwind
                unwoundstate->_isValid = false;
                return;
            }

            if (fIsManagedCode)
                break;

        }
    } while (true);

#ifdef TARGET_UNIX
    // On Unix the unwinder updated 'context' in place; copy the final
    // callee-saved values back into the out-state.
    unwoundstate->captureCalleeSavedRegisters[0] = context.Fp;
    unwoundstate->captureCalleeSavedRegisters[1] = context.S1;
    unwoundstate->captureCalleeSavedRegisters[2] = context.S2;
    unwoundstate->captureCalleeSavedRegisters[3] = context.S3;
    unwoundstate->captureCalleeSavedRegisters[4] = context.S4;
    unwoundstate->captureCalleeSavedRegisters[5] = context.S5;
    unwoundstate->captureCalleeSavedRegisters[6] = context.S6;
    unwoundstate->captureCalleeSavedRegisters[7] = context.S7;
    unwoundstate->captureCalleeSavedRegisters[8] = context.S8;
    unwoundstate->captureCalleeSavedRegisters[9] = context.S9;
    unwoundstate->captureCalleeSavedRegisters[10] = context.S10;
    unwoundstate->captureCalleeSavedRegisters[11] = context.S11;
    unwoundstate->captureCalleeSavedRegisters[12] = context.Gp;
    unwoundstate->captureCalleeSavedRegisters[13] = context.Tp;
#endif

#ifdef DACCESS_COMPILE
    // For DAC builds, we update the registers directly since we dont have context pointers
    unwoundstate->captureCalleeSavedRegisters[0] = context.Fp;
    unwoundstate->captureCalleeSavedRegisters[1] = context.S1;
    unwoundstate->captureCalleeSavedRegisters[2] = context.S2;
    unwoundstate->captureCalleeSavedRegisters[3] = context.S3;
    unwoundstate->captureCalleeSavedRegisters[4] = context.S4;
    unwoundstate->captureCalleeSavedRegisters[5] = context.S5;
    unwoundstate->captureCalleeSavedRegisters[6] = context.S6;
    unwoundstate->captureCalleeSavedRegisters[7] = context.S7;
    unwoundstate->captureCalleeSavedRegisters[8] = context.S8;
    unwoundstate->captureCalleeSavedRegisters[9] = context.S9;
    unwoundstate->captureCalleeSavedRegisters[10] = context.S10;
    unwoundstate->captureCalleeSavedRegisters[11] = context.S11;
    unwoundstate->captureCalleeSavedRegisters[12] = context.Gp;
    unwoundstate->captureCalleeSavedRegisters[13] = context.Tp;
#else // !DACCESS_COMPILE
    // For non-DAC builds, update the register state from context pointers
    unwoundstate->ptrCalleeSavedRegisters[0] = nonVolContextPtrs.Fp;
    unwoundstate->ptrCalleeSavedRegisters[1] = nonVolContextPtrs.S1;
    unwoundstate->ptrCalleeSavedRegisters[2] = nonVolContextPtrs.S2;
    unwoundstate->ptrCalleeSavedRegisters[3] = nonVolContextPtrs.S3;
    unwoundstate->ptrCalleeSavedRegisters[4] = nonVolContextPtrs.S4;
    unwoundstate->ptrCalleeSavedRegisters[5] = nonVolContextPtrs.S5;
    unwoundstate->ptrCalleeSavedRegisters[6] = nonVolContextPtrs.S6;
    unwoundstate->ptrCalleeSavedRegisters[7] = nonVolContextPtrs.S7;
    unwoundstate->ptrCalleeSavedRegisters[8] = nonVolContextPtrs.S8;
    unwoundstate->ptrCalleeSavedRegisters[9] = nonVolContextPtrs.S9;
    unwoundstate->ptrCalleeSavedRegisters[10] = nonVolContextPtrs.S10;
    unwoundstate->ptrCalleeSavedRegisters[11] = nonVolContextPtrs.S11;
    unwoundstate->ptrCalleeSavedRegisters[12] = nonVolContextPtrs.Gp;
    unwoundstate->ptrCalleeSavedRegisters[13] = nonVolContextPtrs.Tp;
#endif // DACCESS_COMPILE

    unwoundstate->_pc = context.Pc;
    unwoundstate->_sp = context.Sp;

    unwoundstate->_isValid = TRUE;
}
346
// Rebuilds the register display from the MachState cached in this helper
// method frame. On DAC builds an uninitialized MachState is unwound on the
// spot; otherwise the cached pc/sp and callee-saved register pointers are
// copied into the REGDISPLAY.
void HelperMethodFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        SUPPORTS_DAC;
    }
    CONTRACTL_END;

    pRD->IsCallerContextValid = FALSE;
    pRD->IsCallerSPValid      = FALSE;        // Don't add usage of this field.  This is only temporary.

    //
    // Copy the saved state from the frame to the current context.
    //

    LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK    HelperMethodFrame::UpdateRegDisplay cached ip:%p, sp:%p\n", m_MachState._pc, m_MachState._sp));

 #if defined(DACCESS_COMPILE)
    // For DAC, we may get here when the HMF is still uninitialized.
    // So we may need to unwind here.
    if (!m_MachState.isValid())
    {
        // This allocation throws on OOM.
        MachState* pUnwoundState = (MachState*)DacAllocHostOnlyInstance(sizeof(*pUnwoundState), true);

        InsureInit(false, pUnwoundState);

        // Report the freshly-unwound state by value.
        pRD->pCurrentContext->Pc = pRD->ControlPC = pUnwoundState->_pc;
        pRD->pCurrentContext->Sp = pRD->SP        = pUnwoundState->_sp;
        pRD->pCurrentContext->Fp = (DWORD64)(pUnwoundState->captureCalleeSavedRegisters[0]);
        pRD->pCurrentContext->S1 = (DWORD64)(pUnwoundState->captureCalleeSavedRegisters[1]);
        pRD->pCurrentContext->S2 = (DWORD64)(pUnwoundState->captureCalleeSavedRegisters[2]);
        pRD->pCurrentContext->S3 = (DWORD64)(pUnwoundState->captureCalleeSavedRegisters[3]);
        pRD->pCurrentContext->S4 = (DWORD64)(pUnwoundState->captureCalleeSavedRegisters[4]);
        pRD->pCurrentContext->S5 = (DWORD64)(pUnwoundState->captureCalleeSavedRegisters[5]);
        pRD->pCurrentContext->S6 = (DWORD64)(pUnwoundState->captureCalleeSavedRegisters[6]);
        pRD->pCurrentContext->S7 = (DWORD64)(pUnwoundState->captureCalleeSavedRegisters[7]);
        pRD->pCurrentContext->S8 = (DWORD64)(pUnwoundState->captureCalleeSavedRegisters[8]);
        pRD->pCurrentContext->S9 = (DWORD64)(pUnwoundState->captureCalleeSavedRegisters[9]);
        pRD->pCurrentContext->S10 = (DWORD64)(pUnwoundState->captureCalleeSavedRegisters[10]);
        pRD->pCurrentContext->S11 = (DWORD64)(pUnwoundState->captureCalleeSavedRegisters[11]);
        pRD->pCurrentContext->Gp = (DWORD64)(pUnwoundState->captureCalleeSavedRegisters[12]);
        pRD->pCurrentContext->Tp = (DWORD64)(pUnwoundState->captureCalleeSavedRegisters[13]);
        pRD->pCurrentContext->Ra = NULL; // Unwind again to get Caller's PC

        pRD->pCurrentContextPointers->Fp = pUnwoundState->ptrCalleeSavedRegisters[0];
        pRD->pCurrentContextPointers->S1 = pUnwoundState->ptrCalleeSavedRegisters[1];
        pRD->pCurrentContextPointers->S2 = pUnwoundState->ptrCalleeSavedRegisters[2];
        pRD->pCurrentContextPointers->S3 = pUnwoundState->ptrCalleeSavedRegisters[3];
        pRD->pCurrentContextPointers->S4 = pUnwoundState->ptrCalleeSavedRegisters[4];
        pRD->pCurrentContextPointers->S5 = pUnwoundState->ptrCalleeSavedRegisters[5];
        pRD->pCurrentContextPointers->S6 = pUnwoundState->ptrCalleeSavedRegisters[6];
        pRD->pCurrentContextPointers->S7 = pUnwoundState->ptrCalleeSavedRegisters[7];
        pRD->pCurrentContextPointers->S8 = pUnwoundState->ptrCalleeSavedRegisters[8];
        pRD->pCurrentContextPointers->S9 = pUnwoundState->ptrCalleeSavedRegisters[9];
        pRD->pCurrentContextPointers->S10 = pUnwoundState->ptrCalleeSavedRegisters[10];
        pRD->pCurrentContextPointers->S11 = pUnwoundState->ptrCalleeSavedRegisters[11];
        pRD->pCurrentContextPointers->Gp = pUnwoundState->ptrCalleeSavedRegisters[12];
        pRD->pCurrentContextPointers->Tp = pUnwoundState->ptrCalleeSavedRegisters[13];
        pRD->pCurrentContextPointers->Ra = NULL;
        return;
    }
#endif // DACCESS_COMPILE

    // reset pContext; it's only valid for active (top-most) frame
    pRD->pContext = NULL;
    pRD->ControlPC = GetReturnAddress(); // m_MachState._pc;
    pRD->SP = (DWORD64)(size_t)m_MachState._sp;

    pRD->pCurrentContext->Pc = pRD->ControlPC;
    pRD->pCurrentContext->Sp = pRD->SP;

#ifdef TARGET_UNIX
    // A context pointer may be NULL here; in that case fall back to the
    // value captured when the frame was set up.
    pRD->pCurrentContext->Fp = m_MachState.ptrCalleeSavedRegisters[0] ? *m_MachState.ptrCalleeSavedRegisters[0] : m_MachState.captureCalleeSavedRegisters[0];
    pRD->pCurrentContext->S1 = m_MachState.ptrCalleeSavedRegisters[1] ? *m_MachState.ptrCalleeSavedRegisters[1] : m_MachState.captureCalleeSavedRegisters[1];
    pRD->pCurrentContext->S2 = m_MachState.ptrCalleeSavedRegisters[2] ? *m_MachState.ptrCalleeSavedRegisters[2] : m_MachState.captureCalleeSavedRegisters[2];
    pRD->pCurrentContext->S3 = m_MachState.ptrCalleeSavedRegisters[3] ? *m_MachState.ptrCalleeSavedRegisters[3] : m_MachState.captureCalleeSavedRegisters[3];
    pRD->pCurrentContext->S4 = m_MachState.ptrCalleeSavedRegisters[4] ? *m_MachState.ptrCalleeSavedRegisters[4] : m_MachState.captureCalleeSavedRegisters[4];
    pRD->pCurrentContext->S5 = m_MachState.ptrCalleeSavedRegisters[5] ? *m_MachState.ptrCalleeSavedRegisters[5] : m_MachState.captureCalleeSavedRegisters[5];
    pRD->pCurrentContext->S6 = m_MachState.ptrCalleeSavedRegisters[6] ? *m_MachState.ptrCalleeSavedRegisters[6] : m_MachState.captureCalleeSavedRegisters[6];
    pRD->pCurrentContext->S7 = m_MachState.ptrCalleeSavedRegisters[7] ? *m_MachState.ptrCalleeSavedRegisters[7] : m_MachState.captureCalleeSavedRegisters[7];
    pRD->pCurrentContext->S8 = m_MachState.ptrCalleeSavedRegisters[8] ? *m_MachState.ptrCalleeSavedRegisters[8] : m_MachState.captureCalleeSavedRegisters[8];
    pRD->pCurrentContext->S9 = m_MachState.ptrCalleeSavedRegisters[9] ? *m_MachState.ptrCalleeSavedRegisters[9] : m_MachState.captureCalleeSavedRegisters[9];
    pRD->pCurrentContext->S10 = m_MachState.ptrCalleeSavedRegisters[10] ? *m_MachState.ptrCalleeSavedRegisters[10] : m_MachState.captureCalleeSavedRegisters[10];
    pRD->pCurrentContext->S11 = m_MachState.ptrCalleeSavedRegisters[11] ? *m_MachState.ptrCalleeSavedRegisters[11] : m_MachState.captureCalleeSavedRegisters[11];
    pRD->pCurrentContext->Gp = m_MachState.ptrCalleeSavedRegisters[12] ? *m_MachState.ptrCalleeSavedRegisters[12] : m_MachState.captureCalleeSavedRegisters[12];
    pRD->pCurrentContext->Tp = m_MachState.ptrCalleeSavedRegisters[13] ? *m_MachState.ptrCalleeSavedRegisters[13] : m_MachState.captureCalleeSavedRegisters[13];
    pRD->pCurrentContext->Ra = NULL; // Unwind again to get Caller's PC
#else // TARGET_UNIX
    pRD->pCurrentContext->Fp = *m_MachState.ptrCalleeSavedRegisters[0];
    pRD->pCurrentContext->S1 = *m_MachState.ptrCalleeSavedRegisters[1];
    pRD->pCurrentContext->S2 = *m_MachState.ptrCalleeSavedRegisters[2];
    pRD->pCurrentContext->S3 = *m_MachState.ptrCalleeSavedRegisters[3];
    pRD->pCurrentContext->S4 = *m_MachState.ptrCalleeSavedRegisters[4];
    pRD->pCurrentContext->S5 = *m_MachState.ptrCalleeSavedRegisters[5];
    pRD->pCurrentContext->S6 = *m_MachState.ptrCalleeSavedRegisters[6];
    pRD->pCurrentContext->S7 = *m_MachState.ptrCalleeSavedRegisters[7];
    pRD->pCurrentContext->S8 = *m_MachState.ptrCalleeSavedRegisters[8];
    pRD->pCurrentContext->S9 = *m_MachState.ptrCalleeSavedRegisters[9];
    pRD->pCurrentContext->S10 = *m_MachState.ptrCalleeSavedRegisters[10];
    pRD->pCurrentContext->S11 = *m_MachState.ptrCalleeSavedRegisters[11];
    pRD->pCurrentContext->Gp = *m_MachState.ptrCalleeSavedRegisters[12];
    pRD->pCurrentContext->Tp = *m_MachState.ptrCalleeSavedRegisters[13];
    pRD->pCurrentContext->Ra = NULL; // Unwind again to get Caller's PC
#endif

#if !defined(DACCESS_COMPILE)
    pRD->pCurrentContextPointers->Fp = m_MachState.ptrCalleeSavedRegisters[0];
    pRD->pCurrentContextPointers->S1 = m_MachState.ptrCalleeSavedRegisters[1];
    pRD->pCurrentContextPointers->S2 = m_MachState.ptrCalleeSavedRegisters[2];
    pRD->pCurrentContextPointers->S3 = m_MachState.ptrCalleeSavedRegisters[3];
    pRD->pCurrentContextPointers->S4 = m_MachState.ptrCalleeSavedRegisters[4];
    pRD->pCurrentContextPointers->S5 = m_MachState.ptrCalleeSavedRegisters[5];
    pRD->pCurrentContextPointers->S6 = m_MachState.ptrCalleeSavedRegisters[6];
    pRD->pCurrentContextPointers->S7 = m_MachState.ptrCalleeSavedRegisters[7];
    pRD->pCurrentContextPointers->S8 = m_MachState.ptrCalleeSavedRegisters[8];
    pRD->pCurrentContextPointers->S9 = m_MachState.ptrCalleeSavedRegisters[9];
    pRD->pCurrentContextPointers->S10 = m_MachState.ptrCalleeSavedRegisters[10];
    pRD->pCurrentContextPointers->S11 = m_MachState.ptrCalleeSavedRegisters[11];
    pRD->pCurrentContextPointers->Gp = m_MachState.ptrCalleeSavedRegisters[12];
    pRD->pCurrentContextPointers->Tp = m_MachState.ptrCalleeSavedRegisters[13];
    pRD->pCurrentContextPointers->Ra = NULL; // Unwind again to get Caller's PC
#endif
    ClearRegDisplayArgumentAndScratchRegisters(pRD);
}
475
476 #ifndef DACCESS_COMPILE
// Emits the ThisPtrRetBufPrecode: a tiny stub that swaps the incoming 'this'
// pointer (a0) and return-buffer pointer (a1) via t6, then tail-jumps to
// m_pTarget (initially the prestub). pLoaderAllocator is unused here.
void ThisPtrRetBufPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator)
{
    WRAPPER_NO_CONTRACT;

    //Initially
    //a0 -This ptr
    //a1 -ReturnBuffer
    m_rgCode[0] = 0x00050f93; // addi  t6, a0, 0x0    ; t6 = this
    m_rgCode[1] = 0x00058513; // addi  a0, a1, 0x0    ; a0 = return buffer
    m_rgCode[2] = 0x000f8593; // addi  a1, t6, 0x0    ; a1 = this
    m_rgCode[3] = 0x00000f97; // auipc t6, 0          ; t6 = pc of this instruction
    m_rgCode[4] = 0x00cfbf83; // ld    t6, 12(t6)     ; t6 = m_pTarget (12 bytes past the auipc)
    m_rgCode[5] = 0x000f8067; // jalr  x0, 0(t6)      ; tail-jump, no link

    // The ld above relies on m_pTarget immediately following the code array.
    _ASSERTE((UINT32*)&m_pTarget == &m_rgCode[6]);
    _ASSERTE(6 == ARRAY_SIZE(m_rgCode));

    m_pTarget = GetPreStubEntryPoint();
    m_pMethodDesc = (TADDR)pMD;
}
497
498 #endif // !DACCESS_COMPILE
499
500 void UpdateRegDisplayFromCalleeSavedRegisters(REGDISPLAY * pRD, CalleeSavedRegisters * pCalleeSaved)
501 {
502     LIMITED_METHOD_CONTRACT;
503     pRD->pCurrentContext->S1 = pCalleeSaved->s1;
504     pRD->pCurrentContext->S2 = pCalleeSaved->s2;
505     pRD->pCurrentContext->S3 = pCalleeSaved->s3;
506     pRD->pCurrentContext->S4 = pCalleeSaved->s4;
507     pRD->pCurrentContext->S5 = pCalleeSaved->s5;
508     pRD->pCurrentContext->S6 = pCalleeSaved->s6;
509     pRD->pCurrentContext->S7 = pCalleeSaved->s7;
510     pRD->pCurrentContext->S8 = pCalleeSaved->s8;
511     pRD->pCurrentContext->S9 = pCalleeSaved->s9;
512     pRD->pCurrentContext->S10 = pCalleeSaved->s10;
513     pRD->pCurrentContext->S11 = pCalleeSaved->s11;
514     pRD->pCurrentContext->Gp = pCalleeSaved->gp;
515     pRD->pCurrentContext->Tp = pCalleeSaved->tp;
516     pRD->pCurrentContext->Fp  = pCalleeSaved->fp;
517     pRD->pCurrentContext->Ra  = pCalleeSaved->ra;
518
519     T_KNONVOLATILE_CONTEXT_POINTERS * pContextPointers = pRD->pCurrentContextPointers;
520     pContextPointers->S1 = (PDWORD64)&pCalleeSaved->s1;
521     pContextPointers->S2 = (PDWORD64)&pCalleeSaved->s2;
522     pContextPointers->S3 = (PDWORD64)&pCalleeSaved->s3;
523     pContextPointers->S4 = (PDWORD64)&pCalleeSaved->s4;
524     pContextPointers->S5 = (PDWORD64)&pCalleeSaved->s5;
525     pContextPointers->S6 = (PDWORD64)&pCalleeSaved->s6;
526     pContextPointers->S7 = (PDWORD64)&pCalleeSaved->s7;
527     pContextPointers->S8 = (PDWORD64)&pCalleeSaved->s8;
528     pContextPointers->S9 = (PDWORD64)&pCalleeSaved->s9;
529     pContextPointers->S10 = (PDWORD64)&pCalleeSaved->s10;
530     pContextPointers->S11 = (PDWORD64)&pCalleeSaved->s11;
531     pContextPointers->Gp = (PDWORD64)&pCalleeSaved->gp;
532     pContextPointers->Tp = (PDWORD64)&pCalleeSaved->tp;
533     pContextPointers->Fp = (PDWORD64)&pCalleeSaved->fp;
534     pContextPointers->Ra  = (PDWORD64)&pCalleeSaved->ra;
535 }
536
537 void TransitionFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
538 {
539     pRD->IsCallerContextValid = FALSE;
540     pRD->IsCallerSPValid      = FALSE;        // Don't add usage of this field.  This is only temporary.
541
542     // copy the callee saved regs
543     CalleeSavedRegisters *pCalleeSaved = GetCalleeSavedRegisters();
544     UpdateRegDisplayFromCalleeSavedRegisters(pRD, pCalleeSaved);
545
546     ClearRegDisplayArgumentAndScratchRegisters(pRD);
547
548     // copy the control registers
549     //pRD->pCurrentContext->Fp = pCalleeSaved->fp;//not needed for duplicated.
550     //pRD->pCurrentContext->Ra = pCalleeSaved->ra;//not needed for duplicated.
551     pRD->pCurrentContext->Pc = GetReturnAddress();
552     pRD->pCurrentContext->Sp = this->GetSP();
553
554     // Finally, syncup the regdisplay with the context
555     SyncRegDisplayToCurrentContext(pRD);
556
557     LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK    TransitionFrame::UpdateRegDisplay(pc:%p, sp:%p)\n", pRD->ControlPC, pRD->SP));
558 }
559
560 void FaultingExceptionFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
561 {
562     LIMITED_METHOD_DAC_CONTRACT;
563
564     // Copy the context to regdisplay
565     memcpy(pRD->pCurrentContext, &m_ctx, sizeof(T_CONTEXT));
566
567     pRD->ControlPC = ::GetIP(&m_ctx);
568     pRD->SP = ::GetSP(&m_ctx);
569
570     // Update the integer registers in KNONVOLATILE_CONTEXT_POINTERS from
571     // the exception context we have.
572     pRD->pCurrentContextPointers->S1 = (PDWORD64)&m_ctx.S1;
573     pRD->pCurrentContextPointers->S2 = (PDWORD64)&m_ctx.S2;
574     pRD->pCurrentContextPointers->S3 = (PDWORD64)&m_ctx.S3;
575     pRD->pCurrentContextPointers->S4 = (PDWORD64)&m_ctx.S4;
576     pRD->pCurrentContextPointers->S5 = (PDWORD64)&m_ctx.S5;
577     pRD->pCurrentContextPointers->S6 = (PDWORD64)&m_ctx.S6;
578     pRD->pCurrentContextPointers->S7 = (PDWORD64)&m_ctx.S7;
579     pRD->pCurrentContextPointers->S8 = (PDWORD64)&m_ctx.S8;
580     pRD->pCurrentContextPointers->S9 = (PDWORD64)&m_ctx.S9;
581     pRD->pCurrentContextPointers->S10 = (PDWORD64)&m_ctx.S10;
582     pRD->pCurrentContextPointers->S11 = (PDWORD64)&m_ctx.S11;
583     pRD->pCurrentContextPointers->Fp = (PDWORD64)&m_ctx.Fp;
584     pRD->pCurrentContextPointers->Gp = (PDWORD64)&m_ctx.Gp;
585     pRD->pCurrentContextPointers->Tp = (PDWORD64)&m_ctx.Tp;
586     pRD->pCurrentContextPointers->Ra = (PDWORD64)&m_ctx.Ra;
587
588     ClearRegDisplayArgumentAndScratchRegisters(pRD);
589
590     pRD->IsCallerContextValid = FALSE;
591     pRD->IsCallerSPValid      = FALSE;        // Don't add usage of this field.  This is only temporary.
592
593     LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK    FaultingExceptionFrame::UpdateRegDisplay(pc:%p, sp:%p)\n", pRD->ControlPC, pRD->SP));
594 }
595
// Rebuilds the register display for an inlined call frame. Only the caller's
// return address, the call-site SP, and the saved frame pointer are recorded
// in this frame, so all other non-volatile registers are reported as
// unavailable (NULL context pointers).
void InlinedCallFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
{
    CONTRACT_VOID
    {
        NOTHROW;
        GC_NOTRIGGER;
#ifdef PROFILING_SUPPORTED
        PRECONDITION(CORProfilerStackSnapshotEnabled() || InlinedCallFrame::FrameHasActiveCall(this));
#endif
        HOST_NOCALLS;
        MODE_ANY;
        SUPPORTS_DAC;
    }
    CONTRACT_END;

    // Nothing to report when the frame has no active call.
    if (!InlinedCallFrame::FrameHasActiveCall(this))
    {
        LOG((LF_CORDB, LL_ERROR, "WARNING: InlinedCallFrame::UpdateRegDisplay called on inactive frame %p\n", this));
        return;
    }

    pRD->IsCallerContextValid = FALSE;
    pRD->IsCallerSPValid      = FALSE;

    pRD->pCurrentContext->Pc = *(DWORD64 *)&m_pCallerReturnAddress;
    pRD->pCurrentContext->Sp = *(DWORD64 *)&m_pCallSiteSP;
    pRD->pCurrentContext->Fp = *(DWORD64 *)&m_pCalleeSavedFP;

    // The frame does not save the remaining non-volatile registers; mark
    // their context pointers as unavailable.
    pRD->pCurrentContextPointers->S1 = NULL;
    pRD->pCurrentContextPointers->S2 = NULL;
    pRD->pCurrentContextPointers->S3 = NULL;
    pRD->pCurrentContextPointers->S4 = NULL;
    pRD->pCurrentContextPointers->S5 = NULL;
    pRD->pCurrentContextPointers->S6 = NULL;
    pRD->pCurrentContextPointers->S7 = NULL;
    pRD->pCurrentContextPointers->S8 = NULL;
    pRD->pCurrentContextPointers->S9 = NULL;
    pRD->pCurrentContextPointers->S10 = NULL;
    pRD->pCurrentContextPointers->S11 = NULL;
    pRD->pCurrentContextPointers->Gp = NULL;
    pRD->pCurrentContextPointers->Tp = NULL;

    pRD->ControlPC = m_pCallerReturnAddress;
    pRD->SP = (DWORD64) dac_cast<TADDR>(m_pCallSiteSP);

    // reset pContext; it's only valid for active (top-most) frame
    pRD->pContext = NULL;

    ClearRegDisplayArgumentAndScratchRegisters(pRD);


    // Update the frame pointer in the current context.
    pRD->pCurrentContextPointers->Fp = &m_pCalleeSavedFP;

    LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK    InlinedCallFrame::UpdateRegDisplay(pc:%p, sp:%p)\n", pRD->ControlPC, pRD->SP));

    RETURN;
}
654
655 #ifdef FEATURE_HIJACK
656 TADDR ResumableFrame::GetReturnAddressPtr(void)
657 {
658     LIMITED_METHOD_DAC_CONTRACT;
659     return dac_cast<TADDR>(m_Regs) + offsetof(T_CONTEXT, Pc);
660 }
661
// Restores the full register display from the context captured in m_Regs.
// Unlike most frames, a resumable frame preserves every register (callee-saved
// and volatile), so all context pointers are redirected into m_Regs.
void ResumableFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
{
    CONTRACT_VOID
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        SUPPORTS_DAC;
    }
    CONTRACT_END;

    // Replace the current context wholesale with the captured one.
    CopyMemory(pRD->pCurrentContext, m_Regs, sizeof(T_CONTEXT));

    pRD->ControlPC = m_Regs->Pc;
    pRD->SP = m_Regs->Sp;

    // Callee-saved registers: point at the captured context slots so the GC
    // can update them in place if objects move.
    pRD->pCurrentContextPointers->S1 = &m_Regs->S1;
    pRD->pCurrentContextPointers->S2 = &m_Regs->S2;
    pRD->pCurrentContextPointers->S3 = &m_Regs->S3;
    pRD->pCurrentContextPointers->S4 = &m_Regs->S4;
    pRD->pCurrentContextPointers->S5 = &m_Regs->S5;
    pRD->pCurrentContextPointers->S6 = &m_Regs->S6;
    pRD->pCurrentContextPointers->S7 = &m_Regs->S7;
    pRD->pCurrentContextPointers->S8 = &m_Regs->S8;
    pRD->pCurrentContextPointers->S9 = &m_Regs->S9;
    pRD->pCurrentContextPointers->S10 = &m_Regs->S10;
    pRD->pCurrentContextPointers->S11 = &m_Regs->S11;
    pRD->pCurrentContextPointers->Tp = &m_Regs->Tp;
    pRD->pCurrentContextPointers->Gp = &m_Regs->Gp;
    pRD->pCurrentContextPointers->Fp = &m_Regs->Fp;
    pRD->pCurrentContextPointers->Ra = &m_Regs->Ra;

    // Volatile (argument/temporary) registers are also available.
    pRD->volatileCurrContextPointers.R0 = &m_Regs->R0;
    pRD->volatileCurrContextPointers.A0 = &m_Regs->A0;
    pRD->volatileCurrContextPointers.A1 = &m_Regs->A1;
    pRD->volatileCurrContextPointers.A2 = &m_Regs->A2;
    pRD->volatileCurrContextPointers.A3 = &m_Regs->A3;
    pRD->volatileCurrContextPointers.A4 = &m_Regs->A4;
    pRD->volatileCurrContextPointers.A5 = &m_Regs->A5;
    pRD->volatileCurrContextPointers.A6 = &m_Regs->A6;
    pRD->volatileCurrContextPointers.A7 = &m_Regs->A7;
    pRD->volatileCurrContextPointers.T0 = &m_Regs->T0;
    pRD->volatileCurrContextPointers.T1 = &m_Regs->T1;
    pRD->volatileCurrContextPointers.T2 = &m_Regs->T2;
    pRD->volatileCurrContextPointers.T3 = &m_Regs->T3;
    pRD->volatileCurrContextPointers.T4 = &m_Regs->T4;
    pRD->volatileCurrContextPointers.T5 = &m_Regs->T5;
    pRD->volatileCurrContextPointers.T6 = &m_Regs->T6;

    pRD->IsCallerContextValid = FALSE;
    pRD->IsCallerSPValid      = FALSE;        // Don't add usage of this field.  This is only temporary.

    LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK    ResumableFrame::UpdateRegDisplay(pc:%p, sp:%p)\n", pRD->ControlPC, pRD->SP));

    RETURN;
}
718
// Rebuilds the register display from the HijackArgs block pushed by the
// return-address hijack stub. SP is recovered by skipping over the (16-byte
// aligned) HijackArgs area; Pc is the hijacked return address.
void HijackFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
{
    LIMITED_METHOD_CONTRACT;

    pRD->IsCallerContextValid = FALSE;
    pRD->IsCallerSPValid      = FALSE;

    pRD->pCurrentContext->Pc = m_ReturnAddress;
    size_t s = sizeof(struct HijackArgs);
    _ASSERTE(s%8 == 0); // HijackArgs contains register values and hence will be a multiple of 8
    // stack must be multiple of 16. So if s is not multiple of 16 then there must be padding of 8 bytes
    // (since s is a multiple of 8, s%16 is either 0 or 8, which rounds s up to the next 16).
    s = s + s%16;
    pRD->pCurrentContext->Sp = PTR_TO_TADDR(m_Args) + s ;

    // Copy the callee-saved register values captured by the hijack stub.
    pRD->pCurrentContext->S1 = m_Args->S1;
    pRD->pCurrentContext->S2 = m_Args->S2;
    pRD->pCurrentContext->S3 = m_Args->S3;
    pRD->pCurrentContext->S4 = m_Args->S4;
    pRD->pCurrentContext->S5 = m_Args->S5;
    pRD->pCurrentContext->S6 = m_Args->S6;
    pRD->pCurrentContext->S7 = m_Args->S7;
    pRD->pCurrentContext->S8 = m_Args->S8;
    pRD->pCurrentContext->S9 = m_Args->S9;
    pRD->pCurrentContext->S10 = m_Args->S10;
    pRD->pCurrentContext->S11 = m_Args->S11;
    pRD->pCurrentContext->Gp = m_Args->Gp;
    pRD->pCurrentContext->Tp = m_Args->Tp;
    pRD->pCurrentContext->Fp = m_Args->Fp;
    pRD->pCurrentContext->Ra = m_Args->Ra;

    // And expose their stack locations so the GC can relocate references.
    pRD->pCurrentContextPointers->S1 = &m_Args->S1;
    pRD->pCurrentContextPointers->S2 = &m_Args->S2;
    pRD->pCurrentContextPointers->S3 = &m_Args->S3;
    pRD->pCurrentContextPointers->S4 = &m_Args->S4;
    pRD->pCurrentContextPointers->S5 = &m_Args->S5;
    pRD->pCurrentContextPointers->S6 = &m_Args->S6;
    pRD->pCurrentContextPointers->S7 = &m_Args->S7;
    pRD->pCurrentContextPointers->S8 = &m_Args->S8;
    pRD->pCurrentContextPointers->S9 = &m_Args->S9;
    pRD->pCurrentContextPointers->S10 = &m_Args->S10;
    pRD->pCurrentContextPointers->S11 = &m_Args->S11;
    pRD->pCurrentContextPointers->Gp = &m_Args->Gp;
    pRD->pCurrentContextPointers->Tp = &m_Args->Tp;
    pRD->pCurrentContextPointers->Fp = &m_Args->Fp;
    // NOTE(review): Ra location is deliberately not reported here —
    // presumably because the real return address was hijacked; confirm
    // against the other platforms' HijackFrame implementations.
    pRD->pCurrentContextPointers->Ra = NULL;
    SyncRegDisplayToCurrentContext(pRD);

    LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK    HijackFrame::UpdateRegDisplay(pc:%p, sp:%p)\n", pRD->ControlPC, pRD->SP));
}
768 #endif // FEATURE_HIJACK
769
770 #ifdef FEATURE_COMINTEROP
771
// COM interop stub emission is not yet implemented on RISCV64; reaching this
// path is a porting gap, so fail loudly in checked builds.
void emitCOMStubCall (ComCallMethodDesc *pCOMMethodRX, ComCallMethodDesc *pCOMMethodRW, PCODE target)
{
    _ASSERTE(!"RISCV64: not implementation on riscv64!!!");
}
776 #endif // FEATURE_COMINTEROP
777
// Tail-call JIT helper — not yet implemented on RISCV64.
void JIT_TailCall()
{
    _ASSERTE(!"RISCV64:NYI");
}
782
783 #if !defined(DACCESS_COMPILE)
784 EXTERN_C void JIT_UpdateWriteBarrierState(bool skipEphemeralCheck, size_t writeableOffset);
785
786 extern "C" void STDCALL JIT_PatchedCodeStart();
787 extern "C" void STDCALL JIT_PatchedCodeLast();
788
// Re-patches the copied JIT write-barrier code between JIT_PatchedCodeStart
// and JIT_PatchedCodeLast, obtaining a writable mapping when W^X is enabled.
// NOTE(review): the skipEphemeralCheck parameter is ignored and
// GCHeapUtilities::IsServerHeap() is passed to JIT_UpdateWriteBarrierState
// instead; every caller in this file passes IsServerHeap() anyway, so this is
// currently equivalent — confirm intent before relying on the parameter.
static void UpdateWriteBarrierState(bool skipEphemeralCheck)
{
    BYTE *writeBarrierCodeStart = GetWriteBarrierCodeLocation((void*)JIT_PatchedCodeStart);
    BYTE *writeBarrierCodeStartRW = writeBarrierCodeStart;
    ExecutableWriterHolderNoLog<BYTE> writeBarrierWriterHolder;
    if (IsWriteBarrierCopyEnabled())
    {
        writeBarrierWriterHolder.AssignExecutableWriterHolder(writeBarrierCodeStart, (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart);
        writeBarrierCodeStartRW = writeBarrierWriterHolder.GetRW();
    }
    // The second argument is the RW-alias offset the barrier patcher must add
    // to translate executable addresses into writable ones.
    JIT_UpdateWriteBarrierState(GCHeapUtilities::IsServerHeap(), writeBarrierCodeStartRW - writeBarrierCodeStart);
}
801
// One-time startup initialization of JIT helper function pointers.
// Installs the fast (non-logging) allocation helpers when no allocation
// tracking/logging/fault-injection is active, then primes the write barrier.
void InitJITHelpers1()
{
    STANDARD_VM_CONTRACT;

    _ASSERTE(g_SystemInfo.dwNumberOfProcessors != 0);

    // Allocation helpers, faster but non-logging
    if (!((TrackAllocationsEnabled()) ||
        (LoggingOn(LF_GCALLOC, LL_INFO10))
#ifdef _DEBUG
        || (g_pConfig->ShouldInjectFault(INJECTFAULT_GCHEAP) != 0)
#endif // _DEBUG
        ))
    {
        // The fast portable helpers require per-thread allocation contexts.
        if (GCHeapUtilities::UseThreadAllocationContexts())
        {
            SetJitHelperFunction(CORINFO_HELP_NEWSFAST, JIT_NewS_MP_FastPortable);
            SetJitHelperFunction(CORINFO_HELP_NEWSFAST_ALIGN8, JIT_NewS_MP_FastPortable);
            SetJitHelperFunction(CORINFO_HELP_NEWARR_1_VC, JIT_NewArr1VC_MP_FastPortable);
            SetJitHelperFunction(CORINFO_HELP_NEWARR_1_OBJ, JIT_NewArr1OBJ_MP_FastPortable);

            ECall::DynamicallyAssignFCallImpl(GetEEFuncEntryPoint(AllocateString_MP_FastPortable), ECall::FastAllocateString);
        }
    }

    UpdateWriteBarrierState(GCHeapUtilities::IsServerHeap());
}
829
830 #else
831 void UpdateWriteBarrierState(bool) {}
832 #endif // !defined(DACCESS_COMPILE)
833
834 PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(T_DISPATCHER_CONTEXT * pDispatcherContext)
835 {
836     LIMITED_METHOD_DAC_CONTRACT;
837
838     DWORD64 stackSlot = pDispatcherContext->EstablisherFrame + REDIRECTSTUB_SP_OFFSET_CONTEXT;
839     PTR_PTR_CONTEXT ppContext = dac_cast<PTR_PTR_CONTEXT>((TADDR)stackSlot);
840     return *ppContext;
841 }
842
843 PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(T_CONTEXT * pContext)
844 {
845     LIMITED_METHOD_DAC_CONTRACT;
846
847     DWORD64 stackSlot = pContext->Sp + REDIRECTSTUB_SP_OFFSET_CONTEXT;
848     PTR_PTR_CONTEXT ppContext = dac_cast<PTR_PTR_CONTEXT>((TADDR)stackSlot);
849     return *ppContext;
850 }
851
852 #if !defined(DACCESS_COMPILE)
// Not yet implemented on RISCV64: asserts in checked builds and returns NULL.
FaultingExceptionFrame *GetFrameFromRedirectedStubStackFrame (DISPATCHER_CONTEXT *pDispatcherContext)
{
    _ASSERTE(!"RISCV64: not implementation on riscv64!!!");
    LIMITED_METHOD_CONTRACT;

    return (FaultingExceptionFrame*)NULL;
}
860
861
// If an AV occurred inside a virtual-stub-dispatch (VSD) dispatch/resolve
// stub, rewinds the context to the call site so the exception appears to come
// from the caller (giving a proper NullReferenceException). Returns TRUE if
// the context was adjusted.
BOOL
AdjustContextForVirtualStub(
        EXCEPTION_RECORD *pExceptionRecord,
        CONTEXT *pContext)
{
    LIMITED_METHOD_CONTRACT;

    Thread * pThread = GetThreadNULLOk();

    // We may not have a managed thread object. Example is an AV on the helper thread.
    // (perhaps during StubManager::IsStub)
    if (pThread == NULL)
    {
        return FALSE;
    }

    PCODE f_IP = GetIP(pContext);

    StubCodeBlockKind sk = RangeSectionStubManager::GetStubKind(f_IP);

    if (sk == STUB_CODE_BLOCK_VSD_DISPATCH_STUB)
    {
        // The dispatch stub faults on the instruction after its first one,
        // so validate the preceding instruction word.
        if (*PTR_DWORD(f_IP - 4) != DISPATCH_STUB_FIRST_DWORD)
        {
            _ASSERTE(!"AV in DispatchStub at unknown instruction");
            return FALSE;
        }
    }
    else
    if (sk == STUB_CODE_BLOCK_VSD_RESOLVE_STUB)
    {
        // The resolve stub faults on its first instruction.
        if (*PTR_DWORD(f_IP) != RESOLVE_STUB_FIRST_DWORD)
        {
            _ASSERTE(!"AV in ResolveStub at unknown instruction");
            return FALSE;
        }
    }
    else
    {
        return FALSE;
    }

    PCODE callsite = GetAdjustedCallAddress(GetRA(pContext));

    // Ra (the return address) must already have been saved before the call,
    // so it should not be necessary to restore it here.

    if (pExceptionRecord != NULL)
    {
        pExceptionRecord->ExceptionAddress = (PVOID)callsite;
    }
    SetIP(pContext, callsite);

    return TRUE;
}
916 #endif // !DACCESS_COMPILE
917
918 UMEntryThunk * UMEntryThunk::Decode(void *pCallback)
919 {
920     _ASSERTE(offsetof(UMEntryThunkCode, m_code) == 0);
921     UMEntryThunkCode * pCode = (UMEntryThunkCode*)pCallback;
922
923     // We may be called with an unmanaged external code pointer instead. So if it doesn't look like one of our
924     // stubs (see UMEntryThunkCode::Encode below) then we'll return NULL. Luckily in these scenarios our
925     // caller will perform a hash lookup on successful return to verify our result in case random unmanaged
926     // code happens to look like ours.
927     if ((pCode->m_code[0] == 0x00000f97) && // auipc t6, 0
928         (pCode->m_code[1] == 0x018fb383) && // ld    t2, 24(t6)
929         (pCode->m_code[2] == 0x010fbf83) && // ld    t6, 16(t6)
930         (pCode->m_code[3] == 0x000f8067))   // jalr  x0, 0(t6)
931     {
932         return (UMEntryThunk*)pCode->m_pvSecretParam;
933     }
934
935     return NULL;
936 }
937
// Writes the UM entry thunk: four instructions that load the secret param
// into t2 and tail-jump through t6 to the target, followed by the two data
// slots the instructions reference PC-relatively. The instruction cache is
// flushed on the executable (RX) alias after the RW writes complete.
void UMEntryThunkCode::Encode(UMEntryThunkCode *pEntryThunkCodeRX, BYTE* pTargetCode, void* pvSecretParam)
{
    // auipc t6, 0
    // ld    t2, 24(t6)
    // ld    t6, 16(t6)
    // jalr  x0, 0(t6)
    // m_pTargetCode data
    // m_pvSecretParam data

    m_code[0] = 0x00000f97; // auipc t6, 0
    m_code[1] = 0x018fb383; // ld    t2, 24(t6)
    m_code[2] = 0x010fbf83; // ld    t6, 16(t6)
    m_code[3] = 0x000f8067; // jalr  x0, 0(t6)

    m_pTargetCode = (TADDR)pTargetCode;
    m_pvSecretParam = (TADDR)pvSecretParam;
    FlushInstructionCache(GetCurrentProcess(),&pEntryThunkCodeRX->m_code,sizeof(m_code));
}
956
957 #ifndef DACCESS_COMPILE
958
// Redirects a collected thunk to UMEntryThunk::ReportViolation. The target
// slot is repointed first, then the second instruction is patched to load the
// secret param into a0 so ReportViolation receives the thunk as its argument.
void UMEntryThunkCode::Poison()
{
    ExecutableWriterHolder<UMEntryThunkCode> thunkWriterHolder(this, sizeof(UMEntryThunkCode));
    UMEntryThunkCode *pThisRW = thunkWriterHolder.GetRW();

    pThisRW->m_pTargetCode = (TADDR)UMEntryThunk::ReportViolation;

    // ld   a0, 24(t6)
    pThisRW->m_code[1] = 0x018fb503;

    ClrFlushInstructionCache(&m_code,sizeof(m_code));
}
971
972 #endif // DACCESS_COMPILE
973
974 #if !defined(DACCESS_COMPILE)
// No-op on RISCV64: there is no per-thread CPU state that needs resetting here.
VOID ResetCurrentContext()
{
    LIMITED_METHOD_CONTRACT;
}
979 #endif
980
// Exception filter that never handles the exception; both parameters are
// unused on this platform.
LONG CLRNoCatchHandler(EXCEPTION_POINTERS* pExceptionInfo, PVOID pv)
{
    return EXCEPTION_CONTINUE_SEARCH;
}
985
void FlushWriteBarrierInstructionCache()
{
    // No-op on riscv64; present only to satisfy the gchelpers.h interface.
}
990
// Refreshes the patched write barrier; isRuntimeSuspended is unused here.
int StompWriteBarrierEphemeral(bool isRuntimeSuspended)
{
    UpdateWriteBarrierState(GCHeapUtilities::IsServerHeap());
    return SWB_PASS;
}
996
// Refreshes the patched write barrier after a heap resize; both parameters
// are unused here.
int StompWriteBarrierResize(bool isRuntimeSuspended, bool bReqUpperBoundsCheck)
{
    UpdateWriteBarrierState(GCHeapUtilities::IsServerHeap());
    return SWB_PASS;
}
1002
1003 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
// Refreshes the patched write barrier for write-watch mode; isRuntimeSuspended
// is unused here.
int SwitchToWriteWatchBarrier(bool isRuntimeSuspended)
{
    UpdateWriteBarrierState(GCHeapUtilities::IsServerHeap());
    return SWB_PASS;
}
1009
// Refreshes the patched write barrier leaving write-watch mode;
// isRuntimeSuspended is unused here.
int SwitchToNonWriteWatchBarrier(bool isRuntimeSuspended)
{
    UpdateWriteBarrierState(GCHeapUtilities::IsServerHeap());
    return SWB_PASS;
}
1015 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
1016
1017 #ifdef DACCESS_COMPILE
// DAC-side thunk resolution — not yet implemented on RISCV64; always FALSE.
BOOL GetAnyThunkTarget (T_CONTEXT *pctx, TADDR *pTarget, TADDR *pTargetMethodDesc)
{
    _ASSERTE(!"RISCV64:NYI");
    return FALSE;
}
1023 #endif // DACCESS_COMPILE
1024
1025 #ifndef DACCESS_COMPILE
1026 // ----------------------------------------------------------------
1027 // StubLinkerCPU methods
1028 // ----------------------------------------------------------------
1029
// Materializes the 64-bit constant 'imm' into 'reg' using LUI/ADDI followed
// by SLLI/ADDI batches of at most 11 bits each (mirrors the JIT's
// emitLoadImmediate). The 11-bit batch size keeps each ADDI immediate
// non-negative.
void StubLinkerCPU::EmitMovConstant(IntReg reg, UINT64 imm)
{
    // Adaptation of emitLoadImmediate

    // Small constants fit a single addi from the zero register.
    if (isValidSimm12(imm))
    {
        EmitAddImm(reg, 0 /* zero register */, imm & 0xFFF);
        return;
    }

    // TODO-RISCV64: maybe optimized via emitDataConst(), check #86790

    UINT32 msb;
    UINT32 high31;

    BitScanReverse64(&msb, imm);

    // Take the top 31 significant bits; anything below is loaded later in
    // 11-bit batches.
    if (msb > 30)
    {
        high31 = (imm >> (msb - 30)) & 0x7FffFFff;
    }
    else
    {
        high31 = imm & 0x7FffFFff;
    }

    // Since ADDIW use sign extension for immediate
    // we have to adjust higher 19 bit loaded by LUI
    // for case when low part is bigger than 0x800.
    UINT32 high19 = (high31 + 0x800) >> 12;

    EmitLuImm(reg, high19);
    if (high31 & 0x800)
    {
        // EmitAddImm does not allow negative immediate values, so use EmitSubImm.
        EmitSubImm(reg, reg, ~high31 + 1 & 0xFFF);
    }
    else
    {
        EmitAddImm(reg, reg, high31 & 0x7FF);
    }

    // And load remaining part by batches of 11 bits size.
    INT32 remainingShift = msb - 30;

    // shiftAccumulator usage is an optimization allows to exclude `slli addi` iteration
    // if immediate bits `low11` for this iteration are zero.
    UINT32 shiftAccumulator = 0;

    while (remainingShift > 0)
    {
        UINT32 shift = remainingShift >= 11 ? 11 : remainingShift % 11;
        UINT32 mask = 0x7ff >> (11 - shift);
        remainingShift -= shift;
        UINT32 low11 = (imm >> remainingShift) & mask;
        shiftAccumulator += shift;

        if (low11)
        {
            EmitSllImm(reg, reg, shiftAccumulator);
            shiftAccumulator = 0;

            EmitAddImm(reg, reg, low11);
        }
    }

    // Flush any shift deferred past the last non-zero batch.
    if (shiftAccumulator)
    {
        EmitSllImm(reg, reg, shiftAccumulator);
    }
}
1101
1102 void StubLinkerCPU::EmitJumpRegister(IntReg regTarget)
1103 {
1104     Emit32(0x00000067 | (regTarget << 15));
1105 }
1106
// Emits a stub prolog that allocates a 16-byte-aligned frame, saves FP/RA,
// establishes FP, and spills up to 8 floating-point and 8 integer argument
// registers. cbStackSpace is the caller-requested local area (padded here).
void StubLinkerCPU::EmitProlog(unsigned short cIntRegArgs, unsigned short cFpRegArgs, unsigned short cbStackSpace)
{
    _ASSERTE(!m_fProlog);

    unsigned short numberOfEntriesOnStack  = 2 + cIntRegArgs + cFpRegArgs; // 2 for fp, ra

    // Stack needs to be 16 byte aligned. Compute the required padding before saving it
    unsigned short totalPaddedFrameSize = static_cast<unsigned short>(ALIGN_UP(cbStackSpace + numberOfEntriesOnStack * sizeof(void*), 2 * sizeof(void*)));
    // The padding is going to be applied to the local stack
    cbStackSpace =  totalPaddedFrameSize - numberOfEntriesOnStack * sizeof(void*);

    // Record the parameters of this prolog so that we can generate a matching epilog and unwind info.
    DescribeProlog(cIntRegArgs, cFpRegArgs, cbStackSpace);


    // N.B Despite the range of a jump with a sub sp is 4KB, we're limiting to 504 to save from emitting right prolog that's
    // expressable in unwind codes efficiently. The largest offset in typical unwindinfo encodings that we use is 504.
    // so allocations larger than 504 bytes would require setting the SP in multiple strides, which would complicate both
    // prolog and epilog generation as well as unwindinfo generation.
    _ASSERTE((totalPaddedFrameSize <= 504) && "NYI:RISCV64 Implement StubLinker prologs with larger than 504 bytes of frame size");
    if (totalPaddedFrameSize > 504)
        COMPlusThrow(kNotSupportedException);

    // Here is how the stack would look like (Stack grows up)
    // [Low Address]
    //            +------------+
    //      SP -> |            | <-+
    //            :            :   | Stack Frame, (i.e outgoing arguments) including padding
    //            |            | <-+
    //            +------------+
    //            | FP         |
    //            +------------+
    //            | RA         |
    //            +------------+
    //            | F10        | <-+
    //            +------------+   |
    //            :            :   | Fp Args
    //            +------------+   |
    //            | F17        | <-+
    //            +------------+
    //            | X10        | <-+
    //            +------------+   |
    //            :            :   | Int Args
    //            +------------+   |
    //            | X17        | <-+
    //            +------------+
    //  Old SP -> |[Stack Args]|
    // [High Address]

    // Regarding the order of operations in the prolog and epilog;
    // If the prolog and the epilog matches each other we can simplify emitting the unwind codes and save a few
    // bytes of unwind codes by making prolog and epilog share the same unwind codes.
    // In order to do that we need to make the epilog be the reverse of the prolog.
    // But we wouldn't want to add restoring of the argument registers as that's completely unnecessary.
    // Besides, saving argument registers cannot be expressed by the unwind code encodings.
    // So, we'll push saving the argument registers to the very last in the prolog, skip restoring it in epilog,
    // and also skip reporting it to the OS.
    //
    // Another bit that we can save is resetting the frame pointer.
    // This is not necessary when the SP doesn't get modified beyond prolog and epilog. (i.e no alloca/localloc)
    // And in that case we don't need to report setting up the FP either.

    // 1. Relocate SP
    EmitSubImm(RegSp, RegSp, totalPaddedFrameSize);

    unsigned cbOffset = 2 * sizeof(void*) + cbStackSpace; // 2 is for fp, ra

    // 2. Store FP/RA
    EmitStore(RegFp, RegSp, cbStackSpace);
    EmitStore(RegRa, RegSp, cbStackSpace + sizeof(void*));

    // 3. Set the frame pointer
    EmitMovReg(RegFp, RegSp);

    // 4. Store floating point argument registers
    _ASSERTE(cFpRegArgs <= 8);
    for (unsigned short i = 0; i < cFpRegArgs; i++)
        EmitStore(FloatReg(i + 10), RegSp, cbOffset + i * sizeof(void*));

    // 5. Store int argument registers
    cbOffset += cFpRegArgs * sizeof(void*);
    _ASSERTE(cIntRegArgs <= 8);
    for (unsigned short i = 0 ; i < cIntRegArgs; i++)
        EmitStore(IntReg(i + 10), RegSp, cbOffset + i * sizeof(void*));
}
1192
// Emits the epilog mirroring EmitProlog in reverse (argument-register restores
// are skipped — they are scratch), then returns via RA.
void StubLinkerCPU::EmitEpilog()
{
    _ASSERTE(m_fProlog);

    // 5. Restore int argument registers
    //    nop: We don't need to. They are scratch registers

    // 4. Restore floating point argument registers
    //    nop: We don't need to. They are scratch registers

    // 3. Restore the SP from FP
    //    N.B. We're assuming that the stublinker stubs doesn't do alloca, hence nop

    // 2. Restore FP/RA
    EmitLoad(RegFp, RegSp, m_cbStackSpace);
    EmitLoad(RegRa, RegSp, m_cbStackSpace + sizeof(void*));

    // 1. Restore SP
    EmitAddImm(RegSp, RegSp, GetStackFrameSize());

    // jalr x0, 0(ra)
    EmitJumpRegister(RegRa);
}
1216
1217 // Instruction types as per RISC-V Spec, Chapter 24 RV32/64G Instruction Set Listings
1218 static unsigned ITypeInstr(unsigned opcode, unsigned funct3, unsigned rd, unsigned rs1, int imm12)
1219 {
1220     _ASSERTE(!(opcode >> 7));
1221     _ASSERTE(!(funct3 >> 3));
1222     _ASSERTE(!(rd >> 5));
1223     _ASSERTE(!(rs1 >> 5));
1224     _ASSERTE(StubLinkerCPU::isValidSimm12(imm12));
1225     return opcode | (rd << 7) | (funct3 << 12) | (rs1 << 15) | (imm12 << 20);
1226 }
1227
1228 static unsigned STypeInstr(unsigned opcode, unsigned funct3, unsigned rs1, unsigned rs2, int imm12)
1229 {
1230     _ASSERTE(!(opcode >> 7));
1231     _ASSERTE(!(funct3 >> 3));
1232     _ASSERTE(!(rs1 >> 5));
1233     _ASSERTE(!(rs2 >> 5));
1234     _ASSERTE(StubLinkerCPU::isValidSimm12(imm12));
1235     int immLo5 = imm12 & 0x1f;
1236     int immHi7 = (imm12 >> 5) & 0x7f;
1237     return opcode | (immLo5 << 7) | (funct3 << 12) | (rs1 << 15) | (rs2 << 20) | (immHi7 << 25);
1238 }
1239
1240 static unsigned RTypeInstr(unsigned opcode, unsigned funct3, unsigned funct7, unsigned rd, unsigned rs1, unsigned rs2)
1241 {
1242     _ASSERTE(!(opcode >> 7));
1243     _ASSERTE(!(funct3 >> 3));
1244     _ASSERTE(!(funct7 >> 7));
1245     _ASSERTE(!(rd >> 5));
1246     _ASSERTE(!(rs1 >> 5));
1247     _ASSERTE(!(rs2 >> 5));
1248     return opcode | (rd << 7) | (funct3 << 12) | (rs1 << 15) | (rs2 << 20) | (funct7 << 25);
1249 }
1250
1251 void StubLinkerCPU::EmitLoad(IntReg dest, IntReg srcAddr, int offset)
1252 {
1253     Emit32(ITypeInstr(0x3, 0x3, dest, srcAddr, offset));  // ld
1254 }
1255 void StubLinkerCPU::EmitLoad(FloatReg dest, IntReg srcAddr, int offset)
1256 {
1257     Emit32(ITypeInstr(0x7, 0x3, dest, srcAddr, offset));  // fld
1258 }
1259
1260 void StubLinkerCPU:: EmitStore(IntReg src, IntReg destAddr, int offset)
1261 {
1262     Emit32(STypeInstr(0x23, 0x3, destAddr, src, offset));  // sd
1263 }
1264 void StubLinkerCPU::EmitStore(FloatReg src, IntReg destAddr, int offset)
1265 {
1266     Emit32(STypeInstr(0x27, 0x3, destAddr, src, offset));  // fsd
1267 }
1268
1269 void StubLinkerCPU::EmitMovReg(IntReg Xd, IntReg Xm)
1270 {
1271     EmitAddImm(Xd, Xm, 0);
1272 }
1273 void StubLinkerCPU::EmitMovReg(FloatReg dest, FloatReg source)
1274 {
1275     Emit32(RTypeInstr(0x53, 0, 0x11, dest, source, source));  // fsgnj.d
1276 }
1277
1278 void StubLinkerCPU::EmitSubImm(IntReg Xd, IntReg Xn, unsigned int value)
1279 {
1280     _ASSERTE(value <= 0x800);
1281     EmitAddImm(Xd, Xn, ~value + 0x1);
1282 }
1283 void StubLinkerCPU::EmitAddImm(IntReg Xd, IntReg Xn, unsigned int value)
1284 {
1285     Emit32(ITypeInstr(0x13, 0, Xd, Xn, value));  // addi
1286 }
1287 void StubLinkerCPU::EmitSllImm(IntReg Xd, IntReg Xn, unsigned int value)
1288 {
1289     _ASSERTE(!(value >> 6));
1290     Emit32(ITypeInstr(0x13, 0x1, Xd, Xn, value));  // slli
1291 }
1292 void StubLinkerCPU::EmitLuImm(IntReg Xd, unsigned int value)
1293 {
1294     _ASSERTE(value <= 0xFFFFF);
1295     Emit32((DWORD)(0x00000037 | (value << 12) | (Xd << 7))); // lui Xd, value
1296 }
1297
// One-time initialization: placement-construct the shared branch instruction
// format object in its preallocated static storage (gBranchIF).
void StubLinkerCPU::Init()
{
    new (gBranchIF) BranchInstructionFormat();
}
1302
// Emits code to adjust arguments for static delegate target: shifts each
// argument into the slot described by pShuffleEntryArray, then tail-jumps to
// the real target loaded from the delegate's MethodPtrAux field. Stack-sourced
// loads into FP argument registers are deferred (delay_index) and replayed
// through temporaries so that register-to-register FP moves don't clobber
// sources that are still pending.
VOID StubLinkerCPU::EmitShuffleThunk(ShuffleEntry *pShuffleEntryArray)
{
    static const int argRegBase = 10;  // first argument register: a0, fa0
    static const IntReg t6 = 31, t5 = 30, a0 = argRegBase + 0;
    // On entry a0 holds the delegate instance. Look up the real target address stored in the MethodPtrAux
    // field and saved in t6. Tailcall to the target method after re-arranging the arguments
    EmitLoad(t6, a0, DelegateObject::GetOffsetOfMethodPtrAux());
    // load the indirection cell into t5 used by ResolveWorkerAsmStub
    EmitAddImm(t5, a0, DelegateObject::GetOffsetOfMethodPtrAux());

    // Indices (into pShuffleEntryArray) of stack->FP-register loads whose
    // emission has been postponed until the FP register block is processed.
    int delay_index[8] = {-1};
    bool is_store = false;
    UINT16 index = 0;
    int i = 0;
    for (ShuffleEntry* pEntry = pShuffleEntryArray; pEntry->srcofs != ShuffleEntry::SENTINEL; pEntry++, i++)
    {
        if (pEntry->srcofs & ShuffleEntry::REGMASK)
        {
            // Source in register, destination in register

            // Both the srcofs and dstofs must be of the same kind of registers - float or general purpose.
            // If source is present in register then destination may be a stack-slot.
            _ASSERTE(((pEntry->dstofs & ShuffleEntry::FPREGMASK) == (pEntry->srcofs & ShuffleEntry::FPREGMASK)) || !(pEntry->dstofs & (ShuffleEntry::FPREGMASK | ShuffleEntry::REGMASK)));
            _ASSERTE((pEntry->dstofs & ShuffleEntry::OFSREGMASK) <= 8);//should amend for offset!
            _ASSERTE((pEntry->srcofs & ShuffleEntry::OFSREGMASK) <= 8);

            if (pEntry->srcofs & ShuffleEntry::FPREGMASK)
            {
                // Find the extent of the consecutive FP-register run.
                int j = 1;
                while (pEntry[j].srcofs & ShuffleEntry::FPREGMASK)
                {
                    j++;
                }
                assert((pEntry->dstofs - pEntry->srcofs) == index);
                assert(8 > index);

                // Replay the delayed stack loads into FP temporaries
                // (ft0, ft1, ...) so the moves below can't clobber them.
                int tmp_reg = 0; // ft0.
                ShuffleEntry* tmp_entry = pShuffleEntryArray + delay_index[0];
                while (index)
                {
                    EmitLoad(FloatReg(tmp_reg), RegSp, tmp_entry->srcofs * sizeof(void*));
                    tmp_reg++;
                    index--;
                    tmp_entry++;
                }

                // Process the FP-register run back-to-front so each move's
                // source has not been overwritten yet.
                j -= 1;
                tmp_entry = pEntry + j;
                i += j;
                while (pEntry[j].srcofs & ShuffleEntry::FPREGMASK)
                {
                    FloatReg src = (pEntry[j].srcofs & ShuffleEntry::OFSREGMASK) + argRegBase;
                    if (pEntry[j].dstofs & ShuffleEntry::FPREGMASK) {
                        FloatReg dst = (pEntry[j].dstofs & ShuffleEntry::OFSREGMASK) + argRegBase;
                        EmitMovReg(dst, src);
                    }
                    else
                    {
                        EmitStore(src, RegSp, pEntry[j].dstofs * sizeof(void*));
                    }
                    j--;
                }

                // Finally move the replayed temporaries into their FP
                // argument registers.
                assert(tmp_reg <= 7);
                while (tmp_reg > 0)
                {
                    tmp_reg--;
                    EmitMovReg(FloatReg(index + argRegBase), FloatReg(tmp_reg));
                    index++;
                }
                index = 0;
                pEntry = tmp_entry;
            }
            else
            {
                // GP register to GP register move; shuffles always move
                // arguments toward lower-numbered registers.
                assert(pEntry->dstofs & ShuffleEntry::REGMASK);
                IntReg dst = (pEntry->dstofs & ShuffleEntry::OFSREGMASK) + argRegBase;
                IntReg src = (pEntry->srcofs & ShuffleEntry::OFSREGMASK) + argRegBase;
                assert(dst < src);
                EmitMovReg(dst, src);
            }
        }
        else if (pEntry->dstofs & ShuffleEntry::REGMASK)
        {
            // source must be on the stack
            _ASSERTE(!(pEntry->srcofs & ShuffleEntry::REGMASK));

            int dstReg = (pEntry->dstofs & ShuffleEntry::OFSREGMASK) + argRegBase;
            int srcOfs = (pEntry->srcofs & ShuffleEntry::OFSMASK) * sizeof(void*);
            if (pEntry->dstofs & ShuffleEntry::FPREGMASK)
            {
                // Defer stack->FP loads until the FP run is handled above.
                if (!is_store)
                {
                    delay_index[index++] = i;
                    continue;
                }
                EmitLoad(FloatReg(dstReg), RegSp, srcOfs);
            }
            else
            {
                EmitLoad(IntReg(dstReg), RegSp, srcOfs);
            }
        }
        else
        {
            // source & dest must be on the stack
            _ASSERTE(!(pEntry->srcofs & ShuffleEntry::REGMASK));
            _ASSERTE(!(pEntry->dstofs & ShuffleEntry::REGMASK));

            // Bounce through t4 for stack-to-stack moves.
            IntReg t4 = 29;
            EmitLoad(t4, RegSp, pEntry->srcofs * sizeof(void*));
            EmitStore(t4, RegSp, pEntry->dstofs * sizeof(void*));
        }
    }
    // Tailcall to target
    // jalr x0, 0(t6)
    EmitJumpRegister(t6);
}
1422
// Emits an instantiating-method stub: shuffles GP argument registers per
// pShuffleEntryArray, supplies the hidden instantiation argument (either the
// explicit extraArg, or — for the unboxing-stub case — the MethodTable read
// from the 'this' pointer), then tail-calls the shared target method.
VOID StubLinkerCPU::EmitComputedInstantiatingMethodStub(MethodDesc* pSharedMD, struct ShuffleEntry *pShuffleEntryArray, void* extraArg)
{
    STANDARD_VM_CONTRACT;

    // Only GP-register-to-GP-register shuffles are expected here.
    for (ShuffleEntry* pEntry = pShuffleEntryArray; pEntry->srcofs != ShuffleEntry::SENTINEL; pEntry++)
    {
        _ASSERTE(pEntry->dstofs & ShuffleEntry::REGMASK);
        _ASSERTE(pEntry->srcofs & ShuffleEntry::REGMASK);
        _ASSERTE(!(pEntry->dstofs & ShuffleEntry::FPREGMASK));
        _ASSERTE(!(pEntry->srcofs & ShuffleEntry::FPREGMASK));
        _ASSERTE(pEntry->dstofs != ShuffleEntry::HELPERREG);
        _ASSERTE(pEntry->srcofs != ShuffleEntry::HELPERREG);

        EmitMovReg(IntReg((pEntry->dstofs & ShuffleEntry::OFSREGMASK) + 10), IntReg((pEntry->srcofs & ShuffleEntry::OFSREGMASK) + 10));
    }

    MetaSig msig(pSharedMD);
    ArgIterator argit(&msig);

    if (argit.HasParamType())
    {
        // Locate the hidden instantiation-parameter register.
        ArgLocDesc sInstArgLoc;
        argit.GetParamTypeLoc(&sInstArgLoc);
        int regHidden = sInstArgLoc.m_idxGenReg;
        _ASSERTE(regHidden != -1);
        regHidden += 10;//NOTE: RISCV64 should start at a0=10;

        if (extraArg == NULL)
        {
            if (pSharedMD->RequiresInstMethodTableArg())
            {
                // Unboxing stub case
                // Fill param arg with methodtable of this pointer
                // ld regHidden, a0, 0
                EmitLoad(IntReg(regHidden), IntReg(10));
            }
        }
        else
        {
            EmitMovConstant(IntReg(regHidden), (UINT64)extraArg);
        }
    }

    if (extraArg == NULL)
    {
        // Unboxing stub case
        // Address of the value type is address of the boxed instance plus sizeof(MethodDesc*).
        //  addi a0, a0, sizeof(MethodDesc*)
        EmitAddImm(IntReg(10), IntReg(10), sizeof(MethodDesc*));
    }

    // Tail call the real target.
    EmitCallManagedMethod(pSharedMD, TRUE /* tail call */);
    SetTargetMethod(pSharedMD);
}
1479
1480 void StubLinkerCPU::EmitCallLabel(CodeLabel *target, BOOL fTailCall, BOOL fIndirect)
1481 {
1482     BranchInstructionFormat::VariationCodes variationCode = BranchInstructionFormat::VariationCodes::BIF_VAR_JUMP;
1483     if (!fTailCall)
1484         variationCode = static_cast<BranchInstructionFormat::VariationCodes>(variationCode | BranchInstructionFormat::VariationCodes::BIF_VAR_CALL);
1485     if (fIndirect)
1486         variationCode = static_cast<BranchInstructionFormat::VariationCodes>(variationCode | BranchInstructionFormat::VariationCodes::BIF_VAR_INDIRECT);
1487
1488     EmitLabelRef(target, reinterpret_cast<BranchInstructionFormat&>(gBranchIF), (UINT)variationCode);
1489 }
1490
1491 void StubLinkerCPU::EmitCallManagedMethod(MethodDesc *pMD, BOOL fTailCall)
1492 {
1493     // Use direct call if possible.
1494     if (pMD->HasStableEntryPoint())
1495     {
1496         EmitCallLabel(NewExternalCodeLabel((LPVOID)pMD->GetStableEntryPoint()), fTailCall, FALSE);
1497     }
1498     else
1499     {
1500         EmitCallLabel(NewExternalCodeLabel((LPVOID)pMD->GetAddrOfSlot()), fTailCall, TRUE);
1501     }
1502 }
1503
1504
1505 #ifdef FEATURE_READYTORUN
1506
1507 //
1508 // Allocation of dynamic helpers
1509 //
1510
1511 #define DYNAMIC_HELPER_ALIGNMENT sizeof(TADDR)
1512
1513 #define BEGIN_DYNAMIC_HELPER_EMIT(size) \
1514     _ASSERTE(!"RISCV64: not implementation on riscv64!!!");
1515 #define END_DYNAMIC_HELPER_EMIT() \
1516     _ASSERTE(!"RISCV64: not implementation on riscv64!!!");
1517
// Uses x8 as a scratch register to store the address of the data label.
// After each load, x8 is incremented to point to the next data slot.
// Only accepts positive offsets.
static void LoadRegPair(BYTE* p, int reg1, int reg2, UINT32 offset)
{
    // Not yet implemented for RISCV64 — asserts unconditionally in debug builds.
    _ASSERTE(!"RISCV64: not implementation on riscv64!!!");
}
1525
PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, PCODE target)
{
    // Not yet implemented for RISCV64: asserts in debug builds, returns NULL.
    _ASSERTE(!"RISCV64: not implementation on riscv64!!!");
    return NULL;
}
1531
// Caller must ensure sufficient bytes are allocated, including padding (if applicable).
void DynamicHelpers::EmitHelperWithArg(BYTE*& p, size_t rxOffset, LoaderAllocator * pAllocator, TADDR arg, PCODE target)
{
    // Not yet implemented for RISCV64 — asserts unconditionally in debug builds.
    _ASSERTE(!"RISCV64: not implementation on riscv64!!!");
}
1537
PCODE DynamicHelpers::CreateHelperWithArg(LoaderAllocator * pAllocator, TADDR arg, PCODE target)
{
    // Not yet implemented for RISCV64: asserts in debug builds, returns NULL.
    _ASSERTE(!"RISCV64: not implementation on riscv64!!!");
    return NULL;
}
1543
PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, TADDR arg2, PCODE target)
{
    // Two-argument overload — not yet implemented for RISCV64:
    // asserts in debug builds, returns NULL.
    _ASSERTE(!"RISCV64: not implementation on riscv64!!!");
    return NULL;
}
1549
PCODE DynamicHelpers::CreateHelperArgMove(LoaderAllocator * pAllocator, TADDR arg, PCODE target)
{
    // Not yet implemented for RISCV64: asserts in debug builds, returns NULL.
    _ASSERTE(!"RISCV64: not implementation on riscv64!!!");
    return NULL;
}
1555
PCODE DynamicHelpers::CreateReturn(LoaderAllocator * pAllocator)
{
    // Not yet implemented for RISCV64: asserts in debug builds, returns NULL.
    _ASSERTE(!"RISCV64: not implementation on riscv64!!!");
    return NULL;
}
1561
PCODE DynamicHelpers::CreateReturnConst(LoaderAllocator * pAllocator, TADDR arg)
{
    // Not yet implemented for RISCV64: asserts in debug builds, returns NULL.
    _ASSERTE(!"RISCV64: not implementation on riscv64!!!");
    return NULL;
}
1567
PCODE DynamicHelpers::CreateReturnIndirConst(LoaderAllocator * pAllocator, TADDR arg, INT8 offset)
{
    // Not yet implemented for RISCV64: asserts in debug builds, returns NULL.
    _ASSERTE(!"RISCV64: not implementation on riscv64!!!");
    return NULL;
}
1573
PCODE DynamicHelpers::CreateHelperWithTwoArgs(LoaderAllocator * pAllocator, TADDR arg, PCODE target)
{
    // Not yet implemented for RISCV64: asserts in debug builds, returns NULL.
    _ASSERTE(!"RISCV64: not implementation on riscv64!!!");
    return NULL;
}
1579
PCODE DynamicHelpers::CreateHelperWithTwoArgs(LoaderAllocator * pAllocator, TADDR arg, TADDR arg2, PCODE target)
{
    // Two-argument overload — not yet implemented for RISCV64:
    // asserts in debug builds, returns NULL.
    _ASSERTE(!"RISCV64: not implementation on riscv64!!!");
    return NULL;
}
1585
PCODE DynamicHelpers::CreateDictionaryLookupHelper(LoaderAllocator * pAllocator, CORINFO_RUNTIME_LOOKUP * pLookup, DWORD dictionaryIndexAndSlot, Module * pModule)
{
    // Not yet implemented for RISCV64: asserts in debug builds, returns NULL.
    _ASSERTE(!"RISCV64: not implementation on riscv64!!!");
    return NULL;
}
1591 #endif // FEATURE_READYTORUN
1592
1593
1594 #endif // #ifndef DACCESS_COMPILE