(byte)ILOpcode.ldc_i4_0, (byte)ILOpcode.conv_u,
(byte)ILOpcode.prefix1, unchecked((byte)ILOpcode.ceq),
(byte)ILOpcode.ret }, Array.Empty<LocalVariableDefinition>(), null);
+ case "SkipInit":
+ return new ILStubMethodIL(method, new byte[] { (byte)ILOpcode.ret }, Array.Empty<LocalVariableDefinition>(), null);
}
return null;
methInfo->options = (CorInfoOptions)0;
return true;
}
+ else if (tk == MscorlibBinder::GetMethod(METHOD__UNSAFE__SKIPINIT)->GetMemberDef())
+ {
+ static BYTE ilcode[] = { CEE_RET };
+
+ methInfo->ILCode = const_cast<BYTE*>(ilcode);
+ methInfo->ILCodeSize = sizeof(ilcode);
+ methInfo->maxStack = 0;
+ methInfo->EHcount = 0;
+ methInfo->options = (CorInfoOptions)0;
+ return true;
+ }
return false;
}
DEFINE_METASIG(GM(RefT_Int_RetRefT, IMAGE_CEE_CS_CALLCONV_DEFAULT, 1, r(M(0)) i, r(M(0))))
DEFINE_METASIG(GM(RefT_IntPtr_RetRefT, IMAGE_CEE_CS_CALLCONV_DEFAULT, 1, r(M(0)) I, r(M(0))))
DEFINE_METASIG(GM(PtrVoid_Int_RetPtrVoid, IMAGE_CEE_CS_CALLCONV_DEFAULT, 1, P(v) i, P(v)))
+// Signature for Unsafe.SkipInit<T>(out T): one generic method arg, a single
+// byref-to-T parameter (r(M(0))), void return (v).
+DEFINE_METASIG(GM(RefT_RetVoid, IMAGE_CEE_CS_CALLCONV_DEFAULT, 1, r(M(0)), v))
DEFINE_METASIG_T(SM(SafeHandle_RefBool_RetIntPtr, C(SAFE_HANDLE) r(F), I ))
DEFINE_METASIG_T(SM(SafeHandle_RetVoid, C(SAFE_HANDLE), v ))
DEFINE_METHOD(UNSAFE, BYREF_WRITE_UNALIGNED, WriteUnaligned, GM_RefByte_T_RetVoid)
DEFINE_METHOD(UNSAFE, PTR_READ_UNALIGNED, ReadUnaligned, GM_PtrVoid_RetT)
DEFINE_METHOD(UNSAFE, PTR_WRITE_UNALIGNED, WriteUnaligned, GM_PtrVoid_T_RetVoid)
+// Unsafe.SkipInit<T>(out T). The VM matches this member token (METHOD__UNSAFE__SKIPINIT)
+// and hands the JIT a body that is a single CEE_RET, i.e. the out parameter is
+// deliberately left unwritten.
+DEFINE_METHOD(UNSAFE, SKIPINIT, SkipInit, GM_RefT_RetVoid)
DEFINE_CLASS(INTERLOCKED, Threading, Interlocked)
DEFINE_METHOD(INTERLOCKED, COMPARE_EXCHANGE_T, CompareExchange, GM_RefT_T_T_RetT)
// ceq
// ret
}
+
+ /// <summary>
+ /// Bypasses definite assignment rules by taking advantage of <c>out</c> semantics.
+ /// </summary>
+ /// <param name="value">The reference whose definite-assignment requirement is
+ /// discharged. No value is written to it — callers must not read it until they
+ /// have assigned it themselves.</param>
+ /// <remarks>
+ /// Marked <c>[Intrinsic]</c>: the runtime substitutes a body consisting of a
+ /// single <c>ret</c> instruction, so the throw below is never executed on a
+ /// runtime that recognizes the intrinsic.
+ /// </remarks>
+ [Intrinsic]
+ [NonVersionable]
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ public static void SkipInit<T>(out T value)
+ {
+ // Unreachable when the intrinsic expansion (a bare "ret") is applied; kept as a
+ // hard failure for any runtime that does not special-case this method.
+ throw new PlatformNotSupportedException();
+
+ // ret
+ }
}
}
// Have to scale by a bunch. Move the number to a buffer where it has room to grow as it's scaled.
//
- Buf24 bufNum;
- _ = &bufNum; // workaround for CS0165
+ Unsafe.SkipInit(out Buf24 bufNum);
DebugPoison(ref bufNum);
bufNum.Low64 = low64;
ulong tmp;
uint hiProd;
- Buf24 bufProd;
- _ = &bufProd; // workaround for CS0165
+ Unsafe.SkipInit(out Buf24 bufProd);
DebugPoison(ref bufProd);
if ((d1.High | d1.Mid) == 0)
/// </summary>
internal static unsafe void VarDecDiv(ref DecCalc d1, ref DecCalc d2)
{
- Buf12 bufQuo;
- _ = &bufQuo; // workaround for CS0165
+ Unsafe.SkipInit(out Buf12 bufQuo);
DebugPoison(ref bufQuo);
uint power;
// Shift both dividend and divisor left by curScale.
//
- Buf16 bufRem;
- _ = &bufRem; // workaround for CS0165
+ Unsafe.SkipInit(out Buf16 bufRem);
DebugPoison(ref bufRem);
bufRem.Low64 = d1.Low64 << curScale;
//
// Start by finishing the shift left by curScale.
//
- Buf12 bufDivisor;
- _ = &bufDivisor; // workaround for CS0165
+ Unsafe.SkipInit(out Buf12 bufDivisor);
DebugPoison(ref bufDivisor);
bufDivisor.Low64 = divisor;
{
d1.uflags = d2.uflags;
// Try to scale up dividend to match divisor.
- Buf12 bufQuo;
- unsafe { _ = &bufQuo; } // workaround for CS0165
+ Unsafe.SkipInit(out Buf12 bufQuo);
DebugPoison(ref bufQuo);
bufQuo.Low64 = d1.Low64;
tmp = d2.Mid;
int shift = BitOperations.LeadingZeroCount(tmp);
- Buf28 b;
- _ = &b; // workaround for CS0165
+ Unsafe.SkipInit(out Buf28 b);
DebugPoison(ref b);
b.Buf24.Low64 = d1.Low64 << shift;
}
else
{
- Buf12 bufDivisor;
- _ = &bufDivisor; // workaround for CS0165
+ Unsafe.SkipInit(out Buf12 bufDivisor);
DebugPoison(ref bufDivisor);
bufDivisor.Low64 = d2.Low64 << shift;