* Move Memmove<T> to shared source (dispatch sketched below).
* Delete RhpCopyObjectContents and reimplement RhMemberwiseClone in managed code.
* RhCompareObjectContentsAndPadding is fine as-is, but add an assert that the operands are value types.
* Remove the unused BulkMoveWithWriteBarrier from Augments.
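For context, the consolidated `Memmove<T>` picks the raw-byte path for blittable `T` and the write-barrier path otherwise. A hedged usage sketch, assuming corelib-internal access; the `CopyBack` helper below is illustrative and not part of this change:

```csharp
// Hypothetical caller: string[] elements contain GC references and must
// take the write-barrier path, while int[] elements take the plain byte
// memmove. Memmove<T> picks the right path via
// RuntimeHelpers.IsReferenceOrContainsReferences<T>().
static void CopyBack<T>(T[] destination, T[] source)
{
    Buffer.Memmove(
        ref MemoryMarshal.GetArrayDataReference(destination),
        ref MemoryMarshal.GetArrayDataReference(source),
        (nuint)source.Length); // element count, not byte count
}
```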
{
public partial class Buffer
{
- // Non-inlinable wrapper around the QCall that avoids polluting the fast path
- // with P/Invoke prolog/epilog.
- [MethodImpl(MethodImplOptions.NoInlining)]
- internal static unsafe void _ZeroMemory(ref byte b, nuint byteLength)
- {
- fixed (byte* bytePointer = &b)
- {
- __ZeroMemory(bytePointer, byteLength);
- }
- }
-
[LibraryImport(RuntimeHelpers.QCall, EntryPoint = "Buffer_Clear")]
private static unsafe partial void __ZeroMemory(void* b, nuint byteLength);
- // The maximum block size to for __BulkMoveWithWriteBarrier FCall. This is required to avoid GC starvation.
-#if DEBUG // Stress the mechanism in debug builds
- private const uint BulkMoveWithWriteBarrierChunk = 0x400;
-#else
- private const uint BulkMoveWithWriteBarrierChunk = 0x4000;
-#endif
-
- internal static void BulkMoveWithWriteBarrier(ref byte destination, ref byte source, nuint byteCount)
- {
- if (byteCount <= BulkMoveWithWriteBarrierChunk)
- __BulkMoveWithWriteBarrier(ref destination, ref source, byteCount);
- else
- _BulkMoveWithWriteBarrier(ref destination, ref source, byteCount);
- }
-
-#pragma warning disable IDE0060 // https://github.com/dotnet/roslyn-analyzers/issues/6228
- // Non-inlinable wrapper around the loop for copying large blocks in chunks
- [MethodImpl(MethodImplOptions.NoInlining)]
- private static void _BulkMoveWithWriteBarrier(ref byte destination, ref byte source, nuint byteCount)
- {
- Debug.Assert(byteCount > BulkMoveWithWriteBarrierChunk);
-
- if (Unsafe.AreSame(ref source, ref destination))
- return;
-
- // This is equivalent to: (destination - source) >= byteCount || (destination - source) < 0
- if ((nuint)(nint)Unsafe.ByteOffset(ref source, ref destination) >= byteCount)
- {
- // Copy forwards
- do
- {
- byteCount -= BulkMoveWithWriteBarrierChunk;
- __BulkMoveWithWriteBarrier(ref destination, ref source, BulkMoveWithWriteBarrierChunk);
- destination = ref Unsafe.AddByteOffset(ref destination, BulkMoveWithWriteBarrierChunk);
- source = ref Unsafe.AddByteOffset(ref source, BulkMoveWithWriteBarrierChunk);
- }
- while (byteCount > BulkMoveWithWriteBarrierChunk);
- }
- else
- {
- // Copy backwards
- do
- {
- byteCount -= BulkMoveWithWriteBarrierChunk;
- __BulkMoveWithWriteBarrier(ref Unsafe.AddByteOffset(ref destination, byteCount), ref Unsafe.AddByteOffset(ref source, byteCount), BulkMoveWithWriteBarrierChunk);
- }
- while (byteCount > BulkMoveWithWriteBarrierChunk);
- }
- __BulkMoveWithWriteBarrier(ref destination, ref source, byteCount);
- }
-#pragma warning restore IDE0060 // https://github.com/dotnet/roslyn-analyzers/issues/6228
+ [LibraryImport(RuntimeHelpers.QCall, EntryPoint = "Buffer_MemMove")]
+ private static unsafe partial void __Memmove(byte* dest, byte* src, nuint len);
[MethodImpl(MethodImplOptions.InternalCall)]
private static extern void __BulkMoveWithWriteBarrier(ref byte destination, ref byte source, nuint byteCount);
- [LibraryImport(RuntimeHelpers.QCall, EntryPoint = "Buffer_MemMove")]
- private static unsafe partial void __Memmove(byte* dest, byte* src, nuint len);
-
// Used by ilmarshalers.cpp
internal static unsafe void Memcpy(byte* dest, byte* src, int len)
{
Memmove(ref *dest, ref *src, (nuint)(uint)len);
}
-
- [MethodImpl(MethodImplOptions.AggressiveInlining)]
- internal static void Memmove<T>(ref T destination, ref T source, nuint elementCount)
- {
- if (!RuntimeHelpers.IsReferenceOrContainsReferences<T>())
- {
- // Blittable memmove
- Memmove(
- ref Unsafe.As<T, byte>(ref destination),
- ref Unsafe.As<T, byte>(ref source),
- elementCount * (nuint)Unsafe.SizeOf<T>());
- }
- else
- {
- // Non-blittable memmove
- BulkMoveWithWriteBarrier(
- ref Unsafe.As<T, byte>(ref destination),
- ref Unsafe.As<T, byte>(ref source),
- elementCount * (nuint)Unsafe.SizeOf<T>());
- }
- }
}
}
internal static extern unsafe object RhpNewFastMisalign(MethodTable * pEEType);
#endif // FEATURE_64BIT_ALIGNMENT
- [RuntimeImport(Redhawk.BaseName, "RhpCopyObjectContents")]
- [MethodImpl(MethodImplOptions.InternalCall)]
- internal static extern unsafe void RhpCopyObjectContents(object objDest, object objSrc);
-
[RuntimeImport(Redhawk.BaseName, "RhpAssignRef")]
[MethodImpl(MethodImplOptions.InternalCall)]
internal static extern unsafe void RhpAssignRef(ref object address, object obj);
}
}
- [RuntimeExport("RhMemberwiseClone")]
- public static unsafe object RhMemberwiseClone(object src)
- {
- object objClone;
-
- if (src.GetMethodTable()->IsArray)
- objClone = RhNewArray(src.GetMethodTable(), Unsafe.As<Array>(src).Length);
- else
- objClone = RhNewObject(src.GetMethodTable());
-
- InternalCalls.RhpCopyObjectContents(objClone, src);
-
- return objClone;
- }
-
[RuntimeExport("RhGetCurrentThreadStackTrace")]
[MethodImpl(MethodImplOptions.NoInlining)] // Ensures that the RhGetCurrentThreadStackTrace frame is always present
public static unsafe int RhGetCurrentThreadStackTrace(IntPtr[] outputBuffer)
[RuntimeImport(RuntimeLibrary, "RhNewObject")]
internal static extern object RhNewObject(EETypePtr pEEType);
+ // Moves memory that may be on the GC heap and may contain object references.
+ // In general, a memcpy on the heap is unsafe, but this helper performs the
+ // correct write barriers so that the GC is not incorrectly impacted.
+ // NOTE: it is only OK to use this directly when copying small chunks of memory
+ // (like boxing a struct); otherwise use the helpers from System.Buffer to avoid
+ // running uninterruptible code for too long.
[MethodImplAttribute(MethodImplOptions.InternalCall)]
[RuntimeImport(RuntimeLibrary, "RhBulkMoveWithWriteBarrier")]
internal static extern unsafe void RhBulkMoveWithWriteBarrier(ref byte dmem, ref byte smem, uint size);
-
// Allocate handle.
[MethodImpl(MethodImplOptions.InternalCall)]
[RuntimeImport(RuntimeLibrary, "RhpHandleAlloc")]
return (uint32_t)CGCDesc::GetCGCDescFromMT(pMT)->GetSize();
}
-COOP_PINVOKE_HELPER(void, RhpCopyObjectContents, (Object* pobjDest, Object* pobjSrc))
-{
- size_t cbDest = pobjDest->GetSize() - sizeof(ObjHeader);
- size_t cbSrc = pobjSrc->GetSize() - sizeof(ObjHeader);
- if (cbSrc != cbDest)
- return;
-
- ASSERT(pobjDest->get_EEType()->HasReferenceFields() == pobjSrc->get_EEType()->HasReferenceFields());
-
- if (pobjDest->get_EEType()->HasReferenceFields())
- {
- GCSafeCopyMemoryWithWriteBarrier(pobjDest, pobjSrc, cbDest);
- }
- else
- {
- memcpy(pobjDest, pobjSrc, cbDest);
- }
-}
-
COOP_PINVOKE_HELPER(FC_BOOL_RET, RhCompareObjectContentsAndPadding, (Object* pObj1, Object* pObj2))
{
ASSERT(pObj1->get_EEType()->IsEquivalentTo(pObj2->get_EEType()));
+ ASSERT(pObj1->get_EEType()->IsValueType());
+
MethodTable * pEEType = pObj1->get_EEType();
size_t cbFields = pEEType->get_BaseSize() - (sizeof(ObjHeader) + sizeof(MethodTable*));
uint8_t * pbFields1 = (uint8_t*)pObj1 + sizeof(MethodTable*);
uint8_t * pbFields2 = (uint8_t*)pObj2 + sizeof(MethodTable*);
+ // memcmp is OK in a COOP method because we are comparing value types, which are typically small.
FC_RETURN_BOOL(memcmp(pbFields1, pbFields2, cbFields) == 0);
}
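The new asserts codify what the managed caller already guarantees. A hedged sketch of the usual managed fast path, assuming the standard ValueType.Equals bitwise-comparison shortcut and a RuntimeImports wrapper for this export:

```csharp
// Hedged sketch: when a value type has no custom Equals, no reference
// fields, and no padding, Equals can compare the two boxes bitwise via
// the runtime helper.
public override bool Equals(object? obj)
{
    if (obj == null || obj.GetType() != this.GetType())
        return false; // different types are never bitwise-equal here

    // Both operands are now boxed value types with equivalent
    // MethodTables -- exactly the preconditions the new ASSERTs check.
    return RuntimeImports.RhCompareObjectContentsAndPadding(this, obj);
}
```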
return new RuntimeTypeHandle(obj.GetEETypePtr());
}
- // Move memory which may be on the heap which may have object references in it.
- // In general, a memcpy on the heap is unsafe, but this is able to perform the
- // correct write barrier such that the GC is not incorrectly impacted.
- public static unsafe void BulkMoveWithWriteBarrier(IntPtr dmem, IntPtr smem, int size)
- {
- RuntimeImports.RhBulkMoveWithWriteBarrier(ref *(byte*)dmem.ToPointer(), ref *(byte*)smem.ToPointer(), (uint)size);
- }
-
public static IntPtr GetUniversalTransitionThunk()
{
return RuntimeImports.RhGetUniversalTransitionThunk();
{
public static partial class Buffer
{
- // Non-inlinable wrapper around the QCall that avoids polluting the fast path
- // with P/Invoke prolog/epilog.
- [MethodImpl(MethodImplOptions.NoInlining)]
- internal static unsafe void _ZeroMemory(ref byte b, nuint byteLength)
- {
- fixed (byte* bytePointer = &b)
- {
- RuntimeImports.memset(bytePointer, 0, byteLength);
- }
- }
-
- internal static void BulkMoveWithWriteBarrier(ref byte dmem, ref byte smem, nuint size)
- => RuntimeImports.RhBulkMoveWithWriteBarrier(ref dmem, ref smem, size);
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private static unsafe void __ZeroMemory(void* b, nuint byteLength) =>
+ RuntimeImports.memset((byte*)b, 0, byteLength);
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static unsafe void __Memmove(byte* dest, byte* src, nuint len) =>
RuntimeImports.memmove(dest, src, len);
- // This method has different signature for x64 and other platforms and is done for performance reasons.
[MethodImpl(MethodImplOptions.AggressiveInlining)]
- internal static unsafe void Memmove<T>(ref T destination, ref T source, nuint elementCount)
- {
- if (!RuntimeHelpers.IsReferenceOrContainsReferences<T>())
- {
- // Blittable memmove
-
- Memmove(
- ref Unsafe.As<T, byte>(ref destination),
- ref Unsafe.As<T, byte>(ref source),
- elementCount * (nuint)Unsafe.SizeOf<T>());
- }
- else
- {
- // Non-blittable memmove
- RuntimeImports.RhBulkMoveWithWriteBarrier(
- ref Unsafe.As<T, byte>(ref destination),
- ref Unsafe.As<T, byte>(ref source),
- elementCount * (nuint)Unsafe.SizeOf<T>());
- }
- }
+ private static void __BulkMoveWithWriteBarrier(ref byte destination, ref byte source, nuint byteCount) =>
+ RuntimeImports.RhBulkMoveWithWriteBarrier(ref destination, ref source, byteCount);
}
}
}
[Intrinsic]
- protected object MemberwiseClone()
+ protected internal unsafe object MemberwiseClone()
{
- return RuntimeImports.RhMemberwiseClone(this);
+ object clone = this.GetMethodTable()->IsArray ?
+ RuntimeExports.RhNewArray(this.GetMethodTable(), Unsafe.As<Array>(this).Length) :
+ RuntimeExports.RhNewObject(this.GetMethodTable());
+
+ // copy contents of "this" to the clone
+
+ nuint byteCount = RuntimeHelpers.GetRawObjectDataSize(this);
+ ref byte src = ref this.GetRawData();
+ ref byte dst = ref clone.GetRawData();
+
+ if (this.GetMethodTable()->HasGCPointers)
+ Buffer.BulkMoveWithWriteBarrier(ref dst, ref src, byteCount);
+ else
+ Buffer.Memmove(ref dst, ref src, byteCount);
+
+ return clone;
}
}
}
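The managed rewrite preserves MemberwiseClone's shallow-copy semantics. A small illustration; the `Node` type is hypothetical:

```csharp
using System.Diagnostics;

var a = new Node { Value = 1 };
var b = a.ShallowCopy();
Debug.Assert(!ReferenceEquals(a, b));          // distinct objects
Debug.Assert(ReferenceEquals(a.Data, b.Data)); // shallow: the array is shared

class Node
{
    public int Value;
    public int[] Data = new int[4];
    // MemberwiseClone is protected, so expose it through a helper.
    public Node ShallowCopy() => (Node)MemberwiseClone();
}
```

The clone is a new object, but reference-typed fields still point at the same instances, because the copy above is a field-by-field (barriered) memmove, not a deep copy.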
if ((!eeType.IsValueType) || eeType.IsPrimitive)
return obj;
- return RuntimeImports.RhMemberwiseClone(obj);
+ return obj.MemberwiseClone();
}
public static new bool Equals(object? o1, object? o2)
internal static unsafe nuint GetRawObjectDataSize(this object obj)
{
- Debug.Assert(obj.GetEETypePtr().ComponentSize == 0);
- return obj.GetEETypePtr().BaseSize - (uint)sizeof(ObjHeader) - (uint)sizeof(MethodTable*);
+ MethodTable* pMT = GetMethodTable(obj);
+
+ // See comment on RawArrayData for details
+ nuint rawSize = pMT->BaseSize - (nuint)(2 * sizeof(IntPtr));
+ if (pMT->HasComponentSize)
+ rawSize += (uint)Unsafe.As<RawArrayData>(obj).Length * (nuint)pMT->ComponentSize;
+
+ GC.KeepAlive(obj); // Keep MethodTable alive
+
+ return rawSize;
}
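A hedged walk-through of the new formula, assuming 64-bit pointers and the usual layout (ObjHeader, then MethodTable*, then data); GetRawObjectDataSize is the corelib-internal helper above:

```csharp
// For a non-array object, rawSize is just BaseSize minus the two
// pointer-sized slots (ObjHeader + MethodTable*).
//
// For an int[3] (hedged 64-bit numbers): BaseSize is 24, so the fixed
// part is 24 - 2*8 = 8 bytes (the length field plus padding), and the
// variable part adds Length * ComponentSize = 3 * 4 = 12 bytes, giving
// 20 bytes of raw data after the MethodTable pointer.
int[] arr = new int[3];
nuint size = arr.GetRawObjectDataSize(); // 20 under the assumptions above
```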
internal static unsafe ushort GetElementSize(this Array array)
internal static unsafe void RhUnbox(object? obj, ref byte data, EETypePtr pUnboxToEEType)
=> RhUnbox(obj, ref data, pUnboxToEEType.ToPointer());
- [MethodImpl(MethodImplOptions.InternalCall)]
- [RuntimeImport(RuntimeLibrary, "RhMemberwiseClone")]
- internal static extern object RhMemberwiseClone(object obj);
-
// Busy spin for the given number of iterations.
[LibraryImport(RuntimeLibrary, EntryPoint = "RhSpinWait")]
[SuppressGCTransition]
[StructLayout(LayoutKind.Sequential, Size = 64)]
private struct Block64 { }
#endif // HAS_CUSTOM_BLOCKS
+
+ // Non-inlinable wrapper around the QCall that avoids polluting the fast path
+ // with P/Invoke prolog/epilog.
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ internal static unsafe void _ZeroMemory(ref byte b, nuint byteLength)
+ {
+ fixed (byte* bytePointer = &b)
+ {
+ __ZeroMemory(bytePointer, byteLength);
+ }
+ }
+
+#if !MONO // Mono BulkMoveWithWriteBarrier is in terms of elements (not bytes) and takes a type handle.
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ internal static void Memmove<T>(ref T destination, ref T source, nuint elementCount)
+ {
+ if (!RuntimeHelpers.IsReferenceOrContainsReferences<T>())
+ {
+ // Blittable memmove
+ Memmove(
+ ref Unsafe.As<T, byte>(ref destination),
+ ref Unsafe.As<T, byte>(ref source),
+ elementCount * (nuint)Unsafe.SizeOf<T>());
+ }
+ else
+ {
+ // Non-blittable memmove
+ BulkMoveWithWriteBarrier(
+ ref Unsafe.As<T, byte>(ref destination),
+ ref Unsafe.As<T, byte>(ref source),
+ elementCount * (nuint)Unsafe.SizeOf<T>());
+ }
+ }
+
+ // The maximum block size for the __BulkMoveWithWriteBarrier FCall. This is required to avoid GC starvation.
+#if DEBUG // Stress the mechanism in debug builds
+ private const uint BulkMoveWithWriteBarrierChunk = 0x400;
+#else
+ private const uint BulkMoveWithWriteBarrierChunk = 0x4000;
+#endif
+
+ internal static void BulkMoveWithWriteBarrier(ref byte destination, ref byte source, nuint byteCount)
+ {
+ if (byteCount <= BulkMoveWithWriteBarrierChunk)
+ __BulkMoveWithWriteBarrier(ref destination, ref source, byteCount);
+ else
+ _BulkMoveWithWriteBarrier(ref destination, ref source, byteCount);
+ }
+
+#pragma warning disable IDE0060 // https://github.com/dotnet/roslyn-analyzers/issues/6228
+ // Non-inlinable wrapper around the loop for copying large blocks in chunks
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ private static void _BulkMoveWithWriteBarrier(ref byte destination, ref byte source, nuint byteCount)
+ {
+ Debug.Assert(byteCount > BulkMoveWithWriteBarrierChunk);
+
+ if (Unsafe.AreSame(ref source, ref destination))
+ return;
+
+ // This is equivalent to: (destination - source) >= byteCount || (destination - source) < 0
+ if ((nuint)(nint)Unsafe.ByteOffset(ref source, ref destination) >= byteCount)
+ {
+ // Copy forwards
+ do
+ {
+ byteCount -= BulkMoveWithWriteBarrierChunk;
+ __BulkMoveWithWriteBarrier(ref destination, ref source, BulkMoveWithWriteBarrierChunk);
+ destination = ref Unsafe.AddByteOffset(ref destination, BulkMoveWithWriteBarrierChunk);
+ source = ref Unsafe.AddByteOffset(ref source, BulkMoveWithWriteBarrierChunk);
+ }
+ while (byteCount > BulkMoveWithWriteBarrierChunk);
+ }
+ else
+ {
+ // Copy backwards
+ do
+ {
+ byteCount -= BulkMoveWithWriteBarrierChunk;
+ __BulkMoveWithWriteBarrier(ref Unsafe.AddByteOffset(ref destination, byteCount), ref Unsafe.AddByteOffset(ref source, byteCount), BulkMoveWithWriteBarrierChunk);
+ }
+ while (byteCount > BulkMoveWithWriteBarrierChunk);
+ }
+ __BulkMoveWithWriteBarrier(ref destination, ref source, byteCount);
+ }
+#pragma warning restore IDE0060 // https://github.com/dotnet/roslyn-analyzers/issues/6228
+
+#endif // !MONO
}
}
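One subtlety worth calling out in the moved code: the single unsigned comparison in `_BulkMoveWithWriteBarrier` folds both overlap conditions into one branch. A small worked illustration:

```csharp
// (nuint)(destination - source) >= byteCount covers two cases at once:
//  * destination - source >= byteCount: destination starts at or past
//    the end of the source region, so a forward copy cannot clobber it.
//  * destination - source < 0: destination is before source; the cast
//    wraps the negative offset to a huge unsigned value, which is also
//    >= byteCount, so a forward copy is again safe.
// Only 0 < destination - source < byteCount (destination inside the
// source region) fails the test and forces the backward chunked copy.
nint offset = 0x100;        // pretend destination - source == 0x100 bytes
nuint byteCount = 0x4000;   // copying 16 KiB
bool forward = (nuint)offset >= byteCount; // false: overlap, copy backwards
```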
public partial class Buffer
{
[MethodImpl(MethodImplOptions.InternalCall)]
- private static extern unsafe void __Memmove(byte* dest, byte* src, nuint len);
+ private static extern unsafe void __ZeroMemory(void* b, nuint byteLength);
[MethodImpl(MethodImplOptions.InternalCall)]
- private static extern void BulkMoveWithWriteBarrier(ref byte dmem, ref byte smem, nuint len, IntPtr type_handle);
-
- [MethodImpl(MethodImplOptions.NoInlining)]
- internal static unsafe void _ZeroMemory(ref byte b, nuint byteLength)
- {
- fixed (byte* bytePointer = &b)
- {
- __ZeroMemory(bytePointer, byteLength);
- }
- }
+ private static extern unsafe void __Memmove(byte* dest, byte* src, nuint len);
[MethodImpl(MethodImplOptions.InternalCall)]
- private static extern unsafe void __ZeroMemory(void* p, nuint byteLength);
+ private static extern void BulkMoveWithWriteBarrier(ref byte dmem, ref byte smem, nuint len, IntPtr type_handle);
[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal static void Memmove<T>(ref T destination, ref T source, nuint elementCount)