1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
5 #if AMD64 || ARM64 || (BIT32 && !ARM)
6 #define HAS_CUSTOM_BLOCKS
11 //Only contains static methods. Does not require serialization
14 using System.Runtime.CompilerServices;
15 using System.Runtime.ConstrainedExecution;
16 using System.Runtime.InteropServices;
17 using System.Runtime.Versioning;
18 using System.Diagnostics;
19 using System.Security;
21 using Internal.Runtime.CompilerServices;
24 using nint = System.Int64;
25 using nuint = System.UInt64;
27 using nint = System.Int32;
28 using nuint = System.UInt32;
31 public static class Buffer
// Copies from one primitive array to another primitive array without
// respecting types. This calls memmove internally. The count and
// offset parameters here are in bytes. If you want to use traditional
// array element indices and counts, use Array.Copy.
//
// FCall (MethodImplOptions.InternalCall): the implementation lives in the
// runtime; there is no managed body here.
[MethodImplAttribute(MethodImplOptions.InternalCall)]
public static extern void BlockCopy(Array src, int srcOffset,
    Array dst, int dstOffset, int count);
// This is ported from the optimized CRT assembly in memchr.asm. The JIT generates
// pretty good code here and this ends up being within a couple % of the CRT asm.
// It is however cross platform as the CRT hasn't ported their fast version to 64-bit
// platforms.
//
// Searches the `count` bytes starting at src + index for the first occurrence
// of `value`, returning its offset from `src` (see the `(pByte - src)` returns
// below); per the trailing comment, -1 is returned when no match is found.
// Strategy: byte-at-a-time until pByte is 4-byte aligned, then a SWAR scan
// with `value` replicated into all four bytes of `comparer`, then a byte-wise
// tail loop.
// NOTE(review): several lines of this method (loop bodies/braces, the
// 0x7efefeff zero-byte detection steps after t2, and the tail-loop header)
// are not visible in this chunk — the lines below are NOT contiguous; confirm
// against the full file before editing.
internal unsafe static int IndexOfByte(byte* src, byte value, int index, int count)
Debug.Assert(src != null, "src should not be null");
byte* pByte = src + index;
// Align up the pointer to sizeof(int).
while (((int)pByte & 3) != 0)
else if (*pByte == value)
// Match found during the alignment prologue.
return (int)(pByte - src);
// Fill comparer with value byte for comparisons
// comparer = 0/0/value/value
uint comparer = (((uint)value << 8) + (uint)value);
// comparer = value/value/value/value
comparer = (comparer << 16) + comparer;
// Run through buffer until we hit a 4-byte section which contains
// the byte we're looking for or until we exhaust the buffer.
// Test the buffer for presence of value. comparer contains the byte
// replicated 4 times.
uint t1 = *(uint*)pByte;
// 0x7efefeff is the classic carry-propagation constant for detecting a
// zero byte in a 32-bit word (applied after XOR-ing with comparer).
uint t2 = 0x7efefeff + t1;
// if t1 is zero then these 4-bytes don't contain a match
// We've found a match for value, figure out which position it's in.
int foundIndex = (int)(pByte - src);
if (pByte[0] == value)
else if (pByte[1] == value)
return foundIndex + 1;
else if (pByte[2] == value)
return foundIndex + 2;
else if (pByte[3] == value)
return foundIndex + 3;
// Catch any bytes that might be left at the tail of the buffer
return (int)(pByte - src);
// If we don't have a match return -1;
// Returns a bool to indicate if the array is of primitive data types
// or not. FCall into the runtime; used by GetByte/SetByte/ByteLength
// below to validate their argument.
[MethodImplAttribute(MethodImplOptions.InternalCall)]
private static extern bool IsPrimitiveTypeArray(Array array);
// Gets a particular byte out of the array. The array must be an
// array of primitives.
//
// This essentially does the following:
// return ((byte*)array) + index.
//
// FCall; callers are expected to validate the array and index first
// (see the public GetByte wrapper).
[MethodImplAttribute(MethodImplOptions.InternalCall)]
private static extern byte _GetByte(Array array, int index);
// Returns the byte at byte-offset `index` of `array`. The array must be
// non-null and contain only primitive elements; `index` is validated
// against the array's total byte length (_ByteLength).
//
// Throws ArgumentNullException, ArgumentException (non-primitive array),
// or ArgumentOutOfRangeException.
public static byte GetByte(Array array, int index)
{
    // Is the array present?
    if (array == null)
        throw new ArgumentNullException(nameof(array));

    // Is it of primitive types?
    if (!IsPrimitiveTypeArray(array))
        throw new ArgumentException(SR.Arg_MustBePrimArray, nameof(array));

    // Is the index in valid range of the array?
    if (index < 0 || index >= _ByteLength(array))
        throw new ArgumentOutOfRangeException(nameof(index));

    return _GetByte(array, index);
}
// Sets a particular byte in an the array. The array must be an
// array of primitives.
//
// This essentially does the following:
// *(((byte*)array) + index) = value.
//
// FCall; callers are expected to validate the array and index first
// (see the public SetByte wrapper).
[MethodImplAttribute(MethodImplOptions.InternalCall)]
private static extern void _SetByte(Array array, int index, byte value);
// Stores `value` at byte-offset `index` of `array`. The array must be
// non-null and contain only primitive elements; `index` is validated
// against the array's total byte length (_ByteLength).
//
// Throws ArgumentNullException, ArgumentException (non-primitive array),
// or ArgumentOutOfRangeException.
public static void SetByte(Array array, int index, byte value)
{
    // Is the array present?
    if (array == null)
        throw new ArgumentNullException(nameof(array));

    // Is it of primitive types?
    if (!IsPrimitiveTypeArray(array))
        throw new ArgumentException(SR.Arg_MustBePrimArray, nameof(array));

    // Is the index in valid range of the array?
    if (index < 0 || index >= _ByteLength(array))
        throw new ArgumentOutOfRangeException(nameof(index));

    // Make the FCall to do the work
    _SetByte(array, index, value);
}
// Gets the total number of bytes in the array. The array must be an
// array of primitives.
//
// This essentially does the following:
// return array.length * sizeof(array.UnderlyingElementType).
//
// FCall; callers are expected to validate the array first
// (see the public ByteLength wrapper).
[MethodImplAttribute(MethodImplOptions.InternalCall)]
private static extern int _ByteLength(Array array);
// Returns the total number of bytes occupied by the elements of `array`
// (element count times element size, via the _ByteLength FCall). The
// array must be non-null and contain only primitive elements.
//
// Throws ArgumentNullException or ArgumentException (non-primitive array).
public static int ByteLength(Array array)
{
    // Is the array present?
    if (array == null)
        throw new ArgumentNullException(nameof(array));

    // Is it of primitive types?
    if (!IsPrimitiveTypeArray(array))
        throw new ArgumentException(SR.Arg_MustBePrimArray, nameof(array));

    return _ByteLength(array);
}
// Zeroes `len` bytes starting at `src`.
// NOTE(review): the method body is not visible in this chunk — confirm the
// implementation against the full file.
internal unsafe static void ZeroMemory(byte* src, long len)
// Copies `len` bytes from the native buffer `src + srcIndex` into the
// managed array `dest` starting at `destIndex`. Bounds are only checked
// by Debug.Assert — callers must pass validated arguments.
internal unsafe static void Memcpy(byte[] dest, int destIndex, byte* src, int srcIndex, int len)
{
    Debug.Assert((srcIndex >= 0) && (destIndex >= 0) && (len >= 0), "Index and length must be non-negative!");
    Debug.Assert(dest.Length - destIndex >= len, "not enough bytes in dest");

    // If dest has 0 elements, the fixed statement will throw an
    // IndexOutOfRangeException. Special-case 0-byte copies.
    if (len == 0)
        return;

    fixed (byte* pDest = dest)
    {
        Memcpy(pDest + destIndex, src + srcIndex, len);
    }
}
// Copies `len` bytes from the managed array `src` starting at `srcIndex`
// into the native buffer `pDest + destIndex`. Bounds are only checked
// by Debug.Assert — callers must pass validated arguments.
internal unsafe static void Memcpy(byte* pDest, int destIndex, byte[] src, int srcIndex, int len)
{
    Debug.Assert((srcIndex >= 0) && (destIndex >= 0) && (len >= 0), "Index and length must be non-negative!");
    Debug.Assert(src.Length - srcIndex >= len, "not enough bytes in src");

    // If src has 0 elements, the fixed statement will throw an
    // IndexOutOfRangeException. Special-case 0-byte copies.
    if (len == 0)
        return;

    fixed (byte* pSrc = src)
    {
        Memcpy(pDest + destIndex, pSrc + srcIndex, len);
    }
}
// This is tricky to get right AND fast, so lets make it useful for the whole Fx.
// E.g. System.Runtime.WindowsRuntime!WindowsRuntimeBufferExtensions.MemCopy uses it.
//
// This method has a slightly different behavior on arm and other platforms.
// On arm this method behaves like memcpy and does not handle overlapping buffers.
// While on other platforms it behaves like memmove and handles overlapping buffers.
// This behavioral difference is unfortunate but intentional because
// 1. This method is given access to other internal dlls and this close to release we do not want to change it.
// 2. It is difficult to get this right for arm and again due to release dates we would like to visit it later.
//
// NOTE(review): this FCall declaration appears to be one side of a
// preprocessor split with the managed wrapper that follows — the #if/#else
// directives are not visible in this chunk; confirm against the full file.
[MethodImplAttribute(MethodImplOptions.InternalCall)]
internal unsafe static extern void Memcpy(byte* dest, byte* src, int len);
// Managed memcpy over raw byte pointers: forwards to Memmove after
// asserting the length is non-negative. The (uint)len cast is safe because
// of that assert.
[MethodImplAttribute(MethodImplOptions.AggressiveInlining)]
internal unsafe static void Memcpy(byte* dest, byte* src, int len)
{
    Debug.Assert(len >= 0, "Negative length in memcopy!");
    Memmove(dest, src, (uint)len);
}
// This method has different signature for x64 and other platforms and is done for performance reasons.
//
// Copies `len` bytes from `src` to `dest`. Overlapping buffers and copies
// larger than CopyThreshold are handed off to the native memmove via the
// PInvoke path (_Memmove at the bottom); everything else is copied in
// managed code using fixed-size chunks addressed via the MCPYnn labels.
// NOTE(review): braces, labels (MCPY00..MCPY06, PInvoke), goto statements,
// the 64-byte loop bookkeeping (`n`), and some #elif/#else directives are
// not visible in this chunk — the lines below are NOT contiguous; confirm
// against the full file before editing.
internal unsafe static void Memmove(byte* dest, byte* src, nuint len)
#if AMD64 || (BIT32 && !ARM)
const nuint CopyThreshold = 2048;
// Determined optimal value for Windows.
// https://github.com/dotnet/coreclr/issues/13843
const nuint CopyThreshold = UInt64.MaxValue;
#else // PLATFORM_WINDOWS
// Managed code is currently faster than glibc unoptimized memmove
// TODO-ARM64-UNIX-OPT revisit when glibc optimized memmove is in Linux distros
// https://github.com/dotnet/coreclr/issues/13844
const nuint CopyThreshold = UInt64.MaxValue;
#endif // PLATFORM_WINDOWS
const nuint CopyThreshold = 512;
#endif // AMD64 || (BIT32 && !ARM)
// P/Invoke into the native version when the buffers are overlapping.
// Unsigned subtraction wraps, so each comparison detects overlap in one direction.
if (((nuint)dest - (nuint)src < len) || ((nuint)src - (nuint)dest < len)) goto PInvoke;
byte* srcEnd = src + len;
byte* destEnd = dest + len;
if (len <= 16) goto MCPY02;
if (len > 64) goto MCPY05;
// Copy bytes which are multiples of 16 and leave the remainder for MCPY01 to handle.
Debug.Assert(len > 16 && len <= 64);
#if HAS_CUSTOM_BLOCKS
*(Block16*)dest = *(Block16*)src; // [0,16]
*(long*)dest = *(long*)src;
*(long*)(dest + 8) = *(long*)(src + 8); // [0,16]
*(int*)dest = *(int*)src;
*(int*)(dest + 4) = *(int*)(src + 4);
*(int*)(dest + 8) = *(int*)(src + 8);
*(int*)(dest + 12) = *(int*)(src + 12); // [0,16]
if (len <= 32) goto MCPY01;
#if HAS_CUSTOM_BLOCKS
*(Block16*)(dest + 16) = *(Block16*)(src + 16); // [0,32]
*(long*)(dest + 16) = *(long*)(src + 16);
*(long*)(dest + 24) = *(long*)(src + 24); // [0,32]
*(int*)(dest + 16) = *(int*)(src + 16);
*(int*)(dest + 20) = *(int*)(src + 20);
*(int*)(dest + 24) = *(int*)(src + 24);
*(int*)(dest + 28) = *(int*)(src + 28); // [0,32]
if (len <= 48) goto MCPY01;
#if HAS_CUSTOM_BLOCKS
*(Block16*)(dest + 32) = *(Block16*)(src + 32); // [0,48]
*(long*)(dest + 32) = *(long*)(src + 32);
*(long*)(dest + 40) = *(long*)(src + 40); // [0,48]
*(int*)(dest + 32) = *(int*)(src + 32);
*(int*)(dest + 36) = *(int*)(src + 36);
*(int*)(dest + 40) = *(int*)(src + 40);
*(int*)(dest + 44) = *(int*)(src + 44); // [0,48]
// Unconditionally copy the last 16 bytes using destEnd and srcEnd and return.
Debug.Assert(len > 16 && len <= 64);
#if HAS_CUSTOM_BLOCKS
*(Block16*)(destEnd - 16) = *(Block16*)(srcEnd - 16);
*(long*)(destEnd - 16) = *(long*)(srcEnd - 16);
*(long*)(destEnd - 8) = *(long*)(srcEnd - 8);
*(int*)(destEnd - 16) = *(int*)(srcEnd - 16);
*(int*)(destEnd - 12) = *(int*)(srcEnd - 12);
*(int*)(destEnd - 8) = *(int*)(srcEnd - 8);
*(int*)(destEnd - 4) = *(int*)(srcEnd - 4);
// Copy the first 8 bytes and then unconditionally copy the last 8 bytes and return.
if ((len & 24) == 0) goto MCPY03;
Debug.Assert(len >= 8 && len <= 16);
*(long*)dest = *(long*)src;
*(long*)(destEnd - 8) = *(long*)(srcEnd - 8);
*(int*)dest = *(int*)src;
*(int*)(dest + 4) = *(int*)(src + 4);
*(int*)(destEnd - 8) = *(int*)(srcEnd - 8);
*(int*)(destEnd - 4) = *(int*)(srcEnd - 4);
// Copy the first 4 bytes and then unconditionally copy the last 4 bytes and return.
if ((len & 4) == 0) goto MCPY04;
Debug.Assert(len >= 4 && len < 8);
*(int*)dest = *(int*)src;
*(int*)(destEnd - 4) = *(int*)(srcEnd - 4);
// Copy the first byte. For pending bytes, do an unconditionally copy of the last 2 bytes and return.
Debug.Assert(len < 4);
if (len == 0) return;
if ((len & 2) == 0) return;
*(short*)(destEnd - 2) = *(short*)(srcEnd - 2);
// PInvoke to the native version when the copy length exceeds the threshold.
if (len > CopyThreshold)
// Copy 64-bytes at a time until the remainder is less than 64.
// If remainder is greater than 16 bytes, then jump to MCPY00. Otherwise, unconditionally copy the last 16 bytes and return.
Debug.Assert(len > 64 && len <= CopyThreshold);
#if HAS_CUSTOM_BLOCKS
*(Block64*)dest = *(Block64*)src;
*(long*)dest = *(long*)src;
*(long*)(dest + 8) = *(long*)(src + 8);
*(long*)(dest + 16) = *(long*)(src + 16);
*(long*)(dest + 24) = *(long*)(src + 24);
*(long*)(dest + 32) = *(long*)(src + 32);
*(long*)(dest + 40) = *(long*)(src + 40);
*(long*)(dest + 48) = *(long*)(src + 48);
*(long*)(dest + 56) = *(long*)(src + 56);
*(int*)dest = *(int*)src;
*(int*)(dest + 4) = *(int*)(src + 4);
*(int*)(dest + 8) = *(int*)(src + 8);
*(int*)(dest + 12) = *(int*)(src + 12);
*(int*)(dest + 16) = *(int*)(src + 16);
*(int*)(dest + 20) = *(int*)(src + 20);
*(int*)(dest + 24) = *(int*)(src + 24);
*(int*)(dest + 28) = *(int*)(src + 28);
*(int*)(dest + 32) = *(int*)(src + 32);
*(int*)(dest + 36) = *(int*)(src + 36);
*(int*)(dest + 40) = *(int*)(src + 40);
*(int*)(dest + 44) = *(int*)(src + 44);
*(int*)(dest + 48) = *(int*)(src + 48);
*(int*)(dest + 52) = *(int*)(src + 52);
*(int*)(dest + 56) = *(int*)(src + 56);
*(int*)(dest + 60) = *(int*)(src + 60);
if (n != 0) goto MCPY06;
if (len > 16) goto MCPY00;
#if HAS_CUSTOM_BLOCKS
*(Block16*)(destEnd - 16) = *(Block16*)(srcEnd - 16);
*(long*)(destEnd - 16) = *(long*)(srcEnd - 16);
*(long*)(destEnd - 8) = *(long*)(srcEnd - 8);
*(int*)(destEnd - 16) = *(int*)(srcEnd - 16);
*(int*)(destEnd - 12) = *(int*)(srcEnd - 12);
*(int*)(destEnd - 8) = *(int*)(srcEnd - 8);
*(int*)(destEnd - 4) = *(int*)(srcEnd - 4);
// Fallback: hand the copy to the native memmove via the non-inlined wrapper.
_Memmove(dest, src, len);
// This method has different signature for x64 and other platforms and is done for performance reasons.
//
// Generic memmove over `elementCount` elements of T. When T contains no GC
// references (IsReferenceOrContainsReferences<T>() is false) the copy is a
// raw byte move — the ByReference<byte> arguments constructed below match
// the byte-wise Memmove(ByReference<byte>, ...) overload. When T does
// contain references, RhBulkMoveWithWriteBarrier is used so GC write
// barriers are honored.
// NOTE(review): braces and the blittable-path call target line are not
// visible in this chunk — the lines below are NOT contiguous; confirm
// against the full file before editing.
[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal static void Memmove<T>(ref T destination, ref T source, nuint elementCount)
if (!RuntimeHelpers.IsReferenceOrContainsReferences<T>())
new ByReference<byte>(ref Unsafe.As<T, byte>(ref destination)),
new ByReference<byte>(ref Unsafe.As<T, byte>(ref source)),
elementCount * (nuint)Unsafe.SizeOf<T>());
// Non-blittable memmove
// Try to avoid calling RhBulkMoveWithWriteBarrier if we can get away
// with a no-op (same location, or nothing to copy).
if (!Unsafe.AreSame(ref destination, ref source) && elementCount != 0)
RuntimeImports.RhBulkMoveWithWriteBarrier(
ref Unsafe.As<T, byte>(ref destination),
ref Unsafe.As<T, byte>(ref source),
elementCount * (nuint)Unsafe.SizeOf<T>());
// This method has different signature for x64 and other platforms and is done for performance reasons.
//
// ref-based twin of the pointer Memmove above: copies `len` bytes from
// `src` to `dest` (wrapped in ByReference<byte> so no pinning is needed),
// with the same chunked MCPYnn structure. Overlapping buffers and copies
// above CopyThreshold fall back to native memmove via _Memmove (which pins).
// NOTE(review): braces, labels (MCPY00..MCPY06, PInvoke), goto statements,
// the 64-byte loop bookkeeping (`n`), and some #elif/#else directives are
// not visible in this chunk — the lines below are NOT contiguous; confirm
// against the full file before editing.
private static void Memmove(ByReference<byte> dest, ByReference<byte> src, nuint len)
#if AMD64 || (BIT32 && !ARM)
const nuint CopyThreshold = 2048;
// Determined optimal value for Windows.
// https://github.com/dotnet/coreclr/issues/13843
const nuint CopyThreshold = UInt64.MaxValue;
#else // PLATFORM_WINDOWS
// Managed code is currently faster than glibc unoptimized memmove
// TODO-ARM64-UNIX-OPT revisit when glibc optimized memmove is in Linux distros
// https://github.com/dotnet/coreclr/issues/13844
const nuint CopyThreshold = UInt64.MaxValue;
#endif // PLATFORM_WINDOWS
const nuint CopyThreshold = 512;
#endif // AMD64 || (BIT32 && !ARM)
// P/Invoke into the native version when the buffers are overlapping.
// Unsigned ByteOffset comparison wraps, detecting overlap in each direction.
if (((nuint)Unsafe.ByteOffset(ref src.Value, ref dest.Value) < len) || ((nuint)Unsafe.ByteOffset(ref dest.Value, ref src.Value) < len))
// Use "(IntPtr)(nint)len" to avoid overflow checking on the explicit cast to IntPtr
ref byte srcEnd = ref Unsafe.Add(ref src.Value, (IntPtr)(nint)len);
ref byte destEnd = ref Unsafe.Add(ref dest.Value, (IntPtr)(nint)len);
// Copy bytes which are multiples of 16 and leave the remainder for MCPY01 to handle.
Debug.Assert(len > 16 && len <= 64);
#if HAS_CUSTOM_BLOCKS
Unsafe.As<byte, Block16>(ref dest.Value) = Unsafe.As<byte, Block16>(ref src.Value); // [0,16]
Unsafe.As<byte, long>(ref dest.Value) = Unsafe.As<byte, long>(ref src.Value);
Unsafe.As<byte, long>(ref Unsafe.Add(ref dest.Value, 8)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src.Value, 8)); // [0,16]
Unsafe.As<byte, int>(ref dest.Value) = Unsafe.As<byte, int>(ref src.Value);
Unsafe.As<byte, int>(ref Unsafe.Add(ref dest.Value, 4)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src.Value, 4));
Unsafe.As<byte, int>(ref Unsafe.Add(ref dest.Value, 8)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src.Value, 8));
Unsafe.As<byte, int>(ref Unsafe.Add(ref dest.Value, 12)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src.Value, 12)); // [0,16]
#if HAS_CUSTOM_BLOCKS
Unsafe.As<byte, Block16>(ref Unsafe.Add(ref dest.Value, 16)) = Unsafe.As<byte, Block16>(ref Unsafe.Add(ref src.Value, 16)); // [0,32]
Unsafe.As<byte, long>(ref Unsafe.Add(ref dest.Value, 16)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src.Value, 16));
Unsafe.As<byte, long>(ref Unsafe.Add(ref dest.Value, 24)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src.Value, 24)); // [0,32]
Unsafe.As<byte, int>(ref Unsafe.Add(ref dest.Value, 16)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src.Value, 16));
Unsafe.As<byte, int>(ref Unsafe.Add(ref dest.Value, 20)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src.Value, 20));
Unsafe.As<byte, int>(ref Unsafe.Add(ref dest.Value, 24)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src.Value, 24));
Unsafe.As<byte, int>(ref Unsafe.Add(ref dest.Value, 28)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src.Value, 28)); // [0,32]
#if HAS_CUSTOM_BLOCKS
Unsafe.As<byte, Block16>(ref Unsafe.Add(ref dest.Value, 32)) = Unsafe.As<byte, Block16>(ref Unsafe.Add(ref src.Value, 32)); // [0,48]
Unsafe.As<byte, long>(ref Unsafe.Add(ref dest.Value, 32)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src.Value, 32));
Unsafe.As<byte, long>(ref Unsafe.Add(ref dest.Value, 40)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src.Value, 40)); // [0,48]
Unsafe.As<byte, int>(ref Unsafe.Add(ref dest.Value, 32)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src.Value, 32));
Unsafe.As<byte, int>(ref Unsafe.Add(ref dest.Value, 36)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src.Value, 36));
Unsafe.As<byte, int>(ref Unsafe.Add(ref dest.Value, 40)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src.Value, 40));
Unsafe.As<byte, int>(ref Unsafe.Add(ref dest.Value, 44)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src.Value, 44)); // [0,48]
// Unconditionally copy the last 16 bytes using destEnd and srcEnd and return.
Debug.Assert(len > 16 && len <= 64);
#if HAS_CUSTOM_BLOCKS
Unsafe.As<byte, Block16>(ref Unsafe.Add(ref destEnd, -16)) = Unsafe.As<byte, Block16>(ref Unsafe.Add(ref srcEnd, -16));
Unsafe.As<byte, long>(ref Unsafe.Add(ref destEnd, -16)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref srcEnd, -16));
Unsafe.As<byte, long>(ref Unsafe.Add(ref destEnd, -8)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref srcEnd, -8));
Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -16)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -16));
Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -12)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -12));
Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -8)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -8));
Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -4)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -4));
// Copy the first 8 bytes and then unconditionally copy the last 8 bytes and return.
Debug.Assert(len >= 8 && len <= 16);
Unsafe.As<byte, long>(ref dest.Value) = Unsafe.As<byte, long>(ref src.Value);
Unsafe.As<byte, long>(ref Unsafe.Add(ref destEnd, -8)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref srcEnd, -8));
Unsafe.As<byte, int>(ref dest.Value) = Unsafe.As<byte, int>(ref src.Value);
Unsafe.As<byte, int>(ref Unsafe.Add(ref dest.Value, 4)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src.Value, 4));
Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -8)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -8));
Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -4)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -4));
// Copy the first 4 bytes and then unconditionally copy the last 4 bytes and return.
Debug.Assert(len >= 4 && len < 8);
Unsafe.As<byte, int>(ref dest.Value) = Unsafe.As<byte, int>(ref src.Value);
Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -4)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -4));
// Copy the first byte. For pending bytes, do an unconditionally copy of the last 2 bytes and return.
Debug.Assert(len < 4);
dest.Value = src.Value;
Unsafe.As<byte, short>(ref Unsafe.Add(ref destEnd, -2)) = Unsafe.As<byte, short>(ref Unsafe.Add(ref srcEnd, -2));
// PInvoke to the native version when the copy length exceeds the threshold.
if (len > CopyThreshold)
// Copy 64-bytes at a time until the remainder is less than 64.
// If remainder is greater than 16 bytes, then jump to MCPY00. Otherwise, unconditionally copy the last 16 bytes and return.
Debug.Assert(len > 64 && len <= CopyThreshold);
#if HAS_CUSTOM_BLOCKS
Unsafe.As<byte, Block64>(ref dest.Value) = Unsafe.As<byte, Block64>(ref src.Value);
Unsafe.As<byte, long>(ref dest.Value) = Unsafe.As<byte, long>(ref src.Value);
Unsafe.As<byte, long>(ref Unsafe.Add(ref dest.Value, 8)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src.Value, 8));
Unsafe.As<byte, long>(ref Unsafe.Add(ref dest.Value, 16)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src.Value, 16));
Unsafe.As<byte, long>(ref Unsafe.Add(ref dest.Value, 24)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src.Value, 24));
Unsafe.As<byte, long>(ref Unsafe.Add(ref dest.Value, 32)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src.Value, 32));
Unsafe.As<byte, long>(ref Unsafe.Add(ref dest.Value, 40)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src.Value, 40));
Unsafe.As<byte, long>(ref Unsafe.Add(ref dest.Value, 48)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src.Value, 48));
Unsafe.As<byte, long>(ref Unsafe.Add(ref dest.Value, 56)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref src.Value, 56));
Unsafe.As<byte, int>(ref dest.Value) = Unsafe.As<byte, int>(ref src.Value);
Unsafe.As<byte, int>(ref Unsafe.Add(ref dest.Value, 4)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src.Value, 4));
Unsafe.As<byte, int>(ref Unsafe.Add(ref dest.Value, 8)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src.Value, 8));
Unsafe.As<byte, int>(ref Unsafe.Add(ref dest.Value, 12)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src.Value, 12));
Unsafe.As<byte, int>(ref Unsafe.Add(ref dest.Value, 16)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src.Value, 16));
Unsafe.As<byte, int>(ref Unsafe.Add(ref dest.Value, 20)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src.Value, 20));
Unsafe.As<byte, int>(ref Unsafe.Add(ref dest.Value, 24)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src.Value, 24));
Unsafe.As<byte, int>(ref Unsafe.Add(ref dest.Value, 28)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src.Value, 28));
Unsafe.As<byte, int>(ref Unsafe.Add(ref dest.Value, 32)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src.Value, 32));
Unsafe.As<byte, int>(ref Unsafe.Add(ref dest.Value, 36)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src.Value, 36));
Unsafe.As<byte, int>(ref Unsafe.Add(ref dest.Value, 40)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src.Value, 40));
Unsafe.As<byte, int>(ref Unsafe.Add(ref dest.Value, 44)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src.Value, 44));
Unsafe.As<byte, int>(ref Unsafe.Add(ref dest.Value, 48)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src.Value, 48));
Unsafe.As<byte, int>(ref Unsafe.Add(ref dest.Value, 52)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src.Value, 52));
Unsafe.As<byte, int>(ref Unsafe.Add(ref dest.Value, 56)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src.Value, 56));
Unsafe.As<byte, int>(ref Unsafe.Add(ref dest.Value, 60)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref src.Value, 60));
// Advance both cursors by the 64-byte block just copied.
dest = new ByReference<byte>(ref Unsafe.Add(ref dest.Value, 64));
src = new ByReference<byte>(ref Unsafe.Add(ref src.Value, 64));
#if HAS_CUSTOM_BLOCKS
Unsafe.As<byte, Block16>(ref Unsafe.Add(ref destEnd, -16)) = Unsafe.As<byte, Block16>(ref Unsafe.Add(ref srcEnd, -16));
Unsafe.As<byte, long>(ref Unsafe.Add(ref destEnd, -16)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref srcEnd, -16));
Unsafe.As<byte, long>(ref Unsafe.Add(ref destEnd, -8)) = Unsafe.As<byte, long>(ref Unsafe.Add(ref srcEnd, -8));
Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -16)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -16));
Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -12)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -12));
Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -8)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -8));
Unsafe.As<byte, int>(ref Unsafe.Add(ref destEnd, -4)) = Unsafe.As<byte, int>(ref Unsafe.Add(ref srcEnd, -4));
// If the buffers overlap perfectly, there's no point to copying the data.
if (Unsafe.AreSame(ref dest.Value, ref src.Value))
_Memmove(ref dest.Value, ref src.Value, len);
// Non-inlinable wrapper around the QCall that avoids polluting the fast path
// with P/Invoke prolog/epilog.
[MethodImplAttribute(MethodImplOptions.NoInlining)]
private unsafe static void _Memmove(byte* dest, byte* src, nuint len)
{
    __Memmove(dest, src, len);
}
// Non-inlinable wrapper around the QCall that avoids polluting the fast path
// with P/Invoke prolog/epilog. Pins both refs so the GC cannot move the
// underlying memory during the native call.
[MethodImplAttribute(MethodImplOptions.NoInlining)]
private unsafe static void _Memmove(ref byte dest, ref byte src, nuint len)
{
    fixed (byte* pDest = &dest)
    fixed (byte* pSrc = &src)
        __Memmove(pDest, pSrc, len);
}
// QCall into the runtime's native memmove implementation; only reached via
// the _Memmove wrappers above.
[DllImport(JitHelpers.QCall, CharSet = CharSet.Unicode)]
extern private unsafe static void __Memmove(byte* dest, byte* src, nuint len);
// The attributes on this method are chosen for best JIT performance.
// Please do not edit unless intentional.
//
// Copies sourceBytesToCopy bytes from source to destination, throwing if
// the copy would exceed destinationSizeInBytes. The checked cast to nuint
// guards against negative lengths and 32-bit overflow.
[MethodImplAttribute(MethodImplOptions.AggressiveInlining)]
[CLSCompliant(false)]
public static unsafe void MemoryCopy(void* source, void* destination, long destinationSizeInBytes, long sourceBytesToCopy)
{
    if (sourceBytesToCopy > destinationSizeInBytes)
    {
        ThrowHelper.ThrowArgumentOutOfRangeException(ExceptionArgument.sourceBytesToCopy);
    }
    Memmove((byte*)destination, (byte*)source, checked((nuint)sourceBytesToCopy));
}
// The attributes on this method are chosen for best JIT performance.
// Please do not edit unless intentional.
//
// Copies sourceBytesToCopy bytes from source to destination, throwing if
// the copy would exceed destinationSizeInBytes. On 64-bit the length passes
// through unchanged; on 32-bit the checked cast guards against overflow.
[MethodImplAttribute(MethodImplOptions.AggressiveInlining)]
[CLSCompliant(false)]
public static unsafe void MemoryCopy(void* source, void* destination, ulong destinationSizeInBytes, ulong sourceBytesToCopy)
{
    if (sourceBytesToCopy > destinationSizeInBytes)
    {
        ThrowHelper.ThrowArgumentOutOfRangeException(ExceptionArgument.sourceBytesToCopy);
    }
#if BIT64
    Memmove((byte*)destination, (byte*)source, sourceBytesToCopy);
#else
    Memmove((byte*)destination, (byte*)source, checked((uint)sourceBytesToCopy));
#endif
}
#if HAS_CUSTOM_BLOCKS
// Opaque 16-byte value type: assigning one instance copies 16 bytes as a
// single unit (used by the Block16 stores in Memmove).
[StructLayout(LayoutKind.Sequential, Size = 16)]
private struct Block16 { }

// Opaque 64-byte value type: assigning one instance copies 64 bytes as a
// single unit (used by the 64-bytes-at-a-time loop in Memmove).
[StructLayout(LayoutKind.Sequential, Size = 64)]
private struct Block64 { }
#endif // HAS_CUSTOM_BLOCKS