/*
 * xxHash - Fast Hash algorithm
 * Copyright (c) 2012-2020, Yann Collet, Facebook, Inc.
 *
 * You can contact the author at :
 * - xxHash homepage: http://www.xxhash.com
 * - xxHash source repository : https://github.com/Cyan4973/xxHash
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */
/* *************************************
*  Tuning parameters
***************************************/
/*!XXH_FORCE_MEMORY_ACCESS :
 * By default, access to unaligned memory is performed through `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The switch below allows selecting a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. It depends on a compiler extension (hence, not portable).
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method doesn't depend on a compiler extension, but it violates the C standard.
 *            It can generate buggy code on targets which do not support unaligned memory accesses.
 *            But in some circumstances, it's the only known way to get the best performance (ie GCC + ARMv6).
 * See http://stackoverflow.com/a/32095106/646947 for details.
 * Prefer these methods in priority order (0 > 1 > 2).
 */
#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
#  if (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) || \
  defined(__ICCARM__)
#    define XXH_FORCE_MEMORY_ACCESS 1
#  endif
#endif
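
/* Example (hypothetical invocation): the access method is usually selected at
 * build time rather than by editing this file, e.g.
 *     cc -O3 -DXXH_FORCE_MEMORY_ACCESS=2 -c xxhash.c
 */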
/*!XXH_ACCEPT_NULL_INPUT_POINTER :
 * If the input pointer is a null pointer, xxHash's default behavior is to trigger a memory access error, since it is a bad pointer.
 * When this option is enabled, xxHash's output for a null input pointer will be the same as for a zero-length input.
 * By default, this option is disabled. To enable it, uncomment the define below :
 */
/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
/*!XXH_FORCE_NATIVE_FORMAT :
 * By default, the xxHash library provides endian-independent hash values, based on little-endian convention.
 * Results are therefore identical for little-endian and big-endian CPUs.
 * This comes at a performance cost for big-endian CPUs, since some byte swapping is required to emulate the little-endian format.
 * Should endian-independence be of no importance to your application, you may set the #define below to 1,
 * to improve speed on big-endian CPUs.
 * This option has no impact on little-endian CPUs.
 */
#ifndef XXH_FORCE_NATIVE_FORMAT   /* can be defined externally */
#  define XXH_FORCE_NATIVE_FORMAT 0
#endif
/*!XXH_FORCE_ALIGN_CHECK :
 * This is a minor performance trick, only useful with lots of very small keys.
 * It means : check for aligned/unaligned input.
 * The check costs one initial branch per hash; set it to 0 when the input data
 * is guaranteed to be aligned.
 */
#ifndef XXH_FORCE_ALIGN_CHECK   /* can be defined externally */
#  if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
#    define XXH_FORCE_ALIGN_CHECK 0
#  else
#    define XXH_FORCE_ALIGN_CHECK 1
#  endif
#endif
/* *************************************
*  Includes & Memory related functions
***************************************/
/* Modify the local functions below should you wish to use some other memory routines */
/* for malloc(), free() */
#include <stdlib.h>
#include <stddef.h>   /* size_t */
static void* XXH_malloc(size_t s) { return malloc(s); }
static void  XXH_free  (void* p)  { free(p); }
/* for memcpy() */
#include <string.h>
static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
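
/* Sketch: to route allocations through a custom allocator, the two wrappers
 * above could be redefined; `my_alloc`/`my_free` are hypothetical names:
 *     static void* XXH_malloc(size_t s) { return my_alloc(s); }
 *     static void  XXH_free  (void* p)  { my_free(p); }
 */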
#ifndef XXH_STATIC_LINKING_ONLY
#  define XXH_STATIC_LINKING_ONLY
#endif
#include "xxhash.h"
/* *************************************
*  Compiler Specific Options
***************************************/
#if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) /* C99 */
#  define INLINE_KEYWORD inline
#else
#  define INLINE_KEYWORD
#endif

#if defined(__GNUC__) || defined(__ICCARM__)
#  define FORCE_INLINE_ATTR __attribute__((always_inline))
#elif defined(_MSC_VER)
#  define FORCE_INLINE_ATTR __forceinline
#else
#  define FORCE_INLINE_ATTR
#endif

#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
#ifdef _MSC_VER
#  pragma warning(disable : 4127)   /* disable: C4127: conditional expression is constant */
#endif
/* *************************************
*  Basic Types
***************************************/
#ifndef MEM_MODULE
# define MEM_MODULE
# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#  include <stdint.h>
    typedef uint8_t  BYTE;
    typedef uint16_t U16;
    typedef uint32_t U32;
    typedef  int32_t S32;
    typedef uint64_t U64;
# else
    typedef unsigned char      BYTE;
    typedef unsigned short     U16;
    typedef unsigned int       U32;
    typedef   signed int       S32;
    typedef unsigned long long U64;   /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. */
# endif
#endif
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }
static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }

#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

/* `__packed` instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign;

static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
static U64 XXH_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
#else

/* portable and safe solution. Generally efficient.
 * see : http://stackoverflow.com/a/32095106/646947
 */

static U32 XXH_read32(const void* memPtr)
{
    U32 val;
    memcpy(&val, memPtr, sizeof(val));
    return val;
}

static U64 XXH_read64(const void* memPtr)
{
    U64 val;
    memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif   /* XXH_FORCE_MEMORY_ACCESS */
/* ****************************************
*  Compiler-specific Functions and Macros
******************************************/
#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */
#if defined(_MSC_VER)
#  define XXH_rotl32(x,r) _rotl(x,r)
#  define XXH_rotl64(x,r) _rotl64(x,r)
#else
#if defined(__ICCARM__)
#  include <intrinsics.h>
#  define XXH_rotl32(x,r) __ROR(x,(32 - r))
#else
#  define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
#endif
#  define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
#endif
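
/* Worked example: XXH_rotl32(0x12345678, 8) == 0x34567812; the 8 bits shifted
 * out at the top re-enter at the bottom, so no information is lost, unlike a
 * plain shift. */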
#if defined(_MSC_VER)     /* Visual Studio */
#  define XXH_swap32 _byteswap_ulong
#  define XXH_swap64 _byteswap_uint64
#elif GCC_VERSION >= 403
#  define XXH_swap32 __builtin_bswap32
#  define XXH_swap64 __builtin_bswap64
#else
static U32 XXH_swap32 (U32 x)
{
    return  ((x << 24) & 0xff000000 ) |
            ((x <<  8) & 0x00ff0000 ) |
            ((x >>  8) & 0x0000ff00 ) |
            ((x >> 24) & 0x000000ff );
}
static U64 XXH_swap64 (U64 x)
{
    return  ((x << 56) & 0xff00000000000000ULL) |
            ((x << 40) & 0x00ff000000000000ULL) |
            ((x << 24) & 0x0000ff0000000000ULL) |
            ((x <<  8) & 0x000000ff00000000ULL) |
            ((x >>  8) & 0x00000000ff000000ULL) |
            ((x >> 24) & 0x0000000000ff0000ULL) |
            ((x >> 40) & 0x000000000000ff00ULL) |
            ((x >> 56) & 0x00000000000000ffULL);
}
#endif
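
/* Worked example: XXH_swap32(0x01020304) == 0x04030201, a full reversal of
 * byte order; each mask isolates one byte lane before the lanes are OR'ed
 * back together. */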
/* *************************************
*  Architecture Macros
***************************************/
typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;

/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
#ifndef XXH_CPU_LITTLE_ENDIAN
    static const int g_one = 1;
#   define XXH_CPU_LITTLE_ENDIAN   (*(const char*)(&g_one))
#endif
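
/* Detection sketch: on a little-endian CPU the int g_one is stored as bytes
 * 01 00 00 00, so reading its first byte through a char pointer yields 1; on
 * a big-endian CPU it yields 0. An equivalent standalone check (illustrative
 * only):
 *     static int XXH_isLittleEndian(void)
 *     { const int one = 1; return *(const char*)&one; }
 */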
/* ***************************
*  Memory reads
*****************************/
typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
FORCE_INLINE_TEMPLATE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
    else
        return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
}

FORCE_INLINE_TEMPLATE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
{
    return XXH_readLE32_align(ptr, endian, XXH_unaligned);
}

static U32 XXH_readBE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
}

FORCE_INLINE_TEMPLATE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
    else
        return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
}

FORCE_INLINE_TEMPLATE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
{
    return XXH_readLE64_align(ptr, endian, XXH_unaligned);
}

static U64 XXH_readBE64(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
}
/* *************************************
*  Macros
***************************************/
#define XXH_STATIC_ASSERT(c)   { enum { XXH_static_assert = 1/(int)(!!(c)) }; }    /* use only *after* variable declarations */
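
/* Usage sketch: XXH_STATIC_ASSERT(sizeof(U32)==4); a false condition forces
 * a division by zero inside a constant expression, so compilation fails. */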
/* *************************************
*  Constants
***************************************/
static const U32 PRIME32_1 = 2654435761U;
static const U32 PRIME32_2 = 2246822519U;
static const U32 PRIME32_3 = 3266489917U;
static const U32 PRIME32_4 =  668265263U;
static const U32 PRIME32_5 =  374761393U;

static const U64 PRIME64_1 = 11400714785074694791ULL;
static const U64 PRIME64_2 = 14029467366897019727ULL;
static const U64 PRIME64_3 =  1609587929392839161ULL;
static const U64 PRIME64_4 =  9650029242287828579ULL;
static const U64 PRIME64_5 =  2870177450012600261ULL;
XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
/* **************************
*  Utils
****************************/
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dstState, const XXH32_state_t* restrict srcState)
{
    memcpy(dstState, srcState, sizeof(*dstState));
}

XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dstState, const XXH64_state_t* restrict srcState)
{
    memcpy(dstState, srcState, sizeof(*dstState));
}
/* ***************************
*  Simple Hash Functions
*****************************/

static U32 XXH32_round(U32 seed, U32 input)
{
    seed += input * PRIME32_2;
    seed  = XXH_rotl32(seed, 13);
    seed *= PRIME32_1;
    return seed;
}
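
/* Each round absorbs 32 input bits into one accumulator lane: multiply-add by
 * one prime, rotate by 13 to spread the change across bit positions, then
 * multiply by another prime. Four such lanes run independently in the main
 * loop below, which lets compilers keep all of them in registers. */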
FORCE_INLINE_TEMPLATE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U32 h32;
#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (p==NULL) { len=0; bEnd=p=(const BYTE*)(size_t)16; }
#endif

    if (len>=16) {
        const BYTE* const limit = bEnd - 16;
        U32 v1 = seed + PRIME32_1 + PRIME32_2;
        U32 v2 = seed + PRIME32_2;
        U32 v3 = seed + 0;
        U32 v4 = seed - PRIME32_1;
        do {
            v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;
            v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
            v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
            v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
        } while (p<=limit);
        h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
    } else {
        h32 = seed + PRIME32_5;
    }

    h32 += (U32) len;

    while (p+4<=bEnd) {
        h32 += XXH_get32bits(p) * PRIME32_3;
        h32  = XXH_rotl32(h32, 17) * PRIME32_4;
        p+=4;
    }
    while (p<bEnd) {
        h32 += (*p) * PRIME32_5;
        h32  = XXH_rotl32(h32, 11) * PRIME32_1;
        p++;
    }

    /* final avalanche */
    h32 ^= h32 >> 15;  h32 *= PRIME32_2;
    h32 ^= h32 >> 13;  h32 *= PRIME32_3;
    h32 ^= h32 >> 16;
    return h32;
}
XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH32_CREATESTATE_STATIC(state);
    XXH32_reset(state, seed);
    XXH32_update(state, input, len);
    return XXH32_digest(state);
#else
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */
            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
                return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
            else
                return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
    }   }

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
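
/* One-shot usage sketch (hypothetical caller code):
 *     #include "xxhash.h"
 *     const char msg[] = "sample key";
 *     unsigned int h = XXH32(msg, sizeof(msg)-1, 0);   (last argument = seed)
 */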
static U64 XXH64_round(U64 acc, U64 input)
{
    acc += input * PRIME64_2;
    acc  = XXH_rotl64(acc, 31);
    acc *= PRIME64_1;
    return acc;
}

static U64 XXH64_mergeRound(U64 acc, U64 val)
{
    val  = XXH64_round(0, val);
    acc ^= val;
    acc  = acc * PRIME64_1 + PRIME64_4;
    return acc;
}
FORCE_INLINE_TEMPLATE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U64 h64;
#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (p==NULL) { len=0; bEnd=p=(const BYTE*)(size_t)32; }
#endif

    if (len>=32) {
        const BYTE* const limit = bEnd - 32;
        U64 v1 = seed + PRIME64_1 + PRIME64_2;
        U64 v2 = seed + PRIME64_2;
        U64 v3 = seed + 0;
        U64 v4 = seed - PRIME64_1;
        do {
            v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;
            v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;
            v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;
            v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;
        } while (p<=limit);

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);
    } else {
        h64 = seed + PRIME64_5;
    }

    h64 += (U64) len;

    while (p+8<=bEnd) {
        U64 const k1 = XXH64_round(0, XXH_get64bits(p));
        h64 ^= k1;
        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
        p+=8;
    }
    if (p+4<=bEnd) {
        h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
        h64  = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p+=4;
    }
    while (p<bEnd) {
        h64 ^= (*p) * PRIME64_5;
        h64  = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    /* final avalanche */
    h64 ^= h64 >> 33;  h64 *= PRIME64_2;
    h64 ^= h64 >> 29;  h64 *= PRIME64_3;
    h64 ^= h64 >> 32;
    return h64;
}
XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH64_CREATESTATE_STATIC(state);
    XXH64_reset(state, seed);
    XXH64_update(state, input, len);
    return XXH64_digest(state);
#else
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 7)==0) {   /* Input is 8-bytes aligned, leverage the speed advantage */
            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
                return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
            else
                return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
    }   }

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
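
/* XXH64 usage mirrors XXH32; it is generally the faster choice on 64-bit
 * targets (sketch, hypothetical caller code):
 *     unsigned long long h = XXH64(buffer, bufferSize, 0);
 */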
/* **************************************************
*  Advanced Hash Functions
****************************************************/

XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
{
    return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
}
XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}

XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
{
    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
}
XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}
XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
{
    XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
    memset(&state, 0, sizeof(state)-4);   /* do not write into reserved, for future removal */
    state.v1 = seed + PRIME32_1 + PRIME32_2;
    state.v2 = seed + PRIME32_2;
    state.v3 = seed + 0;
    state.v4 = seed - PRIME32_1;
    memcpy(statePtr, &state, sizeof(state));
    return XXH_OK;
}
XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
{
    XXH64_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
    memset(&state, 0, sizeof(state)-8);   /* do not write into reserved, for future removal */
    state.v1 = seed + PRIME64_1 + PRIME64_2;
    state.v2 = seed + PRIME64_2;
    state.v3 = seed + 0;
    state.v4 = seed - PRIME64_1;
    memcpy(statePtr, &state, sizeof(state));
    return XXH_OK;
}
FORCE_INLINE_TEMPLATE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (input==NULL) return XXH_ERROR;
#endif

    state->total_len_32 += (unsigned)len;
    state->large_len |= (len>=16) | (state->total_len_32>=16);

    if (state->memsize + len < 16) {   /* fill in tmp buffer */
        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
        state->memsize += (unsigned)len;
        return XXH_OK;
    }

    if (state->memsize) {   /* some data left from previous update */
        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
        {   const U32* p32 = state->mem32;
            state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;
            state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;
            state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;
            state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++;
        }
        p += 16-state->memsize;
        state->memsize = 0;
    }

    if (p <= bEnd-16) {
        const BYTE* const limit = bEnd - 16;
        U32 v1 = state->v1;  U32 v2 = state->v2;
        U32 v3 = state->v3;  U32 v4 = state->v4;

        do {
            v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;
            v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;
            v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;
            v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;
        } while (p<=limit);

        state->v1 = v1;  state->v2 = v2;  state->v3 = v3;  state->v4 = v4;
    }

    if (p < bEnd) {
        XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
        state->memsize = (unsigned)(bEnd-p);
    }

    return XXH_OK;
}
XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
    else
        return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
}
FORCE_INLINE_TEMPLATE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
{
    const BYTE * p = (const BYTE*)state->mem32;
    const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize;
    U32 h32;

    if (state->large_len) {
        h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
    } else {
        h32 = state->v3 /* == seed */ + PRIME32_5;
    }

    h32 += state->total_len_32;

    while (p+4<=bEnd) {
        h32 += XXH_readLE32(p, endian) * PRIME32_3;
        h32  = XXH_rotl32(h32, 17) * PRIME32_4;
        p+=4;
    }
    while (p<bEnd) {
        h32 += (*p) * PRIME32_5;
        h32  = XXH_rotl32(h32, 11) * PRIME32_1;
        p++;
    }

    /* final avalanche */
    h32 ^= h32 >> 15;  h32 *= PRIME32_2;
    h32 ^= h32 >> 13;  h32 *= PRIME32_3;
    h32 ^= h32 >> 16;
    return h32;
}
XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_digest_endian(state_in, XXH_littleEndian);
    else
        return XXH32_digest_endian(state_in, XXH_bigEndian);
}
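
/* Streaming usage sketch (hypothetical caller, error checks elided); the
 * digest equals the one-shot XXH32() of the concatenated input:
 *     XXH32_state_t* const s = XXH32_createState();
 *     XXH32_reset(s, 0);                      (0 = seed)
 *     XXH32_update(s, chunk1, chunk1Size);    (feed data in any-sized pieces)
 *     XXH32_update(s, chunk2, chunk2Size);
 *     unsigned int h = XXH32_digest(s);
 *     XXH32_freeState(s);
 */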
/* **** XXH64 **** */
FORCE_INLINE_TEMPLATE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (input==NULL) return XXH_ERROR;
#endif

    state->total_len += len;

    if (state->memsize + len < 32) {   /* fill in tmp buffer */
        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
        state->memsize += (U32)len;
        return XXH_OK;
    }

    if (state->memsize) {   /* tmp buffer is full */
        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
        state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
        state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
        state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
        state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
        p += 32-state->memsize;
        state->memsize = 0;
    }

    if (p+32 <= bEnd) {
        const BYTE* const limit = bEnd - 32;
        U64 v1 = state->v1;  U64 v2 = state->v2;
        U64 v3 = state->v3;  U64 v4 = state->v4;

        do {
            v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
            v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
            v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
            v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
        } while (p<=limit);

        state->v1 = v1;  state->v2 = v2;  state->v3 = v3;  state->v4 = v4;
    }

    if (p < bEnd) {
        XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
        state->memsize = (unsigned)(bEnd-p);
    }

    return XXH_OK;
}
XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
    else
        return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
}
FORCE_INLINE_TEMPLATE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
{
    const BYTE * p = (const BYTE*)state->mem64;
    const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize;
    U64 h64;

    if (state->total_len >= 32) {
        U64 const v1 = state->v1;
        U64 const v2 = state->v2;
        U64 const v3 = state->v3;
        U64 const v4 = state->v4;

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);
    } else {
        h64 = state->v3 /* == seed */ + PRIME64_5;
    }

    h64 += (U64) state->total_len;

    while (p+8<=bEnd) {
        U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian));
        h64 ^= k1;
        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
        p+=8;
    }
    if (p+4<=bEnd) {
        h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
        h64  = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p+=4;
    }
    while (p<bEnd) {
        h64 ^= (*p) * PRIME64_5;
        h64  = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    /* final avalanche */
    h64 ^= h64 >> 33;  h64 *= PRIME64_2;
    h64 ^= h64 >> 29;  h64 *= PRIME64_3;
    h64 ^= h64 >> 32;
    return h64;
}
XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_digest_endian(state_in, XXH_littleEndian);
    else
        return XXH64_digest_endian(state_in, XXH_bigEndian);
}
/* **************************
*  Canonical representation
****************************/

/*! Default XXH result types are basic unsigned 32 and 64 bit values.
 * The canonical representation follows the human-readable write convention, aka big-endian (large digits first).
 * These functions allow transformation of a hash result into and from its canonical format.
 * This way, hash values can be written into a file or a buffer, and remain comparable across different systems and programs.
 */
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
    memcpy(dst, &hash, sizeof(*dst));
}

XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
    memcpy(dst, &hash, sizeof(*dst));
}

XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
{
    return XXH_readBE32(src);
}

XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
{
    return XXH_readBE64(src);
}
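
/* Round-trip sketch (hypothetical caller code): the canonical form is a
 * fixed big-endian byte array, safe to fwrite() to a file and read back on
 * any platform:
 *     XXH64_canonical_t c;
 *     XXH64_canonicalFromHash(&c, h64);
 *     XXH64_hash_t h2 = XXH64_hashFromCanonical(&c);   (h2 == h64)
 */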