/* xxHash bundled into zstd: every public xxHash symbol is renamed with a
 * ZSTD_ prefix (via XXH_NAMESPACE below) so this private copy cannot collide
 * with a separately linked libxxhash. */
21# define XXH_NAMESPACE ZSTD_
68#if defined (__cplusplus)
/* XXH_INLINE_ALL / XXH_PRIVATE_API: inline the whole implementation into the
 * including translation unit as static functions.  The numbered guard macro
 * makes repeated inclusion of this header idempotent. */
91#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \
92    && !defined(XXH_INLINE_ALL_31684351384)
94#   define XXH_INLINE_ALL_31684351384
96#  undef XXH_STATIC_LINKING_ONLY
97#  define XXH_STATIC_LINKING_ONLY
/* Pick the strongest "static inline" spelling the compiler offers; the plain
 * `static` fallback may trigger unused-function warnings on old compilers. */
100#  if defined(__GNUC__)
101#    define XXH_PUBLIC_API static __inline __attribute__((unused))
102#  elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) )
103#    define XXH_PUBLIC_API static inline
104#  elif defined(_MSC_VER)
105#    define XXH_PUBLIC_API static __inline
108#    define XXH_PUBLIC_API static
/* XXH_INLINE_ALL support: forget any namespacing applied by a previous
 * inclusion, so the identifiers can be re-namespaced below with the
 * internal XXH_INLINE_ prefix without "macro redefined" warnings. */
124#  undef XXH_versionNumber
127#  undef XXH32_createState
128#  undef XXH32_freeState
132#  undef XXH32_copyState
133#  undef XXH32_canonicalFromHash
134#  undef XXH32_hashFromCanonical
137#  undef XXH64_createState
138#  undef XXH64_freeState
142#  undef XXH64_copyState
143#  undef XXH64_canonicalFromHash
144#  undef XXH64_hashFromCanonical
147#  undef XXH3_64bits_withSecret
148#  undef XXH3_64bits_withSeed
149#  undef XXH3_64bits_withSecretandSeed
150#  undef XXH3_createState
151#  undef XXH3_freeState
152#  undef XXH3_copyState
153#  undef XXH3_64bits_reset
154#  undef XXH3_64bits_reset_withSeed
155#  undef XXH3_64bits_reset_withSecret
156#  undef XXH3_64bits_update
157#  undef XXH3_64bits_digest
158#  undef XXH3_generateSecret
/* XXH3 128-bit variant */
162#  undef XXH3_128bits_withSeed
163#  undef XXH3_128bits_withSecret
164#  undef XXH3_128bits_reset
165#  undef XXH3_128bits_reset_withSeed
166#  undef XXH3_128bits_reset_withSecret
167#  undef XXH3_128bits_reset_withSecretandSeed
168#  undef XXH3_128bits_update
169#  undef XXH3_128bits_digest
170#  undef XXH128_isEqual
172#  undef XXH128_canonicalFromHash
173#  undef XXH128_hashFromCanonical
/* In inline-all mode, re-namespace everything with an internal XXH_INLINE_
 * prefix so inlined copies can coexist with a normally-linked libxxhash. */
178#  define XXH_NAMESPACE XXH_INLINE_
/* Types and enum constants are not covered by the function renaming above;
 * XXH_IPREF pastes the namespace prefix onto each of them explicitly. */
186#  define XXH_IPREF(Id)   XXH_NAMESPACE ## Id
187#  define XXH_OK XXH_IPREF(XXH_OK)
188#  define XXH_ERROR XXH_IPREF(XXH_ERROR)
189#  define XXH_errorcode XXH_IPREF(XXH_errorcode)
190#  define XXH32_canonical_t XXH_IPREF(XXH32_canonical_t)
191#  define XXH64_canonical_t XXH_IPREF(XXH64_canonical_t)
192#  define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t)
193#  define XXH32_state_s XXH_IPREF(XXH32_state_s)
194#  define XXH32_state_t XXH_IPREF(XXH32_state_t)
195#  define XXH64_state_s XXH_IPREF(XXH64_state_s)
196#  define XXH64_state_t XXH_IPREF(XXH64_state_t)
197#  define XXH3_state_s XXH_IPREF(XXH3_state_s)
198#  define XXH3_state_t XXH_IPREF(XXH3_state_t)
199#  define XXH128_hash_t XXH_IPREF(XXH128_hash_t)
/* Drop the include guards so the header body below is processed again
 * under the new namespace. */
201#  undef XXHASH_H_5627135585666179
202#  undef XXHASH_H_STATIC_13879238742
/* Start of the stable public API section (guarded). */
210#ifndef XXHASH_H_5627135585666179
211#define XXHASH_H_5627135585666179 1
/* When not inlining, XXH_PUBLIC_API controls shared-library import/export
 * decoration (Windows DLLs need explicit __declspec markers). */
220#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
221#  if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
223#    define XXH_PUBLIC_API __declspec(dllexport)
225#    define XXH_PUBLIC_API __declspec(dllimport)
228#  define XXH_PUBLIC_API
/* Symbol namespacing: every public entry point is renamed to
 * <XXH_NAMESPACE><name> via token pasting (ZSTD_ prefix in this bundle). */
246#  define XXH_NAMESPACE
251#  define XXH_CAT(A,B) A##B
252#  define XXH_NAME2(A,B) XXH_CAT(A,B)
253#  define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
255#  define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
256#  define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
257#  define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
258#  define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
259#  define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
260#  define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
261#  define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
262#  define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
263#  define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
265#  define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
266#  define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
267#  define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
268#  define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
269#  define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
270#  define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
271#  define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
272#  define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
273#  define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
275#  define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
276#  define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
277#  define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
278#  define XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed)
279#  define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
280#  define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
281#  define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
282#  define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
283#  define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
284#  define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
285#  define XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed)
286#  define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
287#  define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
288#  define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
289#  define XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed)
291#  define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
292#  define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
293#  define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
294#  define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
295#  define XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed)
296#  define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
297#  define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
298#  define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
299#  define XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed)
300#  define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
301#  define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
302#  define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
303#  define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
304#  define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
305#  define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
/* Library version, encoded as major*10000 + minor*100 + release (0.8.1). */
312#define XXH_VERSION_MAJOR    0
313#define XXH_VERSION_MINOR    8
314#define XXH_VERSION_RELEASE  1
315#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
338#if defined(XXH_DOXYGEN)
346#elif !defined (__VMS) \
347 && (defined (__cplusplus) \
348 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) ) )
354# if UINT_MAX == 0xFFFFFFFFUL
357# if ULONG_MAX == 0xFFFFFFFFUL
360# error "unsupported platform: need a 32-bit type"
565 unsigned char digest[4];
/* Feature probes: each XXH_HAS_*_ATTRIBUTE(x) expands to the compiler's
 * __has_*attribute(x) when available, and to 0 otherwise, so they can be
 * used unconditionally in #if expressions below. */
592#ifdef __has_attribute
593# define XXH_HAS_ATTRIBUTE(x) __has_attribute(x)
595# define XXH_HAS_ATTRIBUTE(x) 0
/* C23 attribute probe (only meaningful past C17, hence the > 201710L gate). */
599#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && defined(__has_c_attribute)
600# define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
602# define XXH_HAS_C_ATTRIBUTE(x) 0
/* C++ attribute probe. */
605#if defined(__cplusplus) && defined(__has_cpp_attribute)
606# define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
608# define XXH_HAS_CPP_ATTRIBUTE(x) 0
/* XXH_FALLTHROUGH: annotate intentional switch fall-through.
 * BUG FIX: the probes previously tested the nonexistent attribute `x`
 * (a leftover from the macro's parameter name), so __has_c_attribute /
 * __has_cpp_attribute always returned 0 and the [[fallthrough]] branches
 * were unreachable.  Probe the real `fallthrough` attribute instead,
 * matching the upstream xxHash correction. */
617#if XXH_HAS_C_ATTRIBUTE(fallthrough)
618# define XXH_FALLTHROUGH [[fallthrough]]
619#elif XXH_HAS_CPP_ATTRIBUTE(fallthrough)
620# define XXH_FALLTHROUGH [[fallthrough]]
621#elif XXH_HAS_ATTRIBUTE(__fallthrough__)
622# define XXH_FALLTHROUGH __attribute__ ((fallthrough))
624# define XXH_FALLTHROUGH
633#ifndef XXH_NO_LONG_LONG
637#if defined(XXH_DOXYGEN)
644#elif !defined (__VMS) \
645 && (defined (__cplusplus) \
646 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) ) )
651# if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
791#define XXH3_SECRET_SIZE_MIN 136
826typedef struct XXH3_state_s XXH3_state_t;
923typedef struct {
unsigned char digest[
sizeof(XXH128_hash_t)]; } XXH128_canonical_t;
938#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
939#define XXHASH_H_STATIC_13879238742
966struct XXH32_state_s {
976#ifndef XXH_NO_LONG_LONG
990struct XXH64_state_s {
/* XXH_ALIGN(n): request n-byte alignment using whichever mechanism the
 * dialect supports (C11 alignas, C++11 alignas, GCC attribute, MSVC
 * __declspec); expands to nothing when no mechanism exists. */
1002#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
1003#  include <stdalign.h>
1004#  define XXH_ALIGN(n)      alignas(n)
1005#elif defined(__cplusplus) && (__cplusplus >= 201103L)
1007#  define XXH_ALIGN(n)      alignas(n)
1008#elif defined(__GNUC__)
1009#  define XXH_ALIGN(n)      __attribute__ ((aligned(n)))
1010#elif defined(_MSC_VER)
1011#  define XXH_ALIGN(n)      __declspec(align(n))
1013#  define XXH_ALIGN(n)   /* disabled */
/* Old GCC places the aligned attribute AFTER the member type, while
 * alignas must come BEFORE it; XXH_ALIGN_MEMBER hides that difference. */
1017#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) \
1018    && ! (defined(__cplusplus) && (__cplusplus >= 201103L)) \
1019    && defined(__GNUC__)
1020#   define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
1022#   define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
/* Streaming XXH3 buffers: 256-byte internal accumulation buffer and a
 * 192-byte default secret (both sized in the state struct below). */
1032#define XXH3_INTERNALBUFFER_SIZE 256
1041#define XXH3_SECRET_DEFAULT_SIZE 192
1065struct XXH3_state_s {
1068 XXH_ALIGN_MEMBER(64,
unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
1070 XXH_ALIGN_MEMBER(64,
unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
1076 size_t nbStripesSoFar;
1080 size_t nbStripesPerBlock;
1088 const unsigned char* extSecret;
1094#undef XXH_ALIGN_MEMBER
/* Initializes a stack-allocated XXH3_state_t so that a subsequent
 * XXH3_*_reset*() call is safe.
 * Fixes vs. the previous one-liner:
 *  - wrapped in do/while(0) so the macro is statement-safe
 *    (`if (c) XXH3_INITSTATE(p); else ...` previously failed to compile);
 *  - also clears extSecret (a member of XXH3_state_s), so a fresh state
 *    never carries a stale external-secret pointer — matches the upstream
 *    xxHash fix;
 *  - the pointer argument is evaluated exactly once. */
#define XXH3_INITSTATE(XXH3_state_ptr)                       \
    do {                                                     \
        XXH3_state_t* tmp_xxh3_state_ptr = (XXH3_state_ptr); \
        tmp_xxh3_state_ptr->seed = 0;                        \
        tmp_xxh3_state_ptr->extSecret = NULL;                \
    } while(0)
1194 const void* secret,
size_t secretSize,
1199 const void* secret,
size_t secretSize,
1204 const void* secret,
size_t secretSize,
1209 const void* secret,
size_t secretSize,
1215#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
1216# define XXH_IMPLEMENTATION
1249#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
1250 || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
1251# define XXH_IMPLEM_13a8737387
1269# define XXH_NO_LONG_LONG
1270# undef XXH_NO_LONG_LONG
1321# define XXH_FORCE_MEMORY_ACCESS 0
1349# define XXH_FORCE_ALIGN_CHECK 0
1371# define XXH_NO_INLINE_HINTS 0
1383# define XXH32_ENDJMP 0
1392# define XXH_OLD_NAMES
1393# undef XXH_OLD_NAMES
1399#ifndef XXH_FORCE_MEMORY_ACCESS
1401# if !defined(__clang__) && \
1403 (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
1405 defined(__GNUC__) && ( \
1406 (defined(__ARM_ARCH) && __ARM_ARCH >= 7) || \
1408 defined(__mips__) && \
1409 (__mips <= 5 || __mips_isa_rev < 6) && \
1410 (!defined(__mips16) || defined(__mips_mips16e2)) \
1415# define XXH_FORCE_MEMORY_ACCESS 1
1419#ifndef XXH_FORCE_ALIGN_CHECK
1420# if defined(__i386) || defined(__x86_64__) || defined(__aarch64__) \
1421 || defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64)
1422# define XXH_FORCE_ALIGN_CHECK 0
1424# define XXH_FORCE_ALIGN_CHECK 1
1428#ifndef XXH_NO_INLINE_HINTS
1429# if defined(__OPTIMIZE_SIZE__) \
1430 || defined(__NO_INLINE__)
1431# define XXH_NO_INLINE_HINTS 1
1433# define XXH_NO_INLINE_HINTS 0
1439# define XXH32_ENDJMP 0
/* Route xxHash's heap usage through zstd's allocator shims (ZSTD_malloc /
 * ZSTD_free), so a custom allocator configured for zstd also covers the
 * bundled hash. */
1453#define ZSTD_DEPS_NEED_MALLOC
/* Allocate s bytes; returns NULL semantics of the underlying allocator. */
1455static void* XXH_malloc(
size_t s) {
return ZSTD_malloc(s); }
/* Release a pointer obtained from XXH_malloc. */
1456static void XXH_free (
void* p) { ZSTD_free(p); }
1464# pragma warning(disable : 4127)
1467#if XXH_NO_INLINE_HINTS
1468# if defined(__GNUC__) || defined(__clang__)
1469# define XXH_FORCE_INLINE static __attribute__((unused))
1471# define XXH_FORCE_INLINE static
1473# define XXH_NO_INLINE static
1475#elif defined(__GNUC__) || defined(__clang__)
1476# define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused))
1477# define XXH_NO_INLINE static __attribute__((noinline))
1478#elif defined(_MSC_VER)
1479# define XXH_FORCE_INLINE static __forceinline
1480# define XXH_NO_INLINE static __declspec(noinline)
1481#elif defined (__cplusplus) \
1482 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L))
1483# define XXH_FORCE_INLINE static inline
1484# define XXH_NO_INLINE static
1486# define XXH_FORCE_INLINE static
1487# define XXH_NO_INLINE static
1503#ifndef XXH_DEBUGLEVEL
1505# define XXH_DEBUGLEVEL DEBUGLEVEL
1507# define XXH_DEBUGLEVEL 0
1511#if (XXH_DEBUGLEVEL>=1)
1513# define XXH_ASSERT(c) assert(c)
1515# define XXH_ASSERT(c) ((void)0)
1519#ifndef XXH_STATIC_ASSERT
1520# if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
1522# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
1523# elif defined(__cplusplus) && (__cplusplus >= 201103L)
1524# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
1526# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0)
1528# define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c)
1547#if defined(__GNUC__) || defined(__clang__)
1548# define XXH_COMPILER_GUARD(var) __asm__ __volatile__("" : "+r" (var))
1550# define XXH_COMPILER_GUARD(var) ((void)0)
1556#if !defined (__VMS) \
1557 && (defined (__cplusplus) \
1558 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) ) )
1560 typedef uint8_t xxh_u8;
1562 typedef unsigned char xxh_u8;
1624#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
1629#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
1635static xxh_u32 XXH_read32(
const void* memPtr) {
return *(
const xxh_u32*) memPtr; }
1637#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
1648static xxh_u32 XXH_read32(
const void* ptr)
1651 return ((
const xxh_unalign*)ptr)->u32;
1660static xxh_u32 XXH_read32(
const void* memPtr)
1663 XXH_memcpy(&val, memPtr,
sizeof(val));
/* Endianness detection: prefer a compile-time answer from well-known
 * predefined macros; otherwise fall back to a tiny runtime probe that
 * optimizers constant-fold. */
1688#ifndef XXH_CPU_LITTLE_ENDIAN
1693#  if defined(_WIN32) \
1694      || defined(__LITTLE_ENDIAN__) \
1695      || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
1696#    define XXH_CPU_LITTLE_ENDIAN 1
1697#  elif defined(__BIG_ENDIAN__) \
1698      || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
1699#    define XXH_CPU_LITTLE_ENDIAN 0
/* Runtime fallback: store 1 into a u32 and inspect the first byte. */
1707static int XXH_isLittleEndian(
void)
1713    const union { xxh_u32 u; xxh_u8
c[4]; } one = { 1 };
1716#   define XXH_CPU_LITTLE_ENDIAN   XXH_isLittleEndian()
/* Combined GCC version number (e.g. 403 == 4.3), used to gate builtins. */
1726#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
1729#  define XXH_HAS_BUILTIN(x) __has_builtin(x)
1731#  define XXH_HAS_BUILTIN(x) 0
/* 32/64-bit rotate-left: prefer clang's dedicated builtins, then MSVC
 * intrinsics, then a portable shift/or expression (which compilers
 * recognize and lower to a single rotate instruction). */
1747#if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \
1748                               && XXH_HAS_BUILTIN(__builtin_rotateleft64)
1749#  define XXH_rotl32 __builtin_rotateleft32
1750#  define XXH_rotl64 __builtin_rotateleft64
1752#elif defined(_MSC_VER)
1753#  define XXH_rotl32(x,r) _rotl(x,r)
1754#  define XXH_rotl64(x,r) _rotl64(x,r)
1756#  define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
1757#  define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
/* 32-bit byteswap: MSVC intrinsic, GCC >= 4.3 builtin, else a portable
 * mask-and-shift fallback. */
1768#if defined(_MSC_VER)
1769#  define XXH_swap32 _byteswap_ulong
1770#elif XXH_GCC_VERSION >= 403
1771#  define XXH_swap32 __builtin_bswap32
/* Portable fallback: reverse the four bytes of x. */
1773static xxh_u32 XXH_swap32 (xxh_u32 x)
1775    return  ((x << 24) & 0xff000000 ) |
1776            ((x <<  8) & 0x00ff0000 ) |
1777            ((x >>  8) & 0x0000ff00 ) |
1778            ((x >> 24) & 0x000000ff );
1801#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
1803XXH_FORCE_INLINE xxh_u32
XXH_readLE32(
const void* memPtr)
1805 const xxh_u8* bytePtr = (
const xxh_u8 *)memPtr;
1807 | ((xxh_u32)bytePtr[1] << 8)
1808 | ((xxh_u32)bytePtr[2] << 16)
1809 | ((xxh_u32)bytePtr[3] << 24);
1812XXH_FORCE_INLINE xxh_u32 XXH_readBE32(
const void* memPtr)
1814 const xxh_u8* bytePtr = (
const xxh_u8 *)memPtr;
1816 | ((xxh_u32)bytePtr[2] << 8)
1817 | ((xxh_u32)bytePtr[1] << 16)
1818 | ((xxh_u32)bytePtr[0] << 24);
1827static xxh_u32 XXH_readBE32(
const void* ptr)
1833XXH_FORCE_INLINE xxh_u32
/* XXH32 mixing constants: large primes with well-distributed bit patterns. */
1861#define XXH_PRIME32_1  0x9E3779B1U
1862#define XXH_PRIME32_2  0x85EBCA77U
1863#define XXH_PRIME32_3  0xC2B2AE3DU
1864#define XXH_PRIME32_4  0x27D4EB2FU
1865#define XXH_PRIME32_5  0x165667B1U
/* Legacy aliases kept for code written against the old PRIME32_* names. */
1868#  define PRIME32_1 XXH_PRIME32_1
1869#  define PRIME32_2 XXH_PRIME32_2
1870#  define PRIME32_3 XXH_PRIME32_3
1871#  define PRIME32_4 XXH_PRIME32_4
1872#  define PRIME32_5 XXH_PRIME32_5
1886static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32
input)
1888 acc +=
input * XXH_PRIME32_2;
1890 acc *= XXH_PRIME32_1;
1891#if (defined(__SSE4_1__) || defined(__aarch64__)) && !defined(XXH_ENABLE_AUTOVECTORIZE)
1925 XXH_COMPILER_GUARD(acc);
1940static xxh_u32 XXH32_avalanche(xxh_u32 h32)
1943 h32 *= XXH_PRIME32_2;
1945 h32 *= XXH_PRIME32_3;
1950#define XXH_get32bits(p) XXH_readLE32_align(p, align)
1967XXH32_finalize(xxh_u32 h32,
const xxh_u8* ptr,
size_t len,
XXH_alignment align)
1969#define XXH_PROCESS1 do { \
1970 h32 += (*ptr++) * XXH_PRIME32_5; \
1971 h32 = XXH_rotl32(h32, 11) * XXH_PRIME32_1; \
1974#define XXH_PROCESS4 do { \
1975 h32 += XXH_get32bits(ptr) * XXH_PRIME32_3; \
1977 h32 = XXH_rotl32(h32, 17) * XXH_PRIME32_4; \
1980 if (ptr==
NULL) XXH_ASSERT(
len == 0);
1983 if (!XXH32_ENDJMP) {
1993 return XXH32_avalanche(h32);
1996 case 12: XXH_PROCESS4;
1998 case 8: XXH_PROCESS4;
2000 case 4: XXH_PROCESS4;
2001 return XXH32_avalanche(h32);
2003 case 13: XXH_PROCESS4;
2005 case 9: XXH_PROCESS4;
2007 case 5: XXH_PROCESS4;
2009 return XXH32_avalanche(h32);
2011 case 14: XXH_PROCESS4;
2013 case 10: XXH_PROCESS4;
2015 case 6: XXH_PROCESS4;
2018 return XXH32_avalanche(h32);
2020 case 15: XXH_PROCESS4;
2022 case 11: XXH_PROCESS4;
2024 case 7: XXH_PROCESS4;
2026 case 3: XXH_PROCESS1;
2028 case 2: XXH_PROCESS1;
2030 case 1: XXH_PROCESS1;
2032 case 0:
return XXH32_avalanche(h32);
2040# define PROCESS1 XXH_PROCESS1
2041# define PROCESS4 XXH_PROCESS4
2055XXH_FORCE_INLINE xxh_u32
2063 const xxh_u8*
const bEnd =
input +
len;
2064 const xxh_u8*
const limit = bEnd - 15;
2065 xxh_u32 v1 =
seed + XXH_PRIME32_1 + XXH_PRIME32_2;
2066 xxh_u32 v2 =
seed + XXH_PRIME32_2;
2067 xxh_u32 v3 =
seed + 0;
2068 xxh_u32 v4 =
seed - XXH_PRIME32_1;
2080 h32 =
seed + XXH_PRIME32_5;
2083 h32 += (xxh_u32)
len;
2085 return XXH32_finalize(h32,
input,
len&15, align);
2099 if ((((
size_t)
input) & 3) == 0) {
2127 XXH_memcpy(dstState, srcState,
sizeof(*dstState));
2133 XXH_ASSERT(statePtr !=
NULL);
2134 memset(statePtr, 0,
sizeof(*statePtr));
2135 statePtr->v[0] =
seed + XXH_PRIME32_1 + XXH_PRIME32_2;
2136 statePtr->v[1] =
seed + XXH_PRIME32_2;
2137 statePtr->v[2] =
seed + 0;
2138 statePtr->v[3] =
seed - XXH_PRIME32_1;
2148 XXH_ASSERT(
len == 0);
2152 {
const xxh_u8* p = (
const xxh_u8*)
input;
2153 const xxh_u8*
const bEnd = p +
len;
2156 state->large_len |= (
XXH32_hash_t)((
len>=16) | (state->total_len_32>=16));
2158 if (state->memsize +
len < 16) {
2159 XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize,
input,
len);
2164 if (state->memsize) {
2165 XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize,
input, 16-state->memsize);
2166 {
const xxh_u32* p32 = state->mem32;
2167 state->v[0] = XXH32_round(state->v[0],
XXH_readLE32(p32)); p32++;
2168 state->v[1] = XXH32_round(state->v[1],
XXH_readLE32(p32)); p32++;
2169 state->v[2] = XXH32_round(state->v[2],
XXH_readLE32(p32)); p32++;
2170 state->v[3] = XXH32_round(state->v[3],
XXH_readLE32(p32));
2172 p += 16-state->memsize;
2177 const xxh_u8*
const limit = bEnd - 16;
2180 state->v[0] = XXH32_round(state->v[0],
XXH_readLE32(p)); p+=4;
2181 state->v[1] = XXH32_round(state->v[1],
XXH_readLE32(p)); p+=4;
2182 state->v[2] = XXH32_round(state->v[2],
XXH_readLE32(p)); p+=4;
2183 state->v[3] = XXH32_round(state->v[3],
XXH_readLE32(p)); p+=4;
2189 XXH_memcpy(state->mem32, p, (
size_t)(bEnd-p));
2190 state->memsize = (unsigned)(bEnd-p);
2203 if (state->large_len) {
2209 h32 = state->v[2] + XXH_PRIME32_5;
2212 h32 += state->total_len_32;
2214 return XXH32_finalize(h32, (
const xxh_u8*)state->mem32, state->memsize,
XXH_aligned);
2238 XXH_memcpy(
dst, &hash,
sizeof(*
dst));
2243 return XXH_readBE32(
src);
2247#ifndef XXH_NO_LONG_LONG
2265#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
2270#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
2273static xxh_u64 XXH_read64(
const void* memPtr)
2275 return *(
const xxh_u64*) memPtr;
2278#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
2289static xxh_u64 XXH_read64(
const void* ptr)
2292 return ((
const xxh_unalign64*)ptr)->u64;
2301static xxh_u64 XXH_read64(
const void* memPtr)
2304 XXH_memcpy(&val, memPtr,
sizeof(val));
/* 64-bit byteswap: MSVC intrinsic, GCC >= 4.3 builtin, else a portable
 * mask-and-shift fallback that reverses all eight bytes. */
2310#if defined(_MSC_VER)
2311#  define XXH_swap64 _byteswap_uint64
2312#elif XXH_GCC_VERSION >= 403
2313#  define XXH_swap64 __builtin_bswap64
2315static xxh_u64 XXH_swap64(xxh_u64 x)
2317    return  ((x << 56) & 0xff00000000000000ULL) |
2318            ((x << 40) & 0x00ff000000000000ULL) |
2319            ((x << 24) & 0x0000ff0000000000ULL) |
2320            ((x << 8)  & 0x000000ff00000000ULL) |
2321            ((x >> 8)  & 0x00000000ff000000ULL) |
2322            ((x >> 24) & 0x0000000000ff0000ULL) |
2323            ((x >> 40) & 0x000000000000ff00ULL) |
2324            ((x >> 56) & 0x00000000000000ffULL);
2330#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
2332XXH_FORCE_INLINE xxh_u64
XXH_readLE64(
const void* memPtr)
2334 const xxh_u8* bytePtr = (
const xxh_u8 *)memPtr;
2336 | ((xxh_u64)bytePtr[1] << 8)
2337 | ((xxh_u64)bytePtr[2] << 16)
2338 | ((xxh_u64)bytePtr[3] << 24)
2339 | ((xxh_u64)bytePtr[4] << 32)
2340 | ((xxh_u64)bytePtr[5] << 40)
2341 | ((xxh_u64)bytePtr[6] << 48)
2342 | ((xxh_u64)bytePtr[7] << 56);
2345XXH_FORCE_INLINE xxh_u64 XXH_readBE64(
const void* memPtr)
2347 const xxh_u8* bytePtr = (
const xxh_u8 *)memPtr;
2349 | ((xxh_u64)bytePtr[6] << 8)
2350 | ((xxh_u64)bytePtr[5] << 16)
2351 | ((xxh_u64)bytePtr[4] << 24)
2352 | ((xxh_u64)bytePtr[3] << 32)
2353 | ((xxh_u64)bytePtr[2] << 40)
2354 | ((xxh_u64)bytePtr[1] << 48)
2355 | ((xxh_u64)bytePtr[0] << 56);
2364static xxh_u64 XXH_readBE64(
const void* ptr)
2370XXH_FORCE_INLINE xxh_u64
/* XXH64 mixing constants: 64-bit primes with well-distributed bit patterns. */
2388#define XXH_PRIME64_1  0x9E3779B185EBCA87ULL
2389#define XXH_PRIME64_2  0xC2B2AE3D27D4EB4FULL
2390#define XXH_PRIME64_3  0x165667B19E3779F9ULL
2391#define XXH_PRIME64_4  0x85EBCA77C2B2AE63ULL
2392#define XXH_PRIME64_5  0x27D4EB2F165667C5ULL
/* Legacy aliases kept for code written against the old PRIME64_* names. */
2395#  define PRIME64_1 XXH_PRIME64_1
2396#  define PRIME64_2 XXH_PRIME64_2
2397#  define PRIME64_3 XXH_PRIME64_3
2398#  define PRIME64_4 XXH_PRIME64_4
2399#  define PRIME64_5 XXH_PRIME64_5
2402static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64
input)
2404 acc +=
input * XXH_PRIME64_2;
2406 acc *= XXH_PRIME64_1;
2410static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
2412 val = XXH64_round(0, val);
2414 acc = acc * XXH_PRIME64_1 + XXH_PRIME64_4;
2418static xxh_u64 XXH64_avalanche(xxh_u64 h64)
2421 h64 *= XXH_PRIME64_2;
2423 h64 *= XXH_PRIME64_3;
2429#define XXH_get64bits(p) XXH_readLE64_align(p, align)
2432XXH64_finalize(xxh_u64 h64,
const xxh_u8* ptr,
size_t len,
XXH_alignment align)
2434 if (ptr==
NULL) XXH_ASSERT(
len == 0);
2440 h64 =
XXH_rotl64(h64,27) * XXH_PRIME64_1 + XXH_PRIME64_4;
2446 h64 =
XXH_rotl64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
2450 h64 ^= (*ptr++) * XXH_PRIME64_5;
2454 return XXH64_avalanche(h64);
2458# define PROCESS1_64 XXH_PROCESS1_64
2459# define PROCESS4_64 XXH_PROCESS4_64
2460# define PROCESS8_64 XXH_PROCESS8_64
2462# undef XXH_PROCESS1_64
2463# undef XXH_PROCESS4_64
2464# undef XXH_PROCESS8_64
2467XXH_FORCE_INLINE xxh_u64
2474 const xxh_u8*
const bEnd =
input +
len;
2475 const xxh_u8*
const limit = bEnd - 31;
2476 xxh_u64 v1 =
seed + XXH_PRIME64_1 + XXH_PRIME64_2;
2477 xxh_u64 v2 =
seed + XXH_PRIME64_2;
2478 xxh_u64 v3 =
seed + 0;
2479 xxh_u64 v4 =
seed - XXH_PRIME64_1;
2489 h64 = XXH64_mergeRound(h64, v1);
2490 h64 = XXH64_mergeRound(h64, v2);
2491 h64 = XXH64_mergeRound(h64, v3);
2492 h64 = XXH64_mergeRound(h64, v4);
2495 h64 =
seed + XXH_PRIME64_5;
2498 h64 += (xxh_u64)
len;
2500 return XXH64_finalize(h64,
input,
len, align);
2515 if ((((
size_t)
input) & 7)==0) {
2541 XXH_memcpy(dstState, srcState,
sizeof(*dstState));
2547 XXH_ASSERT(statePtr !=
NULL);
2548 memset(statePtr, 0,
sizeof(*statePtr));
2549 statePtr->v[0] =
seed + XXH_PRIME64_1 + XXH_PRIME64_2;
2550 statePtr->v[1] =
seed + XXH_PRIME64_2;
2551 statePtr->v[2] =
seed + 0;
2552 statePtr->v[3] =
seed - XXH_PRIME64_1;
2561 XXH_ASSERT(
len == 0);
2565 {
const xxh_u8* p = (
const xxh_u8*)
input;
2566 const xxh_u8*
const bEnd = p +
len;
2568 state->total_len +=
len;
2570 if (state->memsize +
len < 32) {
2571 XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize,
input,
len);
2572 state->memsize += (xxh_u32)
len;
2576 if (state->memsize) {
2577 XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize,
input, 32-state->memsize);
2578 state->v[0] = XXH64_round(state->v[0],
XXH_readLE64(state->mem64+0));
2579 state->v[1] = XXH64_round(state->v[1],
XXH_readLE64(state->mem64+1));
2580 state->v[2] = XXH64_round(state->v[2],
XXH_readLE64(state->mem64+2));
2581 state->v[3] = XXH64_round(state->v[3],
XXH_readLE64(state->mem64+3));
2582 p += 32 - state->memsize;
2587 const xxh_u8*
const limit = bEnd - 32;
2590 state->v[0] = XXH64_round(state->v[0],
XXH_readLE64(p)); p+=8;
2591 state->v[1] = XXH64_round(state->v[1],
XXH_readLE64(p)); p+=8;
2592 state->v[2] = XXH64_round(state->v[2],
XXH_readLE64(p)); p+=8;
2593 state->v[3] = XXH64_round(state->v[3],
XXH_readLE64(p)); p+=8;
2599 XXH_memcpy(state->mem64, p, (
size_t)(bEnd-p));
2600 state->memsize = (unsigned)(bEnd-p);
2613 if (state->total_len >= 32) {
2615 h64 = XXH64_mergeRound(h64, state->v[0]);
2616 h64 = XXH64_mergeRound(h64, state->v[1]);
2617 h64 = XXH64_mergeRound(h64, state->v[2]);
2618 h64 = XXH64_mergeRound(h64, state->v[3]);
2620 h64 = state->v[2] + XXH_PRIME64_5;
2623 h64 += (xxh_u64) state->total_len;
2625 return XXH64_finalize(h64, (
const xxh_u8*)state->mem64, (
size_t)state->total_len,
XXH_aligned);
2636 XXH_memcpy(
dst, &hash,
sizeof(*
dst));
2642 return XXH_readBE64(
src);
2660#if ((defined(sun) || defined(__sun)) && __cplusplus)
2661# define XXH_RESTRICT
2662#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
2663# define XXH_RESTRICT restrict
2666# define XXH_RESTRICT
2669#if (defined(__GNUC__) && (__GNUC__ >= 3)) \
2670 || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \
2671 || defined(__clang__)
2672# define XXH_likely(x) __builtin_expect(x, 1)
2673# define XXH_unlikely(x) __builtin_expect(x, 0)
2675# define XXH_likely(x) (x)
2676# define XXH_unlikely(x) (x)
2679#if defined(__GNUC__) || defined(__clang__)
2680# if defined(__ARM_NEON__) || defined(__ARM_NEON) \
2681 || defined(__aarch64__) || defined(_M_ARM) \
2682 || defined(_M_ARM64) || defined(_M_ARM64EC)
2683# define inline __inline__
2684# include <arm_neon.h>
2686# elif defined(__AVX2__)
2687# include <immintrin.h>
2688# elif defined(__SSE2__)
2689# include <emmintrin.h>
2693#if defined(_MSC_VER)
2766#if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM)
2767# warning "XXH3 is highly inefficient without ARM or Thumb-2."
2785# define XXH_VECTOR XXH_SCALAR
2795enum XXH_VECTOR_TYPE {
2817# define XXH_ACC_ALIGN 8
2822# define XXH_SCALAR 0
2825# define XXH_AVX512 3
2832 defined(__ARM_NEON__) || defined(__ARM_NEON) \
2833 || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) \
2835 defined(_WIN32) || defined(__LITTLE_ENDIAN__) \
2836 || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
2838# define XXH_VECTOR XXH_NEON
2839# elif defined(__AVX512F__)
2840# define XXH_VECTOR XXH_AVX512
2841# elif defined(__AVX2__)
2842# define XXH_VECTOR XXH_AVX2
2843# elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
2844# define XXH_VECTOR XXH_SSE2
2845# elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \
2846 || (defined(__s390x__) && defined(__VEC__)) \
2847 && defined(__GNUC__)
2848# define XXH_VECTOR XXH_VSX
2850# define XXH_VECTOR XXH_SCALAR
2858#ifndef XXH_ACC_ALIGN
2859# if defined(XXH_X86DISPATCH)
2860# define XXH_ACC_ALIGN 64
2861# elif XXH_VECTOR == XXH_SCALAR
2862# define XXH_ACC_ALIGN 8
2863# elif XXH_VECTOR == XXH_SSE2
2864# define XXH_ACC_ALIGN 16
2865# elif XXH_VECTOR == XXH_AVX2
2866# define XXH_ACC_ALIGN 32
2867# elif XXH_VECTOR == XXH_NEON
2868# define XXH_ACC_ALIGN 16
2869# elif XXH_VECTOR == XXH_VSX
2870# define XXH_ACC_ALIGN 16
2871# elif XXH_VECTOR == XXH_AVX512
2872# define XXH_ACC_ALIGN 64
2876#if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \
2877 || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512
2878# define XXH_SEC_ALIGN XXH_ACC_ALIGN
2880# define XXH_SEC_ALIGN 8
2904#if XXH_VECTOR == XXH_AVX2 \
2905 && defined(__GNUC__) && !defined(__clang__) \
2906 && defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__)
2907# pragma GCC push_options
2908# pragma GCC optimize("-O2")
2912#if XXH_VECTOR == XXH_NEON
2993# if !defined(XXH_NO_VZIP_HACK) \
2994 && (defined(__GNUC__) || defined(__clang__)) \
2995 && (defined(__arm__) || defined(__thumb__) || defined(_M_ARM))
2996# define XXH_SPLIT_IN_PLACE(in, outLo, outHi) \
3001 __asm__("vzip.32 %e0, %f0" : "+w" (in)); \
3002 (outLo) = vget_low_u32 (vreinterpretq_u32_u64(in)); \
3003 (outHi) = vget_high_u32(vreinterpretq_u32_u64(in)); \
3006# define XXH_SPLIT_IN_PLACE(in, outLo, outHi) \
3008 (outLo) = vmovn_u64 (in); \
3009 (outHi) = vshrn_n_u64 ((in), 32); \
3050# ifndef XXH3_NEON_LANES
3051# if (defined(__aarch64__) || defined(__arm64__) || defined(_M_ARM64) || defined(_M_ARM64EC)) \
3052 && !defined(__OPTIMIZE_SIZE__)
3053# define XXH3_NEON_LANES 6
3055# define XXH3_NEON_LANES XXH_ACC_NB
3068#if XXH_VECTOR == XXH_VSX
3069# if defined(__s390x__)
3070# include <s390intrin.h>
3080# if defined(__GNUC__) && !defined(__APPLE_ALTIVEC__)
3081# define __APPLE_ALTIVEC__
3083# include <altivec.h>
3086typedef __vector
unsigned long long xxh_u64x2;
3087typedef __vector
unsigned char xxh_u8x16;
3088typedef __vector
unsigned xxh_u32x4;
3091# if defined(__BIG_ENDIAN__) \
3092 || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
3093# define XXH_VSX_BE 1
3094# elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__
3095# warning "-maltivec=be is not recommended. Please use native endianness."
3096# define XXH_VSX_BE 1
3098# define XXH_VSX_BE 0
3103# if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__))
3104# define XXH_vec_revb vec_revb
3109XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val)
3111 xxh_u8x16
const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
3112 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 };
3113 return vec_perm(val, val, vByteSwap);
3121XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(
const void *ptr)
3124 XXH_memcpy(&
ret, ptr,
sizeof(xxh_u64x2));
3137# if defined(__s390x__)
3139# define XXH_vec_mulo vec_mulo
3140# define XXH_vec_mule vec_mule
3141# elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw)
3143# define XXH_vec_mulo __builtin_altivec_vmulouw
3144# define XXH_vec_mule __builtin_altivec_vmuleuw
3148XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4
a, xxh_u32x4
b)
3151 __asm__(
"vmulouw %0, %1, %2" :
"=v" (result) :
"v" (
a),
"v" (
b));
3154XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4
a, xxh_u32x4
b)
3157 __asm__(
"vmuleuw %0, %1, %2" :
"=v" (result) :
"v" (
a),
"v" (
b));
/* XXH_PREFETCH(ptr): best-effort read prefetch hint; a no-op when disabled
 * or when no intrinsic is available. */
3166#if defined(XXH_NO_PREFETCH)
3167#  define XXH_PREFETCH(ptr)  (void)(ptr)  /* disabled */
3169#  if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))
3170#    include <mmintrin.h>   /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
3171#    define XXH_PREFETCH(ptr)  _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
3172#  elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
3173#    define XXH_PREFETCH(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
3175#    define XXH_PREFETCH(ptr) (void)(ptr)  /* disabled */
/* Default XXH3 secret size; must satisfy the public minimum. */
3184#define XXH_SECRET_DEFAULT_SIZE 192
3186#if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN)
3187#  error "default keyset is not large enough"
/* The built-in XXH3 secret: 192 bytes of fixed pseudorandom data, 64-byte
 * aligned for the SIMD loops.  Used whenever the caller supplies no custom
 * secret; these exact bytes are part of the XXH3 specification. */
XXH_ALIGN(64) static
const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = {
3192    0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c,
3193    0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f,
3194    0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21,
3195    0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c,
3196    0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3,
3197    0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8,
3198    0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d,
3199    0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64,
3200    0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb,
3201    0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e,
3202    0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce,
3203    0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e,
/* ---- 32x32->64 multiply ------------------------------------------------ */
/* Portable form: masking both operands to 32 bits lets the compiler emit a
 * single widening multiply.  MSVC x86 gets the __emulu intrinsic instead. */
3228XXH_FORCE_INLINE xxh_u64
3229XXH_mult32to64(xxh_u64 x, xxh_u64 y)
3231 return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF);
3233#elif defined(_MSC_VER) && defined(_M_IX86)
3234# define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y))
3243# define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y))
/* ---- 64x64->128 multiply ----------------------------------------------- */
/* Returns the full 128-bit product of lhs*rhs.
 * NOTE(review): the return-type line and the XXH128_hash_t locals/returns of
 * each branch are missing from this extraction. */
3256XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs)
/* Branch 1: native 128-bit integers (GCC/clang with __int128). */
3273#if (defined(__GNUC__) || defined(__clang__)) && !defined(__wasm__) \
3274 && defined(__SIZEOF_INT128__) \
3275 || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
3277 __uint128_t
const product = (__uint128_t)lhs * (__uint128_t)rhs;
3279 r128.low64 = (xxh_u64)(product);
3280 r128.high64 = (xxh_u64)(product >> 64);
/* Branch 2: MSVC x64/IA64 _umul128 intrinsic. */
3290#elif (defined(_M_X64) || defined(_M_IA64)) && !defined(_M_ARM64EC)
3293# pragma intrinsic(_umul128)
3295 xxh_u64 product_high;
3296 xxh_u64
const product_low = _umul128(lhs, rhs, &product_high);
3298 r128.low64 = product_low;
3299 r128.high64 = product_high;
/* Branch 3: MSVC ARM64 __umulh gives the high half; low half is plain *. */
3307#elif defined(_M_ARM64) || defined(_M_ARM64EC)
3310# pragma intrinsic(__umulh)
3313 r128.low64 = lhs * rhs;
3314 r128.high64 = __umulh(lhs, rhs);
/* Portable fallback: schoolbook multiplication on 32-bit halves.
 * `cross` cannot overflow: max is (2^32-1)*2 + carries, which fits 64 bits. */
3362 xxh_u64
const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF);
3363 xxh_u64
const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF);
3364 xxh_u64
const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32);
3365 xxh_u64
const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32);
3368 xxh_u64
const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
3369 xxh_u64
const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi;
3370 xxh_u64
const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);
3374 r128.high64 = upper;
/* Folds a 128-bit product down to 64 bits by XORing its halves. */
3390XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs)
3392 XXH128_hash_t product = XXH_mult64to128(lhs, rhs);
3393 return product.low64 ^ product.high64;
/* Classic xorshift mixing step: XOR a value with itself shifted right. */
3397XXH_FORCE_INLINE xxh_u64 XXH_xorshift64(xxh_u64 v64,
int shift)
3399 XXH_ASSERT(0 <= shift && shift < 64);
3400 return v64 ^ (v64 >> shift);
/* Fragment of XXH3_avalanche: xorshift/multiply finalization rounds.
 * NOTE(review): the function header is missing from this extraction. */
3409 h64 = XXH_xorshift64(h64, 37);
3410 h64 *= 0x165667919E3779F9ULL;
3411 h64 = XXH_xorshift64(h64, 32);
/* Fragment of XXH3_rrmxmx: stronger avalanche used by the 4-8 byte path.
 * NOTE(review): header and earlier rounds are missing from this extraction. */
3424 h64 *= 0x9FB21C651E98DF25ULL;
3425 h64 ^= (h64 >> 35) +
len ;
3426 h64 *= 0x9FB21C651E98DF25ULL;
3427 return XXH_xorshift64(h64, 28);
/* ---- XXH3_len_1to3_64b fragment ---------------------------------------
 * Packs the 1-3 input bytes plus len into one 32-bit word, XORs with a
 * secret-derived bitflip, then avalanches.
 * NOTE(review): the function signature and the c2/c3/bitflip declarations
 * are missing from this extraction. */
3468 XXH_ASSERT(1 <=
len &&
len <= 3);
3469 XXH_ASSERT(secret !=
NULL);
3475 { xxh_u8
const c1 =
input[0];
3478 xxh_u32
const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24)
3479 | ((xxh_u32)c3 << 0) | ((xxh_u32)
len << 8);
3481 xxh_u64
const keyed = (xxh_u64)combined ^ bitflip;
3482 return XXH64_avalanche(keyed);
/* ---- XXH3_len_4to8_64b fragment ---------------------------------------
 * Mixes the seed with its byte-swapped low half, combines first/last 4
 * bytes of input, XORs with a secret bitflip, then applies rrmxmx.
 * NOTE(review): signature and input1/input2/bitflip reads are missing. */
3490 XXH_ASSERT(secret !=
NULL);
3491 XXH_ASSERT(4 <=
len &&
len <= 8);
3492 seed ^= (xxh_u64)XXH_swap32((xxh_u32)
seed) << 32;
3496 xxh_u64
const input64 = input2 + (((xxh_u64)input1) << 32);
3497 xxh_u64
const keyed = input64 ^ bitflip;
3498 return XXH3_rrmxmx(keyed,
len);
/* ---- XXH3_len_9to16_64b fragment --------------------------------------
 * Combines the first and last 8 bytes via a 128-bit fold, then avalanches.
 * NOTE(review): signature and input_lo/input_hi derivation are missing. */
3506 XXH_ASSERT(secret !=
NULL);
3507 XXH_ASSERT(9 <=
len &&
len <= 16);
3512 xxh_u64
const acc =
len
3513 + XXH_swap64(input_lo) + input_hi
3514 + XXH3_mul128_fold64(input_lo, input_hi);
3515 return XXH3_avalanche(acc);
/* ---- XXH3_len_0to16_64b fragment --------------------------------------
 * Dispatches short inputs to the size-specialized routines above.
 * NOTE(review): signature and the len 1-3 / len==0 tail are missing. */
3522 XXH_ASSERT(
len <= 16);
3523 {
if (XXH_likely(
len > 8))
return XXH3_len_9to16_64b(
input,
len, secret,
seed);
3524 if (XXH_likely(
len >= 4))
return XXH3_len_4to8_64b(
input,
len, secret,
seed);
/* ---- XXH3_mix16B -------------------------------------------------------
 * Mixes 16 input bytes with 16 secret bytes into a 64-bit value via a
 * 128-bit fold.  The GNU-C/i386/SSE2 guard below works around compiler
 * autovectorization of this scalar path (XXH_COMPILER_GUARD).
 * NOTE(review): the input_lo/input_hi loads and the fold arguments are
 * missing from this extraction. */
3556XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(
const xxh_u8* XXH_RESTRICT
input,
3557 const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64)
3559#if defined(__GNUC__) && !defined(__clang__) \
3560 && defined(__i386__) && defined(__SSE2__) \
3561 && !defined(XXH_ENABLE_AUTOVECTORIZE)
3577 XXH_COMPILER_GUARD(seed64);
3581 return XXH3_mul128_fold64(
/* ---- XXH3_len_17to128_64b ----------------------------------------------
 * Mid-size path: accumulates XXH3_mix16B over up to 4 symmetric pairs
 * (front+back of the input), then avalanches.
 * NOTE(review): the size-conditional lines and the mirrored mix16B calls on
 * the input tail are missing from this extraction. */
3590XXH3_len_17to128_64b(
const xxh_u8* XXH_RESTRICT
input,
size_t len,
3591 const xxh_u8* XXH_RESTRICT secret,
size_t secretSize,
3594 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
3595 XXH_ASSERT(16 <
len &&
len <= 128);
3597 { xxh_u64 acc =
len * XXH_PRIME64_1;
3601 acc += XXH3_mix16B(
input+48, secret+96,
seed);
3604 acc += XXH3_mix16B(
input+32, secret+64,
seed);
3607 acc += XXH3_mix16B(
input+16, secret+32,
seed);
3610 acc += XXH3_mix16B(
input+0, secret+0,
seed);
3613 return XXH3_avalanche(acc);
/* ---- XXH3_len_129to240_64b ---------------------------------------------
 * Mid-size path for 129-240 bytes: 8 fixed mix16B rounds, an intermediate
 * avalanche, then the remaining rounds with a secret offset, and a final
 * mix of the last 16 bytes.  The clang/NEON pragma disables a harmful
 * autovectorization of this scalar loop. */
3617#define XXH3_MIDSIZE_MAX 240
3620XXH3_len_129to240_64b(
const xxh_u8* XXH_RESTRICT
input,
size_t len,
3621 const xxh_u8* XXH_RESTRICT secret,
size_t secretSize,
3624 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
3625 XXH_ASSERT(128 <
len &&
len <= XXH3_MIDSIZE_MAX);
3627 #define XXH3_MIDSIZE_STARTOFFSET 3
3628 #define XXH3_MIDSIZE_LASTOFFSET 17
3630 { xxh_u64 acc =
len * XXH_PRIME64_1;
3631 int const nbRounds = (
int)
len / 16;
/* NOTE(review): the `int i;` declaration is missing from this extraction. */
3633 for (
i=0;
i<8;
i++) {
3634 acc += XXH3_mix16B(
input+(16*
i), secret+(16*
i),
seed);
3636 acc = XXH3_avalanche(acc);
3637 XXH_ASSERT(nbRounds >= 8);
3638#if defined(__clang__) \
3639 && (defined(__ARM_NEON) || defined(__ARM_NEON__)) \
3640 && !defined(XXH_ENABLE_AUTOVECTORIZE)
3661 #pragma clang loop vectorize(disable)
3663 for (
i=8 ;
i < nbRounds;
i++) {
3664 acc += XXH3_mix16B(
input+(16*
i), secret+(16*(
i-8)) + XXH3_MIDSIZE_STARTOFFSET,
seed);
/* Last 16 bytes always get mixed with a dedicated secret location. */
3667 acc += XXH3_mix16B(
input +
len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET,
seed);
3668 return XXH3_avalanche(acc);
/* ---- Long-hash core constants ------------------------------------------
 * A stripe is 64 bytes; each stripe consumes 8 further bytes of secret;
 * the accumulator holds XXH_STRIPE_LEN/8 = 8 lanes of 64 bits. */
3675#define XXH_STRIPE_LEN 64
3676#define XXH_SECRET_CONSUME_RATE 8
3677#define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64))
/* Deprecated short aliases kept for backward compatibility. */
3680# define STRIPE_LEN XXH_STRIPE_LEN
3681# define ACC_NB XXH_ACC_NB
/* Stores v64 to an unaligned destination via memcpy.
 * NOTE(review): an endianness branch (XXH_swap64 on big-endian) appears to
 * be missing from this extraction — confirm against the full file. */
3684XXH_FORCE_INLINE
void XXH_writeLE64(
void*
dst, xxh_u64 v64)
3687 XXH_memcpy(
dst, &v64,
sizeof(v64));
/* Signed 64-bit type for intrinsics that take signed arguments:
 * int64_t on C99/C++ platforms, `long long` otherwise. */
3695#if !defined (__VMS) \
3696 && (defined (__cplusplus) \
3697 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) ) )
3698 typedef int64_t xxh_i64;
3701 typedef long long xxh_i64;
/* ======================= AVX512 implementation ========================== */
3728#if (XXH_VECTOR == XXH_AVX512) \
3729 || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0)
3731#ifndef XXH_TARGET_AVX512
3732# define XXH_TARGET_AVX512
/* One 512-bit accumulation step: acc += swap(input) + (u32 halves of
 * (input ^ secret)) multiplied together.  acc must be 64-byte aligned. */
3735XXH_FORCE_INLINE XXH_TARGET_AVX512
void
3736XXH3_accumulate_512_avx512(
void* XXH_RESTRICT acc,
3737 const void* XXH_RESTRICT
input,
3738 const void* XXH_RESTRICT secret)
3740 __m512i*
const xacc = (__m512i *) acc;
3741 XXH_ASSERT((((
size_t)acc) & 63) == 0);
3746 __m512i
const data_vec = _mm512_loadu_si512 (
input);
3748 __m512i
const key_vec = _mm512_loadu_si512 (secret);
3750 __m512i
const data_key = _mm512_xor_si512 (data_vec, key_vec);
/* Swap hi/lo 32-bit halves so mul_epu32 sees (lo * hi) per 64-bit lane. */
3752 __m512i
const data_key_lo = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1));
3754 __m512i
const product = _mm512_mul_epu32 (data_key, data_key_lo);
/* Add the 64-bit-swapped input into the accumulator as well. */
3756 __m512i
const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2));
3757 __m512i
const sum = _mm512_add_epi64(*xacc, data_swap);
3759 *xacc = _mm512_add_epi64(product, sum);
/* Scramble step: acc = ((acc ^ (acc >> 47)) ^ secret) * XXH_PRIME32_1,
 * with the 64x32 multiply assembled from two mul_epu32 halves. */
3784XXH_FORCE_INLINE XXH_TARGET_AVX512
void
3785XXH3_scrambleAcc_avx512(
void* XXH_RESTRICT acc,
const void* XXH_RESTRICT secret)
3787 XXH_ASSERT((((
size_t)acc) & 63) == 0);
3789 { __m512i*
const xacc = (__m512i*) acc;
3790 const __m512i prime32 = _mm512_set1_epi32((
int)XXH_PRIME32_1);
3793 __m512i
const acc_vec = *xacc;
3794 __m512i
const shifted = _mm512_srli_epi64 (acc_vec, 47);
3795 __m512i
const data_vec = _mm512_xor_si512 (acc_vec, shifted);
3797 __m512i
const key_vec = _mm512_loadu_si512 (secret);
3798 __m512i
const data_key = _mm512_xor_si512 (data_vec, key_vec);
3801 __m512i
const data_key_hi = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1));
3802 __m512i
const prod_lo = _mm512_mul_epu32 (data_key, prime32);
3803 __m512i
const prod_hi = _mm512_mul_epu32 (data_key_hi, prime32);
3804 *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32));
/* Derives a custom secret from XXH3_kSecret by adding +seed/-seed to
 * alternating 64-bit lanes (mask 0xAA selects the odd lanes). */
3808XXH_FORCE_INLINE XXH_TARGET_AVX512
void
3809XXH3_initCustomSecret_avx512(
void* XXH_RESTRICT customSecret, xxh_u64 seed64)
3813 XXH_ASSERT(((
size_t)customSecret & 63) == 0);
3814 (void)(&XXH_writeLE64);
3815 {
int const nbRounds = XXH_SECRET_DEFAULT_SIZE /
sizeof(__m512i);
3816 __m512i
const seed = _mm512_mask_set1_epi64(_mm512_set1_epi64((xxh_i64)seed64), 0xAA, (xxh_i64)(0
U - seed64));
3818 const __m512i*
const src = (
const __m512i*) ((
const void*) XXH3_kSecret);
3819 __m512i*
const dest = ( __m512i*) customSecret;
/* NOTE(review): the `int i;` declaration and the union declaration used
 * below (`remote_const_void`) are partly missing from this extraction. */
3821 XXH_ASSERT(((
size_t)
src & 63) == 0);
3822 XXH_ASSERT(((
size_t)
dest & 63) == 0);
3823 for (
i=0;
i < nbRounds; ++
i) {
3829 } remote_const_void;
3830 remote_const_void.cp =
src +
i;
3831 dest[
i] = _mm512_add_epi64(_mm512_stream_load_si512(remote_const_void.p),
seed);
/* ======================== AVX2 implementation =========================== */
3837#if (XXH_VECTOR == XXH_AVX2) \
3838 || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0)
3840#ifndef XXH_TARGET_AVX2
3841# define XXH_TARGET_AVX2
/* Same accumulation scheme as AVX512, two 256-bit lanes per stripe.
 * acc must be 32-byte aligned. */
3844XXH_FORCE_INLINE XXH_TARGET_AVX2
void
3845XXH3_accumulate_512_avx2(
void* XXH_RESTRICT acc,
3846 const void* XXH_RESTRICT
input,
3847 const void* XXH_RESTRICT secret)
3849 XXH_ASSERT((((
size_t)acc) & 31) == 0);
3850 { __m256i*
const xacc = (__m256i *) acc;
3853 const __m256i*
const xinput = (
const __m256i *)
input;
3856 const __m256i*
const xsecret = (
const __m256i *) secret;
/* NOTE(review): the `size_t i;` declaration is missing from this extraction. */
3859 for (
i=0;
i < XXH_STRIPE_LEN/
sizeof(__m256i);
i++) {
3861 __m256i
const data_vec = _mm256_loadu_si256 (xinput+
i);
3863 __m256i
const key_vec = _mm256_loadu_si256 (xsecret+
i);
3865 __m256i
const data_key = _mm256_xor_si256 (data_vec, key_vec);
3867 __m256i
const data_key_lo = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
3869 __m256i
const product = _mm256_mul_epu32 (data_key, data_key_lo);
3871 __m256i
const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
3872 __m256i
const sum = _mm256_add_epi64(xacc[
i], data_swap);
3874 xacc[
i] = _mm256_add_epi64(product, sum);
/* AVX2 scramble: xorshift-47, XOR secret, multiply by XXH_PRIME32_1. */
3878XXH_FORCE_INLINE XXH_TARGET_AVX2
void
3879XXH3_scrambleAcc_avx2(
void* XXH_RESTRICT acc,
const void* XXH_RESTRICT secret)
3881 XXH_ASSERT((((
size_t)acc) & 31) == 0);
3882 { __m256i*
const xacc = (__m256i*) acc;
3885 const __m256i*
const xsecret = (
const __m256i *) secret;
3886 const __m256i prime32 = _mm256_set1_epi32((
int)XXH_PRIME32_1);
3889 for (
i=0;
i < XXH_STRIPE_LEN/
sizeof(__m256i);
i++) {
3891 __m256i
const acc_vec = xacc[
i];
3892 __m256i
const shifted = _mm256_srli_epi64 (acc_vec, 47);
3893 __m256i
const data_vec = _mm256_xor_si256 (acc_vec, shifted);
3895 __m256i
const key_vec = _mm256_loadu_si256 (xsecret+
i);
3896 __m256i
const data_key = _mm256_xor_si256 (data_vec, key_vec);
3899 __m256i
const data_key_hi = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
3900 __m256i
const prod_lo = _mm256_mul_epu32 (data_key, prime32);
3901 __m256i
const prod_hi = _mm256_mul_epu32 (data_key_hi, prime32);
3902 xacc[
i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32));
/* AVX2 custom-secret init: fully unrolled (6 x 32 bytes = 192 bytes);
 * XXH_COMPILER_GUARD prevents GCC/clang from pessimizing the stores. */
3907XXH_FORCE_INLINE XXH_TARGET_AVX2
void XXH3_initCustomSecret_avx2(
void* XXH_RESTRICT customSecret, xxh_u64 seed64)
3912 (void)(&XXH_writeLE64);
3913 XXH_PREFETCH(customSecret);
3914 { __m256i
const seed = _mm256_set_epi64x((xxh_i64)(0
U - seed64), (xxh_i64)seed64, (xxh_i64)(0
U - seed64), (xxh_i64)seed64);
3916 const __m256i*
const src = (
const __m256i*) ((
const void*) XXH3_kSecret);
3917 __m256i*
dest = ( __m256i*) customSecret;
3919# if defined(__GNUC__) || defined(__clang__)
3925 XXH_COMPILER_GUARD(
dest);
3927 XXH_ASSERT(((
size_t)
src & 31) == 0);
3928 XXH_ASSERT(((
size_t)
dest & 31) == 0);
3931 dest[0] = _mm256_add_epi64(_mm256_stream_load_si256(
src+0),
seed);
3932 dest[1] = _mm256_add_epi64(_mm256_stream_load_si256(
src+1),
seed);
3933 dest[2] = _mm256_add_epi64(_mm256_stream_load_si256(
src+2),
seed);
3934 dest[3] = _mm256_add_epi64(_mm256_stream_load_si256(
src+3),
seed);
3935 dest[4] = _mm256_add_epi64(_mm256_stream_load_si256(
src+4),
seed);
3936 dest[5] = _mm256_add_epi64(_mm256_stream_load_si256(
src+5),
seed);
/* ======================== SSE2 implementation ===========================
 * SSE2 is the x86-64 baseline, so this path needs no target attribute at
 * runtime-dispatch time. */
3943#if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH)
3945#ifndef XXH_TARGET_SSE2
3946# define XXH_TARGET_SSE2
/* Same accumulation scheme, four 128-bit lanes per stripe.
 * acc must be 16-byte aligned. */
3949XXH_FORCE_INLINE XXH_TARGET_SSE2
void
3950XXH3_accumulate_512_sse2(
void* XXH_RESTRICT acc,
3951 const void* XXH_RESTRICT
input,
3952 const void* XXH_RESTRICT secret)
3955 XXH_ASSERT((((
size_t)acc) & 15) == 0);
3956 { __m128i*
const xacc = (__m128i *) acc;
3959 const __m128i*
const xinput = (
const __m128i *)
input;
3962 const __m128i*
const xsecret = (
const __m128i *) secret;
3965 for (
i=0;
i < XXH_STRIPE_LEN/
sizeof(__m128i);
i++) {
3967 __m128i
const data_vec = _mm_loadu_si128 (xinput+
i);
3969 __m128i
const key_vec = _mm_loadu_si128 (xsecret+
i);
3971 __m128i
const data_key = _mm_xor_si128 (data_vec, key_vec);
3973 __m128i
const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
3975 __m128i
const product = _mm_mul_epu32 (data_key, data_key_lo);
3977 __m128i
const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2));
3978 __m128i
const sum = _mm_add_epi64(xacc[
i], data_swap);
3980 xacc[
i] = _mm_add_epi64(product, sum);
/* SSE2 scramble: xorshift-47, XOR secret, multiply by XXH_PRIME32_1. */
3984XXH_FORCE_INLINE XXH_TARGET_SSE2
void
3985XXH3_scrambleAcc_sse2(
void* XXH_RESTRICT acc,
const void* XXH_RESTRICT secret)
3987 XXH_ASSERT((((
size_t)acc) & 15) == 0);
3988 { __m128i*
const xacc = (__m128i*) acc;
3991 const __m128i*
const xsecret = (
const __m128i *) secret;
3992 const __m128i prime32 = _mm_set1_epi32((
int)XXH_PRIME32_1);
3995 for (
i=0;
i < XXH_STRIPE_LEN/
sizeof(__m128i);
i++) {
3997 __m128i
const acc_vec = xacc[
i];
3998 __m128i
const shifted = _mm_srli_epi64 (acc_vec, 47);
3999 __m128i
const data_vec = _mm_xor_si128 (acc_vec, shifted);
4001 __m128i
const key_vec = _mm_loadu_si128 (xsecret+
i);
4002 __m128i
const data_key = _mm_xor_si128 (data_vec, key_vec);
4005 __m128i
const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
4006 __m128i
const prod_lo = _mm_mul_epu32 (data_key, prime32);
4007 __m128i
const prod_hi = _mm_mul_epu32 (data_key_hi, prime32);
4008 xacc[
i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32));
/* SSE2 custom-secret init.  Old MSVC x86 lacks _mm_set_epi64x, hence the
 * aligned-array + _mm_load_si128 workaround. */
4013XXH_FORCE_INLINE XXH_TARGET_SSE2
void XXH3_initCustomSecret_sse2(
void* XXH_RESTRICT customSecret, xxh_u64 seed64)
4016 (void)(&XXH_writeLE64);
4017 {
int const nbRounds = XXH_SECRET_DEFAULT_SIZE /
sizeof(__m128i);
4019# if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900
4021 XXH_ALIGN(16)
const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, (xxh_i64)(0
U - seed64) };
4022 __m128i
const seed = _mm_load_si128((__m128i
const*)seed64x2);
4024 __m128i
const seed = _mm_set_epi64x((xxh_i64)(0
U - seed64), (xxh_i64)seed64);
4028 const void*
const src16 = XXH3_kSecret;
4029 __m128i* dst16 = (__m128i*) customSecret;
4030# if defined(__GNUC__) || defined(__clang__)
4036 XXH_COMPILER_GUARD(dst16);
4038 XXH_ASSERT(((
size_t)src16 & 15) == 0);
4039 XXH_ASSERT(((
size_t)dst16 & 15) == 0);
4041 for (
i=0;
i < nbRounds; ++
i) {
4042 dst16[
i] = _mm_add_epi64(_mm_load_si128((
const __m128i *)src16+
i),
seed);
/* ========================= NEON implementation ==========================
 * NEON processes only XXH3_NEON_LANES lanes vectorially; the remaining
 * lanes fall back to the scalar round functions, forward-declared here. */
4048#if (XXH_VECTOR == XXH_NEON)
4051XXH_FORCE_INLINE
void
4052XXH3_scalarRound(
void* XXH_RESTRICT acc,
void const* XXH_RESTRICT
input,
4053 void const* XXH_RESTRICT secret,
size_t lane);
4055XXH_FORCE_INLINE
void
4056XXH3_scalarScrambleRound(
void* XXH_RESTRICT acc,
4057 void const* XXH_RESTRICT secret,
size_t lane);
/* NEON accumulate: per 128-bit pair of lanes, add the 64-bit-swapped input
 * (vextq_u64 ... 1), then multiply-accumulate the 32-bit halves of
 * (input ^ secret) via vmlal_u32. */
4069XXH_FORCE_INLINE
void
4070XXH3_accumulate_512_neon(
void* XXH_RESTRICT acc,
4071 const void* XXH_RESTRICT
input,
4072 const void* XXH_RESTRICT secret)
4074 XXH_ASSERT((((
size_t)acc) & 15) == 0);
4075 XXH_STATIC_ASSERT(XXH3_NEON_LANES > 0 && XXH3_NEON_LANES <= XXH_ACC_NB && XXH3_NEON_LANES % 2 == 0);
4077 uint64x2_t*
const xacc = (uint64x2_t *) acc;
4079 uint8_t
const*
const xinput = (
const uint8_t *)
input;
4080 uint8_t
const*
const xsecret = (
const uint8_t *) secret;
/* NOTE(review): the `size_t i;` declaration is missing from this extraction. */
4084 for (
i=0;
i < XXH3_NEON_LANES / 2;
i++) {
4086 uint8x16_t data_vec = vld1q_u8(xinput + (
i * 16));
4088 uint8x16_t key_vec = vld1q_u8(xsecret + (
i * 16));
4089 uint64x2_t data_key;
4090 uint32x2_t data_key_lo, data_key_hi;
4092 uint64x2_t
const data64 = vreinterpretq_u64_u8(data_vec);
4093 uint64x2_t
const swapped = vextq_u64(data64, data64, 1);
4094 xacc[
i] = vaddq_u64 (xacc[
i], swapped);
4096 data_key = vreinterpretq_u64_u8(veorq_u8(data_vec, key_vec));
4100 XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
4102 xacc[
i] = vmlal_u32 (xacc[
i], data_key_lo, data_key_hi);
/* Remaining lanes (beyond XXH3_NEON_LANES) are handled scalar-side. */
4106 for (
i = XXH3_NEON_LANES;
i < XXH_ACC_NB;
i++) {
4107 XXH3_scalarRound(acc,
input, secret,
i);
/* NEON scramble: xorshift-47, XOR secret, then 64x32 multiply by
 * XXH_PRIME32_1 built from vmull_u32 (high part) + vmlal_u32 (low part). */
4112XXH_FORCE_INLINE
void
4113XXH3_scrambleAcc_neon(
void* XXH_RESTRICT acc,
const void* XXH_RESTRICT secret)
4115 XXH_ASSERT((((
size_t)acc) & 15) == 0);
4117 { uint64x2_t* xacc = (uint64x2_t*) acc;
4118 uint8_t
const* xsecret = (uint8_t
const*) secret;
4119 uint32x2_t prime = vdup_n_u32 (XXH_PRIME32_1);
4123 for (
i=0;
i < XXH3_NEON_LANES / 2;
i++) {
4125 uint64x2_t acc_vec = xacc[
i];
4126 uint64x2_t shifted = vshrq_n_u64 (acc_vec, 47);
4127 uint64x2_t data_vec = veorq_u64 (acc_vec, shifted);
4130 uint8x16_t key_vec = vld1q_u8 (xsecret + (
i * 16));
4131 uint64x2_t data_key = veorq_u64 (data_vec, vreinterpretq_u64_u8(key_vec));
4134 uint32x2_t data_key_lo, data_key_hi;
4138 XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
4157 uint64x2_t prod_hi = vmull_u32 (data_key_hi, prime);
4159 xacc[
i] = vshlq_n_u64(prod_hi, 32);
4161 xacc[
i] = vmlal_u32(xacc[
i], data_key_lo, prime);
4165 for (
i = XXH3_NEON_LANES;
i < XXH_ACC_NB;
i++) {
4166 XXH3_scalarScrambleRound(acc, secret,
i);
/* ========================== VSX implementation ========================== */
4173#if (XXH_VECTOR == XXH_VSX)
/* VSX accumulate: XOR input with secret, rotate 32 bits to pair hi/lo
 * words, widening-multiply odd words, and add the doubleword-swapped
 * input (vec_permi / vec_xxpermdi, compiler-dependent spelling). */
4175XXH_FORCE_INLINE
void
4176XXH3_accumulate_512_vsx(
void* XXH_RESTRICT acc,
4177 const void* XXH_RESTRICT
input,
4178 const void* XXH_RESTRICT secret)
/* acc is accessed as unsigned int* to use vec_xl/vec_xst unaligned ops. */
4181 unsigned int*
const xacc = (
unsigned int*) acc;
4182 xxh_u64x2
const*
const xinput = (xxh_u64x2
const*)
input;
4183 xxh_u64x2
const*
const xsecret = (xxh_u64x2
const*) secret;
4184 xxh_u64x2
const v32 = { 32, 32 };
4186 for (
i = 0;
i < XXH_STRIPE_LEN /
sizeof(xxh_u64x2);
i++) {
4188 xxh_u64x2
const data_vec = XXH_vec_loadu(xinput +
i);
4190 xxh_u64x2
const key_vec = XXH_vec_loadu(xsecret +
i);
4191 xxh_u64x2
const data_key = data_vec ^ key_vec;
4193 xxh_u32x4
const shuffled = (xxh_u32x4)vec_rl(data_key, v32);
4195 xxh_u64x2
const product = XXH_vec_mulo((xxh_u32x4)data_key, shuffled);
4197 xxh_u64x2 acc_vec = (xxh_u64x2)vec_xl(0, xacc + 4 *
i);
/* NOTE(review): the acc_vec += product line and the compiler-selection
 * preprocessor lines around the two permutes are missing here. */
4202 acc_vec += vec_permi(data_vec, data_vec, 2);
4204 acc_vec += vec_xxpermdi(data_vec, data_vec, 2);
4207 vec_xst((xxh_u32x4)acc_vec, 0, xacc + 4 *
i);
/* VSX scramble: xorshift-47, XOR secret, then combine even/odd widening
 * multiplies by XXH_PRIME32_1 into the full 64x32 product. */
4211XXH_FORCE_INLINE
void
4212XXH3_scrambleAcc_vsx(
void* XXH_RESTRICT acc,
const void* XXH_RESTRICT secret)
4214 XXH_ASSERT((((
size_t)acc) & 15) == 0);
4216 { xxh_u64x2*
const xacc = (xxh_u64x2*) acc;
4217 const xxh_u64x2*
const xsecret = (
const xxh_u64x2*) secret;
4219 xxh_u64x2
const v32 = { 32, 32 };
4220 xxh_u64x2
const v47 = { 47, 47 };
4221 xxh_u32x4
const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 };
4223 for (
i = 0;
i < XXH_STRIPE_LEN /
sizeof(xxh_u64x2);
i++) {
4225 xxh_u64x2
const acc_vec = xacc[
i];
4226 xxh_u64x2
const data_vec = acc_vec ^ (acc_vec >> v47);
4229 xxh_u64x2
const key_vec = XXH_vec_loadu(xsecret +
i);
4230 xxh_u64x2
const data_key = data_vec ^ key_vec;
4234 xxh_u64x2
const prod_even = XXH_vec_mule((xxh_u32x4)data_key, prime);
4236 xxh_u64x2
const prod_odd = XXH_vec_mulo((xxh_u32x4)data_key, prime);
4237 xacc[
i] = prod_odd + (prod_even << v32);
/* ======================== Scalar implementation =========================
 * One accumulation lane: acc[lane^1] += input; acc[lane] += lo*hi of
 * (input ^ secret).  The lane^1 cross-write mirrors the vector paths'
 * 64-bit swap. */
4252XXH_FORCE_INLINE
void
4253XXH3_scalarRound(
void* XXH_RESTRICT acc,
4254 void const* XXH_RESTRICT
input,
4255 void const* XXH_RESTRICT secret,
4258 xxh_u64* xacc = (xxh_u64*) acc;
4259 xxh_u8
const* xinput = (xxh_u8
const*)
input;
4260 xxh_u8
const* xsecret = (xxh_u8
const*) secret;
4261 XXH_ASSERT(lane < XXH_ACC_NB);
4262 XXH_ASSERT(((
size_t)acc & (XXH_ACC_ALIGN-1)) == 0);
4264 xxh_u64
const data_val =
XXH_readLE64(xinput + lane * 8);
4265 xxh_u64
const data_key = data_val ^
XXH_readLE64(xsecret + lane * 8);
4266 xacc[lane ^ 1] += data_val;
4267 xacc[lane] += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32);
/* Scalar accumulate: apply XXH3_scalarRound to all 8 lanes. */
4275XXH_FORCE_INLINE
void
4276XXH3_accumulate_512_scalar(
void* XXH_RESTRICT acc,
4277 const void* XXH_RESTRICT
input,
4278 const void* XXH_RESTRICT secret)
4281 for (
i=0;
i < XXH_ACC_NB;
i++) {
4282 XXH3_scalarRound(acc,
input, secret,
i);
/* One scramble lane: xorshift-47, XOR secret, multiply by XXH_PRIME32_1.
 * NOTE(review): the `xacc[lane] = acc64;` store and the key64 XOR into
 * acc64 appear to be missing from this extraction. */
4293XXH_FORCE_INLINE
void
4294XXH3_scalarScrambleRound(
void* XXH_RESTRICT acc,
4295 void const* XXH_RESTRICT secret,
4298 xxh_u64*
const xacc = (xxh_u64*) acc;
4299 const xxh_u8*
const xsecret = (
const xxh_u8*) secret;
4300 XXH_ASSERT((((
size_t)acc) & (XXH_ACC_ALIGN-1)) == 0);
4301 XXH_ASSERT(lane < XXH_ACC_NB);
4303 xxh_u64
const key64 =
XXH_readLE64(xsecret + lane * 8);
4304 xxh_u64 acc64 = xacc[lane];
4305 acc64 = XXH_xorshift64(acc64, 47);
4307 acc64 *= XXH_PRIME32_1;
/* Scalar scramble: apply XXH3_scalarScrambleRound to all 8 lanes. */
4316XXH_FORCE_INLINE
void
4317XXH3_scrambleAcc_scalar(
void* XXH_RESTRICT acc,
const void* XXH_RESTRICT secret)
4320 for (
i=0;
i < XXH_ACC_NB;
i++) {
4321 XXH3_scalarScrambleRound(acc, secret,
i);
/* Scalar custom-secret init: per 16-byte pair, writes kSecret words +seed
 * and -seed.  The clang/aarch64 COMPILER_GUARD pins kSecretPtr in a
 * register to stop a pessimizing constant-propagation.
 * NOTE(review): the lo/hi computations inside the loop are missing from
 * this extraction. */
4325XXH_FORCE_INLINE
void
4326XXH3_initCustomSecret_scalar(
void* XXH_RESTRICT customSecret, xxh_u64 seed64)
4333 const xxh_u8* kSecretPtr = XXH3_kSecret;
4336#if defined(__clang__) && defined(__aarch64__)
4369 XXH_COMPILER_GUARD(kSecretPtr);
4375 XXH_ASSERT(kSecretPtr == XXH3_kSecret);
4377 {
int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
4379 for (
i=0;
i < nbRounds;
i++) {
4388 XXH_writeLE64((xxh_u8*)customSecret + 16*
i, lo);
4389 XXH_writeLE64((xxh_u8*)customSecret + 16*
i + 8, hi);
/* Function-pointer types used by the dispatching wrappers below. */
4394typedef void (*XXH3_f_accumulate_512)(
void* XXH_RESTRICT,
const void*,
const void*);
4395typedef void (*XXH3_f_scrambleAcc)(
void* XXH_RESTRICT,
const void*);
4396typedef void (*XXH3_f_initCustomSecret)(
void* XXH_RESTRICT, xxh_u64);
/* Compile-time selection of the best available implementation.  Note that
 * NEON and VSX use the scalar initCustomSecret. */
4399#if (XXH_VECTOR == XXH_AVX512)
4401#define XXH3_accumulate_512 XXH3_accumulate_512_avx512
4402#define XXH3_scrambleAcc XXH3_scrambleAcc_avx512
4403#define XXH3_initCustomSecret XXH3_initCustomSecret_avx512
4405#elif (XXH_VECTOR == XXH_AVX2)
4407#define XXH3_accumulate_512 XXH3_accumulate_512_avx2
4408#define XXH3_scrambleAcc XXH3_scrambleAcc_avx2
4409#define XXH3_initCustomSecret XXH3_initCustomSecret_avx2
4411#elif (XXH_VECTOR == XXH_SSE2)
4413#define XXH3_accumulate_512 XXH3_accumulate_512_sse2
4414#define XXH3_scrambleAcc XXH3_scrambleAcc_sse2
4415#define XXH3_initCustomSecret XXH3_initCustomSecret_sse2
4417#elif (XXH_VECTOR == XXH_NEON)
4419#define XXH3_accumulate_512 XXH3_accumulate_512_neon
4420#define XXH3_scrambleAcc XXH3_scrambleAcc_neon
4421#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
4423#elif (XXH_VECTOR == XXH_VSX)
4425#define XXH3_accumulate_512 XXH3_accumulate_512_vsx
4426#define XXH3_scrambleAcc XXH3_scrambleAcc_vsx
4427#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
4431#define XXH3_accumulate_512 XXH3_accumulate_512_scalar
4432#define XXH3_scrambleAcc XXH3_scrambleAcc_scalar
4433#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
/* Prefetch distance tuned per vector width (wider vectors read further
 * ahead). */
4439#ifndef XXH_PREFETCH_DIST
4441# define XXH_PREFETCH_DIST 320
4443# if (XXH_VECTOR == XXH_AVX512)
4444# define XXH_PREFETCH_DIST 512
4446# define XXH_PREFETCH_DIST 384
/* Processes nbStripes consecutive stripes, advancing 8 secret bytes per
 * stripe and prefetching XXH_PREFETCH_DIST ahead.
 * NOTE(review): the `size_t n;` declaration and the f_acc512(...) call line
 * are missing from this extraction. */
4456XXH_FORCE_INLINE
void
4457XXH3_accumulate( xxh_u64* XXH_RESTRICT acc,
4458 const xxh_u8* XXH_RESTRICT
input,
4459 const xxh_u8* XXH_RESTRICT secret,
4461 XXH3_f_accumulate_512 f_acc512)
4464 for (n = 0; n < nbStripes; n++ ) {
4465 const xxh_u8*
const in =
input + n*XXH_STRIPE_LEN;
4466 XXH_PREFETCH(in + XXH_PREFETCH_DIST);
4469 secret + n*XXH_SECRET_CONSUME_RATE);
/* Long-input main loop: full blocks (accumulate + scramble), then the
 * partial last block, then the final (possibly overlapping) last stripe
 * mixed with a dedicated secret offset (XXH_SECRET_LASTACC_START). */
4473XXH_FORCE_INLINE
void
4474XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc,
4475 const xxh_u8* XXH_RESTRICT
input,
size_t len,
4476 const xxh_u8* XXH_RESTRICT secret,
size_t secretSize,
4477 XXH3_f_accumulate_512 f_acc512,
4478 XXH3_f_scrambleAcc f_scramble)
4480 size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE;
4481 size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock;
4482 size_t const nb_blocks = (
len - 1) / block_len;
4486 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
4488 for (n = 0; n < nb_blocks; n++) {
4489 XXH3_accumulate(acc,
input + n*block_len, secret, nbStripesPerBlock, f_acc512);
4490 f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN);
/* Partial last block: guaranteed non-empty because len > XXH_STRIPE_LEN. */
4494 XXH_ASSERT(
len > XXH_STRIPE_LEN);
4495 {
size_t const nbStripes = ((
len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN;
4496 XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE));
4497 XXH3_accumulate(acc,
input + nb_blocks*block_len, secret, nbStripes, f_acc512);
/* Last stripe, taken from the very end of input (may overlap previous). */
4500 {
const xxh_u8*
const p =
input +
len - XXH_STRIPE_LEN;
4501#define XXH_SECRET_LASTACC_START 7
4502 f_acc512(acc, p, secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START);
/* Folds two accumulator lanes with two secret words into 64 bits.
 * NOTE(review): the fold's argument lines are missing from this extraction. */
4506XXH_FORCE_INLINE xxh_u64
4507XXH3_mix2Accs(
const xxh_u64* XXH_RESTRICT acc,
const xxh_u8* XXH_RESTRICT secret)
4509 return XXH3_mul128_fold64(
/* Merges the 8 accumulator lanes (4 pairs) into the final 64-bit value.
 * The clang/ARM NEON guard prevents a pessimizing autovectorization. */
4515XXH3_mergeAccs(
const xxh_u64* XXH_RESTRICT acc,
const xxh_u8* XXH_RESTRICT secret, xxh_u64
start)
4517 xxh_u64 result64 =
start;
4520 for (
i = 0;
i < 4;
i++) {
4521 result64 += XXH3_mix2Accs(acc+2*
i, secret + 16*
i);
4522#if defined(__clang__) \
4523 && (defined(__arm__) || defined(__thumb__)) \
4524 && (defined(__ARM_NEON) || defined(__ARM_NEON__)) \
4525 && !defined(XXH_ENABLE_AUTOVECTORIZE)
4534 XXH_COMPILER_GUARD(result64);
4538 return XXH3_avalanche(result64);
/* Initial accumulator state: a fixed mix of XXH32/XXH64 primes. */
4541#define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \
4542 XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 }
/* Long-input driver: run the internal loop, then merge with a
 * secret-derived start value (len * PRIME64_1). */
4545XXH3_hashLong_64b_internal(
const void* XXH_RESTRICT
input,
size_t len,
4546 const void* XXH_RESTRICT secret,
size_t secretSize,
4547 XXH3_f_accumulate_512 f_acc512,
4548 XXH3_f_scrambleAcc f_scramble)
4550 XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
4552 XXH3_hashLong_internal_loop(acc, (
const xxh_u8*)
input,
len, (
const xxh_u8*)secret, secretSize, f_acc512, f_scramble);
4557#define XXH_SECRET_MERGEACCS_START 11
4558 XXH_ASSERT(secretSize >=
sizeof(acc) + XXH_SECRET_MERGEACCS_START);
4559 return XXH3_mergeAccs(acc, (
const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)
len * XXH_PRIME64_1);
/* Long-input entry with a caller-provided secret (seed64 is unused here). */
4568XXH3_hashLong_64b_withSecret(
const void* XXH_RESTRICT
input,
size_t len,
4569 XXH64_hash_t seed64,
const xxh_u8* XXH_RESTRICT secret,
size_t secretLen)
4572 return XXH3_hashLong_64b_internal(
input,
len, secret, secretLen, XXH3_accumulate_512, XXH3_scrambleAcc);
/* Long-input entry with the built-in default secret. */
4582XXH3_hashLong_64b_default(
const void* XXH_RESTRICT
input,
size_t len,
4583 XXH64_hash_t seed64,
const xxh_u8* XXH_RESTRICT secret,
size_t secretLen)
4585 (void)seed64; (void)secret; (void)secretLen;
4586 return XXH3_hashLong_64b_internal(
input,
len, XXH3_kSecret,
sizeof(XXH3_kSecret), XXH3_accumulate_512, XXH3_scrambleAcc);
/* Long-input entry with a seed: derives a custom secret on the stack via
 * f_initSec, except (per upstream) seed==0 short-circuits to the default
 * secret.  NOTE(review): the `if (seed==0)` line is missing from this
 * extraction. */
4601XXH3_hashLong_64b_withSeed_internal(
const void*
input,
size_t len,
4603 XXH3_f_accumulate_512 f_acc512,
4604 XXH3_f_scrambleAcc f_scramble,
4605 XXH3_f_initCustomSecret f_initSec)
4608 return XXH3_hashLong_64b_internal(
input,
len,
4609 XXH3_kSecret,
sizeof(XXH3_kSecret),
4610 f_acc512, f_scramble);
4611 { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
4612 f_initSec(secret,
seed);
4613 return XXH3_hashLong_64b_internal(
input,
len, secret,
sizeof(secret),
4614 f_acc512, f_scramble);
/* Non-inlined seed wrapper binding the compile-time-selected kernels. */
4622XXH3_hashLong_64b_withSeed(
const void*
input,
size_t len,
4625 (void)secret; (void)secretLen;
4626 return XXH3_hashLong_64b_withSeed_internal(
input,
len,
seed,
4627 XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret);
4631typedef XXH64_hash_t (*XXH3_hashLong64_f)(
const void* XXH_RESTRICT, size_t,
/* Central 64-bit dispatcher: 0-16 / 17-128 / 129-240 bytes are handled
 * inline; longer inputs go through f_hashLong.
 * NOTE(review): the len-comparison lines in front of the first two returns
 * are missing from this extraction. */
4635XXH3_64bits_internal(
const void* XXH_RESTRICT
input,
size_t len,
4636 XXH64_hash_t seed64,
const void* XXH_RESTRICT secret,
size_t secretLen,
4637 XXH3_hashLong64_f f_hashLong)
4639 XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
4648 return XXH3_len_0to16_64b((
const xxh_u8*)
input,
len, (
const xxh_u8*)secret, seed64);
4650 return XXH3_len_17to128_64b((
const xxh_u8*)
input,
len, (
const xxh_u8*)secret, secretLen, seed64);
4651 if (
len <= XXH3_MIDSIZE_MAX)
4652 return XXH3_len_129to240_64b((
const xxh_u8*)
input,
len, (
const xxh_u8*)secret, secretLen, seed64);
4653 return f_hashLong(
input,
len, seed64, (
const xxh_u8*)secret, secretLen);
/* Public entry: default secret, no seed. */
4662 return XXH3_64bits_internal(
input,
len, 0, XXH3_kSecret,
sizeof(XXH3_kSecret), XXH3_hashLong_64b_default);
/* Public entry: caller-provided secret. */
4669 return XXH3_64bits_internal(
input,
len, 0, secret, secretSize, XXH3_hashLong_64b_withSecret);
/* Public entry: seeded, default secret. */
4676 return XXH3_64bits_internal(
input,
len,
seed, XXH3_kSecret,
sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed);
/* Public entry: secret AND seed.  Short/mid inputs use the default secret
 * with the seed (the NULL hashLong is never reached below MIDSIZE_MAX);
 * long inputs use the caller's secret. */
4682 if (
len <= XXH3_MIDSIZE_MAX)
4683 return XXH3_64bits_internal(
input,
len,
seed, XXH3_kSecret,
sizeof(XXH3_kSecret),
NULL);
4684 return XXH3_hashLong_64b_withSecret(
input,
len,
seed, (
const xxh_u8*)secret, secretSize);
/* Over-allocates by `align` bytes, returns an aligned pointer, and stashes
 * the offset in the byte just before it so XXH_alignedFree can recover the
 * base pointer.  NOTE(review): the NULL-check/return lines are missing from
 * this extraction. */
4713static void* XXH_alignedMalloc(
size_t s,
size_t align)
4715 XXH_ASSERT(align <= 128 && align >= 8);
4716 XXH_ASSERT((align & (align-1)) == 0);
4717 XXH_ASSERT(s != 0 && s < (s + align));
4719 xxh_u8* base = (xxh_u8*)XXH_malloc(s + align);
4727 size_t offset = align - ((size_t)base & (align - 1));
4729 xxh_u8* ptr = base + offset;
4731 XXH_ASSERT((
size_t)ptr % align == 0);
/* offset is in [1, align] <= 128, so it fits in the stashed byte. */
4734 ptr[-1] = (xxh_u8)offset;
/* Frees a pointer obtained from XXH_alignedMalloc by rewinding the stored
 * offset.  NOTE(review): the NULL guard and XXH_free call are missing. */
4744static void XXH_alignedFree(
void* p)
4747 xxh_u8* ptr = (xxh_u8*)p;
4749 xxh_u8 offset = ptr[-1];
4751 xxh_u8* base = ptr - offset;
/* State allocation: 64-byte aligned so the SIMD kernels' alignment
 * asserts hold. */
4758 XXH3_state_t*
const state = (XXH3_state_t*)XXH_alignedMalloc(
sizeof(XXH3_state_t), 64);
4760 XXH3_INITSTATE(state);
4767 XXH_alignedFree(statePtr);
/* State copy is a plain memcpy: the struct owns no external resources. */
4773XXH3_copyState(XXH3_state_t* dst_state,
const XXH3_state_t* src_state)
4775 XXH_memcpy(dst_state, src_state,
sizeof(*dst_state));
/* Common reset: zeroes only the [bufferedSize, nbStripesPerBlock) window,
 * re-seeds the 8 accumulators with the INIT_ACC primes, and records the
 * seed/secret configuration. */
4779XXH3_reset_internal(XXH3_state_t* statePtr,
4781 const void* secret,
size_t secretSize)
4783 size_t const initStart = offsetof(XXH3_state_t, bufferedSize);
4784 size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart;
4785 XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart);
4786 XXH_ASSERT(statePtr !=
NULL);
4788 memset((
char*)statePtr + initStart, 0, initLength);
4789 statePtr->acc[0] = XXH_PRIME32_3;
4790 statePtr->acc[1] = XXH_PRIME64_1;
4791 statePtr->acc[2] = XXH_PRIME64_2;
4792 statePtr->acc[3] = XXH_PRIME64_3;
4793 statePtr->acc[4] = XXH_PRIME64_4;
4794 statePtr->acc[5] = XXH_PRIME32_2;
4795 statePtr->acc[6] = XXH_PRIME64_5;
4796 statePtr->acc[7] = XXH_PRIME32_1;
4797 statePtr->seed =
seed;
4798 statePtr->useSeed = (
seed != 0);
4799 statePtr->extSecret = (
const unsigned char*)secret;
4800 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
4801 statePtr->secretLimit = secretSize - XXH_STRIPE_LEN;
4802 statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE;
/* Reset variant: default secret, no seed. */
4810 XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
/* Reset variant: external secret (validated for minimum size). */
4819 XXH3_reset_internal(statePtr, 0, secret, secretSize);
4821 if (secretSize < XXH3_SECRET_SIZE_MIN)
return XXH_ERROR;
/* Reset variant: seeded.  The custom secret is (re)generated only when the
 * seed changed or an external secret was previously in use. */
4831 if ((
seed != statePtr->seed) || (statePtr->extSecret !=
NULL))
4832 XXH3_initCustomSecret(statePtr->customSecret,
seed);
4833 XXH3_reset_internal(statePtr,
seed,
NULL, XXH_SECRET_DEFAULT_SIZE);
/* Reset variant: secret AND seed; useSeed is forced on. */
4843 if (secretSize < XXH3_SECRET_SIZE_MIN)
return XXH_ERROR;
4844 XXH3_reset_internal(statePtr, seed64, secret, secretSize);
4845 statePtr->useSeed = 1;
/* Streaming helper: consumes nbStripes, splitting across a block boundary
 * when the current block would be exceeded (scramble in between), and
 * tracks progress in *nbStripesSoFarPtr. */
4852XXH_FORCE_INLINE
void
4853XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc,
4854 size_t* XXH_RESTRICT nbStripesSoFarPtr,
size_t nbStripesPerBlock,
4855 const xxh_u8* XXH_RESTRICT
input,
size_t nbStripes,
4856 const xxh_u8* XXH_RESTRICT secret,
size_t secretLimit,
4857 XXH3_f_accumulate_512 f_acc512,
4858 XXH3_f_scrambleAcc f_scramble)
4860 XXH_ASSERT(nbStripes <= nbStripesPerBlock);
4861 XXH_ASSERT(*nbStripesSoFarPtr < nbStripesPerBlock);
4862 if (nbStripesPerBlock - *nbStripesSoFarPtr <= nbStripes) {
4864 size_t const nbStripesToEndofBlock = nbStripesPerBlock - *nbStripesSoFarPtr;
4865 size_t const nbStripesAfterBlock = nbStripes - nbStripesToEndofBlock;
4866 XXH3_accumulate(acc,
input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripesToEndofBlock, f_acc512);
4867 f_scramble(acc, secret + secretLimit);
4868 XXH3_accumulate(acc,
input + nbStripesToEndofBlock * XXH_STRIPE_LEN, secret, nbStripesAfterBlock, f_acc512);
4869 *nbStripesSoFarPtr = nbStripesAfterBlock;
4871 XXH3_accumulate(acc,
input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripes, f_acc512);
4872 *nbStripesSoFarPtr += nbStripes;
/* Streaming state keeps the accumulators on the stack during update when
 * enabled (performance on some compilers). */
4876#ifndef XXH3_STREAM_USE_STACK
4878# define XXH3_STREAM_USE_STACK 1
4885XXH3_update(XXH3_state_t* XXH_RESTRICT
const state,
4886 const xxh_u8* XXH_RESTRICT
input,
size_t len,
4887 XXH3_f_accumulate_512 f_acc512,
4888 XXH3_f_scrambleAcc f_scramble)
4891 XXH_ASSERT(
len == 0);
4895 XXH_ASSERT(state !=
NULL);
4896 {
const xxh_u8*
const bEnd =
input +
len;
4897 const unsigned char*
const secret = (state->extSecret ==
NULL) ? state->customSecret : state->extSecret;
4898#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
4903 XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8]; memcpy(acc, state->acc,
sizeof(acc));
4905 xxh_u64* XXH_RESTRICT
const acc = state->acc;
4907 state->totalLen +=
len;
4908 XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE);
4911 if (state->bufferedSize +
len <= XXH3_INTERNALBUFFER_SIZE) {
4912 XXH_memcpy(state->buffer + state->bufferedSize,
input,
len);
4918 #define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN)
4925 if (state->bufferedSize) {
4926 size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize;
4927 XXH_memcpy(state->buffer + state->bufferedSize,
input, loadSize);
4929 XXH3_consumeStripes(acc,
4930 &state->nbStripesSoFar, state->nbStripesPerBlock,
4931 state->buffer, XXH3_INTERNALBUFFER_STRIPES,
4932 secret, state->secretLimit,
4933 f_acc512, f_scramble);
4934 state->bufferedSize = 0;
4936 XXH_ASSERT(
input < bEnd);
4939 if ((
size_t)(bEnd -
input) > state->nbStripesPerBlock * XXH_STRIPE_LEN) {
4940 size_t nbStripes = (size_t)(bEnd - 1 -
input) / XXH_STRIPE_LEN;
4941 XXH_ASSERT(state->nbStripesPerBlock >= state->nbStripesSoFar);
4943 {
size_t const nbStripesToEnd = state->nbStripesPerBlock - state->nbStripesSoFar;
4944 XXH_ASSERT(nbStripesToEnd <= nbStripes);
4945 XXH3_accumulate(acc,
input, secret + state->nbStripesSoFar * XXH_SECRET_CONSUME_RATE, nbStripesToEnd, f_acc512);
4946 f_scramble(acc, secret + state->secretLimit);
4947 state->nbStripesSoFar = 0;
4948 input += nbStripesToEnd * XXH_STRIPE_LEN;
4949 nbStripes -= nbStripesToEnd;
4952 while(nbStripes >= state->nbStripesPerBlock) {
4953 XXH3_accumulate(acc,
input, secret, state->nbStripesPerBlock, f_acc512);
4954 f_scramble(acc, secret + state->secretLimit);
4955 input += state->nbStripesPerBlock * XXH_STRIPE_LEN;
4956 nbStripes -= state->nbStripesPerBlock;
4959 XXH3_accumulate(acc,
input, secret, nbStripes, f_acc512);
4960 input += nbStripes * XXH_STRIPE_LEN;
4961 XXH_ASSERT(
input < bEnd);
4962 state->nbStripesSoFar = nbStripes;
4964 XXH_memcpy(state->buffer +
sizeof(state->buffer) - XXH_STRIPE_LEN,
input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
4965 XXH_ASSERT(bEnd -
input <= XXH_STRIPE_LEN);
4969 if (bEnd -
input > XXH3_INTERNALBUFFER_SIZE) {
4970 const xxh_u8*
const limit = bEnd - XXH3_INTERNALBUFFER_SIZE;
4972 XXH3_consumeStripes(acc,
4973 &state->nbStripesSoFar, state->nbStripesPerBlock,
4974 input, XXH3_INTERNALBUFFER_STRIPES,
4975 secret, state->secretLimit,
4976 f_acc512, f_scramble);
4977 input += XXH3_INTERNALBUFFER_SIZE;
4980 XXH_memcpy(state->buffer +
sizeof(state->buffer) - XXH_STRIPE_LEN,
input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
4985 XXH_ASSERT(
input < bEnd);
4986 XXH_ASSERT(bEnd -
input <= XXH3_INTERNALBUFFER_SIZE);
4987 XXH_ASSERT(state->bufferedSize == 0);
4988 XXH_memcpy(state->buffer,
input, (
size_t)(bEnd-
input));
4990#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
4992 memcpy(state->acc, acc,
sizeof(acc));
5003 return XXH3_update(state, (
const xxh_u8*)
input,
len,
5004 XXH3_accumulate_512, XXH3_scrambleAcc);
5008XXH_FORCE_INLINE
void
5010 const XXH3_state_t* state,
5011 const unsigned char* secret)
5017 XXH_memcpy(acc, state->acc,
sizeof(state->acc));
5018 if (state->bufferedSize >= XXH_STRIPE_LEN) {
5019 size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN;
5020 size_t nbStripesSoFar = state->nbStripesSoFar;
5021 XXH3_consumeStripes(acc,
5022 &nbStripesSoFar, state->nbStripesPerBlock,
5023 state->buffer, nbStripes,
5024 secret, state->secretLimit,
5025 XXH3_accumulate_512, XXH3_scrambleAcc);
5027 XXH3_accumulate_512(acc,
5028 state->buffer + state->bufferedSize - XXH_STRIPE_LEN,
5029 secret + state->secretLimit - XXH_SECRET_LASTACC_START);
5031 xxh_u8 lastStripe[XXH_STRIPE_LEN];
5032 size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize;
5033 XXH_ASSERT(state->bufferedSize > 0);
5034 XXH_memcpy(lastStripe, state->buffer +
sizeof(state->buffer) - catchupSize, catchupSize);
5035 XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize);
5036 XXH3_accumulate_512(acc,
5038 secret + state->secretLimit - XXH_SECRET_LASTACC_START);
5045 const unsigned char*
const secret = (state->extSecret ==
NULL) ? state->customSecret : state->extSecret;
5046 if (state->totalLen > XXH3_MIDSIZE_MAX) {
5048 XXH3_digest_long(acc, state, secret);
5049 return XXH3_mergeAccs(acc,
5050 secret + XXH_SECRET_MERGEACCS_START,
5051 (xxh_u64)state->totalLen * XXH_PRIME64_1);
5057 secret, state->secretLimit + XXH_STRIPE_LEN);
5079XXH_FORCE_INLINE XXH128_hash_t
5084 XXH_ASSERT(1 <=
len &&
len <= 3);
5085 XXH_ASSERT(secret !=
NULL);
5091 { xxh_u8
const c1 =
input[0];
5094 xxh_u32
const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24)
5095 | ((xxh_u32)c3 << 0) | ((xxh_u32)
len << 8);
5096 xxh_u32
const combinedh =
XXH_rotl32(XXH_swap32(combinedl), 13);
5099 xxh_u64
const keyed_lo = (xxh_u64)combinedl ^ bitflipl;
5100 xxh_u64
const keyed_hi = (xxh_u64)combinedh ^ bitfliph;
5102 h128.low64 = XXH64_avalanche(keyed_lo);
5103 h128.high64 = XXH64_avalanche(keyed_hi);
5108XXH_FORCE_INLINE XXH128_hash_t
5112 XXH_ASSERT(secret !=
NULL);
5113 XXH_ASSERT(4 <=
len &&
len <= 8);
5114 seed ^= (xxh_u64)XXH_swap32((xxh_u32)
seed) << 32;
5117 xxh_u64
const input_64 = input_lo + ((xxh_u64)input_hi << 32);
5119 xxh_u64
const keyed = input_64 ^ bitflip;
5122 XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (
len << 2));
5124 m128.high64 += (m128.low64 << 1);
5125 m128.low64 ^= (m128.high64 >> 3);
5127 m128.low64 = XXH_xorshift64(m128.low64, 35);
5128 m128.low64 *= 0x9FB21C651E98DF25ULL;
5129 m128.low64 = XXH_xorshift64(m128.low64, 28);
5130 m128.high64 = XXH3_avalanche(m128.high64);
5135XXH_FORCE_INLINE XXH128_hash_t
5139 XXH_ASSERT(secret !=
NULL);
5140 XXH_ASSERT(9 <=
len &&
len <= 16);
5145 XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1);
5150 m128.low64 += (xxh_u64)(
len - 1) << 54;
5151 input_hi ^= bitfliph;
5159 if (
sizeof(
void *) <
sizeof(xxh_u64)) {
5166 m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2);
5192 m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1);
5195 m128.low64 ^= XXH_swap64(m128.high64);
5198 XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2);
5199 h128.high64 += m128.high64 * XXH_PRIME64_2;
5201 h128.low64 = XXH3_avalanche(h128.low64);
5202 h128.high64 = XXH3_avalanche(h128.high64);
5210XXH_FORCE_INLINE XXH128_hash_t
5213 XXH_ASSERT(
len <= 16);
5217 { XXH128_hash_t h128;
5220 h128.low64 = XXH64_avalanche(
seed ^ bitflipl);
5221 h128.high64 = XXH64_avalanche(
seed ^ bitfliph);
5229XXH_FORCE_INLINE XXH128_hash_t
5230XXH128_mix32B(XXH128_hash_t acc,
const xxh_u8* input_1,
const xxh_u8* input_2,
5233 acc.low64 += XXH3_mix16B (input_1, secret+0,
seed);
5235 acc.high64 += XXH3_mix16B (input_2, secret+16,
seed);
5241XXH_FORCE_INLINE XXH128_hash_t
5242XXH3_len_17to128_128b(
const xxh_u8* XXH_RESTRICT
input,
size_t len,
5243 const xxh_u8* XXH_RESTRICT secret,
size_t secretSize,
5246 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
5247 XXH_ASSERT(16 <
len &&
len <= 128);
5249 { XXH128_hash_t acc;
5250 acc.low64 =
len * XXH_PRIME64_1;
5262 { XXH128_hash_t h128;
5263 h128.low64 = acc.low64 + acc.high64;
5264 h128.high64 = (acc.low64 * XXH_PRIME64_1)
5265 + (acc.high64 * XXH_PRIME64_4)
5266 + ((
len -
seed) * XXH_PRIME64_2);
5267 h128.low64 = XXH3_avalanche(h128.low64);
5268 h128.high64 = (
XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
5274XXH_NO_INLINE XXH128_hash_t
5275XXH3_len_129to240_128b(
const xxh_u8* XXH_RESTRICT
input,
size_t len,
5276 const xxh_u8* XXH_RESTRICT secret,
size_t secretSize,
5279 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
5280 XXH_ASSERT(128 <
len &&
len <= XXH3_MIDSIZE_MAX);
5282 { XXH128_hash_t acc;
5283 int const nbRounds = (
int)
len / 32;
5285 acc.low64 =
len * XXH_PRIME64_1;
5287 for (
i=0;
i<4;
i++) {
5288 acc = XXH128_mix32B(acc,
5294 acc.low64 = XXH3_avalanche(acc.low64);
5295 acc.high64 = XXH3_avalanche(acc.high64);
5296 XXH_ASSERT(nbRounds >= 4);
5297 for (
i=4 ;
i < nbRounds;
i++) {
5298 acc = XXH128_mix32B(acc,
5301 secret + XXH3_MIDSIZE_STARTOFFSET + (32 * (
i - 4)),
5305 acc = XXH128_mix32B(acc,
5308 secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16,
5311 { XXH128_hash_t h128;
5312 h128.low64 = acc.low64 + acc.high64;
5313 h128.high64 = (acc.low64 * XXH_PRIME64_1)
5314 + (acc.high64 * XXH_PRIME64_4)
5315 + ((
len -
seed) * XXH_PRIME64_2);
5316 h128.low64 = XXH3_avalanche(h128.low64);
5317 h128.high64 = (
XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
5323XXH_FORCE_INLINE XXH128_hash_t
5324XXH3_hashLong_128b_internal(
const void* XXH_RESTRICT
input,
size_t len,
5325 const xxh_u8* XXH_RESTRICT secret,
size_t secretSize,
5326 XXH3_f_accumulate_512 f_acc512,
5327 XXH3_f_scrambleAcc f_scramble)
5329 XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
5331 XXH3_hashLong_internal_loop(acc, (
const xxh_u8*)
input,
len, secret, secretSize, f_acc512, f_scramble);
5335 XXH_ASSERT(secretSize >=
sizeof(acc) + XXH_SECRET_MERGEACCS_START);
5336 { XXH128_hash_t h128;
5337 h128.low64 = XXH3_mergeAccs(acc,
5338 secret + XXH_SECRET_MERGEACCS_START,
5339 (xxh_u64)
len * XXH_PRIME64_1);
5340 h128.high64 = XXH3_mergeAccs(acc,
5342 -
sizeof(acc) - XXH_SECRET_MERGEACCS_START,
5343 ~((xxh_u64)
len * XXH_PRIME64_2));
5351XXH_NO_INLINE XXH128_hash_t
5352XXH3_hashLong_128b_default(
const void* XXH_RESTRICT
input,
size_t len,
5354 const void* XXH_RESTRICT secret,
size_t secretLen)
5356 (void)seed64; (void)secret; (void)secretLen;
5357 return XXH3_hashLong_128b_internal(
input,
len, XXH3_kSecret,
sizeof(XXH3_kSecret),
5358 XXH3_accumulate_512, XXH3_scrambleAcc);
5365XXH_FORCE_INLINE XXH128_hash_t
5366XXH3_hashLong_128b_withSecret(
const void* XXH_RESTRICT
input,
size_t len,
5368 const void* XXH_RESTRICT secret,
size_t secretLen)
5371 return XXH3_hashLong_128b_internal(
input,
len, (
const xxh_u8*)secret, secretLen,
5372 XXH3_accumulate_512, XXH3_scrambleAcc);
5375XXH_FORCE_INLINE XXH128_hash_t
5376XXH3_hashLong_128b_withSeed_internal(
const void* XXH_RESTRICT
input,
size_t len,
5378 XXH3_f_accumulate_512 f_acc512,
5379 XXH3_f_scrambleAcc f_scramble,
5380 XXH3_f_initCustomSecret f_initSec)
5383 return XXH3_hashLong_128b_internal(
input,
len,
5384 XXH3_kSecret,
sizeof(XXH3_kSecret),
5385 f_acc512, f_scramble);
5386 { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
5387 f_initSec(secret, seed64);
5388 return XXH3_hashLong_128b_internal(
input,
len, (
const xxh_u8*)secret,
sizeof(secret),
5389 f_acc512, f_scramble);
5396XXH_NO_INLINE XXH128_hash_t
5397XXH3_hashLong_128b_withSeed(
const void*
input,
size_t len,
5398 XXH64_hash_t seed64,
const void* XXH_RESTRICT secret,
size_t secretLen)
5400 (void)secret; (void)secretLen;
5401 return XXH3_hashLong_128b_withSeed_internal(
input,
len, seed64,
5402 XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret);
5405typedef XXH128_hash_t (*XXH3_hashLong128_f)(
const void* XXH_RESTRICT, size_t,
5408XXH_FORCE_INLINE XXH128_hash_t
5409XXH3_128bits_internal(
const void*
input,
size_t len,
5410 XXH64_hash_t seed64,
const void* XXH_RESTRICT secret,
size_t secretLen,
5411 XXH3_hashLong128_f f_hl128)
5413 XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
5421 return XXH3_len_0to16_128b((
const xxh_u8*)
input,
len, (
const xxh_u8*)secret, seed64);
5423 return XXH3_len_17to128_128b((
const xxh_u8*)
input,
len, (
const xxh_u8*)secret, secretLen, seed64);
5424 if (
len <= XXH3_MIDSIZE_MAX)
5425 return XXH3_len_129to240_128b((
const xxh_u8*)
input,
len, (
const xxh_u8*)secret, secretLen, seed64);
5426 return f_hl128(
input,
len, seed64, secret, secretLen);
5435 return XXH3_128bits_internal(
input,
len, 0,
5436 XXH3_kSecret,
sizeof(XXH3_kSecret),
5437 XXH3_hashLong_128b_default);
5444 return XXH3_128bits_internal(
input,
len, 0,
5445 (
const xxh_u8*)secret, secretSize,
5446 XXH3_hashLong_128b_withSecret);
5454 XXH3_kSecret,
sizeof(XXH3_kSecret),
5455 XXH3_hashLong_128b_withSeed);
5462 if (
len <= XXH3_MIDSIZE_MAX)
5463 return XXH3_128bits_internal(
input,
len,
seed, XXH3_kSecret,
sizeof(XXH3_kSecret),
NULL);
5464 return XXH3_hashLong_128b_withSecret(
input,
len,
seed, secret, secretSize);
5514 return XXH3_update(state, (
const xxh_u8*)
input,
len,
5515 XXH3_accumulate_512, XXH3_scrambleAcc);
5521 const unsigned char*
const secret = (state->extSecret ==
NULL) ? state->customSecret : state->extSecret;
5522 if (state->totalLen > XXH3_MIDSIZE_MAX) {
5524 XXH3_digest_long(acc, state, secret);
5525 XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >=
sizeof(acc) + XXH_SECRET_MERGEACCS_START);
5526 { XXH128_hash_t h128;
5527 h128.low64 = XXH3_mergeAccs(acc,
5528 secret + XXH_SECRET_MERGEACCS_START,
5529 (xxh_u64)state->totalLen * XXH_PRIME64_1);
5530 h128.high64 = XXH3_mergeAccs(acc,
5531 secret + state->secretLimit + XXH_STRIPE_LEN
5532 -
sizeof(acc) - XXH_SECRET_MERGEACCS_START,
5533 ~((xxh_u64)state->totalLen * XXH_PRIME64_2));
5541 secret, state->secretLimit + XXH_STRIPE_LEN);
5553 return !(memcmp(&h1, &h2,
sizeof(h1)));
5563 XXH128_hash_t
const h1 = *(
const XXH128_hash_t*)h128_1;
5564 XXH128_hash_t
const h2 = *(
const XXH128_hash_t*)h128_2;
5565 int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
5567 if (hcmp)
return hcmp;
5568 return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
5579 hash.high64 = XXH_swap64(hash.high64);
5580 hash.low64 = XXH_swap64(hash.low64);
5582 XXH_memcpy(
dst, &hash.high64,
sizeof(hash.high64));
5583 XXH_memcpy((
char*)
dst +
sizeof(hash.high64), &hash.low64,
sizeof(hash.low64));
5591 h.high64 = XXH_readBE64(
src);
5592 h.low64 = XXH_readBE64(
src->digest + 8);
5602#define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))
5604XXH_FORCE_INLINE
void XXH3_combine16(
void*
dst, XXH128_hash_t h128)
5612XXH3_generateSecret(
void* secretBuffer,
size_t secretSize,
const void* customSeed,
size_t customSeedSize)
5614#if (XXH_DEBUGLEVEL >= 1)
5615 XXH_ASSERT(secretBuffer !=
NULL);
5616 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
5620 if (secretSize < XXH3_SECRET_SIZE_MIN)
return XXH_ERROR;
5623 if (customSeedSize == 0) {
5624 customSeed = XXH3_kSecret;
5625 customSeedSize = XXH_SECRET_DEFAULT_SIZE;
5627#if (XXH_DEBUGLEVEL >= 1)
5628 XXH_ASSERT(customSeed !=
NULL);
5635 while (pos < secretSize) {
5636 size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize);
5637 memcpy((
char*)secretBuffer + pos, customSeed, toCopy);
5641 {
size_t const nbSeg16 = secretSize / 16;
5643 XXH128_canonical_t scrambler;
5645 for (n=0; n<nbSeg16; n++) {
5646 XXH128_hash_t
const h128 =
XXH128(&scrambler,
sizeof(scrambler), n);
5647 XXH3_combine16((
char*)secretBuffer + n*16, h128);
5659 XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
5660 XXH3_initCustomSecret(secret,
seed);
5661 XXH_ASSERT(secretBuffer !=
NULL);
5662 memcpy(secretBuffer, secret, XXH_SECRET_DEFAULT_SIZE);
5668#if XXH_VECTOR == XXH_AVX2 \
5669 && defined(__GNUC__) && !defined(__clang__) \
5670 && defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__)
5671# pragma GCC pop_options
5684#if defined (__cplusplus)
#define XXH32_update
Definition xxhash.h:259
#define XXH3_128bits_reset
Definition xxhash.h:296
#define XXH3_64bits_reset_withSecret
Definition xxhash.h:284
#define XXH3_64bits_withSecretandSeed
Definition xxhash.h:278
#define XXH64_hashFromCanonical
Definition xxhash.h:273
#define XXH64_freeState
Definition xxhash.h:267
#define XXH32_reset
Definition xxhash.h:258
#define XXH128_canonicalFromHash
Definition xxhash.h:304
#define XXH128_isEqual
Definition xxhash.h:302
#define XXH3_128bits_reset_withSecretandSeed
Definition xxhash.h:299
#define XXH3_64bits_withSecret
Definition xxhash.h:276
#define XXH3_generateSecret
Definition xxhash.h:288
unsigned long long XXH64_hash_t
Definition xxhash.h:219
#define XXH3_64bits
Definition xxhash.h:275
#define XXH3_64bits_withSeed
Definition xxhash.h:277
#define XXH64_digest
Definition xxhash.h:270
#define XXH3_64bits_update
Definition xxhash.h:286
#define XXH3_128bits_update
Definition xxhash.h:300
#define XXH3_copyState
Definition xxhash.h:281
#define XXH64_copyState
Definition xxhash.h:271
#define XXH3_64bits_reset_withSeed
Definition xxhash.h:283
#define XXH3_128bits_reset_withSeed
Definition xxhash.h:297
#define XXH32_hashFromCanonical
Definition xxhash.h:263
#define XXH3_64bits_reset_withSecretandSeed
Definition xxhash.h:285
#define XXH32_copyState
Definition xxhash.h:261
#define XXH3_64bits_digest
Definition xxhash.h:287
#define XXH3_128bits_withSecretandSeed
Definition xxhash.h:295
#define XXH3_128bits
Definition xxhash.h:292
#define XXH3_createState
Definition xxhash.h:279
#define XXH32_freeState
Definition xxhash.h:257
#define XXH3_128bits_withSecret
Definition xxhash.h:294
#define XXH128
Definition xxhash.h:291
#define XXH3_generateSecret_fromSeed
Definition xxhash.h:289
#define XXH32
Definition xxhash.h:255
#define XXH64_reset
Definition xxhash.h:268
#define XXH32_canonicalFromHash
Definition xxhash.h:262
#define XXH32_digest
Definition xxhash.h:260
#define XXH128_cmp
Definition xxhash.h:303
#define XXH_VERSION_NUMBER
Version number, encoded as two digits each.
Definition xxhash.h:315
#define XXH64_canonicalFromHash
Definition xxhash.h:272
#define XXH64_update
Definition xxhash.h:269
#define XXH3_freeState
Definition xxhash.h:280
#define XXH3_128bits_digest
Definition xxhash.h:301
#define XXH128_hashFromCanonical
Definition xxhash.h:305
#define XXH3_128bits_withSeed
Definition xxhash.h:293
#define XXH64
Definition xxhash.h:265
#define XXH3_64bits_reset
Definition xxhash.h:282
#define XXH3_128bits_reset_withSecret
Definition xxhash.h:298
struct XXH32_state_s XXH32_state_t
The opaque state struct for the XXH32 streaming API.
Definition xxhash.h:172
#define XXH_FALLTHROUGH
Definition xxhash.h:624
struct XXH64_state_s XXH64_state_t
The opaque state struct for the XXH64 streaming API.
Definition xxhash.h:229
char * dst
Definition lz4.h:833
const char * src
Definition lz4.h:866
char * dest
Definition lz4.h:806
#define __attribute__(unused_ric_since_2004)
Definition main_cr.c:84
#define input(b, o, c, n, m)
Definition compress42.c:610
Canonical (big endian) representation of XXH32_hash_t.
Definition xxhash.h:204
Canonical (big endian) representation of XXH64_hash_t.
Definition xxhash.h:239
Definition poolTests.c:28
#define const
Definition zconf.h:230
#define XXH_rotl64(x, r)
Definition xxhash.c:195
FORCE_INLINE U32 XXH32_endian_align(const void *input, size_t len, U32 seed, XXH_endianness endian, XXH_alignment align)
Definition xxhash.c:352
#define XXH_STATIC_ASSERT(c)
Definition xxhash.c:256
#define XXH_CPU_LITTLE_ENDIAN
Definition xxhash.c:225
FORCE_INLINE U64 XXH_readLE64(const void *ptr, XXH_endianness endian)
Definition xxhash.c:653
FORCE_INLINE U32 XXH_readLE32_align(const void *ptr, XXH_endianness endian, XXH_alignment align)
Definition xxhash.c:234
FORCE_INLINE U64 XXH_readLE64_align(const void *ptr, XXH_endianness endian, XXH_alignment align)
Definition xxhash.c:645
#define XXH_rotl32(x, r)
Definition xxhash.c:194
FORCE_INLINE U32 XXH_readLE32(const void *ptr, XXH_endianness endian)
Definition xxhash.c:242
#define XXH_get64bits(p)
Definition xxhash.c:699
#define XXH_get32bits(p)
Definition xxhash.c:288
FORCE_INLINE U64 XXH64_endian_align(const void *input, size_t len, U64 seed, XXH_endianness endian, XXH_alignment align)
Definition xxhash.c:811
XXH_alignment
Definition xxhash.c:232
@ XXH_aligned
Definition xxhash.c:232
@ XXH_unaligned
Definition xxhash.c:232
#define XXH_FORCE_ALIGN_CHECK
Definition xxhash.c:97
XXH_errorcode
Definition xxhash.h:79
@ XXH_ERROR
Definition xxhash.h:79
@ XXH_OK
Definition xxhash.h:79
XXH_PUBLIC_API XXH32_state_t * XXH32_createState(void)
Definition xxhash.c:422
#define XXH_PUBLIC_API
Definition xxhash.h:110
XXH_PUBLIC_API XXH64_state_t * XXH64_createState(void)
Definition xxhash.c:883
unsigned int XXH32_hash_t
Definition xxhash.h:162
XXH_PUBLIC_API unsigned XXH_versionNumber(void)
Definition xxhash.c:257
#define b(i)
Definition sha256.c:42
#define c(i)
Definition sha256.c:43
#define a(i)
Definition sha256.c:41
#define h(i)
Definition sha256.c:48
const lzma_allocator const uint8_t * in
Definition block.h:527
lzma_index ** i
Definition index.h:629
#define NULL
Definition getopt1.c:37
static uint32_t const uint8_t uint32_t len
Definition memcmplen.h:44
static uint32_t const uint8_t uint32_t uint32_t limit
Definition memcmplen.h:45
ret
Definition zlib_interface.c:30
#define ZSTD_memcpy(d, s, n)
Definition zstd_deps.h:32
uint64_t u64
Definition zstd_decompress.c:63
uint32_t u32
Definition zstd_decompress.c:62
uint32_t seed
Definition stream_decompress.c:26