ROOTANA
xxhash.cxx
/*
xxHash - Fast Hash algorithm
Copyright (C) 2012-2015, Yann Collet

BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

You can contact the author at :
- xxHash source repository : https://github.com/Cyan4973/xxHash
*/


/**************************************
* Tuning parameters
**************************************/
/* Unaligned memory access is automatically enabled for "common" CPUs, such as x86.
 * For other CPUs, the compiler is more cautious and inserts extra code to ensure aligned access is respected.
 * If you know your target CPU supports unaligned memory access, you may want to force this option manually to improve performance.
 * You can also enable this parameter if you know your input data will always be aligned (on boundaries of 4, for U32).
 */
#if defined(__ARM_FEATURE_UNALIGNED) || defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
#  define XXH_USE_UNALIGNED_ACCESS 1
#endif
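
/* Example (illustrative build line, not part of the original source) :
 * to force the option manually, define it when compiling, e.g. :
 *     cc -O3 -DXXH_USE_UNALIGNED_ACCESS=1 -c xxhash.cxx
 */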

/* XXH_ACCEPT_NULL_INPUT_POINTER :
 * If the input pointer is a null pointer, xxHash's default behavior is to trigger a memory access error, since it is a bad pointer.
 * When this option is enabled, xxHash's output for a null input pointer is the same as for a zero-length input.
 * By default, this option is disabled. To enable it, uncomment the define below :
 */
/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
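
/* Illustrative contract when the option above is enabled (sketch, not part of
 * the original source) :
 *     XXH32(NULL, anyLen, seed) == XXH32("", 0, seed)
 * i.e. a null pointer is treated exactly like a zero-length input. */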

/* XXH_FORCE_NATIVE_FORMAT :
 * By default, the xxHash library provides endian-independent hash values, based on the little-endian convention.
 * Results are therefore identical for little-endian and big-endian CPUs.
 * This comes at a performance cost for big-endian CPUs, since some byte swapping is required to emulate the little-endian format.
 * Should endian-independence be of no importance for your application, you may set the #define below to 1.
 * It will improve speed on big-endian CPUs.
 * This option has no impact on little-endian CPUs.
 */
#define XXH_FORCE_NATIVE_FORMAT 0
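
/* With the default (0), the same input and seed produce the same hash value on
 * little-endian and big-endian targets alike; setting it to 1 trades that
 * cross-platform reproducibility for raw speed on big-endian CPUs only. */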


/**************************************
* Compiler Specific Options
***************************************/
#ifdef _MSC_VER    /* Visual Studio */
#  pragma warning(disable : 4127)   /* disable: C4127: conditional expression is constant */
#  define FORCE_INLINE static __forceinline
#else
#  if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
#    ifdef __GNUC__
#      define FORCE_INLINE static inline __attribute__((always_inline))
#    else
#      define FORCE_INLINE static inline
#    endif
#  else
#    define FORCE_INLINE static
#  endif /* __STDC_VERSION__ */
#endif


/**************************************
* Includes & Memory related functions
***************************************/
#include "mxxhash.h"
/* Modify the local functions below should you wish to use some other memory routines */
/* for malloc(), free() */
#include <stdlib.h>
static void* XXH_malloc(size_t s) { return malloc(s); }
static void  XXH_free  (void* p)  { free(p); }
/* for memcpy() */
#include <string.h>
static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }


/**************************************
* Basic Types
***************************************/
#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
#  include <stdint.h>
  typedef uint8_t  BYTE;
  typedef uint16_t U16;
  typedef uint32_t U32;
  typedef int32_t  S32;
  typedef uint64_t U64;
#else
  typedef unsigned char      BYTE;
  typedef unsigned short     U16;
  typedef unsigned int       U32;
  typedef signed int         S32;
  typedef unsigned long long U64;
#endif

static U32 XXH_read32(const void* memPtr)
{
    U32 val32;
    memcpy(&val32, memPtr, 4);
    return val32;
}

static U64 XXH_read64(const void* memPtr)
{
    U64 val64;
    memcpy(&val64, memPtr, 8);
    return val64;
}
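
/* Note (added comment) : routing these reads through memcpy() avoids the
 * undefined behaviour of dereferencing a misaligned pointer; mainstream
 * compilers recognise the fixed-size copy and typically reduce it to a
 * single load instruction. */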


/******************************************
* Compiler-specific Functions and Macros
******************************************/
#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

/* Note : although _rotl exists for minGW (GCC under Windows), performance seems poor */
#if defined(_MSC_VER)
#  define XXH_rotl32(x,r) _rotl(x,r)
#  define XXH_rotl64(x,r) _rotl64(x,r)
#else
#  define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
#  define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
#endif
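
/* Example (added for illustration) : XXH_rotl32(0x80000001, 1) == 0x00000003 ;
 * the top bit wraps around into bit 0 while the remaining bits shift left. */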

#if defined(_MSC_VER)   /* Visual Studio */
#  define XXH_swap32 _byteswap_ulong
#  define XXH_swap64 _byteswap_uint64
#elif GCC_VERSION >= 403
#  define XXH_swap32 __builtin_bswap32
#  define XXH_swap64 __builtin_bswap64
#else
static U32 XXH_swap32 (U32 x)
{
    return  ((x << 24) & 0xff000000 ) |
            ((x <<  8) & 0x00ff0000 ) |
            ((x >>  8) & 0x0000ff00 ) |
            ((x >> 24) & 0x000000ff );
}
static U64 XXH_swap64 (U64 x)
{
    return  ((x << 56) & 0xff00000000000000ULL) |
            ((x << 40) & 0x00ff000000000000ULL) |
            ((x << 24) & 0x0000ff0000000000ULL) |
            ((x << 8)  & 0x000000ff00000000ULL) |
            ((x >> 8)  & 0x00000000ff000000ULL) |
            ((x >> 24) & 0x0000000000ff0000ULL) |
            ((x >> 40) & 0x000000000000ff00ULL) |
            ((x >> 56) & 0x00000000000000ffULL);
}
#endif
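
/* Example (added for illustration) : XXH_swap32(0x01020304) == 0x04030201 ;
 * XXH_swap64 reverses all eight bytes the same way. */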


/***************************************
* Architecture Macros
***************************************/
typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
#ifndef XXH_CPU_LITTLE_ENDIAN   /* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example using a compiler switch */
static const int one = 1;
#  define XXH_CPU_LITTLE_ENDIAN   (*(const char*)(&one))
#endif
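
/* How the detection works (added comment) : the integer 1 is stored in memory
 * and its first byte inspected. On a little-endian CPU the least-significant
 * byte comes first, so the expression reads 1 (true); on a big-endian CPU it
 * reads 0. */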


/*****************************
* Memory reads
*****************************/
typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;

FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
    else
        return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
}

FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
{
    return XXH_readLE32_align(ptr, endian, XXH_unaligned);
}

FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
    else
        return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
}

FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
{
    return XXH_readLE64_align(ptr, endian, XXH_unaligned);
}


/***************************************
* Macros
***************************************/
#define XXH_STATIC_ASSERT(c)   { enum { XXH_static_assert = 1/(!!(c)) }; }   /* use only *after* variable declarations */
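
/* Example (added for illustration) : XXH_STATIC_ASSERT(sizeof(U32) == 4)
 * compiles silently, whereas a false condition forces a division by zero
 * inside a constant expression, which the compiler must reject. */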


/***************************************
* Constants
***************************************/
#define PRIME32_1   2654435761U
#define PRIME32_2   2246822519U
#define PRIME32_3   3266489917U
#define PRIME32_4    668265263U
#define PRIME32_5    374761393U

#define PRIME64_1 11400714785074694791ULL
#define PRIME64_2 14029467366897019727ULL
#define PRIME64_3  1609587929392839161ULL
#define PRIME64_4  9650029242287828579ULL
#define PRIME64_5  2870177450012600261ULL


/*****************************
* Simple Hash Functions
*****************************/
FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U32 h32;
#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (p==NULL)
    {
        len=0;
        bEnd=p=(const BYTE*)(size_t)16;
    }
#endif

    if (len>=16)
    {
        const BYTE* const limit = bEnd - 16;
        U32 v1 = seed + PRIME32_1 + PRIME32_2;
        U32 v2 = seed + PRIME32_2;
        U32 v3 = seed + 0;
        U32 v4 = seed - PRIME32_1;

        do
        {
            v1 += XXH_get32bits(p) * PRIME32_2;
            v1 = XXH_rotl32(v1, 13);
            v1 *= PRIME32_1;
            p+=4;
            v2 += XXH_get32bits(p) * PRIME32_2;
            v2 = XXH_rotl32(v2, 13);
            v2 *= PRIME32_1;
            p+=4;
            v3 += XXH_get32bits(p) * PRIME32_2;
            v3 = XXH_rotl32(v3, 13);
            v3 *= PRIME32_1;
            p+=4;
            v4 += XXH_get32bits(p) * PRIME32_2;
            v4 = XXH_rotl32(v4, 13);
            v4 *= PRIME32_1;
            p+=4;
        }
        while (p<=limit);

        h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
    }
    else
    {
        h32 = seed + PRIME32_5;
    }

    h32 += (U32) len;

    while (p+4<=bEnd)
    {
        h32 += XXH_get32bits(p) * PRIME32_3;
        h32  = XXH_rotl32(h32, 17) * PRIME32_4;
        p+=4;
    }

    while (p<bEnd)
    {
        h32 += (*p) * PRIME32_5;
        h32 = XXH_rotl32(h32, 11) * PRIME32_1;
        p++;
    }

    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}


unsigned XXH32 (const void* input, size_t len, unsigned seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH32_state_t state;
    XXH32_reset(&state, seed);
    XXH32_update(&state, input, len);
    return XXH32_digest(&state);
#else
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

#  if !defined(XXH_USE_UNALIGNED_ACCESS)
    if ((((size_t)input) & 3) == 0)   /* Input is 4-bytes aligned, leverage the speed benefit */
    {
        if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
            return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
        else
            return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
    }
#  endif

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
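
/* Usage sketch (illustrative, not from the original source) : one-shot hashing
 * of a memory buffer. The same buffer, length and seed always yield the same
 * 32-bit value.
 *
 *     const char msg[] = "message";
 *     unsigned h = XXH32(msg, sizeof(msg)-1, 0);   // seed = 0
 */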

FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U64 h64;
#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (p==NULL)
    {
        len=0;
        bEnd=p=(const BYTE*)(size_t)32;
    }
#endif

    if (len>=32)
    {
        const BYTE* const limit = bEnd - 32;
        U64 v1 = seed + PRIME64_1 + PRIME64_2;
        U64 v2 = seed + PRIME64_2;
        U64 v3 = seed + 0;
        U64 v4 = seed - PRIME64_1;

        do
        {
            v1 += XXH_get64bits(p) * PRIME64_2;
            p+=8;
            v1 = XXH_rotl64(v1, 31);
            v1 *= PRIME64_1;
            v2 += XXH_get64bits(p) * PRIME64_2;
            p+=8;
            v2 = XXH_rotl64(v2, 31);
            v2 *= PRIME64_1;
            v3 += XXH_get64bits(p) * PRIME64_2;
            p+=8;
            v3 = XXH_rotl64(v3, 31);
            v3 *= PRIME64_1;
            v4 += XXH_get64bits(p) * PRIME64_2;
            p+=8;
            v4 = XXH_rotl64(v4, 31);
            v4 *= PRIME64_1;
        }
        while (p<=limit);

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);

        v1 *= PRIME64_2;
        v1 = XXH_rotl64(v1, 31);
        v1 *= PRIME64_1;
        h64 ^= v1;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v2 *= PRIME64_2;
        v2 = XXH_rotl64(v2, 31);
        v2 *= PRIME64_1;
        h64 ^= v2;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v3 *= PRIME64_2;
        v3 = XXH_rotl64(v3, 31);
        v3 *= PRIME64_1;
        h64 ^= v3;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v4 *= PRIME64_2;
        v4 = XXH_rotl64(v4, 31);
        v4 *= PRIME64_1;
        h64 ^= v4;
        h64 = h64 * PRIME64_1 + PRIME64_4;
    }
    else
    {
        h64 = seed + PRIME64_5;
    }

    h64 += (U64) len;

    while (p+8<=bEnd)
    {
        U64 k1 = XXH_get64bits(p);
        k1 *= PRIME64_2;
        k1 = XXH_rotl64(k1,31);
        k1 *= PRIME64_1;
        h64 ^= k1;
        h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
        p+=8;
    }

    if (p+4<=bEnd)
    {
        h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
        h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p+=4;
    }

    while (p<bEnd)
    {
        h64 ^= (*p) * PRIME64_5;
        h64 = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;

    return h64;
}


unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH64_state_t state;
    XXH64_reset(&state, seed);
    XXH64_update(&state, input, len);
    return XXH64_digest(&state);
#else
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

#  if !defined(XXH_USE_UNALIGNED_ACCESS)
    if ((((size_t)input) & 7)==0)   /* Input is aligned, let's leverage the speed advantage */
    {
        if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
            return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
        else
            return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
    }
#  endif

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
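
/* Usage sketch (illustrative; buffer and bufferSize are placeholders) : same
 * calling convention as XXH32(), returning a 64-bit hash; generally the
 * faster choice on 64-bit systems.
 *
 *     unsigned long long h64 = XXH64(buffer, bufferSize, 0);   // seed = 0
 */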

/****************************************************
* Advanced Hash Functions
****************************************************/

/*** Allocation ***/
typedef struct
{
    U64 total_len;
    U32 seed;
    U32 v1;
    U32 v2;
    U32 v3;
    U32 v4;
    U32 mem32[4];   /* defined as U32 for alignment */
    U32 memsize;
} XXH_istate32_t;

typedef struct
{
    U64 total_len;
    U64 seed;
    U64 v1;
    U64 v2;
    U64 v3;
    U64 v4;
    U64 mem64[4];   /* defined as U64 for alignment */
    U32 memsize;
} XXH_istate64_t;


XXH32_state_t* XXH32_createState(void)
{
    XXH_STATIC_ASSERT(sizeof(XXH32_state_t) >= sizeof(XXH_istate32_t));   /* A compilation error here means XXH32_state_t is not large enough */
    return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
}
XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}

XXH64_state_t* XXH64_createState(void)
{
    XXH_STATIC_ASSERT(sizeof(XXH64_state_t) >= sizeof(XXH_istate64_t));   /* A compilation error here means XXH64_state_t is not large enough */
    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
}
XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}


/*** Hash feed ***/

XXH_errorcode XXH32_reset(XXH32_state_t* state_in, unsigned seed)
{
    XXH_istate32_t* state = (XXH_istate32_t*) state_in;
    state->seed = seed;
    state->v1 = seed + PRIME32_1 + PRIME32_2;
    state->v2 = seed + PRIME32_2;
    state->v3 = seed + 0;
    state->v4 = seed - PRIME32_1;
    state->total_len = 0;
    state->memsize = 0;
    return XXH_OK;
}

XXH_errorcode XXH64_reset(XXH64_state_t* state_in, unsigned long long seed)
{
    XXH_istate64_t* state = (XXH_istate64_t*) state_in;
    state->seed = seed;
    state->v1 = seed + PRIME64_1 + PRIME64_2;
    state->v2 = seed + PRIME64_2;
    state->v3 = seed + 0;
    state->v4 = seed - PRIME64_1;
    state->total_len = 0;
    state->memsize = 0;
    return XXH_OK;
}


FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state_in, const void* input, size_t len, XXH_endianess endian)
{
    XXH_istate32_t* state = (XXH_istate32_t *) state_in;
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (input==NULL) return XXH_ERROR;
#endif

    state->total_len += len;

    if (state->memsize + len < 16)   /* fill in tmp buffer */
    {
        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
        state->memsize += (U32)len;
        return XXH_OK;
    }

    if (state->memsize)   /* some data left from previous update */
    {
        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
        {
            const U32* p32 = state->mem32;
            state->v1 += XXH_readLE32(p32, endian) * PRIME32_2;
            state->v1 = XXH_rotl32(state->v1, 13);
            state->v1 *= PRIME32_1;
            p32++;
            state->v2 += XXH_readLE32(p32, endian) * PRIME32_2;
            state->v2 = XXH_rotl32(state->v2, 13);
            state->v2 *= PRIME32_1;
            p32++;
            state->v3 += XXH_readLE32(p32, endian) * PRIME32_2;
            state->v3 = XXH_rotl32(state->v3, 13);
            state->v3 *= PRIME32_1;
            p32++;
            state->v4 += XXH_readLE32(p32, endian) * PRIME32_2;
            state->v4 = XXH_rotl32(state->v4, 13);
            state->v4 *= PRIME32_1;
            p32++;
        }
        p += 16-state->memsize;
        state->memsize = 0;
    }

    if (p <= bEnd-16)
    {
        const BYTE* const limit = bEnd - 16;
        U32 v1 = state->v1;
        U32 v2 = state->v2;
        U32 v3 = state->v3;
        U32 v4 = state->v4;

        do
        {
            v1 += XXH_readLE32(p, endian) * PRIME32_2;
            v1 = XXH_rotl32(v1, 13);
            v1 *= PRIME32_1;
            p+=4;
            v2 += XXH_readLE32(p, endian) * PRIME32_2;
            v2 = XXH_rotl32(v2, 13);
            v2 *= PRIME32_1;
            p+=4;
            v3 += XXH_readLE32(p, endian) * PRIME32_2;
            v3 = XXH_rotl32(v3, 13);
            v3 *= PRIME32_1;
            p+=4;
            v4 += XXH_readLE32(p, endian) * PRIME32_2;
            v4 = XXH_rotl32(v4, 13);
            v4 *= PRIME32_1;
            p+=4;
        }
        while (p<=limit);

        state->v1 = v1;
        state->v2 = v2;
        state->v3 = v3;
        state->v4 = v4;
    }

    if (p < bEnd)
    {
        XXH_memcpy(state->mem32, p, bEnd-p);
        state->memsize = (int)(bEnd-p);
    }

    return XXH_OK;
}

XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
    else
        return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
}



FORCE_INLINE U32 XXH32_digest_endian (const XXH32_state_t* state_in, XXH_endianess endian)
{
    const XXH_istate32_t* state = (const XXH_istate32_t*) state_in;
    const BYTE * p = (const BYTE*)state->mem32;
    const BYTE* bEnd = (const BYTE*)(state->mem32) + state->memsize;
    U32 h32;

    if (state->total_len >= 16)
    {
        h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
    }
    else
    {
        h32 = state->seed + PRIME32_5;
    }

    h32 += (U32) state->total_len;

    while (p+4<=bEnd)
    {
        h32 += XXH_readLE32(p, endian) * PRIME32_3;
        h32  = XXH_rotl32(h32, 17) * PRIME32_4;
        p+=4;
    }

    while (p<bEnd)
    {
        h32 += (*p) * PRIME32_5;
        h32  = XXH_rotl32(h32, 11) * PRIME32_1;
        p++;
    }

    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}


unsigned XXH32_digest (const XXH32_state_t* state_in)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_digest_endian(state_in, XXH_littleEndian);
    else
        return XXH32_digest_endian(state_in, XXH_bigEndian);
}


FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state_in, const void* input, size_t len, XXH_endianess endian)
{
    XXH_istate64_t * state = (XXH_istate64_t *) state_in;
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (input==NULL) return XXH_ERROR;
#endif

    state->total_len += len;

    if (state->memsize + len < 32)   /* fill in tmp buffer */
    {
        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
        state->memsize += (U32)len;
        return XXH_OK;
    }

    if (state->memsize)   /* some data left from previous update */
    {
        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
        {
            const U64* p64 = state->mem64;
            state->v1 += XXH_readLE64(p64, endian) * PRIME64_2;
            state->v1 = XXH_rotl64(state->v1, 31);
            state->v1 *= PRIME64_1;
            p64++;
            state->v2 += XXH_readLE64(p64, endian) * PRIME64_2;
            state->v2 = XXH_rotl64(state->v2, 31);
            state->v2 *= PRIME64_1;
            p64++;
            state->v3 += XXH_readLE64(p64, endian) * PRIME64_2;
            state->v3 = XXH_rotl64(state->v3, 31);
            state->v3 *= PRIME64_1;
            p64++;
            state->v4 += XXH_readLE64(p64, endian) * PRIME64_2;
            state->v4 = XXH_rotl64(state->v4, 31);
            state->v4 *= PRIME64_1;
            p64++;
        }
        p += 32-state->memsize;
        state->memsize = 0;
    }

    if (p+32 <= bEnd)
    {
        const BYTE* const limit = bEnd - 32;
        U64 v1 = state->v1;
        U64 v2 = state->v2;
        U64 v3 = state->v3;
        U64 v4 = state->v4;

        do
        {
            v1 += XXH_readLE64(p, endian) * PRIME64_2;
            v1 = XXH_rotl64(v1, 31);
            v1 *= PRIME64_1;
            p+=8;
            v2 += XXH_readLE64(p, endian) * PRIME64_2;
            v2 = XXH_rotl64(v2, 31);
            v2 *= PRIME64_1;
            p+=8;
            v3 += XXH_readLE64(p, endian) * PRIME64_2;
            v3 = XXH_rotl64(v3, 31);
            v3 *= PRIME64_1;
            p+=8;
            v4 += XXH_readLE64(p, endian) * PRIME64_2;
            v4 = XXH_rotl64(v4, 31);
            v4 *= PRIME64_1;
            p+=8;
        }
        while (p<=limit);

        state->v1 = v1;
        state->v2 = v2;
        state->v3 = v3;
        state->v4 = v4;
    }

    if (p < bEnd)
    {
        XXH_memcpy(state->mem64, p, bEnd-p);
        state->memsize = (int)(bEnd-p);
    }

    return XXH_OK;
}

XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
    else
        return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
}



FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state_in, XXH_endianess endian)
{
    const XXH_istate64_t * state = (const XXH_istate64_t *) state_in;
    const BYTE * p = (const BYTE*)state->mem64;
    const BYTE* bEnd = (const BYTE*)state->mem64 + state->memsize;
    U64 h64;

    if (state->total_len >= 32)
    {
        U64 v1 = state->v1;
        U64 v2 = state->v2;
        U64 v3 = state->v3;
        U64 v4 = state->v4;

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);

        v1 *= PRIME64_2;
        v1 = XXH_rotl64(v1, 31);
        v1 *= PRIME64_1;
        h64 ^= v1;
        h64 = h64*PRIME64_1 + PRIME64_4;

        v2 *= PRIME64_2;
        v2 = XXH_rotl64(v2, 31);
        v2 *= PRIME64_1;
        h64 ^= v2;
        h64 = h64*PRIME64_1 + PRIME64_4;

        v3 *= PRIME64_2;
        v3 = XXH_rotl64(v3, 31);
        v3 *= PRIME64_1;
        h64 ^= v3;
        h64 = h64*PRIME64_1 + PRIME64_4;

        v4 *= PRIME64_2;
        v4 = XXH_rotl64(v4, 31);
        v4 *= PRIME64_1;
        h64 ^= v4;
        h64 = h64*PRIME64_1 + PRIME64_4;
    }
    else
    {
        h64 = state->seed + PRIME64_5;
    }

    h64 += (U64) state->total_len;

    while (p+8<=bEnd)
    {
        U64 k1 = XXH_readLE64(p, endian);
        k1 *= PRIME64_2;
        k1 = XXH_rotl64(k1,31);
        k1 *= PRIME64_1;
        h64 ^= k1;
        h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
        p+=8;
    }

    if (p+4<=bEnd)
    {
        h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
        h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p+=4;
    }

    while (p<bEnd)
    {
        h64 ^= (*p) * PRIME64_5;
        h64 = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;

    return h64;
}


unsigned long long XXH64_digest (const XXH64_state_t* state_in)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_digest_endian(state_in, XXH_littleEndian);
    else
        return XXH64_digest_endian(state_in, XXH_bigEndian);
}
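
/* Streaming usage sketch (illustrative; chunk1/chunk2 and their sizes are
 * placeholders, error handling omitted) : data can be fed in arbitrary
 * chunks, and the digest matches the one-shot XXH64() over the concatenated
 * input. The digest takes a const state, so it can also be read mid-stream.
 *
 *     XXH64_state_t* s = XXH64_createState();
 *     XXH64_reset(s, 0);                        // seed = 0
 *     XXH64_update(s, chunk1, size1);
 *     XXH64_update(s, chunk2, size2);
 *     unsigned long long h = XXH64_digest(s);
 *     XXH64_freeState(s);
 */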