/*
 * This code was taken from http://ccodearchive.net/info/hash.html
 * The original file was modified to remove unwanted code
 * and some changes to fit the current build environment
 */
/*
-------------------------------------------------------------------------------
lookup3.c, by Bob Jenkins, May 2006, Public Domain.

These are functions for producing 32-bit hashes for hash table lookup.
hash_word(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
are externally useful functions. Routines to test the hash are included
if SELF_TEST is defined. You can use this free for any purpose. It's in
the public domain. It has no warranty.

You probably want to use hashlittle(). hashlittle() and hashbig()
hash byte arrays. hashlittle() is faster than hashbig() on
little-endian machines. Intel and AMD are little-endian machines.
On second thought, you probably want hashlittle2(), which is identical to
hashlittle() except it returns two 32-bit hashes for the price of one.
You could implement hashbig2() if you wanted but I haven't bothered here.

If you want to find a hash of, say, exactly 7 integers, do
  a = i1;  b = i2;  c = i3;
  mix(a,b,c);
  a += i4; b += i5; c += i6;
  mix(a,b,c);
  a += i7;
  final(a,b,c);
then use c as the hash value. If you have a variable length array of
4-byte integers to hash, use hash_word(). If you have a byte array (like
a character string), use hashlittle(). If you have several byte arrays, or
a mix of things, see the comments above hashlittle().

Why is this so big? I read 12 bytes at a time into 3 4-byte integers,
then mix those integers. This is fast (you can do a lot more thorough
mixing with 12*3 instructions on 3 integers than you can with 3 instructions
on 1 byte), but shoehorning those bytes into integers efficiently is messy.
-------------------------------------------------------------------------------
*/

#include <netlink/hash.h>

#if HAVE_LITTLE_ENDIAN
#define HASH_LITTLE_ENDIAN 1
#define HASH_BIG_ENDIAN 0
#else
#define HASH_LITTLE_ENDIAN 0
#define HASH_BIG_ENDIAN 1
#endif

#define hashsize(n) ((uint32_t)1<<(n))
#define hashmask(n) (hashsize(n)-1)
#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
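
/*
 * Worked example (not in the original): hashsize(10) == 1024 and
 * hashmask(10) == 0x3ff, so (h & hashmask(10)) maps a 32-bit hash h
 * into a 1024-slot table. rot(x,k) is a 32-bit left rotation, e.g.
 * rot(0x80000001, 1) == 0x00000003.
 */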

/*
-------------------------------------------------------------------------------
mix -- mix 3 32-bit values reversibly.

This is reversible, so any information in (a,b,c) before mix() is
still in (a,b,c) after mix().

If four pairs of (a,b,c) inputs are run through mix(), or through
mix() in reverse, there are at least 32 bits of the output that
are sometimes the same for one pair and different for another pair.
This was tested for:
* pairs that differed by one bit, by two bits, in any combination
  of top bits of (a,b,c), or in any combination of bottom bits of
  (a,b,c).
* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
  the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
  is commonly produced by subtraction) looks like a single 1-bit
  difference.
* the base values were pseudorandom, all zero but one bit set, or
  all zero plus a counter that starts at zero.

Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that
satisfy this are
    4  6  8 16 19  4
    9 15  3 18 27 15
   14  9  3  7 17  3
Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing
for "differ" defined as + with a one-bit base and a two-bit delta. I
used http://burtleburtle.net/bob/hash/avalanche.html to choose
the operations, constants, and arrangements of the variables.

This does not achieve avalanche. There are input bits of (a,b,c)
that fail to affect some output bits of (a,b,c), especially of a. The
most thoroughly mixed value is c, but it doesn't really even achieve
avalanche in c.

This allows some parallelism. Read-after-writes are good at doubling
the number of bits affected, so the goal of mixing pulls in the opposite
direction as the goal of parallelism. I did what I could. Rotates
seem to cost as much as shifts on every machine I could lay my hands
on, and rotates are much kinder to the top and bottom bits, so I used
rotates.
-------------------------------------------------------------------------------
*/
#define mix(a,b,c) \
{ \
	a -= c;  a ^= rot(c, 4);  c += b; \
	b -= a;  b ^= rot(a, 6);  a += c; \
	c -= b;  c ^= rot(b, 8);  b += a; \
	a -= c;  a ^= rot(c,16);  c += b; \
	b -= a;  b ^= rot(a,19);  a += c; \
	c -= b;  c ^= rot(b, 4);  b += a; \
}

/*
-------------------------------------------------------------------------------
final -- final mixing of 3 32-bit values (a,b,c) into c

Pairs of (a,b,c) values differing in only a few bits will usually
produce values of c that look totally different. This was tested for
* pairs that differed by one bit, by two bits, in any combination
  of top bits of (a,b,c), or in any combination of bottom bits of
  (a,b,c).
* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
  the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
  is commonly produced by subtraction) looks like a single 1-bit
  difference.
* the base values were pseudorandom, all zero but one bit set, or
  all zero plus a counter that starts at zero.

These constants passed:
 14 11 25 16 4 14 24
 12 14 25 16 4 14 24
and these came close:
  4  8 15 26 3 22 24
 10  8 15 26 3 22 24
 11  8 15 26 3 22 24
-------------------------------------------------------------------------------
*/
#define final(a,b,c) \
{ \
	c ^= b; c -= rot(b,14); \
	a ^= c; a -= rot(c,11); \
	b ^= a; b -= rot(a,25); \
	c ^= b; c -= rot(b,16); \
	a ^= c; a -= rot(c,4);  \
	b ^= a; b -= rot(a,14); \
	c ^= b; c -= rot(b,24); \
}
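
/*
 * A minimal sketch (not part of the original file) of the fixed-length
 * recipe from the header comment: hashing exactly 7 integers with mix()
 * and final(), then using c as the hash value. The function name is
 * hypothetical.
 */
#ifdef SELF_TEST
static uint32_t hash_seven_words(const uint32_t i[7])
{
	uint32_t a = i[0], b = i[1], c = i[2];
	mix(a, b, c);
	a += i[3]; b += i[4]; c += i[5];
	mix(a, b, c);
	a += i[6];
	final(a, b, c);
	return c;	/* c is the best-mixed of the three values */
}
#endif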

/*
-------------------------------------------------------------------------------
hashlittle() -- hash a variable-length key into a 32-bit value
  k       : the key (the unaligned variable-length array of bytes)
  length  : the length of the key, counting by bytes
  val2    : IN: can be any 4-byte value OUT: second 32 bit hash.
Returns a 32-bit value. Every bit of the key affects every bit of
the return value. Two keys differing by one or two bits will have
totally different hash values. Note that the return value is better
mixed than val2, so use that first.

The best hash table sizes are powers of 2. There is no need to do
mod a prime (mod is sooo slow!). If you need less than 32 bits,
use a bitmask. For example, if you need only 10 bits, do
  h = (h & hashmask(10));
In which case, the hash table should have hashsize(10) elements.

If you are hashing n strings (uint8_t **)k, do it like this:
  for (i=0, h=0; i<n; ++i) h = hashlittle( k[i], len[i], h);

By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
code any way you wish, private, educational, or commercial. It's free.

Use for hash table lookup, or anything where one collision in 2^^32 is
acceptable. Do NOT use for cryptographic purposes.
-------------------------------------------------------------------------------
*/

static uint32_t hashlittle( const void *key, size_t length, uint32_t *val2 )
{
	uint32_t a,b,c;                             /* internal state */
	union { const void *ptr; size_t i; } u;     /* needed for Mac Powerbook G4 */

	/* Set up the internal state */
	a = b = c = 0xdeadbeef + ((uint32_t)length) + *val2;

	u.ptr = key;
	if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
		const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */
		const uint8_t  *k8;

		/*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
		while (length > 12)
		{
			a += k[0];
			b += k[1];
			c += k[2];
			mix(a,b,c);
			length -= 12;
			k += 3;
		}

		/*----------------------------- handle the last (probably partial) block */
		/*
		 * "k[2]&0xffffff" actually reads beyond the end of the string, but
		 * then masks off the part it's not allowed to read. Because the
		 * string is aligned, the masked-off tail is in the same word as the
		 * rest of the string. Every machine with memory protection I've seen
		 * does it on word boundaries, so it is OK with this. But VALGRIND will
		 * still catch it and complain. The masking trick does make the hash
		 * noticeably faster for short strings (like English words).
		 *
		 * Not on my testing with gcc 4.5 on an intel i5 CPU, at least --RR.
		 */
#ifndef VALGRIND
		switch(length)
		{
		case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
		case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;
		case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;
		case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;
		case 8 : b+=k[1]; a+=k[0]; break;
		case 7 : b+=k[1]&0xffffff; a+=k[0]; break;
		case 6 : b+=k[1]&0xffff; a+=k[0]; break;
		case 5 : b+=k[1]&0xff; a+=k[0]; break;
		case 4 : a+=k[0]; break;
		case 3 : a+=k[0]&0xffffff; break;
		case 2 : a+=k[0]&0xffff; break;
		case 1 : a+=k[0]&0xff; break;
		case 0 : return c; /* zero length strings require no mixing */
		}

#else /* make valgrind happy */

		k8 = (const uint8_t *)k;
		switch(length)
		{
		case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
		case 11: c+=((uint32_t)k8[10])<<16; /* fall through */
		case 10: c+=((uint32_t)k8[9])<<8;   /* fall through */
		case 9 : c+=k8[8];                  /* fall through */
		case 8 : b+=k[1]; a+=k[0]; break;
		case 7 : b+=((uint32_t)k8[6])<<16;  /* fall through */
		case 6 : b+=((uint32_t)k8[5])<<8;   /* fall through */
		case 5 : b+=k8[4];                  /* fall through */
		case 4 : a+=k[0]; break;
		case 3 : a+=((uint32_t)k8[2])<<16;  /* fall through */
		case 2 : a+=((uint32_t)k8[1])<<8;   /* fall through */
		case 1 : a+=k8[0]; break;
		case 0 : return c;
		}

#endif /* !valgrind */
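
		/*
		 * Illustration (not in the original): with an aligned 11-byte
		 * key, "case 11" above reads the whole word k[2] (bytes 8..11),
		 * but the 0xffffff mask keeps only bytes 8..10 on a
		 * little-endian machine, so the out-of-bounds byte never
		 * affects the hash.
		 */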

	} else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
		const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */
		const uint8_t  *k8;

		/*--------------- all but last block: aligned reads and different mixing */
		while (length > 12)
		{
			a += k[0] + (((uint32_t)k[1])<<16);
			b += k[2] + (((uint32_t)k[3])<<16);
			c += k[4] + (((uint32_t)k[5])<<16);
			mix(a,b,c);
			length -= 12;
			k += 6;
		}

		/*----------------------------- handle the last (probably partial) block */
		k8 = (const uint8_t *)k;
		switch(length)
		{
		case 12: c+=k[4]+(((uint32_t)k[5])<<16);
			 b+=k[2]+(((uint32_t)k[3])<<16);
			 a+=k[0]+(((uint32_t)k[1])<<16);
			 break;
		case 11: c+=((uint32_t)k8[10])<<16; /* fall through */
		case 10: c+=k[4];
			 b+=k[2]+(((uint32_t)k[3])<<16);
			 a+=k[0]+(((uint32_t)k[1])<<16);
			 break;
		case 9 : c+=k8[8]; /* fall through */
		case 8 : b+=k[2]+(((uint32_t)k[3])<<16);
			 a+=k[0]+(((uint32_t)k[1])<<16);
			 break;
		case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */
		case 6 : b+=k[2];
			 a+=k[0]+(((uint32_t)k[1])<<16);
			 break;
		case 5 : b+=k8[4]; /* fall through */
		case 4 : a+=k[0]+(((uint32_t)k[1])<<16);
			 break;
		case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */
		case 2 : a+=k[0];
			 break;
		case 1 : a+=k8[0];
			 break;
		case 0 : return c; /* zero length requires no mixing */
		}

	} else { /* need to read the key one byte at a time */
		const uint8_t *k = (const uint8_t *)key;

		/*--------------- all but the last block: affect some 32 bits of (a,b,c) */
		while (length > 12)
		{
			a += k[0];
			a += ((uint32_t)k[1])<<8;
			a += ((uint32_t)k[2])<<16;
			a += ((uint32_t)k[3])<<24;
			b += k[4];
			b += ((uint32_t)k[5])<<8;
			b += ((uint32_t)k[6])<<16;
			b += ((uint32_t)k[7])<<24;
			c += k[8];
			c += ((uint32_t)k[9])<<8;
			c += ((uint32_t)k[10])<<16;
			c += ((uint32_t)k[11])<<24;
			mix(a,b,c);
			length -= 12;
			k += 12;
		}

		/*-------------------------------- last block: affect all 32 bits of (c) */
		switch(length) /* all the case statements fall through */
		{
		case 12: c+=((uint32_t)k[11])<<24;
		case 11: c+=((uint32_t)k[10])<<16;
		case 10: c+=((uint32_t)k[9])<<8;
		case 9 : c+=k[8];
		case 8 : b+=((uint32_t)k[7])<<24;
		case 7 : b+=((uint32_t)k[6])<<16;
		case 6 : b+=((uint32_t)k[5])<<8;
		case 5 : b+=k[4];
		case 4 : a+=((uint32_t)k[3])<<24;
		case 3 : a+=((uint32_t)k[2])<<16;
		case 2 : a+=((uint32_t)k[1])<<8;
		case 1 : a+=k[0];
			 break;
		case 0 : return c;
		}
	}

	final(a,b,c);
	*val2 = b;
	return c;
}

/*
 * hashbig():
 * This is the same as hash_word() on big-endian machines. It is different
 * from hashlittle() on all machines. hashbig() takes advantage of
 * big-endian byte ordering.
 */
static uint32_t hashbig( const void *key, size_t length, uint32_t *val2)
{
	uint32_t a,b,c;
	union { const void *ptr; size_t i; } u; /* to cast key to (size_t) happily */

	/* Set up the internal state */
	a = b = c = 0xdeadbeef + ((uint32_t)length) + *val2;

	u.ptr = key;
	if (HASH_BIG_ENDIAN && ((u.i & 0x3) == 0)) {
		const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */
		const uint8_t  *k8;

		/*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
		while (length > 12)
		{
			a += k[0];
			b += k[1];
			c += k[2];
			mix(a,b,c);
			length -= 12;
			k += 3;
		}

		/*----------------------------- handle the last (probably partial) block */
		/*
		 * "k[2]<<8" actually reads beyond the end of the string, but
		 * then shifts out the part it's not allowed to read. Because the
		 * string is aligned, the illegal read is in the same word as the
		 * rest of the string. Every machine with memory protection I've seen
		 * does it on word boundaries, so it is OK with this. But VALGRIND will
		 * still catch it and complain. The masking trick does make the hash
		 * noticeably faster for short strings (like English words).
		 *
		 * Not on my testing with gcc 4.5 on an intel i5 CPU, at least --RR.
		 */
#ifndef VALGRIND
		switch(length)
		{
		case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
		case 11: c+=k[2]&0xffffff00; b+=k[1]; a+=k[0]; break;
		case 10: c+=k[2]&0xffff0000; b+=k[1]; a+=k[0]; break;
		case 9 : c+=k[2]&0xff000000; b+=k[1]; a+=k[0]; break;
		case 8 : b+=k[1]; a+=k[0]; break;
		case 7 : b+=k[1]&0xffffff00; a+=k[0]; break;
		case 6 : b+=k[1]&0xffff0000; a+=k[0]; break;
		case 5 : b+=k[1]&0xff000000; a+=k[0]; break;
		case 4 : a+=k[0]; break;
		case 3 : a+=k[0]&0xffffff00; break;
		case 2 : a+=k[0]&0xffff0000; break;
		case 1 : a+=k[0]&0xff000000; break;
		case 0 : return c; /* zero length strings require no mixing */
		}

#else /* make valgrind happy */

		k8 = (const uint8_t *)k;
		switch(length) /* all the case statements fall through */
		{
		case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
		case 11: c+=((uint32_t)k8[10])<<8;  /* fall through */
		case 10: c+=((uint32_t)k8[9])<<16;  /* fall through */
		case 9 : c+=((uint32_t)k8[8])<<24;  /* fall through */
		case 8 : b+=k[1]; a+=k[0]; break;
		case 7 : b+=((uint32_t)k8[6])<<8;   /* fall through */
		case 6 : b+=((uint32_t)k8[5])<<16;  /* fall through */
		case 5 : b+=((uint32_t)k8[4])<<24;  /* fall through */
		case 4 : a+=k[0]; break;
		case 3 : a+=((uint32_t)k8[2])<<8;   /* fall through */
		case 2 : a+=((uint32_t)k8[1])<<16;  /* fall through */
		case 1 : a+=((uint32_t)k8[0])<<24; break;
		case 0 : return c;
		}

#endif /* !VALGRIND */

	} else { /* need to read the key one byte at a time */
		const uint8_t *k = (const uint8_t *)key;

		/*--------------- all but the last block: affect some 32 bits of (a,b,c) */
		while (length > 12)
		{
			a += ((uint32_t)k[0])<<24;
			a += ((uint32_t)k[1])<<16;
			a += ((uint32_t)k[2])<<8;
			a += ((uint32_t)k[3]);
			b += ((uint32_t)k[4])<<24;
			b += ((uint32_t)k[5])<<16;
			b += ((uint32_t)k[6])<<8;
			b += ((uint32_t)k[7]);
			c += ((uint32_t)k[8])<<24;
			c += ((uint32_t)k[9])<<16;
			c += ((uint32_t)k[10])<<8;
			c += ((uint32_t)k[11]);
			mix(a,b,c);
			length -= 12;
			k += 12;
		}

		/*-------------------------------- last block: affect all 32 bits of (c) */
		switch(length) /* all the case statements fall through */
		{
		case 12: c+=k[11];
		case 11: c+=((uint32_t)k[10])<<8;
		case 10: c+=((uint32_t)k[9])<<16;
		case 9 : c+=((uint32_t)k[8])<<24;
		case 8 : b+=k[7];
		case 7 : b+=((uint32_t)k[6])<<8;
		case 6 : b+=((uint32_t)k[5])<<16;
		case 5 : b+=((uint32_t)k[4])<<24;
		case 4 : a+=k[3];
		case 3 : a+=((uint32_t)k[2])<<8;
		case 2 : a+=((uint32_t)k[1])<<16;
		case 1 : a+=((uint32_t)k[0])<<24;
			 break;
		case 0 : return c;
		}
	}

	final(a,b,c);
	*val2 = b;
	return c;
}

uint32_t nl_hash_any(const void *key, size_t length, uint32_t base)
{
	if (HASH_BIG_ENDIAN)
		return hashbig(key, length, &base);
	else
		return hashlittle(key, length, &base);
}
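
/*
 * Usage sketch (illustrative, not part of the original file):
 * nl_hash_any() selects the byte-order-appropriate variant at run time.
 * The base argument lets you chain several byte arrays into a single
 * hash, as described in the comment above hashlittle(). The function
 * name below is hypothetical.
 */
#ifdef SELF_TEST
static uint32_t hash_two_buffers(const void *b1, size_t l1,
				 const void *b2, size_t l2)
{
	uint32_t h = nl_hash_any(b1, l1, 0);	/* hash the first buffer    */
	return nl_hash_any(b2, l2, h);		/* chain in the second one  */
}
#endif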