...

Source file src/github.com/zeebo/xxh3/hash64.go

Documentation: github.com/zeebo/xxh3

     1  package xxh3
     2  
     3  import "math/bits"
     4  
     5  // Hash returns the hash of the byte slice.
     6  func Hash(b []byte) uint64 {
     7  	return hashAny(*(*str)(ptr(&b)))
     8  }
     9  
// HashString returns the hash of the string.
func HashString(s string) uint64 {
	return hashAny(*(*str)(ptr(&s)))
}
    14  
// hashAny computes the 64-bit XXH3 hash of s. It dispatches on the input
// length to one of four strategies: 0-16 bytes, 17-128 bytes, 129-240
// bytes, and a vectorized long-input path for everything larger.
func hashAny(s str) (acc u64) {
	p, l := s.p, s.l

	switch {
	case l <= 16:
		switch {
		case l > 8: // 9-16
			// Two (possibly overlapping) 8-byte reads from the head and
			// tail, each mixed with xor-ed key words, then folded through
			// a 64x64->128 multiply.
			inputlo := readU64(p, 0) ^ (key64_024 ^ key64_032)
			inputhi := readU64(p, ui(l)-8) ^ (key64_040 ^ key64_048)
			folded := mulFold64(inputlo, inputhi)
			return xxh3Avalanche(u64(l) + bits.ReverseBytes64(inputlo) + inputhi + folded)

		case l > 3: // 4-8
			// Two overlapping 4-byte reads cover the whole input; the
			// length is fed into the rrmxmx finalizer alongside the data.
			input1 := readU32(p, 0)
			input2 := readU32(p, ui(l)-4)
			input64 := u64(input2) + u64(input1)<<32
			keyed := input64 ^ (key64_008 ^ key64_016)
			return rrmxmx(keyed, u64(l))

		// For 1-3 bytes the bytes are packed into acc together with the
		// length tag (the `l<<8` term in each branch), then finished by
		// the shared key-xor + small avalanche below the switch.
		case l == 3: // 3
			c12 := u64(readU16(p, 0))
			c3 := u64(readU8(p, 2))
			acc = c12<<16 + c3 + 3<<8

		case l > 1: // 2
			// c12*(1<<24+1)>>8 duplicates the two bytes into a mixed word.
			c12 := u64(readU16(p, 0))
			acc = c12*(1<<24+1)>>8 + 2<<8

		case l == 1: // 1
			// Replicate the single byte across three byte positions.
			c1 := u64(readU8(p, 0))
			acc = c1*(1<<24+1<<16+1) + 1<<8

		default: // 0
			// Precomputed constant for the empty input.
			return 0x2d06800538d394c2 // xxh_avalanche(key64_056 ^ key64_064)
		}

		acc ^= u64(key32_000 ^ key32_004)
		return xxhAvalancheSmall(acc)

	case l <= 128:
		acc = u64(l) * prime64_1

		// Fold 16-byte stripes pairwise from both ends of the input: the
		// deeper the nesting, the longer the input. Stripes from the front
		// use ascending key words; mirrored stripes from the back
		// (offsets relative to l) use the following key words. Adjacent
		// stripes may overlap when l is not a multiple of 32.
		if l > 32 {
			if l > 64 {
				if l > 96 {
					acc += mulFold64(readU64(p, 6*8)^key64_096, readU64(p, 7*8)^key64_104)
					acc += mulFold64(readU64(p, ui(l)-8*8)^key64_112, readU64(p, ui(l)-7*8)^key64_120)
				} // 96
				acc += mulFold64(readU64(p, 4*8)^key64_064, readU64(p, 5*8)^key64_072)
				acc += mulFold64(readU64(p, ui(l)-6*8)^key64_080, readU64(p, ui(l)-5*8)^key64_088)
			} // 64
			acc += mulFold64(readU64(p, 2*8)^key64_032, readU64(p, 3*8)^key64_040)
			acc += mulFold64(readU64(p, ui(l)-4*8)^key64_048, readU64(p, ui(l)-3*8)^key64_056)
		} // 32
		acc += mulFold64(readU64(p, 0*8)^key64_000, readU64(p, 1*8)^key64_008)
		acc += mulFold64(readU64(p, ui(l)-2*8)^key64_016, readU64(p, ui(l)-1*8)^key64_024)

		return xxh3Avalanche(acc)

	case l <= 240:
		acc = u64(l) * prime64_1

		// First 128 bytes: eight fixed 16-byte stripes, fully unrolled.
		acc += mulFold64(readU64(p, 0*16+0)^key64_000, readU64(p, 0*16+8)^key64_008)
		acc += mulFold64(readU64(p, 1*16+0)^key64_016, readU64(p, 1*16+8)^key64_024)
		acc += mulFold64(readU64(p, 2*16+0)^key64_032, readU64(p, 2*16+8)^key64_040)
		acc += mulFold64(readU64(p, 3*16+0)^key64_048, readU64(p, 3*16+8)^key64_056)
		acc += mulFold64(readU64(p, 4*16+0)^key64_064, readU64(p, 4*16+8)^key64_072)
		acc += mulFold64(readU64(p, 5*16+0)^key64_080, readU64(p, 5*16+8)^key64_088)
		acc += mulFold64(readU64(p, 6*16+0)^key64_096, readU64(p, 6*16+8)^key64_104)
		acc += mulFold64(readU64(p, 7*16+0)^key64_112, readU64(p, 7*16+8)^key64_120)

		// avalanche
		acc = xxh3Avalanche(acc)

		// trailing groups after 128
		// top rounds l down to a multiple of 16 (clear the low 4 bits).
		top := ui(l) &^ 15
		for i := ui(8 * 16); i < top; i += 16 {
			// Odd-looking key offsets (i-125, i-117) index raw bytes of
			// the shared key array; presumably they mirror the reference
			// XXH3 secret schedule for the 129-240 range — verify against
			// upstream before changing.
			acc += mulFold64(readU64(p, i+0)^readU64(key, i-125), readU64(p, i+8)^readU64(key, i-117))
		}

		// last 16 bytes
		acc += mulFold64(readU64(p, ui(l)-16)^key64_119, readU64(p, ui(l)-8)^key64_127)

		return xxh3Avalanche(acc)

	default:
		acc = u64(l) * prime64_1

		// Initial lane values for the 8-lane striped accumulation
		// (XXH3's fixed seed primes).
		accs := [8]u64{
			prime32_3, prime64_1, prime64_2, prime64_3,
			prime64_4, prime32_2, prime64_5, prime32_1,
		}

		// Pick the widest available SIMD accumulation kernel at runtime;
		// AVX-512 is only worthwhile above the avx512Switch length.
		if hasAVX512 && l >= avx512Switch {
			accumAVX512(&accs, p, key, u64(l))
		} else if hasAVX2 {
			accumAVX2(&accs, p, key, u64(l))
		} else if hasSSE2 {
			accumSSE(&accs, p, key, u64(l))
		} else {
			accumScalar(&accs, p, key, u64(l))
		}

		// merge accs
		acc += mulFold64(accs[0]^key64_011, accs[1]^key64_019)
		acc += mulFold64(accs[2]^key64_027, accs[3]^key64_035)
		acc += mulFold64(accs[4]^key64_043, accs[5]^key64_051)
		acc += mulFold64(accs[6]^key64_059, accs[7]^key64_067)

		return xxh3Avalanche(acc)
	}
}
   127  

View as plain text