...

Source file src/github.com/zeebo/xxh3/hash64_seed.go

Documentation: github.com/zeebo/xxh3

     1  package xxh3
     2  
     3  import "math/bits"
     4  
     5  // HashSeed returns the hash of the byte slice with given seed.
     6  func HashSeed(b []byte, seed uint64) uint64 {
     7  	return hashAnySeed(*(*str)(ptr(&b)), seed)
     8  
     9  }
    10  
    11  // HashStringSeed returns the hash of the string slice with given seed.
    12  func HashStringSeed(s string, seed uint64) uint64 {
    13  	return hashAnySeed(*(*str)(ptr(&s)), seed)
    14  }
    15  
// hashAnySeed computes the seeded 64-bit XXH3 hash of the bytes described by
// s (pointer s.p, length s.l). It dispatches on input length, mirroring the
// upstream XXH3 reference layout: 0..16 bytes, 17..128, 129..240, and a bulk
// (possibly SIMD-accelerated) path for anything longer. The key64_*/key32_*
// values are presumably words of the default XXH3 secret — verify against the
// key declarations elsewhere in this package.
func hashAnySeed(s str, seed uint64) (acc u64) {
	p, l := s.p, s.l

	switch {
	case l <= 16:
		switch {
		case l > 8:
			// 9..16 bytes: two 8-byte reads anchored at each end of the
			// input (they may overlap), each keyed with secret words and
			// the seed, then folded and avalanched together with the length.
			inputlo := readU64(p, 0) ^ (key64_024 ^ key64_032 + seed)
			inputhi := readU64(p, ui(l)-8) ^ (key64_040 ^ key64_048 - seed)
			folded := mulFold64(inputlo, inputhi)
			return xxh3Avalanche(u64(l) + bits.ReverseBytes64(inputlo) + inputhi + folded)

		case l > 3:
			// 4..8 bytes: fold a byte-swapped copy of the low seed word into
			// the high half of the seed, pack the first and last 4 bytes
			// (possibly overlapping) into one 64-bit word, key it, and
			// finish with the rrmxmx mixer.
			seed ^= u64(bits.ReverseBytes32(u32(seed))) << 32
			input1 := readU32(p, 0)
			input2 := readU32(p, ui(l)-4)
			input64 := u64(input2) + u64(input1)<<32
			keyed := input64 ^ (key64_008 ^ key64_016 - seed)
			return rrmxmx(keyed, u64(l))

		// The 1..3 byte cases below pack the input bytes plus the length
		// (l<<8) into acc, then fall through to the shared key/seed mix
		// and small avalanche after the switch.
		case l == 3: // 3
			c12 := u64(readU16(p, 0))
			c3 := u64(readU8(p, 2))
			acc = c12<<16 + c3 + 3<<8

		case l > 1: // 2
			c12 := u64(readU16(p, 0))
			acc = c12*(1<<24+1)>>8 + 2<<8

		case l == 1: // 1
			c1 := u64(readU8(p, 0))
			acc = c1*(1<<24+1<<16+1) + 1<<8

		default:
			// Empty input: hash is derived from the seed and key only.
			return xxhAvalancheSmall(seed ^ key64_056 ^ key64_064)
		}

		// Shared tail for the 1..3 byte cases.
		acc ^= u64(key32_000^key32_004) + seed
		return xxhAvalancheSmall(acc)

	case l <= 128:
		// 17..128 bytes: start from the length times a prime, then fold in
		// up to four symmetric pairs of 16-byte chunks — one chunk from the
		// front, its partner mirrored from the back — adding pairs from the
		// outside in as the length crosses 96/64/32.
		acc = u64(l) * prime64_1

		if l > 32 {
			if l > 64 {
				if l > 96 {
					acc += mulFold64(readU64(p, 6*8)^(key64_096+seed), readU64(p, 7*8)^(key64_104-seed))
					acc += mulFold64(readU64(p, ui(l)-8*8)^(key64_112+seed), readU64(p, ui(l)-7*8)^(key64_120-seed))
				} // 96
				acc += mulFold64(readU64(p, 4*8)^(key64_064+seed), readU64(p, 5*8)^(key64_072-seed))
				acc += mulFold64(readU64(p, ui(l)-6*8)^(key64_080+seed), readU64(p, ui(l)-5*8)^(key64_088-seed))
			} // 64
			acc += mulFold64(readU64(p, 2*8)^(key64_032+seed), readU64(p, 3*8)^(key64_040-seed))
			acc += mulFold64(readU64(p, ui(l)-4*8)^(key64_048+seed), readU64(p, ui(l)-3*8)^(key64_056-seed))
		} // 32
		acc += mulFold64(readU64(p, 0*8)^(key64_000+seed), readU64(p, 1*8)^(key64_008-seed))
		acc += mulFold64(readU64(p, ui(l)-2*8)^(key64_016+seed), readU64(p, ui(l)-1*8)^(key64_024-seed))

		return xxh3Avalanche(acc)

	case l <= 240:
		// 129..240 bytes: fold the first eight 16-byte groups with fixed
		// key words, avalanche, then continue with the remaining complete
		// 16-byte groups and finally the last 16 bytes of the input.
		acc = u64(l) * prime64_1

		acc += mulFold64(readU64(p, 0*16+0)^(key64_000+seed), readU64(p, 0*16+8)^(key64_008-seed))
		acc += mulFold64(readU64(p, 1*16+0)^(key64_016+seed), readU64(p, 1*16+8)^(key64_024-seed))
		acc += mulFold64(readU64(p, 2*16+0)^(key64_032+seed), readU64(p, 2*16+8)^(key64_040-seed))
		acc += mulFold64(readU64(p, 3*16+0)^(key64_048+seed), readU64(p, 3*16+8)^(key64_056-seed))
		acc += mulFold64(readU64(p, 4*16+0)^(key64_064+seed), readU64(p, 4*16+8)^(key64_072-seed))
		acc += mulFold64(readU64(p, 5*16+0)^(key64_080+seed), readU64(p, 5*16+8)^(key64_088-seed))
		acc += mulFold64(readU64(p, 6*16+0)^(key64_096+seed), readU64(p, 6*16+8)^(key64_104-seed))
		acc += mulFold64(readU64(p, 7*16+0)^(key64_112+seed), readU64(p, 7*16+8)^(key64_120-seed))

		// avalanche
		acc = xxh3Avalanche(acc)

		// trailing groups after 128
		// top rounds l down to a multiple of 16; the key reads at i-125 and
		// i-117 follow the reference's shifted secret offsets for this
		// phase — NOTE(review): confirm against the key table layout.
		top := ui(l) &^ 15
		for i := ui(8 * 16); i < top; i += 16 {
			acc += mulFold64(readU64(p, i+0)^(readU64(key, i-125)+seed), readU64(p, i+8)^(readU64(key, i-117)-seed))
		}

		// last 16 bytes
		acc += mulFold64(readU64(p, ui(l)-16)^(key64_119+seed), readU64(p, ui(l)-8)^(key64_127-seed))

		return xxh3Avalanche(acc)

	default:
		// >240 bytes: bulk path with eight 64-bit accumulator lanes.
		acc = u64(l) * prime64_1

		// A nonzero seed requires a seed-derived secret; initSecret fills a
		// fresh secretSize-byte buffer from the seed. Seed zero reuses the
		// shared default key.
		secret := key
		if seed != 0 {
			secret = ptr(&[secretSize]byte{})
			initSecret(secret, seed)
		}

		// Lane initial values (mixed 32/64-bit primes), matching the
		// XXH3 reference accumulator seeds.
		accs := [8]u64{
			prime32_3, prime64_1, prime64_2, prime64_3,
			prime64_4, prime32_2, prime64_5, prime32_1,
		}

		// Pick the widest accumulation kernel the CPU supports; the scalar
		// fallback handles everything else.
		if hasAVX512 && l >= avx512Switch {
			accumAVX512(&accs, p, secret, u64(l))
		} else if hasAVX2 {
			accumAVX2(&accs, p, secret, u64(l))
		} else if hasSSE2 {
			accumSSE(&accs, p, secret, u64(l))
		} else {
			accumScalarSeed(&accs, p, secret, u64(l))
		}

		// merge accs: fold adjacent lane pairs with secret words at the
		// odd byte offsets 11..67 into the final 64-bit value.
		acc += mulFold64(accs[0]^readU64(secret, 11), accs[1]^readU64(secret, 19))
		acc += mulFold64(accs[2]^readU64(secret, 27), accs[3]^readU64(secret, 35))
		acc += mulFold64(accs[4]^readU64(secret, 43), accs[5]^readU64(secret, 51))
		acc += mulFold64(accs[6]^readU64(secret, 59), accs[7]^readU64(secret, 67))

		return xxh3Avalanche(acc)
	}
}
   135  

View as plain text