...

Source file src/github.com/klauspost/compress/flate/level5.go

Documentation: github.com/klauspost/compress/flate
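
This file implements the level 5 deflate encoder (and a variant with a custom window size). A minimal usage sketch, not part of this file, assuming the package's stdlib-compatible NewWriter API; compression level 5 selects the encoder defined here:

    package main

    import (
    	"bytes"
    	"io"
    	"log"

    	"github.com/klauspost/compress/flate"
    )

    func main() {
    	var buf bytes.Buffer
    	// Level 5 maps to the fastEncL5 encoder in level5.go.
    	w, err := flate.NewWriter(&buf, 5)
    	if err != nil {
    		log.Fatal(err)
    	}
    	if _, err := io.WriteString(w, "an example payload, an example payload"); err != nil {
    		log.Fatal(err)
    	}
    	if err := w.Close(); err != nil {
    		log.Fatal(err)
    	}
    	log.Printf("compressed to %d bytes", buf.Len())
    }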

     1  package flate
     2  
     3  import "fmt"
     4  
     5  type fastEncL5 struct {
     6  	fastGen
     7  	table  [tableSize]tableEntry
     8  	bTable [tableSize]tableEntryPrev
     9  }
    10  
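        // Encode turns src into tokens in dst using two hash tables: e.table is
        // indexed by a 4-byte hash (short matches) and e.bTable by a 7-byte hash,
        // with each long bucket keeping its current and previous entry, so
        // several earlier positions can be tried as match candidates per step.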
    11  func (e *fastEncL5) Encode(dst *tokens, src []byte) {
    12  	const (
    13  		inputMargin            = 12 - 1
    14  		minNonLiteralBlockSize = 1 + 1 + inputMargin
    15  		hashShortBytes         = 4
    16  	)
    17  	if debugDeflate && e.cur < 0 {
    18  		panic(fmt.Sprint("e.cur < 0: ", e.cur))
    19  	}
    20  
    21  	// Protect against e.cur wraparound.
    22  	for e.cur >= bufferReset {
    23  		if len(e.hist) == 0 {
    24  			for i := range e.table[:] {
    25  				e.table[i] = tableEntry{}
    26  			}
    27  			for i := range e.bTable[:] {
    28  				e.bTable[i] = tableEntryPrev{}
    29  			}
    30  			e.cur = maxMatchOffset
    31  			break
    32  		}
    33  		// Shift down everything in the table that isn't already too far away.
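        		// Entries at or below minOff are at least maxMatchOffset behind the
        		// end of the current history and can never be referenced again, so
        		// they are zeroed. The remaining offsets are rebased so that, with
        		// e.cur reset to maxMatchOffset below, they still point at the same
        		// positions in e.hist.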
    34  		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
    35  		for i := range e.table[:] {
    36  			v := e.table[i].offset
    37  			if v <= minOff {
    38  				v = 0
    39  			} else {
    40  				v = v - e.cur + maxMatchOffset
    41  			}
    42  			e.table[i].offset = v
    43  		}
    44  		for i := range e.bTable[:] {
    45  			v := e.bTable[i]
    46  			if v.Cur.offset <= minOff {
    47  				v.Cur.offset = 0
    48  				v.Prev.offset = 0
    49  			} else {
    50  				v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
    51  				if v.Prev.offset <= minOff {
    52  					v.Prev.offset = 0
    53  				} else {
    54  					v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
    55  				}
    56  			}
    57  			e.bTable[i] = v
    58  		}
    59  		e.cur = maxMatchOffset
    60  	}
    61  
    62  	s := e.addBlock(src)
    63  
    64  	// This check isn't in the Snappy implementation, but there, the caller
    65  	// instead of the callee handles this case.
    66  	if len(src) < minNonLiteralBlockSize {
    67  		// We do not fill the token table.
    68  		// This will be picked up by caller.
    69  		dst.n = uint16(len(src))
    70  		return
    71  	}
    72  
    73  	// Override src
    74  	src = e.hist
    75  	nextEmit := s
    76  
    77  	// sLimit is when to stop looking for offset/length copies. The inputMargin
    78  	// lets us use a fast path for emitLiteral in the main loop, while we are
    79  	// looking for copies.
    80  	sLimit := int32(len(src) - inputMargin)
    81  
    82  	// nextEmit is where in src the next emitLiteral should start from.
    83  	cv := load6432(src, s)
    84  	for {
    85  		const skipLog = 6
    86  		const doEvery = 1
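        		// Skip ahead faster through poorly matching data: each probe advances
        		// by doEvery plus one extra byte per 2^skipLog (64) bytes of literals
        		// seen since the last emit.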
    87  
    88  		nextS := s
    89  		var l int32
    90  		var t int32
    91  		for {
    92  			nextHashS := hashLen(cv, tableBits, hashShortBytes)
    93  			nextHashL := hash7(cv, tableBits)
    94  
    95  			s = nextS
    96  			nextS = s + doEvery + (s-nextEmit)>>skipLog
    97  			if nextS > sLimit {
    98  				goto emitRemainder
    99  			}
   100  			// Fetch a short+long candidate
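        			// The short table holds the most recent position with the same
        			// 4-byte hash; the long table bucket holds the two most recent
        			// positions with the same 7-byte hash (Cur and Prev).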
   101  			sCandidate := e.table[nextHashS]
   102  			lCandidate := e.bTable[nextHashL]
   103  			next := load6432(src, nextS)
   104  			entry := tableEntry{offset: s + e.cur}
   105  			e.table[nextHashS] = entry
   106  			eLong := &e.bTable[nextHashL]
   107  			eLong.Cur, eLong.Prev = entry, eLong.Cur
   108  
   109  			nextHashS = hashLen(next, tableBits, hashShortBytes)
   110  			nextHashL = hash7(next, tableBits)
   111  
   112  			t = lCandidate.Cur.offset - e.cur
   113  			if s-t < maxMatchOffset {
   114  				if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
   115  					// Store the next match
   116  					e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
   117  					eLong := &e.bTable[nextHashL]
   118  					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
   119  
   120  					t2 := lCandidate.Prev.offset - e.cur
   121  					if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
   122  						l = e.matchlen(s+4, t+4, src) + 4
   123  						ml1 := e.matchlen(s+4, t2+4, src) + 4
   124  						if ml1 > l {
   125  							t = t2
   126  							l = ml1
   127  							break
   128  						}
   129  					}
   130  					break
   131  				}
   132  				t = lCandidate.Prev.offset - e.cur
   133  				if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
   134  					// Store the next match
   135  					e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
   136  					eLong := &e.bTable[nextHashL]
   137  					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
   138  					break
   139  				}
   140  			}
   141  
   142  			t = sCandidate.offset - e.cur
   143  			if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
   144  				// Found a 4-byte match...
   145  				l = e.matchlen(s+4, t+4, src) + 4
   146  				lCandidate = e.bTable[nextHashL]
   147  				// Store the next match
   148  
   149  				e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
   150  				eLong := &e.bTable[nextHashL]
   151  				eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
   152  
   153  				// If the next long is a candidate, use that...
   154  				t2 := lCandidate.Cur.offset - e.cur
   155  				if nextS-t2 < maxMatchOffset {
   156  					if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
   157  						ml := e.matchlen(nextS+4, t2+4, src) + 4
   158  						if ml > l {
   159  							t = t2
   160  							s = nextS
   161  							l = ml
   162  							break
   163  						}
   164  					}
   165  					// If the previous long is a candidate, use that...
   166  					t2 = lCandidate.Prev.offset - e.cur
   167  					if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
   168  						ml := e.matchlen(nextS+4, t2+4, src) + 4
   169  						if ml > l {
   170  							t = t2
   171  							s = nextS
   172  							l = ml
   173  							break
   174  						}
   175  					}
   176  				}
   177  				break
   178  			}
   179  			cv = next
   180  		}
   181  
   182  		// A 4-byte match has been found. We'll later see if more than 4 bytes
   183  		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
   184  		// them as literal bytes.
   185  
   186  		if l == 0 {
   187  			// Extend the 4-byte match as long as possible.
   188  			l = e.matchlenLong(s+4, t+4, src) + 4
   189  		} else if l == maxMatchLength {
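        			// matchlen is capped at maxMatchLength-4, so hitting the cap means
        			// the match may continue; extend it with the uncapped matcher.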
   190  			l += e.matchlenLong(s+l, t+l, src)
   191  		}
   192  
   193  		// Try to locate a better match by checking the end of best match...
   194  		if sAt := s + l; l < 30 && sAt < sLimit {
   195  			// Allow some bytes at the beginning to mismatch.
   196  			// The sweet spot is 2 or 3 bytes, depending on input.
   197  			// 3 is only a little better when it helps, but can be a lot worse.
   198  			// The skipped bytes are tested in the "Extend backwards" step below,
   199  			// and are still picked up as part of the match if they match.
   200  			const skipBeginning = 2
   201  			eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset
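        			// eLong is the latest position that hashed like the bytes at sAt;
        			// subtracting l lines that candidate up with the start of the current
        			// match, and skipBeginning tolerates a couple of leading mismatches,
        			// which the "Extend backwards" step below can recover.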
   202  			t2 := eLong - e.cur - l + skipBeginning
   203  			s2 := s + skipBeginning
   204  			off := s2 - t2
   205  			if t2 >= 0 && off < maxMatchOffset && off > 0 {
   206  				if l2 := e.matchlenLong(s2, t2, src); l2 > l {
   207  					t = t2
   208  					l = l2
   209  					s = s2
   210  				}
   211  			}
   212  		}
   213  
   214  		// Extend backwards
   215  		for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
   216  			s--
   217  			t--
   218  			l++
   219  		}
   220  		if nextEmit < s {
   221  			if false {
   222  				emitLiteral(dst, src[nextEmit:s])
   223  			} else {
   224  				for _, v := range src[nextEmit:s] {
   225  					dst.tokens[dst.n] = token(v)
   226  					dst.litHist[v]++
   227  					dst.n++
   228  				}
   229  			}
   230  		}
   231  		if debugDeflate {
   232  			if t >= s {
   233  				panic(fmt.Sprintln("s-t", s, t))
   234  			}
   235  			if (s - t) > maxMatchOffset {
   236  				panic(fmt.Sprintln("mmo", s-t))
   237  			}
   238  			if l < baseMatchLength {
   239  				panic("bml")
   240  			}
   241  		}
   242  
   243  		dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
   244  		s += l
   245  		nextEmit = s
   246  		if nextS >= s {
   247  			s = nextS + 1
   248  		}
   249  
   250  		if s >= sLimit {
   251  			goto emitRemainder
   252  		}
   253  
   254  		// Store every 3rd hash in-between.
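        		// The bytes covered by the match just emitted are mostly absent from
        		// the tables; index roughly every 3rd of them so later data can still
        		// match this region without the cost of hashing every byte.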
   255  		if true {
   256  			const hashEvery = 3
   257  			i := s - l + 1
   258  			if i < s-1 {
   259  				cv := load6432(src, i)
   260  				t := tableEntry{offset: i + e.cur}
   261  				e.table[hashLen(cv, tableBits, hashShortBytes)] = t
   262  				eLong := &e.bTable[hash7(cv, tableBits)]
   263  				eLong.Cur, eLong.Prev = t, eLong.Cur
   264  
   265  				// Add a long-table entry at i+1
   266  				cv >>= 8
   267  				t = tableEntry{offset: t.offset + 1}
   268  				eLong = &e.bTable[hash7(cv, tableBits)]
   269  				eLong.Cur, eLong.Prev = t, eLong.Cur
   270  
   271  				// We only have enough bits for a short entry at i+2
   272  				cv >>= 8
   273  				t = tableEntry{offset: t.offset + 1}
   274  				e.table[hashLen(cv, tableBits, hashShortBytes)] = t
   275  
   276  				// Skip one - otherwise we risk hitting 's'
   277  				i += 4
   278  				for ; i < s-1; i += hashEvery {
   279  					cv := load6432(src, i)
   280  					t := tableEntry{offset: i + e.cur}
   281  					t2 := tableEntry{offset: t.offset + 1}
   282  					eLong := &e.bTable[hash7(cv, tableBits)]
   283  					eLong.Cur, eLong.Prev = t, eLong.Cur
   284  					e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
   285  				}
   286  			}
   287  		}
   288  
   289  		// We could immediately start working at s now, but to improve
   290  		// compression we first update the hash table at s-1 and at s.
   291  		x := load6432(src, s-1)
   292  		o := e.cur + s - 1
   293  		prevHashS := hashLen(x, tableBits, hashShortBytes)
   294  		prevHashL := hash7(x, tableBits)
   295  		e.table[prevHashS] = tableEntry{offset: o}
   296  		eLong := &e.bTable[prevHashL]
   297  		eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur
   298  		cv = x >> 8
   299  	}
   300  
   301  emitRemainder:
   302  	if int(nextEmit) < len(src) {
   303  		// If nothing was added, don't encode literals.
   304  		if dst.n == 0 {
   305  			return
   306  		}
   307  
   308  		emitLiteral(dst, src[nextEmit:])
   309  	}
   310  }
   311  
   312  // fastEncL5Window is a level 5 encoder,
   313  // but with a custom window size.
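        // The window (maximum match offset) comes from maxOffset instead of the
        // package-level maxMatchOffset constant; otherwise it mirrors fastEncL5.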
   314  type fastEncL5Window struct {
   315  	hist      []byte
   316  	cur       int32
   317  	maxOffset int32
   318  	table     [tableSize]tableEntry
   319  	bTable    [tableSize]tableEntryPrev
   320  }
   321  
   322  func (e *fastEncL5Window) Encode(dst *tokens, src []byte) {
   323  	const (
   324  		inputMargin            = 12 - 1
   325  		minNonLiteralBlockSize = 1 + 1 + inputMargin
   326  		hashShortBytes         = 4
   327  	)
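        	// Shadow the package-level constant with the configured window size.
        	// The remainder of this function is otherwise identical to
        	// fastEncL5.Encode.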
   328  	maxMatchOffset := e.maxOffset
   329  	if debugDeflate && e.cur < 0 {
   330  		panic(fmt.Sprint("e.cur < 0: ", e.cur))
   331  	}
   332  
   333  	// Protect against e.cur wraparound.
   334  	for e.cur >= bufferReset {
   335  		if len(e.hist) == 0 {
   336  			for i := range e.table[:] {
   337  				e.table[i] = tableEntry{}
   338  			}
   339  			for i := range e.bTable[:] {
   340  				e.bTable[i] = tableEntryPrev{}
   341  			}
   342  			e.cur = maxMatchOffset
   343  			break
   344  		}
   345  		// Shift down everything in the table that isn't already too far away.
   346  		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
   347  		for i := range e.table[:] {
   348  			v := e.table[i].offset
   349  			if v <= minOff {
   350  				v = 0
   351  			} else {
   352  				v = v - e.cur + maxMatchOffset
   353  			}
   354  			e.table[i].offset = v
   355  		}
   356  		for i := range e.bTable[:] {
   357  			v := e.bTable[i]
   358  			if v.Cur.offset <= minOff {
   359  				v.Cur.offset = 0
   360  				v.Prev.offset = 0
   361  			} else {
   362  				v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
   363  				if v.Prev.offset <= minOff {
   364  					v.Prev.offset = 0
   365  				} else {
   366  					v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
   367  				}
   368  			}
   369  			e.bTable[i] = v
   370  		}
   371  		e.cur = maxMatchOffset
   372  	}
   373  
   374  	s := e.addBlock(src)
   375  
   376  	// This check isn't in the Snappy implementation, but there, the caller
   377  	// instead of the callee handles this case.
   378  	if len(src) < minNonLiteralBlockSize {
   379  		// We do not fill the token table.
   380  		// This will be picked up by caller.
   381  		dst.n = uint16(len(src))
   382  		return
   383  	}
   384  
   385  	// Override src
   386  	src = e.hist
   387  	nextEmit := s
   388  
   389  	// sLimit is when to stop looking for offset/length copies. The inputMargin
   390  	// lets us use a fast path for emitLiteral in the main loop, while we are
   391  	// looking for copies.
   392  	sLimit := int32(len(src) - inputMargin)
   393  
   394  	// nextEmit is where in src the next emitLiteral should start from.
   395  	cv := load6432(src, s)
   396  	for {
   397  		const skipLog = 6
   398  		const doEvery = 1
   399  
   400  		nextS := s
   401  		var l int32
   402  		var t int32
   403  		for {
   404  			nextHashS := hashLen(cv, tableBits, hashShortBytes)
   405  			nextHashL := hash7(cv, tableBits)
   406  
   407  			s = nextS
   408  			nextS = s + doEvery + (s-nextEmit)>>skipLog
   409  			if nextS > sLimit {
   410  				goto emitRemainder
   411  			}
   412  			// Fetch a short+long candidate
   413  			sCandidate := e.table[nextHashS]
   414  			lCandidate := e.bTable[nextHashL]
   415  			next := load6432(src, nextS)
   416  			entry := tableEntry{offset: s + e.cur}
   417  			e.table[nextHashS] = entry
   418  			eLong := &e.bTable[nextHashL]
   419  			eLong.Cur, eLong.Prev = entry, eLong.Cur
   420  
   421  			nextHashS = hashLen(next, tableBits, hashShortBytes)
   422  			nextHashL = hash7(next, tableBits)
   423  
   424  			t = lCandidate.Cur.offset - e.cur
   425  			if s-t < maxMatchOffset {
   426  				if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
   427  					// Store the next match
   428  					e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
   429  					eLong := &e.bTable[nextHashL]
   430  					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
   431  
   432  					t2 := lCandidate.Prev.offset - e.cur
   433  					if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
   434  						l = e.matchlen(s+4, t+4, src) + 4
   435  						ml1 := e.matchlen(s+4, t2+4, src) + 4
   436  						if ml1 > l {
   437  							t = t2
   438  							l = ml1
   439  							break
   440  						}
   441  					}
   442  					break
   443  				}
   444  				t = lCandidate.Prev.offset - e.cur
   445  				if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
   446  					// Store the next match
   447  					e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
   448  					eLong := &e.bTable[nextHashL]
   449  					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
   450  					break
   451  				}
   452  			}
   453  
   454  			t = sCandidate.offset - e.cur
   455  			if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
   456  				// Found a 4-byte match...
   457  				l = e.matchlen(s+4, t+4, src) + 4
   458  				lCandidate = e.bTable[nextHashL]
   459  				// Store the next match
   460  
   461  				e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
   462  				eLong := &e.bTable[nextHashL]
   463  				eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
   464  
   465  				// If the next long is a candidate, use that...
   466  				t2 := lCandidate.Cur.offset - e.cur
   467  				if nextS-t2 < maxMatchOffset {
   468  					if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
   469  						ml := e.matchlen(nextS+4, t2+4, src) + 4
   470  						if ml > l {
   471  							t = t2
   472  							s = nextS
   473  							l = ml
   474  							break
   475  						}
   476  					}
   477  					// If the previous long is a candidate, use that...
   478  					t2 = lCandidate.Prev.offset - e.cur
   479  					if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
   480  						ml := e.matchlen(nextS+4, t2+4, src) + 4
   481  						if ml > l {
   482  							t = t2
   483  							s = nextS
   484  							l = ml
   485  							break
   486  						}
   487  					}
   488  				}
   489  				break
   490  			}
   491  			cv = next
   492  		}
   493  
   494  		// A 4-byte match has been found. We'll later see if more than 4 bytes
   495  		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
   496  		// them as literal bytes.
   497  
   498  		if l == 0 {
   499  			// Extend the 4-byte match as long as possible.
   500  			l = e.matchlenLong(s+4, t+4, src) + 4
   501  		} else if l == maxMatchLength {
   502  			l += e.matchlenLong(s+l, t+l, src)
   503  		}
   504  
   505  		// Try to locate a better match by checking the end of best match...
   506  		if sAt := s + l; l < 30 && sAt < sLimit {
   507  			// Allow some bytes at the beginning to mismatch.
   508  			// The sweet spot is 2 or 3 bytes, depending on input.
   509  			// 3 is only a little better when it helps, but can be a lot worse.
   510  			// The skipped bytes are tested in the "Extend backwards" step below,
   511  			// and are still picked up as part of the match if they match.
   512  			const skipBeginning = 2
   513  			eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset
   514  			t2 := eLong - e.cur - l + skipBeginning
   515  			s2 := s + skipBeginning
   516  			off := s2 - t2
   517  			if t2 >= 0 && off < maxMatchOffset && off > 0 {
   518  				if l2 := e.matchlenLong(s2, t2, src); l2 > l {
   519  					t = t2
   520  					l = l2
   521  					s = s2
   522  				}
   523  			}
   524  		}
   525  
   526  		// Extend backwards
   527  		for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
   528  			s--
   529  			t--
   530  			l++
   531  		}
   532  		if nextEmit < s {
   533  			if false {
   534  				emitLiteral(dst, src[nextEmit:s])
   535  			} else {
   536  				for _, v := range src[nextEmit:s] {
   537  					dst.tokens[dst.n] = token(v)
   538  					dst.litHist[v]++
   539  					dst.n++
   540  				}
   541  			}
   542  		}
   543  		if debugDeflate {
   544  			if t >= s {
   545  				panic(fmt.Sprintln("s-t", s, t))
   546  			}
   547  			if (s - t) > maxMatchOffset {
   548  				panic(fmt.Sprintln("mmo", s-t))
   549  			}
   550  			if l < baseMatchLength {
   551  				panic("bml")
   552  			}
   553  		}
   554  
   555  		dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
   556  		s += l
   557  		nextEmit = s
   558  		if nextS >= s {
   559  			s = nextS + 1
   560  		}
   561  
   562  		if s >= sLimit {
   563  			goto emitRemainder
   564  		}
   565  
   566  		// Store every 3rd hash in-between.
   567  		if true {
   568  			const hashEvery = 3
   569  			i := s - l + 1
   570  			if i < s-1 {
   571  				cv := load6432(src, i)
   572  				t := tableEntry{offset: i + e.cur}
   573  				e.table[hashLen(cv, tableBits, hashShortBytes)] = t
   574  				eLong := &e.bTable[hash7(cv, tableBits)]
   575  				eLong.Cur, eLong.Prev = t, eLong.Cur
   576  
   577  				// Add a long-table entry at i+1
   578  				cv >>= 8
   579  				t = tableEntry{offset: t.offset + 1}
   580  				eLong = &e.bTable[hash7(cv, tableBits)]
   581  				eLong.Cur, eLong.Prev = t, eLong.Cur
   582  
   583  				// We only have enough bits for a short entry at i+2
   584  				cv >>= 8
   585  				t = tableEntry{offset: t.offset + 1}
   586  				e.table[hashLen(cv, tableBits, hashShortBytes)] = t
   587  
   588  				// Skip one - otherwise we risk hitting 's'
   589  				i += 4
   590  				for ; i < s-1; i += hashEvery {
   591  					cv := load6432(src, i)
   592  					t := tableEntry{offset: i + e.cur}
   593  					t2 := tableEntry{offset: t.offset + 1}
   594  					eLong := &e.bTable[hash7(cv, tableBits)]
   595  					eLong.Cur, eLong.Prev = t, eLong.Cur
   596  					e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
   597  				}
   598  			}
   599  		}
   600  
   601  		// We could immediately start working at s now, but to improve
   602  		// compression we first update the hash table at s-1 and at s.
   603  		x := load6432(src, s-1)
   604  		o := e.cur + s - 1
   605  		prevHashS := hashLen(x, tableBits, hashShortBytes)
   606  		prevHashL := hash7(x, tableBits)
   607  		e.table[prevHashS] = tableEntry{offset: o}
   608  		eLong := &e.bTable[prevHashL]
   609  		eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur
   610  		cv = x >> 8
   611  	}
   612  
   613  emitRemainder:
   614  	if int(nextEmit) < len(src) {
   615  		// If nothing was added, don't encode literals.
   616  		if dst.n == 0 {
   617  			return
   618  		}
   619  
   620  		emitLiteral(dst, src[nextEmit:])
   621  	}
   622  }
   623  
   624  // Reset the encoding table.
   625  func (e *fastEncL5Window) Reset() {
   626  	// We keep the same allocs, since we are compressing the same block sizes.
   627  	if cap(e.hist) < allocHistory {
   628  		e.hist = make([]byte, 0, allocHistory)
   629  	}
   630  
   631  	// We offset current position so everything will be out of reach.
   632  	// If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
   633  	if e.cur <= int32(bufferReset) {
   634  		e.cur += e.maxOffset + int32(len(e.hist))
   635  	}
   636  	e.hist = e.hist[:0]
   637  }
   638  
   639  func (e *fastEncL5Window) addBlock(src []byte) int32 {
   640  	// check if we have space already
   641  	maxMatchOffset := e.maxOffset
   642  
   643  	if len(e.hist)+len(src) > cap(e.hist) {
   644  		if cap(e.hist) == 0 {
   645  			e.hist = make([]byte, 0, allocHistory)
   646  		} else {
   647  			if cap(e.hist) < int(maxMatchOffset*2) {
   648  				panic("unexpected buffer size")
   649  			}
   650  			// Move down
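        			// Keep only the most recent maxMatchOffset bytes of history at the
        			// front of the buffer and advance e.cur by the amount dropped, so
        			// previously stored table offsets stay consistent.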
   651  			offset := int32(len(e.hist)) - maxMatchOffset
   652  			copy(e.hist[0:maxMatchOffset], e.hist[offset:])
   653  			e.cur += offset
   654  			e.hist = e.hist[:maxMatchOffset]
   655  		}
   656  	}
   657  	s := int32(len(e.hist))
   658  	e.hist = append(e.hist, src...)
   659  	return s
   660  }
   661  
   662  // matchlen will return the match length between offsets s and t in src.
   663  // The maximum length returned is maxMatchLength - 4.
   664  // It is assumed that s > t, that t >= 0 and that s < len(src).
   665  func (e *fastEncL5Window) matchlen(s, t int32, src []byte) int32 {
   666  	if debugDecode {
   667  		if t >= s {
   668  			panic(fmt.Sprint("t >=s:", t, s))
   669  		}
   670  		if int(s) >= len(src) {
   671  			panic(fmt.Sprint("s >= len(src):", s, len(src)))
   672  		}
   673  		if t < 0 {
   674  			panic(fmt.Sprint("t < 0:", t))
   675  		}
   676  		if s-t > e.maxOffset {
   677  			panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchOffset (", e.maxOffset, ")"))
   678  		}
   679  	}
   680  	s1 := int(s) + maxMatchLength - 4
   681  	if s1 > len(src) {
   682  		s1 = len(src)
   683  	}
   684  
   685  	// Extend the match to be as long as possible.
   686  	return int32(matchLen(src[s:s1], src[t:]))
   687  }
   688  
   689  // matchlenLong will return the match length between offsets s and t in src.
   690  // It is assumed that s > t, that t >= 0 and that s < len(src).
   691  func (e *fastEncL5Window) matchlenLong(s, t int32, src []byte) int32 {
   692  	if debugDeflate {
   693  		if t >= s {
   694  			panic(fmt.Sprint("t >=s:", t, s))
   695  		}
   696  		if int(s) >= len(src) {
   697  			panic(fmt.Sprint("s >= len(src):", s, len(src)))
   698  		}
   699  		if t < 0 {
   700  			panic(fmt.Sprint("t < 0:", t))
   701  		}
   702  		if s-t > e.maxOffset {
   703  			panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchOffset (", e.maxOffset, ")"))
   704  		}
   705  	}
   706  	// Extend the match to be as long as possible.
   707  	return int32(matchLen(src[s:], src[t:]))
   708  }
   709  
