- // Copyright 2019+ Klaus Post. All rights reserved.
- // License information can be found in the LICENSE file.
- // Based on work by Yann Collet, released under BSD License.
- package zstd
- import (
- "fmt"
- "math"
- "math/bits"
- )
- const (
- tableBits = 15 // Bits used in the table
- tableSize = 1 << tableBits // Size of the table
- tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table
- tableShardSize = tableSize / tableShardCnt // Size of an individual shard
- tableFastHashLen = 6
- tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
- maxMatchLength = 131074
- )
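- // With tableBits = 15 the table holds 1<<15 = 32768 entries. For the dictionary
- // variant it is additionally split into tableShardCnt shards of tableShardSize
- // entries (derived from dictShardBits, defined elsewhere in this package), so a
- // Reset only needs to restore the shards that were actually written to.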
- type tableEntry struct {
- val uint32
- offset int32
- }
- type fastEncoder struct {
- fastBase
- table [tableSize]tableEntry
- }
- type fastEncoderDict struct {
- fastEncoder
- dictTable []tableEntry // table state as loaded from the dictionary
- tableShardDirty [tableShardCnt]bool // which table shards have been written since the last Reset
- allDirty bool // when set, the whole table must be restored from dictTable
- }
- // Encode mimics the functionality in zstd_fast.c
- func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
- const (
- inputMargin = 8
- minNonLiteralBlockSize = 1 + 1 + inputMargin
- )
- // Protect against e.cur wraparound.
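- // When e.cur gets large enough that offsets could wrap, the table is rebased:
- // every stored offset is shifted down by (e.cur - e.maxMatchOff), and entries
- // already more than maxMatchOff behind the current history are cleared. This
- // keeps offsets small without dropping any still-reachable match.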
- for e.cur >= bufferReset {
- if len(e.hist) == 0 {
- for i := range e.table[:] {
- e.table[i] = tableEntry{}
- }
- e.cur = e.maxMatchOff
- break
- }
- // Shift down everything in the table that isn't already too far away.
- minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
- for i := range e.table[:] {
- v := e.table[i].offset
- if v < minOff {
- v = 0
- } else {
- v = v - e.cur + e.maxMatchOff
- }
- e.table[i].offset = v
- }
- e.cur = e.maxMatchOff
- break
- }
- s := e.addBlock(src)
- blk.size = len(src)
- if len(src) < minNonLiteralBlockSize {
- blk.extraLits = len(src)
- blk.literals = blk.literals[:len(src)]
- copy(blk.literals, src)
- return
- }
- // Override src: use the full history buffer, which now includes this block.
- src = e.hist
- sLimit := int32(len(src)) - inputMargin
- // stepSize is the number of bytes to skip on every main loop iteration.
- // It should be >= 2.
- const stepSize = 2
- // TEMPLATE
- const hashLog = tableBits
- // seems global, but would be nice to tweak.
- const kSearchStrength = 7
- // nextEmit is where in src the next emitLiteral should start from.
- nextEmit := s
- cv := load6432(src, s)
- // Relative offsets
- offset1 := int32(blk.recentOffsets[0])
- offset2 := int32(blk.recentOffsets[1])
- addLiterals := func(s *seq, until int32) {
- if until == nextEmit {
- return
- }
- blk.literals = append(blk.literals, src[nextEmit:until]...)
- s.litLen = uint32(until - nextEmit)
- }
- if debugEncoder {
- println("recent offsets:", blk.recentOffsets)
- }
- encodeLoop:
- for {
- // t will contain the match offset when we find one.
- // When exiting the search loop, we have already checked 4 bytes.
- var t int32
- // We will not use repeat offsets across blocks.
- // By not using them for the first 3 matches we ensure the repeated offsets were produced in this block.
- canRepeat := len(blk.sequences) > 2
- for {
- if debugAsserts && canRepeat && offset1 == 0 {
- panic("offset0 was 0")
- }
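- // Two candidate positions are probed per iteration: the hash of the 6 bytes at
- // s (low bytes of cv) and of the 6 bytes at s+1 (cv>>8). Both table slots are
- // overwritten with the current positions before the candidates are examined.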
- nextHash := hashLen(cv, hashLog, tableFastHashLen)
- nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
- candidate := e.table[nextHash]
- candidate2 := e.table[nextHash2]
- repIndex := s - offset1 + 2
- e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
- e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
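- // Repeat check: uint32(cv>>16) holds the 4 bytes at s+2, and repIndex is s+2
- // translated back by the most recent offset. A match here lets us emit a
- // repeat-offset sequence without consulting the table candidates.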
- if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
- // Consider history as well.
- var seq seq
- var length int32
- // length = 4 + e.matchlen(s+6, repIndex+4, src)
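- // Inlined match length: compare 8 bytes at a time with XOR; the first non-zero
- // difference ends the match, and bits.TrailingZeros64(diff)>>3 converts matching
- // low bits into whole bytes (>> binds tighter than +, so this adds i plus that byte count).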
- {
- a := src[s+6:]
- b := src[repIndex+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- length = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- length = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
- seq.matchLen = uint32(length - zstdMinMatch)
- // We might be able to match backwards.
- // Extend as long as we can.
- start := s + 2
- // We end the search early, so we don't risk 0 literals
- // and have to do special offset treatment.
- startLimit := nextEmit + 1
- sMin := s - e.maxMatchOff
- if sMin < 0 {
- sMin = 0
- }
- for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch {
- repIndex--
- start--
- seq.matchLen++
- }
- addLiterals(&seq, start)
- // rep 0
- seq.offset = 1
- if debugSequences {
- println("repeat sequence", seq, "next s:", s)
- }
- blk.sequences = append(blk.sequences, seq)
- s += length + 2
- nextEmit = s
- if s >= sLimit {
- if debugEncoder {
- println("repeat ended", s, length)
- }
- break encodeLoop
- }
- cv = load6432(src, s)
- continue
- }
- coffset0 := s - (candidate.offset - e.cur)
- coffset1 := s - (candidate2.offset - e.cur) + 1
- if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val {
- // found a regular match
- t = candidate.offset - e.cur
- if debugAsserts && s <= t {
- panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
- }
- if debugAsserts && s-t > e.maxMatchOff {
- panic("s - t >e.maxMatchOff")
- }
- break
- }
- if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val {
- // found a regular match
- t = candidate2.offset - e.cur
- s++
- if debugAsserts && s <= t {
- panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
- }
- if debugAsserts && s-t > e.maxMatchOff {
- panic("s - t >e.maxMatchOff")
- }
- if debugAsserts && t < 0 {
- panic("t<0")
- }
- break
- }
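- // No candidate matched, so advance s. The step grows the longer we go without a
- // match: with kSearchStrength = 7, every 64 bytes of unmatched input add one
- // extra byte to the skip, so incompressible data is scanned progressively faster.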
- s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
- if s >= sLimit {
- break encodeLoop
- }
- cv = load6432(src, s)
- }
- // A 4-byte match has been found. We'll later see if it can be extended.
- offset2 = offset1
- offset1 = s - t
- if debugAsserts && s <= t {
- panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
- }
- if debugAsserts && canRepeat && int(offset1) > len(src) {
- panic("invalid offset")
- }
- // Extend the 4-byte match as long as possible.
- //l := e.matchlen(s+4, t+4, src) + 4
- var l int32
- {
- a := src[s+4:]
- b := src[t+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- l = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
- // Extend backwards
- tMin := s - e.maxMatchOff
- if tMin < 0 {
- tMin = 0
- }
- for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
- s--
- t--
- l++
- }
- // Write our sequence.
- var seq seq
- seq.litLen = uint32(s - nextEmit)
- seq.matchLen = uint32(l - zstdMinMatch)
- if seq.litLen > 0 {
- blk.literals = append(blk.literals, src[nextEmit:s]...)
- }
- // Don't use repeat offsets: codes 1-3 are reserved for repeats, so a literal match distance is stored with 3 added.
- seq.offset = uint32(s-t) + 3
- s += l
- if debugSequences {
- println("sequence", seq, "next s:", s)
- }
- blk.sequences = append(blk.sequences, seq)
- nextEmit = s
- if s >= sLimit {
- break encodeLoop
- }
- cv = load6432(src, s)
- // Check offset 2
- if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) {
- // We have at least a 4-byte match.
- // No need to check backwards; we come straight from a match.
- //l := 4 + e.matchlen(s+4, o2+4, src)
- var l int32
- {
- a := src[s+4:]
- b := src[o2+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- l = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
- // Store this, since we have it.
- nextHash := hashLen(cv, hashLog, tableFastHashLen)
- e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
- seq.matchLen = uint32(l) - zstdMinMatch
- seq.litLen = 0
- // Since litlen is always 0, this is offset 1.
- seq.offset = 1
- s += l
- nextEmit = s
- if debugSequences {
- println("sequence", seq, "next s:", s)
- }
- blk.sequences = append(blk.sequences, seq)
- // Swap offset 1 and 2.
- offset1, offset2 = offset2, offset1
- if s >= sLimit {
- break encodeLoop
- }
- // Prepare next loop.
- cv = load6432(src, s)
- }
- }
- if int(nextEmit) < len(src) {
- blk.literals = append(blk.literals, src[nextEmit:]...)
- blk.extraLits = len(src) - int(nextEmit)
- }
- blk.recentOffsets[0] = uint32(offset1)
- blk.recentOffsets[1] = uint32(offset2)
- if debugEncoder {
- println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
- }
- }
- // EncodeNoHist will encode a block with no history and no following blocks.
- // The most notable differences are that src will not be copied for history,
- // and we do not need to check for max match length.
- func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
- const (
- inputMargin = 8
- minNonLiteralBlockSize = 1 + 1 + inputMargin
- )
- if debugEncoder {
- if len(src) > maxBlockSize {
- panic("src too big")
- }
- }
- // Protect against e.cur wraparound.
- if e.cur >= bufferReset {
- for i := range e.table[:] {
- e.table[i] = tableEntry{}
- }
- e.cur = e.maxMatchOff
- }
- s := int32(0)
- blk.size = len(src)
- if len(src) < minNonLiteralBlockSize {
- blk.extraLits = len(src)
- blk.literals = blk.literals[:len(src)]
- copy(blk.literals, src)
- return
- }
- sLimit := int32(len(src)) - inputMargin
- // stepSize is the number of bytes to skip on every main loop iteration.
- // It should be >= 2.
- const stepSize = 2
- // TEMPLATE
- const hashLog = tableBits
- // seems global, but would be nice to tweak.
- const kSearchStrength = 8
- // nextEmit is where in src the next emitLiteral should start from.
- nextEmit := s
- cv := load6432(src, s)
- // Relative offsets
- offset1 := int32(blk.recentOffsets[0])
- offset2 := int32(blk.recentOffsets[1])
- addLiterals := func(s *seq, until int32) {
- if until == nextEmit {
- return
- }
- blk.literals = append(blk.literals, src[nextEmit:until]...)
- s.litLen = uint32(until - nextEmit)
- }
- if debugEncoder {
- println("recent offsets:", blk.recentOffsets)
- }
- encodeLoop:
- for {
- // t will contain the match offset when we find one.
- // When exiting the search loop, we have already checked 4 bytes.
- var t int32
- // We will not use repeat offsets across blocks.
- // By not using them for the first 3 matches we ensure the repeated offsets were produced in this block.
- for {
- nextHash := hashLen(cv, hashLog, tableFastHashLen)
- nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
- candidate := e.table[nextHash]
- candidate2 := e.table[nextHash2]
- repIndex := s - offset1 + 2
- e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
- e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
- if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) {
- // Consider history as well.
- var seq seq
- // length := 4 + e.matchlen(s+6, repIndex+4, src)
- // length := 4 + int32(matchLen(src[s+6:], src[repIndex+4:]))
- var length int32
- {
- a := src[s+6:]
- b := src[repIndex+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- length = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- length = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
- seq.matchLen = uint32(length - zstdMinMatch)
- // We might be able to match backwards.
- // Extend as long as we can.
- start := s + 2
- // We end the search early, so we don't risk 0 literals
- // and have to do special offset treatment.
- startLimit := nextEmit + 1
- sMin := s - e.maxMatchOff
- if sMin < 0 {
- sMin = 0
- }
- for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] {
- repIndex--
- start--
- seq.matchLen++
- }
- addLiterals(&seq, start)
- // rep 0
- seq.offset = 1
- if debugSequences {
- println("repeat sequence", seq, "next s:", s)
- }
- blk.sequences = append(blk.sequences, seq)
- s += length + 2
- nextEmit = s
- if s >= sLimit {
- if debugEncoder {
- println("repeat ended", s, length)
- }
- break encodeLoop
- }
- cv = load6432(src, s)
- continue
- }
- coffset0 := s - (candidate.offset - e.cur)
- coffset1 := s - (candidate2.offset - e.cur) + 1
- if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val {
- // found a regular match
- t = candidate.offset - e.cur
- if debugAsserts && s <= t {
- panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
- }
- if debugAsserts && s-t > e.maxMatchOff {
- panic("s - t >e.maxMatchOff")
- }
- if debugAsserts && t < 0 {
- panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff))
- }
- break
- }
- if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val {
- // found a regular match
- t = candidate2.offset - e.cur
- s++
- if debugAsserts && s <= t {
- panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
- }
- if debugAsserts && s-t > e.maxMatchOff {
- panic("s - t >e.maxMatchOff")
- }
- if debugAsserts && t < 0 {
- panic("t<0")
- }
- break
- }
- s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
- if s >= sLimit {
- break encodeLoop
- }
- cv = load6432(src, s)
- }
- // A 4-byte match has been found. We'll later see if it can be extended.
- offset2 = offset1
- offset1 = s - t
- if debugAsserts && s <= t {
- panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
- }
- if debugAsserts && t < 0 {
- panic(fmt.Sprintf("t (%d) < 0 ", t))
- }
- // Extend the 4-byte match as long as possible.
- //l := e.matchlenNoHist(s+4, t+4, src) + 4
- // l := int32(matchLen(src[s+4:], src[t+4:])) + 4
- var l int32
- {
- a := src[s+4:]
- b := src[t+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- l = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
- // Extend backwards
- tMin := s - e.maxMatchOff
- if tMin < 0 {
- tMin = 0
- }
- for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
- s--
- t--
- l++
- }
- // Write our sequence.
- var seq seq
- seq.litLen = uint32(s - nextEmit)
- seq.matchLen = uint32(l - zstdMinMatch)
- if seq.litLen > 0 {
- blk.literals = append(blk.literals, src[nextEmit:s]...)
- }
- // Don't use repeat offsets: codes 1-3 are reserved for repeats, so a literal match distance is stored with 3 added.
- seq.offset = uint32(s-t) + 3
- s += l
- if debugSequences {
- println("sequence", seq, "next s:", s)
- }
- blk.sequences = append(blk.sequences, seq)
- nextEmit = s
- if s >= sLimit {
- break encodeLoop
- }
- cv = load6432(src, s)
- // Check offset 2
- if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) {
- // We have at least a 4-byte match.
- // No need to check backwards; we come straight from a match.
- //l := 4 + e.matchlenNoHist(s+4, o2+4, src)
- // l := 4 + int32(matchLen(src[s+4:], src[o2+4:]))
- var l int32
- {
- a := src[s+4:]
- b := src[o2+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- l = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
- // Store this, since we have it.
- nextHash := hashLen(cv, hashLog, tableFastHashLen)
- e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
- seq.matchLen = uint32(l) - zstdMinMatch
- seq.litLen = 0
- // Since litlen is always 0, this is offset 1.
- seq.offset = 1
- s += l
- nextEmit = s
- if debugSequences {
- println("sequence", seq, "next s:", s)
- }
- blk.sequences = append(blk.sequences, seq)
- // Swap offset 1 and 2.
- offset1, offset2 = offset2, offset1
- if s >= sLimit {
- break encodeLoop
- }
- // Prepare next loop.
- cv = load6432(src, s)
- }
- }
- if int(nextEmit) < len(src) {
- blk.literals = append(blk.literals, src[nextEmit:]...)
- blk.extraLits = len(src) - int(nextEmit)
- }
- if debugEncoder {
- println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
- }
- // We do not store history, so we must offset e.cur to avoid false matches for next user.
- if e.cur < bufferReset {
- e.cur += int32(len(src))
- }
- }
- // Encode will encode the content, with a dictionary if initialized for it.
- func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) {
- const (
- inputMargin = 8
- minNonLiteralBlockSize = 1 + 1 + inputMargin
- )
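- // Shard dirty-tracking only pays off for small inputs. For blocks larger than
- // 32KiB, or once everything is dirty anyway, fall back to the plain fastEncoder
- // and simply mark the whole table dirty.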
- if e.allDirty || len(src) > 32<<10 {
- e.fastEncoder.Encode(blk, src)
- e.allDirty = true
- return
- }
- // Protect against e.cur wraparound.
- for e.cur >= bufferReset {
- if len(e.hist) == 0 {
- for i := range e.table[:] {
- e.table[i] = tableEntry{}
- }
- e.cur = e.maxMatchOff
- break
- }
- // Shift down everything in the table that isn't already too far away.
- minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
- for i := range e.table[:] {
- v := e.table[i].offset
- if v < minOff {
- v = 0
- } else {
- v = v - e.cur + e.maxMatchOff
- }
- e.table[i].offset = v
- }
- e.cur = e.maxMatchOff
- break
- }
- s := e.addBlock(src)
- blk.size = len(src)
- if len(src) < minNonLiteralBlockSize {
- blk.extraLits = len(src)
- blk.literals = blk.literals[:len(src)]
- copy(blk.literals, src)
- return
- }
- // Override src: use the full history buffer, which now includes this block.
- src = e.hist
- sLimit := int32(len(src)) - inputMargin
- // stepSize is the number of bytes to skip on every main loop iteration.
- // It should be >= 2.
- const stepSize = 2
- // TEMPLATE
- const hashLog = tableBits
- // seems global, but would be nice to tweak.
- const kSearchStrength = 7
- // nextEmit is where in src the next emitLiteral should start from.
- nextEmit := s
- cv := load6432(src, s)
- // Relative offsets
- offset1 := int32(blk.recentOffsets[0])
- offset2 := int32(blk.recentOffsets[1])
- addLiterals := func(s *seq, until int32) {
- if until == nextEmit {
- return
- }
- blk.literals = append(blk.literals, src[nextEmit:until]...)
- s.litLen = uint32(until - nextEmit)
- }
- if debugEncoder {
- println("recent offsets:", blk.recentOffsets)
- }
- encodeLoop:
- for {
- // t will contain the match offset when we find one.
- // When exiting the search loop, we have already checked 4 bytes.
- var t int32
- // We will not use repeat offsets across blocks.
- // By not using them for the first 3 matches we ensure the repeated offsets were produced in this block.
- canRepeat := len(blk.sequences) > 2
- for {
- if debugAsserts && canRepeat && offset1 == 0 {
- panic("offset0 was 0")
- }
- nextHash := hashLen(cv, hashLog, tableFastHashLen)
- nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
- candidate := e.table[nextHash]
- candidate2 := e.table[nextHash2]
- repIndex := s - offset1 + 2
- e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
- e.markShardDirty(nextHash)
- e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
- e.markShardDirty(nextHash2)
- if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
- // Consider history as well.
- var seq seq
- var length int32
- // length = 4 + e.matchlen(s+6, repIndex+4, src)
- {
- a := src[s+6:]
- b := src[repIndex+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- length = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- length = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
- seq.matchLen = uint32(length - zstdMinMatch)
- // We might be able to match backwards.
- // Extend as long as we can.
- start := s + 2
- // We end the search early, so we don't risk 0 literals
- // and have to do special offset treatment.
- startLimit := nextEmit + 1
- sMin := s - e.maxMatchOff
- if sMin < 0 {
- sMin = 0
- }
- for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch {
- repIndex--
- start--
- seq.matchLen++
- }
- addLiterals(&seq, start)
- // rep 0
- seq.offset = 1
- if debugSequences {
- println("repeat sequence", seq, "next s:", s)
- }
- blk.sequences = append(blk.sequences, seq)
- s += length + 2
- nextEmit = s
- if s >= sLimit {
- if debugEncoder {
- println("repeat ended", s, length)
- }
- break encodeLoop
- }
- cv = load6432(src, s)
- continue
- }
- coffset0 := s - (candidate.offset - e.cur)
- coffset1 := s - (candidate2.offset - e.cur) + 1
- if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val {
- // found a regular match
- t = candidate.offset - e.cur
- if debugAsserts && s <= t {
- panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
- }
- if debugAsserts && s-t > e.maxMatchOff {
- panic("s - t >e.maxMatchOff")
- }
- break
- }
- if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val {
- // found a regular match
- t = candidate2.offset - e.cur
- s++
- if debugAsserts && s <= t {
- panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
- }
- if debugAsserts && s-t > e.maxMatchOff {
- panic("s - t >e.maxMatchOff")
- }
- if debugAsserts && t < 0 {
- panic("t<0")
- }
- break
- }
- s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
- if s >= sLimit {
- break encodeLoop
- }
- cv = load6432(src, s)
- }
- // A 4-byte match has been found. We'll later see if it can be extended.
- offset2 = offset1
- offset1 = s - t
- if debugAsserts && s <= t {
- panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
- }
- if debugAsserts && canRepeat && int(offset1) > len(src) {
- panic("invalid offset")
- }
- // Extend the 4-byte match as long as possible.
- //l := e.matchlen(s+4, t+4, src) + 4
- var l int32
- {
- a := src[s+4:]
- b := src[t+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- l = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
- // Extend backwards
- tMin := s - e.maxMatchOff
- if tMin < 0 {
- tMin = 0
- }
- for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
- s--
- t--
- l++
- }
- // Write our sequence.
- var seq seq
- seq.litLen = uint32(s - nextEmit)
- seq.matchLen = uint32(l - zstdMinMatch)
- if seq.litLen > 0 {
- blk.literals = append(blk.literals, src[nextEmit:s]...)
- }
- // Don't use repeat offsets: codes 1-3 are reserved for repeats, so a literal match distance is stored with 3 added.
- seq.offset = uint32(s-t) + 3
- s += l
- if debugSequences {
- println("sequence", seq, "next s:", s)
- }
- blk.sequences = append(blk.sequences, seq)
- nextEmit = s
- if s >= sLimit {
- break encodeLoop
- }
- cv = load6432(src, s)
- // Check offset 2
- if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) {
- // We have at least a 4-byte match.
- // No need to check backwards; we come straight from a match.
- //l := 4 + e.matchlen(s+4, o2+4, src)
- var l int32
- {
- a := src[s+4:]
- b := src[o2+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- l = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
- // Store this, since we have it.
- nextHash := hashLen(cv, hashLog, tableFastHashLen)
- e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
- e.markShardDirty(nextHash)
- seq.matchLen = uint32(l) - zstdMinMatch
- seq.litLen = 0
- // Since litlen is always 0, this is offset 1.
- seq.offset = 1
- s += l
- nextEmit = s
- if debugSequences {
- println("sequence", seq, "next s:", s)
- }
- blk.sequences = append(blk.sequences, seq)
- // Swap offset 1 and 2.
- offset1, offset2 = offset2, offset1
- if s >= sLimit {
- break encodeLoop
- }
- // Prepare next loop.
- cv = load6432(src, s)
- }
- }
- if int(nextEmit) < len(src) {
- blk.literals = append(blk.literals, src[nextEmit:]...)
- blk.extraLits = len(src) - int(nextEmit)
- }
- blk.recentOffsets[0] = uint32(offset1)
- blk.recentOffsets[1] = uint32(offset2)
- if debugEncoder {
- println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
- }
- }
- // Reset resets the encoder state. The plain fastEncoder does not support dictionaries.
- func (e *fastEncoder) Reset(d *dict, singleBlock bool) {
- e.resetBase(d, singleBlock)
- if d != nil {
- panic("fastEncoder: Reset with dict")
- }
- }
- // Reset will reset the encoder and set a dictionary if not nil.
- func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
- e.resetBase(d, singleBlock)
- if d == nil {
- return
- }
- // Init or copy dict table
- if len(e.dictTable) != len(e.table) || d.id != e.lastDictID {
- if len(e.dictTable) != len(e.table) {
- e.dictTable = make([]tableEntry, len(e.table))
- }
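- // Fill dictTable from the dictionary content: step 3 bytes at a time and store
- // hashes for positions i, i+1 and i+2, so every dictionary position is indexed
- // while each 8-byte load is reused three times.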
- end := e.maxMatchOff + int32(len(d.content)) - 8
- for i := e.maxMatchOff; i < end; i += 3 {
- const hashLog = tableBits
- cv := load6432(d.content, i-e.maxMatchOff)
- nextHash := hashLen(cv, hashLog, tableFastHashLen) // hashes bytes 0-5 of cv
- nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // hashes bytes 1-6 of cv
- nextHash2 := hashLen(cv>>16, hashLog, tableFastHashLen) // hashes bytes 2-7 of cv
- e.dictTable[nextHash] = tableEntry{
- val: uint32(cv),
- offset: i,
- }
- e.dictTable[nextHash1] = tableEntry{
- val: uint32(cv >> 8),
- offset: i + 1,
- }
- e.dictTable[nextHash2] = tableEntry{
- val: uint32(cv >> 16),
- offset: i + 2,
- }
- }
- e.lastDictID = d.id
- e.allDirty = true
- }
- e.cur = e.maxMatchOff
- dirtyShardCnt := 0
- if !e.allDirty {
- for i := range e.tableShardDirty {
- if e.tableShardDirty[i] {
- dirtyShardCnt++
- }
- }
- }
- const shardCnt = tableShardCnt
- const shardSize = tableShardSize
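- // If more than roughly two thirds of the shards are dirty, restoring the whole
- // table with a single copy is cheaper than copying each dirty shard separately.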
- if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
- copy(e.table[:], e.dictTable)
- for i := range e.tableShardDirty {
- e.tableShardDirty[i] = false
- }
- e.allDirty = false
- return
- }
- for i := range e.tableShardDirty {
- if !e.tableShardDirty[i] {
- continue
- }
- copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
- e.tableShardDirty[i] = false
- }
- e.allDirty = false
- }
- func (e *fastEncoderDict) markAllShardsDirty() {
- e.allDirty = true
- }
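- // markShardDirty records that the shard containing table entry entryNum was
- // modified, so Reset knows to restore it from dictTable.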
- func (e *fastEncoderDict) markShardDirty(entryNum uint32) {
- e.tableShardDirty[entryNum/tableShardSize] = true
- }
|