level2.go 5.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214
  1. package flate
  2. import "fmt"
// fastEncL2 maintains the table for matches,
// and the previous byte block for level 2.
// This is the generic implementation.
type fastEncL2 struct {
	// fastGen supplies the shared history buffer (hist), the running
	// offset base (cur) and helpers such as addBlock/matchlenLong.
	fastGen
	// table maps a hash of the next hashBytes input bytes to the most
	// recent position that produced that hash. Stored offsets are
	// absolute positions biased by e.cur (written as s+e.cur, read back
	// as candidate.offset-e.cur); a stale or zero entry simply fails the
	// maxMatchOffset distance check in Encode.
	table [bTableSize]tableEntry
}
// Encode uses a similar algorithm to level 1, but is capable
// of matching across blocks giving better compression at a small slowdown.
//
// dst receives the emitted literal/match tokens; src is appended to the
// encoder history so matches may reach back into previous blocks (up to
// maxMatchOffset).
func (e *fastEncL2) Encode(dst *tokens, src []byte) {
	const (
		// inputMargin keeps enough tail bytes unexamined that the 64-bit
		// loads (load6432) below never read past the end of src.
		inputMargin            = 12 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
		// hashBytes is how many of the loaded bytes participate in hashLen.
		hashBytes = 5
	)
	if debugDeflate && e.cur < 0 {
		panic(fmt.Sprint("e.cur < 0: ", e.cur))
	}

	// Protect against e.cur wraparound.
	for e.cur >= bufferReset {
		if len(e.hist) == 0 {
			// No history to preserve: drop every table entry and restart
			// the offset base.
			for i := range e.table[:] {
				e.table[i] = tableEntry{}
			}
			e.cur = maxMatchOffset
			break
		}
		// Shift down everything in the table that isn't already too far away.
		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
		for i := range e.table[:] {
			v := e.table[i].offset
			if v <= minOff {
				// Entry is out of reach after rebasing; mark it invalid.
				v = 0
			} else {
				v = v - e.cur + maxMatchOffset
			}
			e.table[i].offset = v
		}
		e.cur = maxMatchOffset
	}

	// addBlock appends src to e.hist and returns the starting position of
	// this block within the (possibly longer) history.
	s := e.addBlock(src)

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		return
	}

	// Override src; from here on all positions index into the history,
	// so matches can cross the original block boundary.
	src = e.hist
	nextEmit := s

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)

	// nextEmit is where in src the next emitLiteral should start from.
	cv := load6432(src, s)
	for {
		// When should we start skipping if we haven't found matches in a long while.
		const skipLog = 5
		const doEvery = 2

		nextS := s
		var candidate tableEntry
		for {
			nextHash := hashLen(cv, bTableBits, hashBytes)
			s = nextS
			// Step size grows as the distance since the last emit grows,
			// so incompressible data is skipped over faster.
			nextS = s + doEvery + (s-nextEmit)>>skipLog
			if nextS > sLimit {
				goto emitRemainder
			}
			candidate = e.table[nextHash]
			now := load6432(src, nextS)
			e.table[nextHash] = tableEntry{offset: s + e.cur}
			nextHash = hashLen(now, bTableBits, hashBytes)

			// Candidate is usable only if it is close enough and the four
			// bytes at the candidate actually equal the four bytes at s.
			offset := s - (candidate.offset - e.cur)
			if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
				break
			}

			// Do one right away...
			cv = now
			s = nextS
			nextS++
			candidate = e.table[nextHash]
			now >>= 8
			e.table[nextHash] = tableEntry{offset: s + e.cur}

			offset = s - (candidate.offset - e.cur)
			if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
				break
			}
			cv = now
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.

		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.

			// Extend the 4-byte match as long as possible.
			t := candidate.offset - e.cur
			l := e.matchlenLong(s+4, t+4, src) + 4

			// Extend backwards
			for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
				s--
				t--
				l++
			}
			if nextEmit < s {
				if false {
					emitLiteral(dst, src[nextEmit:s])
				} else {
					// Inlined equivalent of emitLiteral for the main loop.
					for _, v := range src[nextEmit:s] {
						dst.tokens[dst.n] = token(v)
						dst.litHist[v]++
						dst.n++
					}
				}
			}

			dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
			s += l
			nextEmit = s
			if nextS >= s {
				s = nextS + 1
			}

			if s >= sLimit {
				// Index first pair after match end.
				if int(s+l+8) < len(src) {
					cv := load6432(src, s)
					e.table[hashLen(cv, bTableBits, hashBytes)] = tableEntry{offset: s + e.cur}
				}
				goto emitRemainder
			}

			// Store every second hash in-between, but offset by 1.
			for i := s - l + 2; i < s-5; i += 7 {
				x := load6432(src, i)
				nextHash := hashLen(x, bTableBits, hashBytes)
				e.table[nextHash] = tableEntry{offset: e.cur + i}
				// Skip one
				x >>= 16
				nextHash = hashLen(x, bTableBits, hashBytes)
				e.table[nextHash] = tableEntry{offset: e.cur + i + 2}
				// Skip one
				x >>= 16
				nextHash = hashLen(x, bTableBits, hashBytes)
				e.table[nextHash] = tableEntry{offset: e.cur + i + 4}
			}

			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-2 to s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load6432(src, s-2)
			o := e.cur + s - 2
			prevHash := hashLen(x, bTableBits, hashBytes)
			prevHash2 := hashLen(x>>8, bTableBits, hashBytes)
			e.table[prevHash] = tableEntry{offset: o}
			e.table[prevHash2] = tableEntry{offset: o + 1}
			currHash := hashLen(x>>16, bTableBits, hashBytes)
			candidate = e.table[currHash]
			e.table[currHash] = tableEntry{offset: o + 2}

			offset := s - (candidate.offset - e.cur)
			if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) {
				// No immediate follow-up match: resume scanning at s+1.
				cv = x >> 24
				s++
				break
			}
		}
	}

emitRemainder:
	if int(nextEmit) < len(src) {
		// If nothing was added, don't encode literals.
		if dst.n == 0 {
			return
		}
		emitLiteral(dst, src[nextEmit:])
	}
}