// level1.go
  1. package flate
  2. import (
  3. "encoding/binary"
  4. "fmt"
  5. "math/bits"
  6. )
  7. // fastGen maintains the table for matches,
  8. // and the previous byte block for level 2.
  9. // This is the generic implementation.
  10. type fastEncL1 struct {
  11. fastGen
  12. table [tableSize]tableEntry
  13. }
  14. // EncodeL1 uses a similar algorithm to level 1
  15. func (e *fastEncL1) Encode(dst *tokens, src []byte) {
  16. const (
  17. inputMargin = 12 - 1
  18. minNonLiteralBlockSize = 1 + 1 + inputMargin
  19. hashBytes = 5
  20. )
  21. if debugDeflate && e.cur < 0 {
  22. panic(fmt.Sprint("e.cur < 0: ", e.cur))
  23. }
  24. // Protect against e.cur wraparound.
  25. for e.cur >= bufferReset {
  26. if len(e.hist) == 0 {
  27. for i := range e.table[:] {
  28. e.table[i] = tableEntry{}
  29. }
  30. e.cur = maxMatchOffset
  31. break
  32. }
  33. // Shift down everything in the table that isn't already too far away.
  34. minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
  35. for i := range e.table[:] {
  36. v := e.table[i].offset
  37. if v <= minOff {
  38. v = 0
  39. } else {
  40. v = v - e.cur + maxMatchOffset
  41. }
  42. e.table[i].offset = v
  43. }
  44. e.cur = maxMatchOffset
  45. }
  46. s := e.addBlock(src)
  47. // This check isn't in the Snappy implementation, but there, the caller
  48. // instead of the callee handles this case.
  49. if len(src) < minNonLiteralBlockSize {
  50. // We do not fill the token table.
  51. // This will be picked up by caller.
  52. dst.n = uint16(len(src))
  53. return
  54. }
  55. // Override src
  56. src = e.hist
  57. nextEmit := s
  58. // sLimit is when to stop looking for offset/length copies. The inputMargin
  59. // lets us use a fast path for emitLiteral in the main loop, while we are
  60. // looking for copies.
  61. sLimit := int32(len(src) - inputMargin)
  62. // nextEmit is where in src the next emitLiteral should start from.
  63. cv := load6432(src, s)
  64. for {
  65. const skipLog = 5
  66. const doEvery = 2
  67. nextS := s
  68. var candidate tableEntry
  69. for {
  70. nextHash := hashLen(cv, tableBits, hashBytes)
  71. candidate = e.table[nextHash]
  72. nextS = s + doEvery + (s-nextEmit)>>skipLog
  73. if nextS > sLimit {
  74. goto emitRemainder
  75. }
  76. now := load6432(src, nextS)
  77. e.table[nextHash] = tableEntry{offset: s + e.cur}
  78. nextHash = hashLen(now, tableBits, hashBytes)
  79. offset := s - (candidate.offset - e.cur)
  80. if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
  81. e.table[nextHash] = tableEntry{offset: nextS + e.cur}
  82. break
  83. }
  84. // Do one right away...
  85. cv = now
  86. s = nextS
  87. nextS++
  88. candidate = e.table[nextHash]
  89. now >>= 8
  90. e.table[nextHash] = tableEntry{offset: s + e.cur}
  91. offset = s - (candidate.offset - e.cur)
  92. if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
  93. e.table[nextHash] = tableEntry{offset: nextS + e.cur}
  94. break
  95. }
  96. cv = now
  97. s = nextS
  98. }
  99. // A 4-byte match has been found. We'll later see if more than 4 bytes
  100. // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
  101. // them as literal bytes.
  102. for {
  103. // Invariant: we have a 4-byte match at s, and no need to emit any
  104. // literal bytes prior to s.
  105. // Extend the 4-byte match as long as possible.
  106. t := candidate.offset - e.cur
  107. var l = int32(4)
  108. if false {
  109. l = e.matchlenLong(s+4, t+4, src) + 4
  110. } else {
  111. // inlined:
  112. a := src[s+4:]
  113. b := src[t+4:]
  114. for len(a) >= 8 {
  115. if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
  116. l += int32(bits.TrailingZeros64(diff) >> 3)
  117. break
  118. }
  119. l += 8
  120. a = a[8:]
  121. b = b[8:]
  122. }
  123. if len(a) < 8 {
  124. b = b[:len(a)]
  125. for i := range a {
  126. if a[i] != b[i] {
  127. break
  128. }
  129. l++
  130. }
  131. }
  132. }
  133. // Extend backwards
  134. for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
  135. s--
  136. t--
  137. l++
  138. }
  139. if nextEmit < s {
  140. if false {
  141. emitLiteral(dst, src[nextEmit:s])
  142. } else {
  143. for _, v := range src[nextEmit:s] {
  144. dst.tokens[dst.n] = token(v)
  145. dst.litHist[v]++
  146. dst.n++
  147. }
  148. }
  149. }
  150. // Save the match found
  151. if false {
  152. dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
  153. } else {
  154. // Inlined...
  155. xoffset := uint32(s - t - baseMatchOffset)
  156. xlength := l
  157. oc := offsetCode(xoffset)
  158. xoffset |= oc << 16
  159. for xlength > 0 {
  160. xl := xlength
  161. if xl > 258 {
  162. if xl > 258+baseMatchLength {
  163. xl = 258
  164. } else {
  165. xl = 258 - baseMatchLength
  166. }
  167. }
  168. xlength -= xl
  169. xl -= baseMatchLength
  170. dst.extraHist[lengthCodes1[uint8(xl)]]++
  171. dst.offHist[oc]++
  172. dst.tokens[dst.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
  173. dst.n++
  174. }
  175. }
  176. s += l
  177. nextEmit = s
  178. if nextS >= s {
  179. s = nextS + 1
  180. }
  181. if s >= sLimit {
  182. // Index first pair after match end.
  183. if int(s+l+8) < len(src) {
  184. cv := load6432(src, s)
  185. e.table[hashLen(cv, tableBits, hashBytes)] = tableEntry{offset: s + e.cur}
  186. }
  187. goto emitRemainder
  188. }
  189. // We could immediately start working at s now, but to improve
  190. // compression we first update the hash table at s-2 and at s. If
  191. // another emitCopy is not our next move, also calculate nextHash
  192. // at s+1. At least on GOARCH=amd64, these three hash calculations
  193. // are faster as one load64 call (with some shifts) instead of
  194. // three load32 calls.
  195. x := load6432(src, s-2)
  196. o := e.cur + s - 2
  197. prevHash := hashLen(x, tableBits, hashBytes)
  198. e.table[prevHash] = tableEntry{offset: o}
  199. x >>= 16
  200. currHash := hashLen(x, tableBits, hashBytes)
  201. candidate = e.table[currHash]
  202. e.table[currHash] = tableEntry{offset: o + 2}
  203. offset := s - (candidate.offset - e.cur)
  204. if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) {
  205. cv = x >> 8
  206. s++
  207. break
  208. }
  209. }
  210. }
  211. emitRemainder:
  212. if int(nextEmit) < len(src) {
  213. // If nothing was added, don't encode literals.
  214. if dst.n == 0 {
  215. return
  216. }
  217. emitLiteral(dst, src[nextEmit:])
  218. }
  219. }