huffman_bit_writer.go 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182
  1. // Copyright 2009 The Go Authors. All rights reserved.
  2. // Use of this source code is governed by a BSD-style
  3. // license that can be found in the LICENSE file.
  4. package flate
  5. import (
  6. "encoding/binary"
  7. "fmt"
  8. "io"
  9. "math"
  10. )
const (
	// offsetCodeCount is the largest offset code.
	offsetCodeCount = 30

	// endBlockMarker is the special code used to mark the end of a block.
	endBlockMarker = 256

	// lengthCodesStart is the first length code.
	lengthCodesStart = 257

	// codegenCodeCount is the number of codegen codes.
	codegenCodeCount = 19

	// badCode marks the end of the run-length-encoded codegen sequence
	// produced by generateCodegen.
	badCode = 255

	// maxPredefinedTokens is the maximum number of tokens
	// where we check if fixed size is smaller.
	maxPredefinedTokens = 250

	// bufferFlushSize indicates the buffer size
	// after which bytes are flushed to the writer.
	// Should preferably be a multiple of 6, since
	// we accumulate 6 bytes between writes to the buffer.
	bufferFlushSize = 246
)
// lengthExtraBitsMinCode is the minimum length code that emits extra bits.
const lengthExtraBitsMinCode = 8

// lengthExtraBits is the number of extra bits needed by length code X - LENGTH_CODES_START.
var lengthExtraBits = [32]uint8{
	/* 257 */ 0, 0, 0,
	/* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
	/* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
	/* 280 */ 4, 5, 5, 5, 5, 0,
}

// lengthBase is the length indicated by length code X - LENGTH_CODES_START.
var lengthBase = [32]uint8{
	0, 1, 2, 3, 4, 5, 6, 7, 8, 10,
	12, 14, 16, 20, 24, 28, 32, 40, 48, 56,
	64, 80, 96, 112, 128, 160, 192, 224, 255,
}

// offsetExtraBitsMinCode is the minimum offset code that emits extra bits.
const offsetExtraBitsMinCode = 4

// offsetExtraBits holds the offset code word extra bits.
var offsetExtraBits = [32]int8{
	0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
	4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
	9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
	/* extended window */
	14, 14,
}
  55. var offsetCombined = [32]uint32{}
  56. func init() {
  57. var offsetBase = [32]uint32{
  58. /* normal deflate */
  59. 0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
  60. 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
  61. 0x000020, 0x000030, 0x000040, 0x000060, 0x000080,
  62. 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300,
  63. 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000,
  64. 0x001800, 0x002000, 0x003000, 0x004000, 0x006000,
  65. /* extended window */
  66. 0x008000, 0x00c000,
  67. }
  68. for i := range offsetCombined[:] {
  69. // Don't use extended window values...
  70. if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 {
  71. continue
  72. }
  73. offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8)
  74. }
  75. }
// codegenOrder is the odd order in which the codegen code sizes are written
// (per RFC 1951 3.2.7).
var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
// huffmanBitWriter buffers bits and bytes and writes complete DEFLATE
// blocks to an underlying io.Writer.
type huffmanBitWriter struct {
	// writer is the underlying writer.
	// Do not use it directly; use the write method, which ensures
	// that Write errors are sticky.
	writer io.Writer

	// Data waiting to be written is bytes[0:nbytes]
	// and then the low nbits of bits.
	bits   uint64
	nbits  uint8
	nbytes uint8
	// lastHuffMan indicates the last table written encodes literals only,
	// so offsets are invalid (see the "Huffman reuse" notes below).
	lastHuffMan     bool
	literalEncoding *huffmanEncoder
	tmpLitEncoding  *huffmanEncoder
	offsetEncoding  *huffmanEncoder
	codegenEncoding *huffmanEncoder
	// err holds the first sticky error from the underlying writer.
	err error
	// lastHeader, when > 0, is the bit size of the last written table header
	// and indicates an EOB for that table is still owed.
	lastHeader int
	// logNewTablePenalty is the log2 penalty added to the estimated cost of
	// generating a new table; 0 means a reused block may cost up to 2x the
	// optimal size before a new table is preferred.
	logNewTablePenalty uint
	bytes              [256 + 8]byte
	literalFreq        [lengthCodesStart + 32]uint16
	offsetFreq         [32]uint16
	codegenFreq        [codegenCodeCount]uint16

	// codegen must have an extra space for the final symbol.
	codegen [literalCount + offsetCodeCount + 1]uint8
}
  104. // Huffman reuse.
  105. //
  106. // The huffmanBitWriter supports reusing huffman tables and thereby combining block sections.
  107. //
  108. // This is controlled by several variables:
  109. //
  110. // If lastHeader is non-zero the Huffman table can be reused.
  111. // This also indicates that a Huffman table has been generated that can output all
  112. // possible symbols.
// It also indicates that an EOB has not yet been emitted, so if a new table is generated
// an EOB with the previous table must be written.
  115. //
  116. // If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid.
  117. //
// An incoming block estimates the output size of a new table by calculating the
// optimal size and adding a penalty in 'logNewTablePenalty'.
  120. // A Huffman table is not optimal, which is why we add a penalty, and generating a new table
  121. // is slower both for compression and decompression.
  122. func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
  123. return &huffmanBitWriter{
  124. writer: w,
  125. literalEncoding: newHuffmanEncoder(literalCount),
  126. tmpLitEncoding: newHuffmanEncoder(literalCount),
  127. codegenEncoding: newHuffmanEncoder(codegenCodeCount),
  128. offsetEncoding: newHuffmanEncoder(offsetCodeCount),
  129. }
  130. }
  131. func (w *huffmanBitWriter) reset(writer io.Writer) {
  132. w.writer = writer
  133. w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil
  134. w.lastHeader = 0
  135. w.lastHuffMan = false
  136. }
  137. func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) {
  138. a := t.offHist[:offsetCodeCount]
  139. b := w.offsetEncoding.codes
  140. b = b[:len(a)]
  141. for i, v := range a {
  142. if v != 0 && b[i].zero() {
  143. return false
  144. }
  145. }
  146. a = t.extraHist[:literalCount-256]
  147. b = w.literalEncoding.codes[256:literalCount]
  148. b = b[:len(a)]
  149. for i, v := range a {
  150. if v != 0 && b[i].zero() {
  151. return false
  152. }
  153. }
  154. a = t.litHist[:256]
  155. b = w.literalEncoding.codes[:len(a)]
  156. for i, v := range a {
  157. if v != 0 && b[i].zero() {
  158. return false
  159. }
  160. }
  161. return true
  162. }
// flush writes any pending end-of-block code and all buffered bits/bytes
// to the underlying writer, zero-padding the final partial byte.
func (w *huffmanBitWriter) flush() {
	if w.err != nil {
		w.nbits = 0
		return
	}
	if w.lastHeader > 0 {
		// We owe an EOB
		w.writeCode(w.literalEncoding.codes[endBlockMarker])
		w.lastHeader = 0
	}
	n := w.nbytes
	// Drain the bit accumulator one byte at a time; the last byte may
	// contain fewer than 8 valid bits (the rest are zero padding).
	for w.nbits != 0 {
		w.bytes[n] = byte(w.bits)
		w.bits >>= 8
		if w.nbits > 8 { // Avoid underflow
			w.nbits -= 8
		} else {
			w.nbits = 0
		}
		n++
	}
	w.bits = 0
	w.write(w.bytes[:n])
	w.nbytes = 0
}
  188. func (w *huffmanBitWriter) write(b []byte) {
  189. if w.err != nil {
  190. return
  191. }
  192. _, w.err = w.writer.Write(b)
  193. }
// writeBits adds the low nb bits of b to the bit accumulator, spilling to
// the byte buffer once at least 48 bits have accumulated.
func (w *huffmanBitWriter) writeBits(b int32, nb uint8) {
	// "& 63" keeps the shift amount provably in range for the compiler.
	w.bits |= uint64(b) << (w.nbits & 63)
	w.nbits += nb
	if w.nbits >= 48 {
		w.writeOutBits()
	}
}
// writeBytes flushes any whole buffered bytes and then writes bytes
// directly to the underlying writer. The bit accumulator must be
// byte-aligned when called; otherwise a sticky InternalError is set.
func (w *huffmanBitWriter) writeBytes(bytes []byte) {
	if w.err != nil {
		return
	}
	n := w.nbytes
	if w.nbits&7 != 0 {
		w.err = InternalError("writeBytes with unfinished bits")
		return
	}
	// Drain whole bytes from the bit accumulator into the byte buffer.
	for w.nbits != 0 {
		w.bytes[n] = byte(w.bits)
		w.bits >>= 8
		w.nbits -= 8
		n++
	}
	if n != 0 {
		w.write(w.bytes[:n])
	}
	w.nbytes = 0
	w.write(bytes)
}
// RFC 1951 3.2.7 specifies a special run-length encoding for specifying
// the literal and offset lengths arrays (which are concatenated into a single
// array). This method generates that run-length encoding.
//
// The result is written into the codegen array, and the frequencies
// of each code is written into the codegenFreq array.
// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional
// information. Code badCode is an end marker
//
//	numLiterals    The number of literals in literalEncoding
//	numOffsets     The number of offsets in offsetEncoding
//	litEnc, offEnc The literal and offset encoder to use
func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) {
	for i := range w.codegenFreq {
		w.codegenFreq[i] = 0
	}
	// Note that we are using codegen both as a temporary variable for holding
	// a copy of the frequencies, and as the place where we put the result.
	// This is fine because the output is always shorter than the input used
	// so far.
	codegen := w.codegen[:] // cache
	// Copy the concatenated code sizes to codegen. Put a marker at the end.
	cgnl := codegen[:numLiterals]
	for i := range cgnl {
		cgnl[i] = litEnc.codes[i].len()
	}

	cgnl = codegen[numLiterals : numLiterals+numOffsets]
	for i := range cgnl {
		cgnl[i] = offEnc.codes[i].len()
	}
	codegen[numLiterals+numOffsets] = badCode

	size := codegen[0]
	count := 1
	outIndex := 0
	for inIndex := 1; size != badCode; inIndex++ {
		// INVARIANT: We have seen "count" copies of size that have not yet
		// had output generated for them.
		nextSize := codegen[inIndex]
		if nextSize == size {
			count++
			continue
		}
		// We need to generate codegen indicating "count" of size.
		if size != 0 {
			// Emit the size once literally, then code 16 ("repeat previous
			// length 3-6 times") for any remaining long run.
			codegen[outIndex] = size
			outIndex++
			w.codegenFreq[size]++
			count--
			for count >= 3 {
				n := 6
				if n > count {
					n = count
				}
				codegen[outIndex] = 16
				outIndex++
				codegen[outIndex] = uint8(n - 3)
				outIndex++
				w.codegenFreq[16]++
				count -= n
			}
		} else {
			// Zero runs: code 18 repeats zero 11-138 times,
			// code 17 repeats zero 3-10 times.
			for count >= 11 {
				n := 138
				if n > count {
					n = count
				}
				codegen[outIndex] = 18
				outIndex++
				codegen[outIndex] = uint8(n - 11)
				outIndex++
				w.codegenFreq[18]++
				count -= n
			}
			if count >= 3 {
				// count >= 3 && count <= 10
				codegen[outIndex] = 17
				outIndex++
				codegen[outIndex] = uint8(count - 3)
				outIndex++
				w.codegenFreq[17]++
				count = 0
			}
		}
		// Whatever remains (at most 2 entries) is emitted literally.
		count--
		for ; count >= 0; count-- {
			codegen[outIndex] = size
			outIndex++
			w.codegenFreq[size]++
		}
		// Set up invariant for next time through the loop.
		size = nextSize
		count = 1
	}
	// Marker indicating the end of the codegen.
	codegen[outIndex] = badCode
}
  318. func (w *huffmanBitWriter) codegens() int {
  319. numCodegens := len(w.codegenFreq)
  320. for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
  321. numCodegens--
  322. }
  323. return numCodegens
  324. }
  325. func (w *huffmanBitWriter) headerSize() (size, numCodegens int) {
  326. numCodegens = len(w.codegenFreq)
  327. for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
  328. numCodegens--
  329. }
  330. return 3 + 5 + 5 + 4 + (3 * numCodegens) +
  331. w.codegenEncoding.bitLength(w.codegenFreq[:]) +
  332. int(w.codegenFreq[16])*2 +
  333. int(w.codegenFreq[17])*3 +
  334. int(w.codegenFreq[18])*7, numCodegens
  335. }
  336. // dynamicSize returns the size of dynamically encoded data in bits.
  337. func (w *huffmanBitWriter) dynamicReuseSize(litEnc, offEnc *huffmanEncoder) (size int) {
  338. size = litEnc.bitLength(w.literalFreq[:]) +
  339. offEnc.bitLength(w.offsetFreq[:])
  340. return size
  341. }
  342. // dynamicSize returns the size of dynamically encoded data in bits.
  343. func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) {
  344. header, numCodegens := w.headerSize()
  345. size = header +
  346. litEnc.bitLength(w.literalFreq[:]) +
  347. offEnc.bitLength(w.offsetFreq[:]) +
  348. extraBits
  349. return size, numCodegens
  350. }
  351. // extraBitSize will return the number of bits that will be written
  352. // as "extra" bits on matches.
  353. func (w *huffmanBitWriter) extraBitSize() int {
  354. total := 0
  355. for i, n := range w.literalFreq[257:literalCount] {
  356. total += int(n) * int(lengthExtraBits[i&31])
  357. }
  358. for i, n := range w.offsetFreq[:offsetCodeCount] {
  359. total += int(n) * int(offsetExtraBits[i&31])
  360. }
  361. return total
  362. }
  363. // fixedSize returns the size of dynamically encoded data in bits.
  364. func (w *huffmanBitWriter) fixedSize(extraBits int) int {
  365. return 3 +
  366. fixedLiteralEncoding.bitLength(w.literalFreq[:]) +
  367. fixedOffsetEncoding.bitLength(w.offsetFreq[:]) +
  368. extraBits
  369. }
  370. // storedSize calculates the stored size, including header.
  371. // The function returns the size in bits and whether the block
  372. // fits inside a single block.
  373. func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
  374. if in == nil {
  375. return 0, false
  376. }
  377. if len(in) <= maxStoreBlockSize {
  378. return (len(in) + 5) * 8, true
  379. }
  380. return 0, false
  381. }
// writeCode emits the Huffman code c into the bit accumulator, spilling
// to the byte buffer once at least 48 bits have accumulated.
func (w *huffmanBitWriter) writeCode(c hcode) {
	// The function does not get inlined if we "& 63" the shift.
	w.bits |= c.code64() << (w.nbits & 63)
	w.nbits += c.len()
	if w.nbits >= 48 {
		w.writeOutBits()
	}
}
// writeOutBits will write bits to the buffer.
// It moves 48 bits (6 bytes) from the bit accumulator into the byte
// buffer, flushing the buffer to the writer when it is nearly full.
func (w *huffmanBitWriter) writeOutBits() {
	bits := w.bits
	w.bits >>= 48
	w.nbits -= 48
	n := w.nbytes

	// We over-write, but faster...
	// PutUint64 writes 8 bytes but only 6 are kept; the bytes array has
	// 8 bytes of slack ([256+8]byte) to make this safe.
	binary.LittleEndian.PutUint64(w.bytes[n:], bits)
	n += 6

	if n >= bufferFlushSize {
		if w.err != nil {
			// NOTE(review): returns without storing n into w.nbytes; this
			// appears harmless since the sticky error suppresses all
			// further output — confirm before changing.
			n = 0
			return
		}
		w.write(w.bytes[:n])
		n = 0
	}

	w.nbytes = n
}
// Write the header of a dynamic Huffman block to the output stream.
//
//	numLiterals The number of literals specified in codegen
//	numOffsets  The number of offsets specified in codegen
//	numCodegens The number of codegens used in codegen
func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) {
	if w.err != nil {
		return
	}
	// Block type bits: 4 = BTYPE 10 (dynamic), 5 adds the BFINAL bit.
	var firstBits int32 = 4
	if isEof {
		firstBits = 5
	}
	w.writeBits(firstBits, 3)
	// HLIT, HDIST and HCLEN fields per RFC 1951 3.2.7.
	w.writeBits(int32(numLiterals-257), 5)
	w.writeBits(int32(numOffsets-1), 5)
	w.writeBits(int32(numCodegens-4), 4)

	// Code lengths for the codegen alphabet, 3 bits each, in codegenOrder.
	for i := 0; i < numCodegens; i++ {
		value := uint(w.codegenEncoding.codes[codegenOrder[i]].len())
		w.writeBits(int32(value), 3)
	}

	// The run-length-encoded code lengths produced by generateCodegen,
	// terminated by badCode. Codes 16/17/18 carry extra repeat counts.
	i := 0
	for {
		var codeWord = uint32(w.codegen[i])
		i++
		if codeWord == badCode {
			break
		}
		w.writeCode(w.codegenEncoding.codes[codeWord])

		switch codeWord {
		case 16:
			w.writeBits(int32(w.codegen[i]), 2)
			i++
		case 17:
			w.writeBits(int32(w.codegen[i]), 3)
			i++
		case 18:
			w.writeBits(int32(w.codegen[i]), 7)
			i++
		}
	}
}
// writeStoredHeader will write a stored header.
// If the stored block is only used for EOF,
// it is replaced with a fixed huffman block.
func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) {
	if w.err != nil {
		return
	}
	if w.lastHeader > 0 {
		// We owe an EOB
		w.writeCode(w.literalEncoding.codes[endBlockMarker])
		w.lastHeader = 0
	}

	// To write EOF, use a fixed encoding block. 10 bits instead of 5 bytes.
	if length == 0 && isEof {
		w.writeFixedHeader(isEof)
		// EOB: 7 bits, value: 0
		w.writeBits(0, 7)
		w.flush()
		return
	}

	// Block type bits: BTYPE 00 (stored); flag 1 adds the BFINAL bit.
	var flag int32
	if isEof {
		flag = 1
	}
	w.writeBits(flag, 3)
	// Stored blocks are byte-aligned; flush pads to the byte boundary.
	w.flush()
	// LEN and NLEN (one's complement of LEN), 16 bits each.
	w.writeBits(int32(length), 16)
	w.writeBits(int32(^uint16(length)), 16)
}
  480. func (w *huffmanBitWriter) writeFixedHeader(isEof bool) {
  481. if w.err != nil {
  482. return
  483. }
  484. if w.lastHeader > 0 {
  485. // We owe an EOB
  486. w.writeCode(w.literalEncoding.codes[endBlockMarker])
  487. w.lastHeader = 0
  488. }
  489. // Indicate that we are a fixed Huffman block
  490. var value int32 = 2
  491. if isEof {
  492. value = 3
  493. }
  494. w.writeBits(value, 3)
  495. }
// writeBlock will write a block of tokens with the smallest encoding.
// The original input can be supplied, and if the huffman encoded data
// is larger than the original bytes, the data will be written as a
// stored block.
// If the input is nil, the tokens will always be Huffman encoded.
func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) {
	if w.err != nil {
		return
	}

	tokens.AddEOB()
	if w.lastHeader > 0 {
		// We owe an EOB
		w.writeCode(w.literalEncoding.codes[endBlockMarker])
		w.lastHeader = 0
	}
	numLiterals, numOffsets := w.indexTokens(tokens, false)
	w.generate()
	var extraBits int
	storedSize, storable := w.storedSize(input)
	if storable {
		extraBits = w.extraBitSize()
	}

	// Figure out smallest code.
	// Fixed Huffman baseline.
	var literalEncoding = fixedLiteralEncoding
	var offsetEncoding = fixedOffsetEncoding
	var size = math.MaxInt32
	// Only consider fixed encoding for small token counts.
	if tokens.n < maxPredefinedTokens {
		size = w.fixedSize(extraBits)
	}

	// Dynamic Huffman?
	var numCodegens int

	// Generate codegen and codegenFrequencies, which indicates how to encode
	// the literalEncoding and the offsetEncoding.
	w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
	w.codegenEncoding.generate(w.codegenFreq[:], 7)
	dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)

	if dynamicSize < size {
		size = dynamicSize
		literalEncoding = w.literalEncoding
		offsetEncoding = w.offsetEncoding
	}

	// Stored bytes?
	if storable && storedSize <= size {
		w.writeStoredHeader(len(input), eof)
		w.writeBytes(input)
		return
	}

	// Huffman.
	if literalEncoding == fixedLiteralEncoding {
		w.writeFixedHeader(eof)
	} else {
		w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
	}

	// Write the tokens.
	w.writeTokens(tokens.Slice(), literalEncoding.codes, offsetEncoding.codes)
}
// writeBlockDynamic encodes a block using a dynamic Huffman table.
// This should be used if the symbols used have a disproportionate
// histogram distribution.
// If input is supplied and the compression savings are below 1/16th of the
// input size the block is stored.
func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) {
	if w.err != nil {
		return
	}

	sync = sync || eof
	if sync {
		tokens.AddEOB()
	}

	// We cannot reuse pure huffman table, and must mark as EOF.
	if (w.lastHuffMan || eof) && w.lastHeader > 0 {
		// We will not try to reuse.
		w.writeCode(w.literalEncoding.codes[endBlockMarker])
		w.lastHeader = 0
		w.lastHuffMan = false
	}

	// fillReuse enables filling of empty values.
	// This will make encodings always reusable without testing.
	// However, this does not appear to benefit on most cases.
	const fillReuse = false

	// Check if we can reuse...
	if !fillReuse && w.lastHeader > 0 && !w.canReuse(tokens) {
		// Close the previous block; its table cannot encode these tokens.
		w.writeCode(w.literalEncoding.codes[endBlockMarker])
		w.lastHeader = 0
	}

	numLiterals, numOffsets := w.indexTokens(tokens, !sync)
	extraBits := 0
	ssize, storable := w.storedSize(input)

	const usePrefs = true
	if storable || w.lastHeader > 0 {
		extraBits = w.extraBitSize()
	}

	var size int

	// Check if we should reuse.
	if w.lastHeader > 0 {
		// Estimate size for using a new table.
		// Use the previous header size as the best estimate.
		newSize := w.lastHeader + tokens.EstimatedBits()
		newSize += int(w.literalEncoding.codes[endBlockMarker].len()) + newSize>>w.logNewTablePenalty

		// The estimated size is calculated as an optimal table.
		// We add a penalty to make it more realistic and re-use a bit more.
		reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + extraBits

		// Check if a new table is better.
		if newSize < reuseSize {
			// Write the EOB we owe.
			w.writeCode(w.literalEncoding.codes[endBlockMarker])
			size = newSize
			w.lastHeader = 0
		} else {
			size = reuseSize
		}

		// For small blocks, a fixed-table block (+7 bits for its EOB) may
		// beat both reuse and a new dynamic table.
		if tokens.n < maxPredefinedTokens {
			if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size {
				// Check if we get a reasonable size decrease.
				if storable && ssize <= size {
					w.writeStoredHeader(len(input), eof)
					w.writeBytes(input)
					return
				}
				w.writeFixedHeader(eof)
				if !sync {
					tokens.AddEOB()
				}
				w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
				return
			}
		}
		// Check if we get a reasonable size decrease.
		if storable && ssize <= size {
			w.writeStoredHeader(len(input), eof)
			w.writeBytes(input)
			return
		}
	}

	// We want a new block/table
	if w.lastHeader == 0 {
		if fillReuse && !sync {
			w.fillTokens()
			numLiterals, numOffsets = maxNumLit, maxNumDist
		} else {
			// Ensure the end-of-block symbol gets a code.
			w.literalFreq[endBlockMarker] = 1
		}

		w.generate()
		// Generate codegen and codegenFrequencies, which indicates how to encode
		// the literalEncoding and the offsetEncoding.
		w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
		w.codegenEncoding.generate(w.codegenFreq[:], 7)

		var numCodegens int
		if fillReuse && !sync {
			// Reindex for accurate size...
			w.indexTokens(tokens, true)
		}
		size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)

		// Store predefined, if we don't get a reasonable improvement.
		if tokens.n < maxPredefinedTokens {
			if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size {
				// Store bytes, if we don't get an improvement.
				if storable && ssize <= preSize {
					w.writeStoredHeader(len(input), eof)
					w.writeBytes(input)
					return
				}
				w.writeFixedHeader(eof)
				if !sync {
					tokens.AddEOB()
				}
				w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
				return
			}
		}

		if storable && ssize <= size {
			// Store bytes, if we don't get an improvement.
			w.writeStoredHeader(len(input), eof)
			w.writeBytes(input)
			return
		}

		// Write Huffman table.
		w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
		if !sync {
			// Remember the header size so the table can be reused.
			w.lastHeader, _ = w.headerSize()
		}
		w.lastHuffMan = false
	}
	if sync {
		w.lastHeader = 0
	}
	// Write the tokens.
	w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes)
}
  686. func (w *huffmanBitWriter) fillTokens() {
  687. for i, v := range w.literalFreq[:literalCount] {
  688. if v == 0 {
  689. w.literalFreq[i] = 1
  690. }
  691. }
  692. for i, v := range w.offsetFreq[:offsetCodeCount] {
  693. if v == 0 {
  694. w.offsetFreq[i] = 1
  695. }
  696. }
  697. }
// indexTokens indexes a slice of tokens, and updates
// literalFreq and offsetFreq, and generates literalEncoding
// and offsetEncoding.
// The number of literal and offset tokens is returned.
func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) {
	// Array copies via slice-to-array conversion — equivalent to the
	// commented copy() calls but avoids length checks.
	//copy(w.literalFreq[:], t.litHist[:])
	*(*[256]uint16)(w.literalFreq[:]) = t.litHist
	//copy(w.literalFreq[256:], t.extraHist[:])
	*(*[32]uint16)(w.literalFreq[256:]) = t.extraHist
	w.offsetFreq = t.offHist

	if t.n == 0 {
		return
	}
	if filled {
		// All frequencies are nonzero (see fillTokens); report maximums.
		return maxNumLit, maxNumDist
	}
	// get the number of literals
	numLiterals = len(w.literalFreq)
	for w.literalFreq[numLiterals-1] == 0 {
		numLiterals--
	}
	// get the number of offsets
	numOffsets = len(w.offsetFreq)
	for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 {
		numOffsets--
	}
	if numOffsets == 0 {
		// We haven't found a single match. If we want to go with the dynamic encoding,
		// we should count at least one offset to be sure that the offset huffman tree could be encoded.
		w.offsetFreq[0] = 1
		numOffsets = 1
	}
	return
}
// generate builds the literal and offset Huffman tables from the current
// frequency counts, with a maximum code length of 15 bits.
func (w *huffmanBitWriter) generate() {
	w.literalEncoding.generate(w.literalFreq[:literalCount], 15)
	w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15)
}
// writeTokens writes a slice of tokens to the output.
// codes for literal and offset encoding must be supplied.
// This is the hot path: the bit-accumulator logic of writeCode/writeBits
// is manually inlined and kept in locals for speed.
func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) {
	if w.err != nil {
		return
	}
	if len(tokens) == 0 {
		return
	}

	// Only last token should be endBlockMarker.
	var deferEOB bool
	if tokens[len(tokens)-1] == endBlockMarker {
		tokens = tokens[:len(tokens)-1]
		deferEOB = true
	}

	// Create slices up to the next power of two to avoid bounds checks.
	lits := leCodes[:256]
	offs := oeCodes[:32]
	lengths := leCodes[lengthCodesStart:]
	lengths = lengths[:32]

	// Go 1.16 LOVES having these on stack.
	bits, nbits, nbytes := w.bits, w.nbits, w.nbytes

	for _, t := range tokens {
		if t < 256 {
			// Literal byte.
			//w.writeCode(lits[t.literal()])
			c := lits[t]
			bits |= c.code64() << (nbits & 63)
			nbits += c.len()
			if nbits >= 48 {
				binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
				bits >>= 48
				nbits -= 48
				nbytes += 6
				if nbytes >= bufferFlushSize {
					if w.err != nil {
						nbytes = 0
						return
					}
					_, w.err = w.writer.Write(w.bytes[:nbytes])
					nbytes = 0
				}
			}
			continue
		}

		// Write the length
		length := t.length()
		lengthCode := lengthCode(length) & 31
		if false {
			w.writeCode(lengths[lengthCode])
		} else {
			// inlined
			c := lengths[lengthCode]
			bits |= c.code64() << (nbits & 63)
			nbits += c.len()
			if nbits >= 48 {
				binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
				bits >>= 48
				nbits -= 48
				nbytes += 6
				if nbytes >= bufferFlushSize {
					if w.err != nil {
						nbytes = 0
						return
					}
					_, w.err = w.writer.Write(w.bytes[:nbytes])
					nbytes = 0
				}
			}
		}

		// Extra length bits, if this length code has any.
		if lengthCode >= lengthExtraBitsMinCode {
			extraLengthBits := lengthExtraBits[lengthCode]
			//w.writeBits(extraLength, extraLengthBits)
			extraLength := int32(length - lengthBase[lengthCode])
			bits |= uint64(extraLength) << (nbits & 63)
			nbits += extraLengthBits
			if nbits >= 48 {
				binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
				bits >>= 48
				nbits -= 48
				nbytes += 6
				if nbytes >= bufferFlushSize {
					if w.err != nil {
						nbytes = 0
						return
					}
					_, w.err = w.writer.Write(w.bytes[:nbytes])
					nbytes = 0
				}
			}
		}
		// Write the offset
		offset := t.offset()
		offsetCode := (offset >> 16) & 31
		if false {
			w.writeCode(offs[offsetCode])
		} else {
			// inlined
			c := offs[offsetCode]
			bits |= c.code64() << (nbits & 63)
			nbits += c.len()
			if nbits >= 48 {
				binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
				bits >>= 48
				nbits -= 48
				nbytes += 6
				if nbytes >= bufferFlushSize {
					if w.err != nil {
						nbytes = 0
						return
					}
					_, w.err = w.writer.Write(w.bytes[:nbytes])
					nbytes = 0
				}
			}
		}

		// Extra offset bits: offsetCombined packs the bit count in its
		// low byte and the offset base in the upper bits (see init).
		if offsetCode >= offsetExtraBitsMinCode {
			offsetComb := offsetCombined[offsetCode]
			//w.writeBits(extraOffset, extraOffsetBits)
			bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63)
			nbits += uint8(offsetComb)
			if nbits >= 48 {
				binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
				bits >>= 48
				nbits -= 48
				nbytes += 6
				if nbytes >= bufferFlushSize {
					if w.err != nil {
						nbytes = 0
						return
					}
					_, w.err = w.writer.Write(w.bytes[:nbytes])
					nbytes = 0
				}
			}
		}
	}
	// Restore...
	w.bits, w.nbits, w.nbytes = bits, nbits, nbytes

	if deferEOB {
		w.writeCode(leCodes[endBlockMarker])
	}
}
  883. // huffOffset is a static offset encoder used for huffman only encoding.
  884. // It can be reused since we will not be encoding offset values.
  885. var huffOffset *huffmanEncoder
  886. func init() {
  887. w := newHuffmanBitWriter(nil)
  888. w.offsetFreq[0] = 1
  889. huffOffset = newHuffmanEncoder(offsetCodeCount)
  890. huffOffset.generate(w.offsetFreq[:offsetCodeCount], 15)
  891. }
// writeBlockHuff encodes a block of bytes as either
// Huffman encoded literals or uncompressed bytes if the
// results only gains very little from compression.
// All input bytes are emitted as literals through a single dynamic
// Huffman table; the offset table is the fixed single-entry huffOffset.
// When eof or sync is set, the block is terminated with an
// end-of-block marker and the open table is closed.
func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
	if w.err != nil {
		return
	}

	// Clear histogram
	for i := range w.literalFreq[:] {
		w.literalFreq[i] = 0
	}
	// The offset histogram only needs resetting when the previous block
	// was not also huffman-only; this mode never records offsets.
	if !w.lastHuffMan {
		for i := range w.offsetFreq[:] {
			w.offsetFreq[i] = 0
		}
	}

	const numLiterals = endBlockMarker + 1
	const numOffsets = 1

	// Add everything as literals
	// We have to estimate the header size.
	// Assume header is around 70 bytes:
	// https://stackoverflow.com/a/25454430
	const guessHeaderSizeBits = 70 * 8

	histogram(input, w.literalFreq[:numLiterals])
	ssize, storable := w.storedSize(input)
	if storable && len(input) > 1024 {
		// Quick check for incompressible content.
		// Sum of squared deviations of byte counts from a flat
		// distribution; a near-uniform histogram (abs < max) means
		// Huffman coding cannot win, so emit a stored block instead.
		abs := float64(0)
		avg := float64(len(input)) / 256
		max := float64(len(input) * 2)
		for _, v := range w.literalFreq[:256] {
			diff := float64(v) - avg
			abs += diff * diff
			if abs > max {
				break
			}
		}
		if abs < max {
			if debugDeflate {
				fmt.Println("stored", abs, "<", max)
			}
			// No chance we can compress this...
			w.writeStoredHeader(len(input), eof)
			w.writeBytes(input)
			return
		}
	}

	// Reserve the end-of-block marker and build a fresh candidate table.
	w.literalFreq[endBlockMarker] = 1
	w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15)
	estBits := w.tmpLitEncoding.canReuseBits(w.literalFreq[:numLiterals])
	if estBits < math.MaxInt32 {
		// Charge the cost of a new header plus a penalty, so a new
		// table is only chosen when the gain is significant.
		estBits += w.lastHeader
		if w.lastHeader == 0 {
			estBits += guessHeaderSizeBits
		}
		estBits += estBits >> w.logNewTablePenalty
	}

	// Store bytes, if we don't get a reasonable improvement.
	if storable && ssize <= estBits {
		if debugDeflate {
			fmt.Println("stored,", ssize, "<=", estBits)
		}
		w.writeStoredHeader(len(input), eof)
		w.writeBytes(input)
		return
	}

	if w.lastHeader > 0 {
		// A table from a previous block is still open; keep it if
		// reusing is cheaper than paying for a new header.
		reuseSize := w.literalEncoding.canReuseBits(w.literalFreq[:256])

		if estBits < reuseSize {
			if debugDeflate {
				fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes")
			}
			// We owe an EOB
			w.writeCode(w.literalEncoding.codes[endBlockMarker])
			w.lastHeader = 0
		} else if debugDeflate {
			fmt.Println("reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8)
		}
	}

	count := 0
	if w.lastHeader == 0 {
		// Use the temp encoding, so swap.
		w.literalEncoding, w.tmpLitEncoding = w.tmpLitEncoding, w.literalEncoding
		// Generate codegen and codegenFrequencies, which indicates how to encode
		// the literalEncoding and the offsetEncoding.
		w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset)
		w.codegenEncoding.generate(w.codegenFreq[:], 7)
		numCodegens := w.codegens()

		// Huffman.
		w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
		w.lastHuffMan = true
		w.lastHeader, _ = w.headerSize()
		if debugDeflate {
			count += w.lastHeader
			fmt.Println("header:", count/8)
		}
	}

	encoding := w.literalEncoding.codes[:256]

	// Go 1.16 LOVES having these on stack. At least 1.5x the speed.
	bits, nbits, nbytes := w.bits, w.nbits, w.nbytes
	if debugDeflate {
		count -= int(nbytes)*8 + int(nbits)
	}

	// Unroll, write 3 codes/loop.
	// Fastest number of unrolls.
	for len(input) > 3 {
		// We must have at least 48 bits free.
		if nbits >= 8 {
			// Flush whole bytes so three codes (max 15 bits each,
			// 45 bits total) always fit in the 64-bit accumulator.
			n := nbits >> 3
			binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
			bits >>= (n * 8) & 63
			nbits -= n * 8
			nbytes += n
		}
		if nbytes >= bufferFlushSize {
			if w.err != nil {
				nbytes = 0
				return
			}
			if debugDeflate {
				count += int(nbytes) * 8
			}
			_, w.err = w.writer.Write(w.bytes[:nbytes])
			nbytes = 0
		}
		a, b := encoding[input[0]], encoding[input[1]]
		bits |= a.code64() << (nbits & 63)
		bits |= b.code64() << ((nbits + a.len()) & 63)
		c := encoding[input[2]]
		nbits += b.len() + a.len()
		bits |= c.code64() << (nbits & 63)
		nbits += c.len()
		input = input[3:]
	}

	// Remaining...
	for _, t := range input {
		if nbits >= 48 {
			binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
			//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
			bits >>= 48
			nbits -= 48
			nbytes += 6
			if nbytes >= bufferFlushSize {
				if w.err != nil {
					nbytes = 0
					return
				}
				if debugDeflate {
					count += int(nbytes) * 8
				}
				_, w.err = w.writer.Write(w.bytes[:nbytes])
				nbytes = 0
			}
		}
		// Bitwriting inlined, ~30% speedup
		c := encoding[t]
		bits |= c.code64() << (nbits & 63)
		nbits += c.len()
		if debugDeflate {
			count += int(c.len())
		}
	}
	// Restore...
	w.bits, w.nbits, w.nbytes = bits, nbits, nbytes

	if debugDeflate {
		nb := count + int(nbytes)*8 + int(nbits)
		fmt.Println("wrote", nb, "bits,", nb/8, "bytes.")
	}
	// Flush if needed to have space.
	if w.nbits >= 48 {
		w.writeOutBits()
	}

	if eof || sync {
		// Terminate the block; this also closes the open table so the
		// next block starts fresh.
		w.writeCode(w.literalEncoding.codes[endBlockMarker])
		w.lastHeader = 0
		w.lastHuffMan = false
	}
}