compress.go 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480
  1. package fasthttp
  2. import (
  3. "bytes"
  4. "fmt"
  5. "io"
  6. "io/fs"
  7. "sync"
  8. "github.com/klauspost/compress/flate"
  9. "github.com/klauspost/compress/gzip"
  10. "github.com/klauspost/compress/zlib"
  11. "github.com/valyala/bytebufferpool"
  12. "github.com/valyala/fasthttp/stackless"
  13. )
// Supported compression levels.
const (
	CompressNoCompression   = flate.NoCompression
	CompressBestSpeed       = flate.BestSpeed
	CompressBestCompression = flate.BestCompression
	// NOTE(review): pinned to the literal 6 rather than
	// flate.DefaultCompression (which is -1) — presumably to select a
	// concrete mid-range level; confirm intent before changing.
	CompressDefaultCompression = 6 // flate.DefaultCompression
	// Literal value of flate.HuffmanOnly.
	CompressHuffmanOnly = -2 // flate.HuffmanOnly
)
  22. func acquireGzipReader(r io.Reader) (*gzip.Reader, error) {
  23. v := gzipReaderPool.Get()
  24. if v == nil {
  25. return gzip.NewReader(r)
  26. }
  27. zr := v.(*gzip.Reader)
  28. if err := zr.Reset(r); err != nil {
  29. return nil, err
  30. }
  31. return zr, nil
  32. }
// releaseGzipReader closes zr and returns it to gzipReaderPool so a
// subsequent acquireGzipReader call can reuse it via Reset.
func releaseGzipReader(zr *gzip.Reader) {
	zr.Close()
	gzipReaderPool.Put(zr)
}

// gzipReaderPool caches *gzip.Reader values for acquireGzipReader.
var gzipReaderPool sync.Pool
  38. func acquireFlateReader(r io.Reader) (io.ReadCloser, error) {
  39. v := flateReaderPool.Get()
  40. if v == nil {
  41. zr, err := zlib.NewReader(r)
  42. if err != nil {
  43. return nil, err
  44. }
  45. return zr, nil
  46. }
  47. zr := v.(io.ReadCloser)
  48. if err := resetFlateReader(zr, r); err != nil {
  49. return nil, err
  50. }
  51. return zr, nil
  52. }
// releaseFlateReader closes zr and returns it to flateReaderPool for
// reuse by acquireFlateReader.
func releaseFlateReader(zr io.ReadCloser) {
	zr.Close()
	flateReaderPool.Put(zr)
}
  57. func resetFlateReader(zr io.ReadCloser, r io.Reader) error {
  58. zrr, ok := zr.(zlib.Resetter)
  59. if !ok {
  60. // sanity check. should only be called with a zlib.Reader
  61. panic("BUG: zlib.Reader doesn't implement zlib.Resetter???")
  62. }
  63. return zrr.Reset(r, nil)
  64. }
  65. var flateReaderPool sync.Pool
  66. func acquireStacklessGzipWriter(w io.Writer, level int) stackless.Writer {
  67. nLevel := normalizeCompressLevel(level)
  68. p := stacklessGzipWriterPoolMap[nLevel]
  69. v := p.Get()
  70. if v == nil {
  71. return stackless.NewWriter(w, func(w io.Writer) stackless.Writer {
  72. return acquireRealGzipWriter(w, level)
  73. })
  74. }
  75. sw := v.(stackless.Writer)
  76. sw.Reset(w)
  77. return sw
  78. }
  79. func releaseStacklessGzipWriter(sw stackless.Writer, level int) {
  80. sw.Close()
  81. nLevel := normalizeCompressLevel(level)
  82. p := stacklessGzipWriterPoolMap[nLevel]
  83. p.Put(sw)
  84. }
  85. func acquireRealGzipWriter(w io.Writer, level int) *gzip.Writer {
  86. nLevel := normalizeCompressLevel(level)
  87. p := realGzipWriterPoolMap[nLevel]
  88. v := p.Get()
  89. if v == nil {
  90. zw, err := gzip.NewWriterLevel(w, level)
  91. if err != nil {
  92. // gzip.NewWriterLevel only errors for invalid
  93. // compression levels. Clamp it to be min or max.
  94. if level < gzip.HuffmanOnly {
  95. level = gzip.HuffmanOnly
  96. } else {
  97. level = gzip.BestCompression
  98. }
  99. zw, _ = gzip.NewWriterLevel(w, level)
  100. }
  101. return zw
  102. }
  103. zw := v.(*gzip.Writer)
  104. zw.Reset(w)
  105. return zw
  106. }
  107. func releaseRealGzipWriter(zw *gzip.Writer, level int) {
  108. zw.Close()
  109. nLevel := normalizeCompressLevel(level)
  110. p := realGzipWriterPoolMap[nLevel]
  111. p.Put(zw)
  112. }
var (
	// stacklessGzipWriterPoolMap pools stackless gzip writers,
	// one pool per normalized compression level.
	stacklessGzipWriterPoolMap = newCompressWriterPoolMap()
	// realGzipWriterPoolMap pools *gzip.Writer values,
	// one pool per normalized compression level.
	realGzipWriterPoolMap = newCompressWriterPoolMap()
)
  117. // AppendGzipBytesLevel appends gzipped src to dst using the given
  118. // compression level and returns the resulting dst.
  119. //
  120. // Supported compression levels are:
  121. //
  122. // - CompressNoCompression
  123. // - CompressBestSpeed
  124. // - CompressBestCompression
  125. // - CompressDefaultCompression
  126. // - CompressHuffmanOnly
  127. func AppendGzipBytesLevel(dst, src []byte, level int) []byte {
  128. w := &byteSliceWriter{dst}
  129. WriteGzipLevel(w, src, level) //nolint:errcheck
  130. return w.b
  131. }
  132. // WriteGzipLevel writes gzipped p to w using the given compression level
  133. // and returns the number of compressed bytes written to w.
  134. //
  135. // Supported compression levels are:
  136. //
  137. // - CompressNoCompression
  138. // - CompressBestSpeed
  139. // - CompressBestCompression
  140. // - CompressDefaultCompression
  141. // - CompressHuffmanOnly
  142. func WriteGzipLevel(w io.Writer, p []byte, level int) (int, error) {
  143. switch w.(type) {
  144. case *byteSliceWriter,
  145. *bytes.Buffer,
  146. *bytebufferpool.ByteBuffer:
  147. // These writers don't block, so we can just use stacklessWriteGzip
  148. ctx := &compressCtx{
  149. w: w,
  150. p: p,
  151. level: level,
  152. }
  153. stacklessWriteGzip(ctx)
  154. return len(p), nil
  155. default:
  156. zw := acquireStacklessGzipWriter(w, level)
  157. n, err := zw.Write(p)
  158. releaseStacklessGzipWriter(zw, level)
  159. return n, err
  160. }
  161. }
var (
	// stacklessWriteGzipOnce guards the lazy creation of stacklessWriteGzipFunc.
	stacklessWriteGzipOnce sync.Once
	// stacklessWriteGzipFunc runs nonblockingWriteGzip via the stackless
	// package; initialized on first use by stacklessWriteGzip.
	stacklessWriteGzipFunc func(ctx interface{}) bool
)

// stacklessWriteGzip gzips the data described by ctx (a *compressCtx)
// through the shared stackless function, creating that function on the
// first call. The boolean returned by the stackless call is ignored here.
func stacklessWriteGzip(ctx interface{}) {
	stacklessWriteGzipOnce.Do(func() {
		stacklessWriteGzipFunc = stackless.NewFunc(nonblockingWriteGzip)
	})
	stacklessWriteGzipFunc(ctx)
}
  172. func nonblockingWriteGzip(ctxv interface{}) {
  173. ctx := ctxv.(*compressCtx)
  174. zw := acquireRealGzipWriter(ctx.w, ctx.level)
  175. zw.Write(ctx.p) //nolint:errcheck // no way to handle this error anyway
  176. releaseRealGzipWriter(zw, ctx.level)
  177. }
// WriteGzip writes gzipped p to w and returns the number of compressed
// bytes written to w.
//
// CompressDefaultCompression is used as the compression level.
func WriteGzip(w io.Writer, p []byte) (int, error) {
	return WriteGzipLevel(w, p, CompressDefaultCompression)
}
// AppendGzipBytes appends gzipped src to dst and returns the resulting dst.
//
// CompressDefaultCompression is used as the compression level.
func AppendGzipBytes(dst, src []byte) []byte {
	return AppendGzipBytesLevel(dst, src, CompressDefaultCompression)
}
  187. // WriteGunzip writes ungzipped p to w and returns the number of uncompressed
  188. // bytes written to w.
  189. func WriteGunzip(w io.Writer, p []byte) (int, error) {
  190. r := &byteSliceReader{p}
  191. zr, err := acquireGzipReader(r)
  192. if err != nil {
  193. return 0, err
  194. }
  195. n, err := copyZeroAlloc(w, zr)
  196. releaseGzipReader(zr)
  197. nn := int(n)
  198. if int64(nn) != n {
  199. return 0, fmt.Errorf("too much data gunzipped: %d", n)
  200. }
  201. return nn, err
  202. }
  203. // AppendGunzipBytes appends gunzipped src to dst and returns the resulting dst.
  204. func AppendGunzipBytes(dst, src []byte) ([]byte, error) {
  205. w := &byteSliceWriter{dst}
  206. _, err := WriteGunzip(w, src)
  207. return w.b, err
  208. }
  209. // AppendDeflateBytesLevel appends deflated src to dst using the given
  210. // compression level and returns the resulting dst.
  211. //
  212. // Supported compression levels are:
  213. //
  214. // - CompressNoCompression
  215. // - CompressBestSpeed
  216. // - CompressBestCompression
  217. // - CompressDefaultCompression
  218. // - CompressHuffmanOnly
  219. func AppendDeflateBytesLevel(dst, src []byte, level int) []byte {
  220. w := &byteSliceWriter{dst}
  221. WriteDeflateLevel(w, src, level) //nolint:errcheck
  222. return w.b
  223. }
  224. // WriteDeflateLevel writes deflated p to w using the given compression level
  225. // and returns the number of compressed bytes written to w.
  226. //
  227. // Supported compression levels are:
  228. //
  229. // - CompressNoCompression
  230. // - CompressBestSpeed
  231. // - CompressBestCompression
  232. // - CompressDefaultCompression
  233. // - CompressHuffmanOnly
  234. func WriteDeflateLevel(w io.Writer, p []byte, level int) (int, error) {
  235. switch w.(type) {
  236. case *byteSliceWriter,
  237. *bytes.Buffer,
  238. *bytebufferpool.ByteBuffer:
  239. // These writers don't block, so we can just use stacklessWriteDeflate
  240. ctx := &compressCtx{
  241. w: w,
  242. p: p,
  243. level: level,
  244. }
  245. stacklessWriteDeflate(ctx)
  246. return len(p), nil
  247. default:
  248. zw := acquireStacklessDeflateWriter(w, level)
  249. n, err := zw.Write(p)
  250. releaseStacklessDeflateWriter(zw, level)
  251. return n, err
  252. }
  253. }
var (
	// stacklessWriteDeflateOnce guards the lazy creation of stacklessWriteDeflateFunc.
	stacklessWriteDeflateOnce sync.Once
	// stacklessWriteDeflateFunc runs nonblockingWriteDeflate via the
	// stackless package; initialized on first use by stacklessWriteDeflate.
	stacklessWriteDeflateFunc func(ctx interface{}) bool
)

// stacklessWriteDeflate deflates the data described by ctx (a *compressCtx)
// through the shared stackless function, creating that function on the
// first call. The boolean returned by the stackless call is ignored here.
func stacklessWriteDeflate(ctx interface{}) {
	stacklessWriteDeflateOnce.Do(func() {
		stacklessWriteDeflateFunc = stackless.NewFunc(nonblockingWriteDeflate)
	})
	stacklessWriteDeflateFunc(ctx)
}
  264. func nonblockingWriteDeflate(ctxv interface{}) {
  265. ctx := ctxv.(*compressCtx)
  266. zw := acquireRealDeflateWriter(ctx.w, ctx.level)
  267. zw.Write(ctx.p) //nolint:errcheck // no way to handle this error anyway
  268. releaseRealDeflateWriter(zw, ctx.level)
  269. }
// compressCtx carries the arguments of a single compression request
// across the stackless function boundary.
type compressCtx struct {
	w     io.Writer // destination for compressed bytes
	p     []byte    // payload to compress
	level int       // compression level to use
}
// WriteDeflate writes deflated p to w and returns the number of compressed
// bytes written to w.
//
// CompressDefaultCompression is used as the compression level.
func WriteDeflate(w io.Writer, p []byte) (int, error) {
	return WriteDeflateLevel(w, p, CompressDefaultCompression)
}
// AppendDeflateBytes appends deflated src to dst and returns the resulting dst.
//
// CompressDefaultCompression is used as the compression level.
func AppendDeflateBytes(dst, src []byte) []byte {
	return AppendDeflateBytesLevel(dst, src, CompressDefaultCompression)
}
  284. // WriteInflate writes inflated p to w and returns the number of uncompressed
  285. // bytes written to w.
  286. func WriteInflate(w io.Writer, p []byte) (int, error) {
  287. r := &byteSliceReader{p}
  288. zr, err := acquireFlateReader(r)
  289. if err != nil {
  290. return 0, err
  291. }
  292. n, err := copyZeroAlloc(w, zr)
  293. releaseFlateReader(zr)
  294. nn := int(n)
  295. if int64(nn) != n {
  296. return 0, fmt.Errorf("too much data inflated: %d", n)
  297. }
  298. return nn, err
  299. }
  300. // AppendInflateBytes appends inflated src to dst and returns the resulting dst.
  301. func AppendInflateBytes(dst, src []byte) ([]byte, error) {
  302. w := &byteSliceWriter{dst}
  303. _, err := WriteInflate(w, src)
  304. return w.b, err
  305. }
  306. type byteSliceWriter struct {
  307. b []byte
  308. }
  309. func (w *byteSliceWriter) Write(p []byte) (int, error) {
  310. w.b = append(w.b, p...)
  311. return len(p), nil
  312. }
  313. type byteSliceReader struct {
  314. b []byte
  315. }
  316. func (r *byteSliceReader) Read(p []byte) (int, error) {
  317. if len(r.b) == 0 {
  318. return 0, io.EOF
  319. }
  320. n := copy(p, r.b)
  321. r.b = r.b[n:]
  322. return n, nil
  323. }
  324. func (r *byteSliceReader) ReadByte() (byte, error) {
  325. if len(r.b) == 0 {
  326. return 0, io.EOF
  327. }
  328. n := r.b[0]
  329. r.b = r.b[1:]
  330. return n, nil
  331. }
  332. func acquireStacklessDeflateWriter(w io.Writer, level int) stackless.Writer {
  333. nLevel := normalizeCompressLevel(level)
  334. p := stacklessDeflateWriterPoolMap[nLevel]
  335. v := p.Get()
  336. if v == nil {
  337. return stackless.NewWriter(w, func(w io.Writer) stackless.Writer {
  338. return acquireRealDeflateWriter(w, level)
  339. })
  340. }
  341. sw := v.(stackless.Writer)
  342. sw.Reset(w)
  343. return sw
  344. }
  345. func releaseStacklessDeflateWriter(sw stackless.Writer, level int) {
  346. sw.Close()
  347. nLevel := normalizeCompressLevel(level)
  348. p := stacklessDeflateWriterPoolMap[nLevel]
  349. p.Put(sw)
  350. }
  351. func acquireRealDeflateWriter(w io.Writer, level int) *zlib.Writer {
  352. nLevel := normalizeCompressLevel(level)
  353. p := realDeflateWriterPoolMap[nLevel]
  354. v := p.Get()
  355. if v == nil {
  356. zw, err := zlib.NewWriterLevel(w, level)
  357. if err != nil {
  358. // zlib.NewWriterLevel only errors for invalid
  359. // compression levels. Clamp it to be min or max.
  360. if level < zlib.HuffmanOnly {
  361. level = zlib.HuffmanOnly
  362. } else {
  363. level = zlib.BestCompression
  364. }
  365. zw, _ = zlib.NewWriterLevel(w, level)
  366. }
  367. return zw
  368. }
  369. zw := v.(*zlib.Writer)
  370. zw.Reset(w)
  371. return zw
  372. }
  373. func releaseRealDeflateWriter(zw *zlib.Writer, level int) {
  374. zw.Close()
  375. nLevel := normalizeCompressLevel(level)
  376. p := realDeflateWriterPoolMap[nLevel]
  377. p.Put(zw)
  378. }
var (
	// stacklessDeflateWriterPoolMap pools stackless deflate writers,
	// one pool per normalized compression level.
	stacklessDeflateWriterPoolMap = newCompressWriterPoolMap()
	// realDeflateWriterPoolMap pools *zlib.Writer values,
	// one pool per normalized compression level.
	realDeflateWriterPoolMap = newCompressWriterPoolMap()
)
  383. func newCompressWriterPoolMap() []*sync.Pool {
  384. // Initialize pools for all the compression levels defined
  385. // in https://pkg.go.dev/compress/flate#pkg-constants .
  386. // Compression levels are normalized with normalizeCompressLevel,
  387. // so the fit [0..11].
  388. var m []*sync.Pool
  389. for i := 0; i < 12; i++ {
  390. m = append(m, &sync.Pool{})
  391. }
  392. return m
  393. }
  394. func isFileCompressible(f fs.File, minCompressRatio float64) bool {
  395. // Try compressing the first 4kb of the file
  396. // and see if it can be compressed by more than
  397. // the given minCompressRatio.
  398. b := bytebufferpool.Get()
  399. zw := acquireStacklessGzipWriter(b, CompressDefaultCompression)
  400. lr := &io.LimitedReader{
  401. R: f,
  402. N: 4096,
  403. }
  404. _, err := copyZeroAlloc(zw, lr)
  405. releaseStacklessGzipWriter(zw, CompressDefaultCompression)
  406. seeker, ok := f.(io.Seeker)
  407. if !ok {
  408. return false
  409. }
  410. seeker.Seek(0, io.SeekStart) //nolint:errcheck
  411. if err != nil {
  412. return false
  413. }
  414. n := 4096 - lr.N
  415. zn := len(b.B)
  416. bytebufferpool.Put(b)
  417. return float64(zn) < float64(n)*minCompressRatio
  418. }
  419. // normalizes compression level into [0..11], so it could be used as an index
  420. // in *PoolMap.
  421. func normalizeCompressLevel(level int) int {
  422. // -2 is the lowest compression level - CompressHuffmanOnly
  423. // 9 is the highest compression level - CompressBestCompression
  424. if level < -2 || level > 9 {
  425. level = CompressDefaultCompression
  426. }
  427. return level + 2
  428. }