// ringbuffer.go

package brotli

/* Copyright 2013 Google Inc. All Rights Reserved.
   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

/* A ringBuffer(window_bits, tail_bits) contains `1 << window_bits' bytes of
   data in a circular manner: writing a byte writes it to:
     `position() % (1 << window_bits)'.
   For convenience, the ringBuffer array contains another copy of the
   first `1 << tail_bits' bytes:
     buffer_[i] == buffer_[i + (1 << window_bits)], if i < (1 << tail_bits),
   and another copy of the last two bytes:
     buffer_[-1] == buffer_[(1 << window_bits) - 1] and
     buffer_[-2] == buffer_[(1 << window_bits) - 2]. */
type ringBuffer struct {
	size_       uint32 /* 1 << window_bits */
	mask_       uint32 /* size_ - 1 */
	tail_size_  uint32 /* 1 << tail_bits */
	total_size_ uint32 /* size_ + tail_size_ */
	cur_size_   uint32 /* currently allocated length of buffer_ (grows lazily) */
	pos_        uint32 /* bytes written so far; bit 31 is the not-a-first-lap flag */
	data_       []byte /* backing slice: 2 slack bytes, the buffer, 7 bytes of hashing slack */
	buffer_     []byte /* data_[2:], the ring buffer proper */
}
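
/* Illustrative sketch, not part of the original source: the mirrored regions
   described above let readers cross the wrap point without extra bounds
   checks. Assuming rb has been fully allocated and filled via
   ringBufferWrite, a match-finder could read roughly like this (pos and
   copyLen are hypothetical names used only here, with copyLen <= tail_size_):

       masked := pos & rb.mask_
       window := rb.buffer_[masked : masked+copyLen] // may run past size_ into the tail copy
       last := rb.data_[0:2]                         // mirror of buffer_[size_-2 : size_]
*/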
func ringBufferInit(rb *ringBuffer) {
	rb.pos_ = 0
}

func ringBufferSetup(params *encoderParams, rb *ringBuffer) {
	var window_bits int = computeRbBits(params)
	var tail_bits int = params.lgblock
	rb.size_ = 1 << uint(window_bits)
	rb.mask_ = (1 << uint(window_bits)) - 1
	rb.tail_size_ = 1 << uint(tail_bits)
	rb.total_size_ = rb.size_ + rb.tail_size_
}
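
/* Worked example, not part of the original source: with window_bits = 22 and
   params.lgblock = 16, ringBufferSetup produces

       size_       = 1 << 22 = 4194304
       mask_       = 4194303
       tail_size_  = 1 << 16 = 65536
       total_size_ = 4259840

   so masked write positions are always pos_ & mask_, and the full allocation
   made later by ringBufferInitBuffer covers size_ + tail_size_ bytes plus
   slack. (computeRbBits is defined elsewhere in this package; 22 is just an
   assumed example value.) */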
const kSlackForEightByteHashingEverywhere uint = 7

/* Allocates or re-allocates data_ to the given length plus some slack
   region before and after. Fills the slack regions with zeros. */
func ringBufferInitBuffer(buflen uint32, rb *ringBuffer) {
	var new_data []byte
	var i uint
	size := 2 + int(buflen) + int(kSlackForEightByteHashingEverywhere)
	if cap(rb.data_) < size {
		new_data = make([]byte, size)
	} else {
		new_data = rb.data_[:size]
	}

	if rb.data_ != nil {
		copy(new_data, rb.data_[:2+rb.cur_size_+uint32(kSlackForEightByteHashingEverywhere)])
	}

	rb.data_ = new_data
	rb.cur_size_ = buflen
	rb.buffer_ = rb.data_[2:]
	rb.data_[0] = 0
	rb.data_[1] = 0
	for i = 0; i < kSlackForEightByteHashingEverywhere; i++ {
		rb.buffer_[rb.cur_size_+uint32(i)] = 0
	}
}
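
/* Layout sketch, not part of the original source: after
   ringBufferInitBuffer(buflen, rb) the backing slice is

       data_[0:2]                     two slack bytes, later kept equal to the
                                      last two bytes of the window,
       data_[2 : 2+buflen]            the ring buffer proper, exposed as buffer_,
       data_[2+buflen : 2+buflen+7]   kSlackForEightByteHashingEverywhere zero
                                      bytes so 8-byte hash loads never read
                                      past the allocation.

   cur_size_ records buflen so a later, larger call can copy the old contents
   forward before growing. */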
func ringBufferWriteTail(bytes []byte, n uint, rb *ringBuffer) {
	var masked_pos uint = uint(rb.pos_ & rb.mask_)
	if uint32(masked_pos) < rb.tail_size_ {
		/* Just fill the tail buffer with the beginning data. */
		var p uint = uint(rb.size_ + uint32(masked_pos))
		copy(rb.buffer_[p:], bytes[:brotli_min_size_t(n, uint(rb.tail_size_-uint32(masked_pos)))])
	}
}
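
/* Worked example, not part of the original source: with size_ = 8,
   tail_size_ = 4 and masked_pos = 1, calling ringBufferWriteTail with
   bytes = "abc" copies "abc" to buffer_[9:12], while the main write in
   ringBufferWrite puts the same bytes at buffer_[1:4]. That maintains the
   documented invariant buffer_[i] == buffer_[i+size_] for i < tail_size_. */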
/* Push bytes into the ring buffer. */
func ringBufferWrite(bytes []byte, n uint, rb *ringBuffer) {
	if rb.pos_ == 0 && uint32(n) < rb.tail_size_ {
		/* Special case for the first write: to process the first block, we don't
		   need to allocate the whole ring-buffer and we don't need the tail
		   either. However, we do this memory usage optimization only if the
		   first write is less than the tail size, which is also the input block
		   size, otherwise it is likely that other blocks will follow and we
		   will need to reallocate to the full size anyway. */
		rb.pos_ = uint32(n)

		ringBufferInitBuffer(rb.pos_, rb)
		copy(rb.buffer_, bytes[:n])
		return
	}

	if rb.cur_size_ < rb.total_size_ {
		/* Lazily allocate the full buffer. */
		ringBufferInitBuffer(rb.total_size_, rb)

		/* Initialize the last two bytes to zero, so that we don't have to worry
		   later when we copy the last two bytes to the first two positions. */
		rb.buffer_[rb.size_-2] = 0
		rb.buffer_[rb.size_-1] = 0
	}
	{
		var masked_pos uint = uint(rb.pos_ & rb.mask_)

		/* The length of the writes is limited so that we do not need to worry
		   about a write */
		ringBufferWriteTail(bytes, n, rb)

		if uint32(masked_pos+n) <= rb.size_ {
			/* A single write fits. */
			copy(rb.buffer_[masked_pos:], bytes[:n])
		} else {
			/* Split into two writes.
			   Copy into the end of the buffer, including the tail buffer. */
			copy(rb.buffer_[masked_pos:], bytes[:brotli_min_size_t(n, uint(rb.total_size_-uint32(masked_pos)))])

			/* Copy into the beginning of the buffer */
			copy(rb.buffer_, bytes[rb.size_-uint32(masked_pos):][:uint32(n)-(rb.size_-uint32(masked_pos))])
		}
	}
	{
		var not_first_lap bool = rb.pos_&(1<<31) != 0
		var rb_pos_mask uint32 = (1 << 31) - 1
		rb.data_[0] = rb.buffer_[rb.size_-2]
		rb.data_[1] = rb.buffer_[rb.size_-1]
		rb.pos_ = (rb.pos_ & rb_pos_mask) + uint32(uint32(n)&rb_pos_mask)
		if not_first_lap {
			/* Wrap, but preserve not-a-first-lap feature. */
			rb.pos_ |= 1 << 31
		}
	}
}
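
/* Usage sketch, not part of the original source (params is assumed to be an
   *encoderParams configured elsewhere):

       var rb ringBuffer
       ringBufferInit(&rb)
       ringBufferSetup(params, &rb)
       ringBufferWrite(input, uint(len(input)), &rb)
       lastTwo := rb.data_[:2]          // mirror of the last two window bytes
       written := rb.pos_ &^ (1 << 31)  // bytes written so far, lap flag cleared

   pos_ keeps the byte count in its low 31 bits; once that count passes 1<<31
   the high bit stays set as the not-a-first-lap marker and is preserved by
   every later write. */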