// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.

package zstd

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"sync"

	"github.com/klauspost/compress/huff0"
	"github.com/klauspost/compress/zstd/internal/xxhash"
)

type blockType uint8

//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex

const (
	blockTypeRaw blockType = iota
	blockTypeRLE
	blockTypeCompressed
	blockTypeReserved
)

type literalsBlockType uint8

const (
	literalsBlockRaw literalsBlockType = iota
	literalsBlockRLE
	literalsBlockCompressed
	literalsBlockTreeless
)

const (
	// maxCompressedBlockSize is the biggest allowed compressed block size (128KB)
	maxCompressedBlockSize = 128 << 10

	compressedBlockOverAlloc    = 16
	maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc

	// Maximum possible block size (all Raw+Uncompressed).
	maxBlockSize = (1 << 21) - 1

	maxMatchLen  = 131074
	maxSequences = 0x7f00 + 0xffff

	// We support slightly less than the reference decoder to be able to
	// use ints on 32 bit archs.
	maxOffsetBits = 30
)

var (
	huffDecoderPool = sync.Pool{New: func() interface{} {
		return &huff0.Scratch{}
	}}

	fseDecoderPool = sync.Pool{New: func() interface{} {
		return &fseDecoder{}
	}}
)

type blockDec struct {
	// Raw source data of the block.
	data        []byte
	dataStorage []byte

	// Destination of the decoded data.
	dst []byte

	// Buffer for literals data.
	literalBuf []byte

	// Window size of the block.
	WindowSize uint64

	err error

	// Check against this CRC.
	checkCRC []byte

	// Frame to use for singlethreaded decoding.
	// Should not be used by the decoder itself since parent may be another frame.
	localFrame *frameDec

	sequence []seqVals

	async struct {
		newHist  *history
		literals []byte
		seqData  []byte
		seqSize  int // Size of uncompressed sequences
		fcs      uint64
	}

	// Block is RLE, this is the size.
	RLESize uint32

	Type blockType

	// Is this the last block of a frame?
	Last bool

	// Use less memory.
	lowMem bool
}

func (b *blockDec) String() string {
	if b == nil {
		return "<nil>"
	}
	return fmt.Sprintf("Stream Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize)
}

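// newBlockDec returns a block decoder. With lowMem set, buffers are sized to
// the actual block instead of the worst case, trading allocations for memory.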
func newBlockDec(lowMem bool) *blockDec {
	b := blockDec{
		lowMem: lowMem,
	}
	return &b
}

// reset will reset the block.
// Input must be at the start of a block and will be at the end of the block when returned.
func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
	b.WindowSize = windowSize
	tmp, err := br.readSmall(3)
	if err != nil {
		println("Reading block header:", err)
		return err
	}
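	// The block header is 3 bytes, little endian:
	// bit 0 = Last_Block, bits 1-2 = Block_Type, bits 3-23 = Block_Size.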
	bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16)
	b.Last = bh&1 != 0
	b.Type = blockType((bh >> 1) & 3)
	// find size.
	cSize := int(bh >> 3)
	maxSize := maxCompressedBlockSizeAlloc
	switch b.Type {
	case blockTypeReserved:
		return ErrReservedBlockType
	case blockTypeRLE:
		if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
			if debugDecoder {
				printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b)
			}
			return ErrWindowSizeExceeded
		}
		b.RLESize = uint32(cSize)
		if b.lowMem {
			maxSize = cSize
		}
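		// An RLE block stores only the single byte to repeat; the size field
		// in the header is the regenerated size, so only one byte is read.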
		cSize = 1
	case blockTypeCompressed:
		if debugDecoder {
			println("Data size on stream:", cSize)
		}
		b.RLESize = 0
		maxSize = maxCompressedBlockSizeAlloc
		if windowSize < maxCompressedBlockSize && b.lowMem {
			maxSize = int(windowSize) + compressedBlockOverAlloc
		}
		if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize {
			if debugDecoder {
				printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b)
			}
			return ErrCompressedSizeTooBig
		}
		// Compressed blocks must be at least 2 bytes: one for the
		// Literals_Block_Type and one for the Sequences_Section_Header.
		if cSize < 2 {
			return ErrBlockTooSmall
		}
	case blockTypeRaw:
		if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
			if debugDecoder {
				printf("raw block too big: csize:%d block: %+v\n", uint64(cSize), b)
			}
			return ErrWindowSizeExceeded
		}
		b.RLESize = 0
		// We do not need a destination for raw blocks.
		maxSize = -1
	default:
		panic("Invalid block type")
	}

	// Read block data.
	if cap(b.dataStorage) < cSize {
		if b.lowMem || cSize > maxCompressedBlockSize {
			b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc)
		} else {
			b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc)
		}
	}
	if cap(b.dst) <= maxSize {
		b.dst = make([]byte, 0, maxSize+1)
	}
	b.data, err = br.readBig(cSize, b.dataStorage)
	if err != nil {
		if debugDecoder {
			println("Reading block:", err, "(", cSize, ")", len(b.data))
			printf("%T", br)
		}
		return err
	}
	return nil
}

// sendErr will make the decoder report err when this block is decoded.
func (b *blockDec) sendErr(err error) {
	b.Last = true
	b.Type = blockTypeReserved
	b.err = err
}

// Close will release resources.
// Closed blockDec cannot be reset.
func (b *blockDec) Close() {
}

// decodeBuf decodes the block and appends the output to the history buffer.
func (b *blockDec) decodeBuf(hist *history) error {
	switch b.Type {
	case blockTypeRLE:
		if cap(b.dst) < int(b.RLESize) {
			if b.lowMem {
				b.dst = make([]byte, b.RLESize)
			} else {
				b.dst = make([]byte, maxBlockSize)
			}
		}
		b.dst = b.dst[:b.RLESize]
		v := b.data[0]
		for i := range b.dst {
			b.dst[i] = v
		}
		hist.appendKeep(b.dst)
		return nil
	case blockTypeRaw:
		hist.appendKeep(b.data)
		return nil
	case blockTypeCompressed:
		saved := b.dst
		// Append directly to history
		if hist.ignoreBuffer == 0 {
			b.dst = hist.b
			hist.b = nil
		} else {
			b.dst = b.dst[:0]
		}
		err := b.decodeCompressed(hist)
		if debugDecoder {
			println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err)
		}
		if hist.ignoreBuffer == 0 {
			hist.b = b.dst
			b.dst = saved
		} else {
			hist.appendKeep(b.dst)
		}
		return err
	case blockTypeReserved:
		// Used for returning errors.
		return b.err
	default:
		panic("Invalid block type")
	}
}

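// decodeLiterals decodes the literals section of the block into
// hist.decoders.literals and returns the remaining block data.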
func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) {
	// There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header
	if len(in) < 2 {
		return in, ErrBlockTooSmall
	}

	litType := literalsBlockType(in[0] & 3)
	var litRegenSize int
	var litCompSize int
	sizeFormat := (in[0] >> 2) & 3
	var fourStreams bool
	var literals []byte
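	// The literals section header packs the literals type in the low 2 bits and
	// the size format in the next 2 bits; the remaining bits hold the sizes.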
	switch litType {
	case literalsBlockRaw, literalsBlockRLE:
		switch sizeFormat {
		case 0, 2:
			// Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte.
			litRegenSize = int(in[0] >> 3)
			in = in[1:]
		case 1:
			// Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes.
			litRegenSize = int(in[0]>>4) + (int(in[1]) << 4)
			in = in[2:]
		case 3:
			// Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes.
			if len(in) < 3 {
				println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
				return in, ErrBlockTooSmall
			}
			litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12)
			in = in[3:]
		}
	case literalsBlockCompressed, literalsBlockTreeless:
		switch sizeFormat {
		case 0, 1:
			// Both Regenerated_Size and Compressed_Size use 10 bits (0-1023).
			if len(in) < 3 {
				println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
				return in, ErrBlockTooSmall
			}
			n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12)
			litRegenSize = int(n & 1023)
			litCompSize = int(n >> 10)
			fourStreams = sizeFormat == 1
			in = in[3:]
		case 2:
			fourStreams = true
			if len(in) < 4 {
				println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
				return in, ErrBlockTooSmall
			}
			n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20)
			litRegenSize = int(n & 16383)
			litCompSize = int(n >> 14)
			in = in[4:]
		case 3:
			fourStreams = true
			if len(in) < 5 {
				println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
				return in, ErrBlockTooSmall
			}
			n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28)
			litRegenSize = int(n & 262143)
			litCompSize = int(n >> 18)
			in = in[5:]
		}
	}
	if debugDecoder {
		println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams)
	}
	if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize {
		return in, ErrWindowSizeExceeded
	}

	switch litType {
	case literalsBlockRaw:
		if len(in) < litRegenSize {
			println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize)
			return in, ErrBlockTooSmall
		}
		literals = in[:litRegenSize]
		in = in[litRegenSize:]
		//printf("Found %d uncompressed literals\n", litRegenSize)
	case literalsBlockRLE:
		if len(in) < 1 {
			println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1)
			return in, ErrBlockTooSmall
		}
		if cap(b.literalBuf) < litRegenSize {
			if b.lowMem {
				b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc)
			} else {
				b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc)
			}
		}
		literals = b.literalBuf[:litRegenSize]
		v := in[0]
		for i := range literals {
			literals[i] = v
		}
		in = in[1:]
		if debugDecoder {
			printf("Found %d RLE compressed literals\n", litRegenSize)
		}
	case literalsBlockTreeless:
		if len(in) < litCompSize {
			println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
			return in, ErrBlockTooSmall
		}
		// Treeless literals reuse the Huffman table from history (the previous block).
		literals = in[:litCompSize]
		in = in[litCompSize:]
		if debugDecoder {
			printf("Found %d compressed literals\n", litCompSize)
		}
		huff := hist.huffTree
		if huff == nil {
			return in, errors.New("literal block was treeless, but no history was defined")
		}
		// Ensure we have space to store it.
		if cap(b.literalBuf) < litRegenSize {
			if b.lowMem {
				b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc)
			} else {
				b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc)
			}
		}
		var err error
		// Use our out buffer.
		huff.MaxDecodedSize = litRegenSize
		if fourStreams {
			literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
		} else {
			literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
		}
		// Make sure we don't leak our literals buffer
		if err != nil {
			println("decompressing literals:", err)
			return in, err
		}
		if len(literals) != litRegenSize {
			return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
		}
	case literalsBlockCompressed:
		if len(in) < litCompSize {
			println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
			return in, ErrBlockTooSmall
		}
		literals = in[:litCompSize]
		in = in[litCompSize:]
		// Ensure we have space to store it.
		if cap(b.literalBuf) < litRegenSize {
			if b.lowMem {
				b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc)
			} else {
				b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc)
			}
		}
		huff := hist.huffTree
		if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) {
			huff = huffDecoderPool.Get().(*huff0.Scratch)
			if huff == nil {
				huff = &huff0.Scratch{}
			}
		}
		var err error
		huff, literals, err = huff0.ReadTable(literals, huff)
		if err != nil {
			println("reading huffman table:", err)
			return in, err
		}
		hist.huffTree = huff
		huff.MaxDecodedSize = litRegenSize
		// Use our out buffer.
		if fourStreams {
			literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
		} else {
			literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
		}
		if err != nil {
			println("decoding compressed literals:", err)
			return in, err
		}
		// Make sure we don't leak our literals buffer
		if len(literals) != litRegenSize {
			return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
		}
		// Re-cap to get extra size.
		literals = b.literalBuf[:len(literals)]
		if debugDecoder {
			printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize)
		}
	}
	hist.decoders.literals = literals
	return in, nil
}

// decodeCompressed will start decompressing a block.
func (b *blockDec) decodeCompressed(hist *history) error {
	in := b.data
	in, err := b.decodeLiterals(in, hist)
	if err != nil {
		return err
	}
	err = b.prepareSequences(in, hist)
	if err != nil {
		return err
	}
	if hist.decoders.nSeqs == 0 {
		b.dst = append(b.dst, hist.decoders.literals...)
		return nil
	}
	before := len(hist.decoders.out)
	err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:])
	if err != nil {
		return err
	}
	if hist.decoders.maxSyncLen > 0 {
		hist.decoders.maxSyncLen += uint64(before)
		hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out))
	}
	b.dst = hist.decoders.out
	hist.recentOffsets = hist.decoders.prevOffset
	return nil
}

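// prepareSequences parses the Sequences_Section_Header and sets up the FSE
// decoders for literal lengths, offsets and match lengths in hist.decoders.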
func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
	if debugDecoder {
		printf("prepareSequences: %d byte(s) input\n", len(in))
	}
	// Decode Sequences
	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section
	if len(in) < 1 {
		return ErrBlockTooSmall
	}
	var nSeqs int
	seqHeader := in[0]
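	// The number of sequences is encoded in 1-3 bytes depending on the first byte:
	// values below 128 are the count itself, 128-254 span two bytes, 255 spans three.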
	switch {
	case seqHeader < 128:
		nSeqs = int(seqHeader)
		in = in[1:]
	case seqHeader < 255:
		if len(in) < 2 {
			return ErrBlockTooSmall
		}
		nSeqs = int(seqHeader-128)<<8 | int(in[1])
		in = in[2:]
	case seqHeader == 255:
		if len(in) < 3 {
			return ErrBlockTooSmall
		}
		nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8)
		in = in[3:]
	}
	if nSeqs == 0 && len(in) != 0 {
		// When no sequences, there should not be any more data...
		if debugDecoder {
			printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in))
		}
		return ErrUnexpectedBlockSize
	}

	var seqs = &hist.decoders
	seqs.nSeqs = nSeqs
	if nSeqs > 0 {
		if len(in) < 1 {
			return ErrBlockTooSmall
		}
		br := byteReader{b: in, off: 0}
		compMode := br.Uint8()
		br.advance(1)
		if debugDecoder {
			printf("Compression modes: 0b%b", compMode)
		}
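		// The compression modes byte holds three 2-bit modes, from the high bits down:
		// literal lengths, offsets, then match lengths; the low 2 bits are reserved.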
		for i := uint(0); i < 3; i++ {
			mode := seqCompMode((compMode >> (6 - i*2)) & 3)
			if debugDecoder {
				println("Table", tableIndex(i), "is", mode)
			}
			var seq *sequenceDec
			switch tableIndex(i) {
			case tableLiteralLengths:
				seq = &seqs.litLengths
			case tableOffsets:
				seq = &seqs.offsets
			case tableMatchLengths:
				seq = &seqs.matchLengths
			default:
				panic("unknown table")
			}
			switch mode {
			case compModePredefined:
				if seq.fse != nil && !seq.fse.preDefined {
					fseDecoderPool.Put(seq.fse)
				}
				seq.fse = &fsePredef[i]
			case compModeRLE:
				if br.remain() < 1 {
					return ErrBlockTooSmall
				}
				v := br.Uint8()
				br.advance(1)
				if seq.fse == nil || seq.fse.preDefined {
					seq.fse = fseDecoderPool.Get().(*fseDecoder)
				}
				symb, err := decSymbolValue(v, symbolTableX[i])
				if err != nil {
					printf("RLE Transform table (%v) error: %v", tableIndex(i), err)
					return err
				}
				seq.fse.setRLE(symb)
				if debugDecoder {
					printf("RLE set to %+v, code: %v", symb, v)
				}
			case compModeFSE:
				println("Reading table for", tableIndex(i))
				if seq.fse == nil || seq.fse.preDefined {
					seq.fse = fseDecoderPool.Get().(*fseDecoder)
				}
				err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i]))
				if err != nil {
					println("Read table error:", err)
					return err
				}
				err = seq.fse.transform(symbolTableX[i])
				if err != nil {
					println("Transform table error:", err)
					return err
				}
				if debugDecoder {
					println("Read table ok", "symbolLen:", seq.fse.symbolLen)
				}
			case compModeRepeat:
				seq.repeat = true
			}
			if br.overread() {
				return io.ErrUnexpectedEOF
			}
		}
		in = br.unread()
	}
	if debugDecoder {
		println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.")
	}

	if nSeqs == 0 {
		if len(b.sequence) > 0 {
			b.sequence = b.sequence[:0]
		}
		return nil
	}
	br := seqs.br
	if br == nil {
		br = &bitReader{}
	}
	if err := br.init(in); err != nil {
		return err
	}
	if err := seqs.initialize(br, hist, b.dst); err != nil {
		println("initializing sequences:", err)
		return err
	}
	// Extract blocks...
	if false && hist.dict == nil {
		fatalErr := func(err error) {
			if err != nil {
				panic(err)
			}
		}
		fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize)
		var buf bytes.Buffer
		fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse))
		fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
		fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
		buf.Write(in)
		ioutil.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
	}

	return nil
}

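// decodeSequences decodes the sequences for this block into b.sequence using
// the previously prepared FSE tables.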
func (b *blockDec) decodeSequences(hist *history) error {
	if cap(b.sequence) < hist.decoders.nSeqs {
		if b.lowMem {
			b.sequence = make([]seqVals, 0, hist.decoders.nSeqs)
		} else {
			b.sequence = make([]seqVals, 0, 0x7F00+0xffff)
		}
	}
	b.sequence = b.sequence[:hist.decoders.nSeqs]
	if hist.decoders.nSeqs == 0 {
		hist.decoders.seqSize = len(hist.decoders.literals)
		return nil
	}
	hist.decoders.windowSize = hist.windowSize
	hist.decoders.prevOffset = hist.recentOffsets
	err := hist.decoders.decode(b.sequence)
	hist.recentOffsets = hist.decoders.prevOffset
	return err
}

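// executeSequences applies the decoded sequences to the literals and the
// history window, then hands the output to updateHistory.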
func (b *blockDec) executeSequences(hist *history) error {
	hbytes := hist.b
	if len(hbytes) > hist.windowSize {
		hbytes = hbytes[len(hbytes)-hist.windowSize:]
		// We do not need history anymore.
		if hist.dict != nil {
			hist.dict.content = nil
		}
	}
	hist.decoders.windowSize = hist.windowSize
	hist.decoders.out = b.dst[:0]
	err := hist.decoders.execute(b.sequence, hbytes)
	if err != nil {
		return err
	}
	return b.updateHistory(hist)
}

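// updateHistory moves the decoded output to b.dst and appends it to the
// history window, unless this is the last block of the frame.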
func (b *blockDec) updateHistory(hist *history) error {
	if len(b.data) > maxCompressedBlockSize {
		return fmt.Errorf("compressed block size too large (%d)", len(b.data))
	}
	// Set output and release references.
	b.dst = hist.decoders.out
	hist.recentOffsets = hist.decoders.prevOffset

	if b.Last {
		// if last block we don't care about history.
		println("Last block, no history returned")
		hist.b = hist.b[:0]
		return nil
	} else {
		hist.append(b.dst)
		if debugDecoder {
			println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b))
		}
	}
	hist.decoders.out, hist.decoders.literals = nil, nil
	return nil
}