chore: make deps

2025-11-11 14:18:57 +01:00
parent db7c4042d0
commit 45af67d22d
590 changed files with 22837 additions and 16387 deletions
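
The hunks below are an excerpt from an update of the vendored github.com/klauspost/compress (inferred from the huff0, fse, s2 and zstd symbols; file paths are not shown in this view). Most changes are mechanical Go 1.21/1.22 modernizations: counted loops become range-over-int, hand-rolled clamps become the min and max builtins, and interface{} becomes any. A few are substantive: the little-endian Store64 helper gains an index parameter, Encode tests cap(dst) instead of len(dst), dictShardBits grows from 6 to 7 with a corrected shard-size constant, and a new go1.24-only file adds weak-pointer-backed EncodeTo/DecodeTo helpers.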

View File

@@ -143,7 +143,7 @@ func (b *bitWriter) flush32() {
// flushAlign will flush remaining full bytes and align to next byte boundary.
func (b *bitWriter) flushAlign() {
nbBytes := (b.nBits + 7) >> 3
for i := uint8(0); i < nbBytes; i++ {
for i := range nbBytes {
b.out = append(b.out, byte(b.bitContainer>>(i*8)))
}
b.nBits = 0
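
This first hunk shows the pattern repeated throughout the commit: Go 1.22 range-over-int. "for i := range n" iterates i = 0 .. n-1, and the loop variable adopts the type of n, so the uint8 counter above is preserved exactly. A minimal standalone sketch of the equivalence (names are illustrative):

package main

import "fmt"

func main() {
	var nbBytes uint8 = 3

	// Pre-Go 1.22 form, as removed throughout this commit.
	for i := uint8(0); i < nbBytes; i++ {
		fmt.Printf("old: i=%d type=%T\n", i, i)
	}

	// Go 1.22 form: i runs 0..nbBytes-1 and is typed uint8,
	// because it adopts the range operand's type.
	for i := range nbBytes {
		fmt.Printf("new: i=%d type=%T\n", i, i)
	}

	// When only the count matters the variable can be dropped,
	// which is the "for range v" form used in buildCTable below.
	for range nbBytes {
	}
}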

View File

@@ -396,7 +396,7 @@ func (s *Scratch) buildCTable() error {
if v > largeLimit {
s.zeroBits = true
}
for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ {
for range v {
tableSymbol[position] = symbol
position = (position + step) & tableMask
for position > highThreshold {

View File

@@ -85,7 +85,7 @@ func (b *bitWriter) flush32() {
// flushAlign will flush remaining full bytes and align to next byte boundary.
func (b *bitWriter) flushAlign() {
nbBytes := (b.nBits + 7) >> 3
for i := uint8(0); i < nbBytes; i++ {
for i := range nbBytes {
b.out = append(b.out, byte(b.bitContainer>>(i*8)))
}
b.nBits = 0

View File

@@ -276,7 +276,7 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) {
offsetIdx := len(s.Out)
s.Out = append(s.Out, sixZeros[:]...)
for i := 0; i < 4; i++ {
for i := range 4 {
toDo := src
if len(toDo) > segmentSize {
toDo = toDo[:segmentSize]
@@ -312,7 +312,7 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
segmentSize := (len(src) + 3) / 4
var wg sync.WaitGroup
wg.Add(4)
for i := 0; i < 4; i++ {
for i := range 4 {
toDo := src
if len(toDo) > segmentSize {
toDo = toDo[:segmentSize]
@@ -326,7 +326,7 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
}(i)
}
wg.Wait()
for i := 0; i < 4; i++ {
for i := range 4 {
o := s.tmpOut[i]
if len(o) > math.MaxUint16 {
// We cannot store the size in the jump table

View File

@@ -626,7 +626,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
var br [4]bitReaderBytes
start := 6
for i := 0; i < 3; i++ {
for i := range 3 {
length := int(src[i*2]) | (int(src[i*2+1]) << 8)
if start+length >= len(src) {
return nil, errors.New("truncated input (or invalid offset)")
@@ -798,10 +798,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
remainBytes := dstEvery - (decoded / 4)
for i := range br {
offset := dstEvery * i
endsAt := offset + remainBytes
if endsAt > len(out) {
endsAt = len(out)
}
endsAt := min(offset+remainBytes, len(out))
br := &br[i]
bitsLeft := br.remaining()
for bitsLeft > 0 {
@@ -864,7 +861,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
var br [4]bitReaderBytes
start := 6
for i := 0; i < 3; i++ {
for i := range 3 {
length := int(src[i*2]) | (int(src[i*2+1]) << 8)
if start+length >= len(src) {
return nil, errors.New("truncated input (or invalid offset)")
@@ -1035,10 +1032,7 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
remainBytes := dstEvery - (decoded / 4)
for i := range br {
offset := dstEvery * i
endsAt := offset + remainBytes
if endsAt > len(out) {
endsAt = len(out)
}
endsAt := min(offset+remainBytes, len(out))
br := &br[i]
bitsLeft := br.remaining()
for bitsLeft > 0 {
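
The endsAt rewrites in this file use the min builtin from Go 1.21, which folds the compute-then-clamp if-block into one expression; min and max work on any ordered type and are exact drop-ins for the removed code. A small sketch with illustrative values:

package main

import "fmt"

func main() {
	offset, remainBytes, outLen := 96, 64, 128

	// Removed form: compute, then clamp.
	endsAtOld := offset + remainBytes
	if endsAtOld > outLen {
		endsAtOld = outLen
	}

	// Added form: Go 1.21 min builtin, same result.
	endsAtNew := min(offset+remainBytes, outLen)

	fmt.Println(endsAtOld, endsAtNew) // 128 128
}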

View File

@@ -58,7 +58,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
var br [4]bitReaderShifted
// Decode "jump table"
start := 6
for i := 0; i < 3; i++ {
for i := range 3 {
length := int(src[i*2]) | (int(src[i*2+1]) << 8)
if start+length >= len(src) {
return nil, errors.New("truncated input (or invalid offset)")
@@ -109,10 +109,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
remainBytes := dstEvery - (decoded / 4)
for i := range br {
offset := dstEvery * i
endsAt := offset + remainBytes
if endsAt > len(out) {
endsAt = len(out)
}
endsAt := min(offset+remainBytes, len(out))
br := &br[i]
bitsLeft := br.remaining()
for bitsLeft > 0 {

View File

@@ -201,7 +201,7 @@ func (c cTable) write(s *Scratch) error {
for i := range hist[:16] {
hist[i] = 0
}
for n := uint8(0); n < maxSymbolValue; n++ {
for n := range maxSymbolValue {
v := bitsToWeight[c[n].nBits] & 15
huffWeight[n] = v
hist[v]++
@@ -271,7 +271,7 @@ func (c cTable) estTableSize(s *Scratch) (sz int, err error) {
for i := range hist[:16] {
hist[i] = 0
}
for n := uint8(0); n < maxSymbolValue; n++ {
for n := range maxSymbolValue {
v := bitsToWeight[c[n].nBits] & 15
huffWeight[n] = v
hist[v]++

View File

@@ -37,6 +37,6 @@ func Store32(b []byte, v uint32) {
}
// Store64 will store v at b.
func Store64(b []byte, v uint64) {
binary.LittleEndian.PutUint64(b, v)
func Store64[I Indexer](b []byte, i I, v uint64) {
binary.LittleEndian.PutUint64(b[i:], v)
}

View File

@@ -38,18 +38,15 @@ func Load64[I Indexer](b []byte, i I) uint64 {
// Store16 will store v at b.
func Store16(b []byte, v uint16) {
//binary.LittleEndian.PutUint16(b, v)
*(*uint16)(unsafe.Pointer(unsafe.SliceData(b))) = v
}
// Store32 will store v at b.
func Store32(b []byte, v uint32) {
//binary.LittleEndian.PutUint32(b, v)
*(*uint32)(unsafe.Pointer(unsafe.SliceData(b))) = v
}
// Store64 will store v at b.
func Store64(b []byte, v uint64) {
//binary.LittleEndian.PutUint64(b, v)
*(*uint64)(unsafe.Pointer(unsafe.SliceData(b))) = v
// Store64 will store v at b[i:].
func Store64[I Indexer](b []byte, i I, v uint64) {
*(*uint64)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) = v
}
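
Both Store64 variants (the portable one in the previous file and the unsafe one here) move the offset into the signature: callers now pass (b, i, v) instead of pre-slicing with b[i:]. In the unsafe path this lets the store use unsafe.Add on the slice's base pointer instead of materializing an intermediate slice header. A self-contained sketch of the call-site change; Indexer in the real package is assumed to be an integer constraint, so plain int stands in for it here:

package main

import (
	"encoding/binary"
	"fmt"
)

// storeOld mirrors the removed signature: the caller pre-slices.
func storeOld(b []byte, v uint64) {
	binary.LittleEndian.PutUint64(b, v)
}

// storeNew mirrors the added signature: the offset is explicit.
func storeNew(b []byte, i int, v uint64) {
	binary.LittleEndian.PutUint64(b[i:], v)
}

func main() {
	a := make([]byte, 16)
	b := make([]byte, 16)

	storeOld(a[8:], 0xdeadbeef) // old call shape
	storeNew(b, 8, 0xdeadbeef)  // new call shape

	fmt.Println(binary.LittleEndian.Uint64(a[8:]) == binary.LittleEndian.Uint64(b[8:])) // true
}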

View File

@@ -209,7 +209,7 @@ func (r *Reader) fill() error {
if !r.readFull(r.buf[:len(magicBody)], false) {
return r.err
}
for i := 0; i < len(magicBody); i++ {
for i := range len(magicBody) {
if r.buf[i] != magicBody[i] {
r.err = ErrCorrupt
return r.err

View File

@@ -20,8 +20,10 @@ import (
func Encode(dst, src []byte) []byte {
if n := MaxEncodedLen(len(src)); n < 0 {
panic(ErrTooLarge)
} else if len(dst) < n {
} else if cap(dst) < n {
dst = make([]byte, n)
} else {
dst = dst[:n]
}
// The block starts with the varint-encoded length of the decompressed bytes.
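
The flip from len(dst) to cap(dst) changes the reuse contract: a caller may now pass a zero-length slice with preallocated capacity and Encode will reslice it (dst = dst[:n]) instead of discarding it; previously only the length counted, so make([]byte, 0, n) always forced a fresh allocation. A hedged usage sketch, assuming this hunk is the s2 package's Encode (the file path is not shown):

package main

import (
	"fmt"

	"github.com/klauspost/compress/s2" // assumed import; the hunk's package is not identified
)

func main() {
	src := []byte("some payload to compress, repeated a bit, repeated a bit")

	// Preallocate once. With the cap(dst) check the buffer is
	// reused via dst = dst[:n] rather than reallocated.
	buf := make([]byte, 0, s2.MaxEncodedLen(len(src)))
	out := s2.Encode(buf, src)
	fmt.Println("compressed to", len(out), "bytes")
}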

View File

@@ -88,7 +88,7 @@ func (b *bitWriter) flush32() {
// flushAlign will flush remaining full bytes and align to next byte boundary.
func (b *bitWriter) flushAlign() {
nbBytes := (b.nBits + 7) >> 3
for i := uint8(0); i < nbBytes; i++ {
for i := range nbBytes {
b.out = append(b.out, byte(b.bitContainer>>(i*8)))
}
b.nBits = 0

View File

@@ -54,11 +54,11 @@ const (
)
var (
huffDecoderPool = sync.Pool{New: func() interface{} {
huffDecoderPool = sync.Pool{New: func() any {
return &huff0.Scratch{}
}}
fseDecoderPool = sync.Pool{New: func() interface{} {
fseDecoderPool = sync.Pool{New: func() any {
return &fseDecoder{}
}}
)
@@ -553,7 +553,7 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
if compMode&3 != 0 {
return errors.New("corrupt block: reserved bits not zero")
}
for i := uint(0); i < 3; i++ {
for i := range uint(3) {
mode := seqCompMode((compMode >> (6 - i*2)) & 3)
if debugDecoder {
println("Table", tableIndex(i), "is", mode)

View File

@@ -373,11 +373,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
if cap(dst) == 0 && !d.o.limitToCap {
// Allocate len(input) * 2 by default if nothing is provided
// and we didn't get frame content size.
size := len(input) * 2
// Cap to 1 MB.
if size > 1<<20 {
size = 1 << 20
}
size := min(
// Cap to 1 MB.
len(input)*2, 1<<20)
if uint64(size) > d.o.maxDecodedSize {
size = int(d.o.maxDecodedSize)
}
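
Note how the rewrite carries the "// Cap to 1 MB." comment into the argument list, which is why min( spans three lines here (max( does the same in fastBase.WindowSize below): gofmt keeps a comment attached to the expression it preceded, a telltale of an automated rewrite. The result is equivalent to the removed clamp, including the maxDecodedSize check that follows.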

View File

@@ -194,17 +194,17 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
hist := o.History
contents := o.Contents
debug := o.DebugOut != nil
println := func(args ...interface{}) {
println := func(args ...any) {
if o.DebugOut != nil {
fmt.Fprintln(o.DebugOut, args...)
}
}
printf := func(s string, args ...interface{}) {
printf := func(s string, args ...any) {
if o.DebugOut != nil {
fmt.Fprintf(o.DebugOut, s, args...)
}
}
print := func(args ...interface{}) {
print := func(args ...any) {
if o.DebugOut != nil {
fmt.Fprint(o.DebugOut, args...)
}
@@ -424,16 +424,10 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
}
// Literal table
avgSize := litTotal
if avgSize > huff0.BlockSizeMax/2 {
avgSize = huff0.BlockSizeMax / 2
}
avgSize := min(litTotal, huff0.BlockSizeMax/2)
huffBuff := make([]byte, 0, avgSize)
// Target size
div := litTotal / avgSize
if div < 1 {
div = 1
}
div := max(litTotal/avgSize, 1)
if debug {
println("Huffman weights:")
}
@@ -454,7 +448,7 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
huffBuff = append(huffBuff, 255)
}
scratch := &huff0.Scratch{TableLog: 11}
for tries := 0; tries < 255; tries++ {
for tries := range 255 {
scratch = &huff0.Scratch{TableLog: 11}
_, _, err = huff0.Compress1X(huffBuff, scratch)
if err == nil {
@@ -471,7 +465,7 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
// Bail out.... Just generate something
huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...)
for i := 0; i < 128; i++ {
for i := range 128 {
huffBuff = append(huffBuff, byte(i))
}
continue

View File

@@ -8,7 +8,7 @@ import (
)
const (
dictShardBits = 6
dictShardBits = 7
)
type fastBase struct {
@@ -41,11 +41,9 @@ func (e *fastBase) AppendCRC(dst []byte) []byte {
// or a window size small enough to contain the input size, if > 0.
func (e *fastBase) WindowSize(size int64) int32 {
if size > 0 && size < int64(e.maxMatchOff) {
b := int32(1) << uint(bits.Len(uint(size)))
// Keep minimum window.
if b < 1024 {
b = 1024
}
b := max(
// Keep minimum window.
int32(1)<<uint(bits.Len(uint(size))), 1024)
return b
}
return e.maxMatchOff

View File

@@ -158,11 +158,9 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
// Use this to estimate literal cost.
// Scaled by 10 bits.
bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src))
// Huffman can never go < 1 bit/byte
if bitsPerByte < 1024 {
bitsPerByte = 1024
}
bitsPerByte := max(
// Huffman can never go < 1 bit/byte
int32((compress.ShannonEntropyBits(src)*1024)/len(src)), 1024)
// Override src
src = e.hist
@@ -235,10 +233,7 @@ encodeLoop:
// Extend candidate match backwards as far as possible.
// Do not extend repeats as we can assume they are optimal
// and offsets change if s == nextEmit.
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
}
tMin := max(s-e.maxMatchOff, 0)
for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength {
s--
offset--
@@ -382,10 +377,7 @@ encodeLoop:
nextEmit = s
// Index skipped...
end := s
if s > sLimit+4 {
end = sLimit + 4
}
end := min(s, sLimit+4)
off := index0 + e.cur
for index0 < end {
cv0 := load6432(src, index0)
@@ -444,10 +436,7 @@ encodeLoop:
nextEmit = s
// Index old s + 1 -> s - 1 or sLimit
end := s
if s > sLimit-4 {
end = sLimit - 4
}
end := min(s, sLimit-4)
off := index0 + e.cur
for index0 < end {

View File

@@ -190,10 +190,7 @@ encodeLoop:
// and have to do special offset treatment.
startLimit := nextEmit + 1
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
}
tMin := max(s-e.maxMatchOff, 0)
for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
repIndex--
start--
@@ -252,10 +249,7 @@ encodeLoop:
// and have to do special offset treatment.
startLimit := nextEmit + 1
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
}
tMin := max(s-e.maxMatchOff, 0)
for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
repIndex--
start--
@@ -480,10 +474,7 @@ encodeLoop:
l := matched
// Extend backwards
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
}
tMin := max(s-e.maxMatchOff, 0)
for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
s--
t--
@@ -719,10 +710,7 @@ encodeLoop:
// and have to do special offset treatment.
startLimit := nextEmit + 1
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
}
tMin := max(s-e.maxMatchOff, 0)
for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
repIndex--
start--
@@ -783,10 +771,7 @@ encodeLoop:
// and have to do special offset treatment.
startLimit := nextEmit + 1
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
}
tMin := max(s-e.maxMatchOff, 0)
for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
repIndex--
start--
@@ -1005,10 +990,7 @@ encodeLoop:
l := matched
// Extend backwards
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
}
tMin := max(s-e.maxMatchOff, 0)
for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
s--
t--

View File

@@ -13,7 +13,7 @@ const (
dFastLongLen = 8 // Bytes used for table hash
dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table
dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard
dLongTableShardSize = dFastLongTableSize / dLongTableShardCnt // Size of an individual shard
dFastShortTableBits = tableBits // Bits used in the short match table
dFastShortTableSize = 1 << dFastShortTableBits // Size of the table
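
Unlike the surrounding modernizations, this first hunk is a behavioral fix: dLongTableShardSize was computed with tableShardCnt, the short-table shard count, instead of dLongTableShardCnt, so the long-table shard size was wrong wherever the two counts differed. Together with dictShardBits moving from 6 to 7 earlier in the commit (both shard counts are derived from it), the corrected divisor keeps each shard's size consistent with its count.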
@@ -149,10 +149,7 @@ encodeLoop:
// and have to do special offset treatment.
startLimit := nextEmit + 1
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
}
tMin := max(s-e.maxMatchOff, 0)
for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
repIndex--
start--
@@ -266,10 +263,7 @@ encodeLoop:
l := e.matchlen(s+4, t+4, src) + 4
// Extend backwards
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
}
tMin := max(s-e.maxMatchOff, 0)
for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
s--
t--
@@ -462,10 +456,7 @@ encodeLoop:
// and have to do special offset treatment.
startLimit := nextEmit + 1
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
}
tMin := max(s-e.maxMatchOff, 0)
for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] {
repIndex--
start--
@@ -576,10 +567,7 @@ encodeLoop:
l := int32(matchLen(src[s+4:], src[t+4:])) + 4
// Extend backwards
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
}
tMin := max(s-e.maxMatchOff, 0)
for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
s--
t--
@@ -809,10 +797,7 @@ encodeLoop:
// and have to do special offset treatment.
startLimit := nextEmit + 1
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
}
tMin := max(s-e.maxMatchOff, 0)
for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
repIndex--
start--
@@ -927,10 +912,7 @@ encodeLoop:
l := e.matchlen(s+4, t+4, src) + 4
// Extend backwards
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
}
tMin := max(s-e.maxMatchOff, 0)
for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
s--
t--

View File

@@ -143,10 +143,7 @@ encodeLoop:
// and have to do special offset treatment.
startLimit := nextEmit + 1
sMin := s - e.maxMatchOff
if sMin < 0 {
sMin = 0
}
sMin := max(s-e.maxMatchOff, 0)
for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch {
repIndex--
start--
@@ -223,10 +220,7 @@ encodeLoop:
l := e.matchlen(s+4, t+4, src) + 4
// Extend backwards
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
}
tMin := max(s-e.maxMatchOff, 0)
for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
s--
t--
@@ -387,10 +381,7 @@ encodeLoop:
// and have to do special offset treatment.
startLimit := nextEmit + 1
sMin := s - e.maxMatchOff
if sMin < 0 {
sMin = 0
}
sMin := max(s-e.maxMatchOff, 0)
for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] {
repIndex--
start--
@@ -469,10 +460,7 @@ encodeLoop:
l := e.matchlen(s+4, t+4, src) + 4
// Extend backwards
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
}
tMin := max(s-e.maxMatchOff, 0)
for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
s--
t--
@@ -655,10 +643,7 @@ encodeLoop:
// and have to do special offset treatment.
startLimit := nextEmit + 1
sMin := s - e.maxMatchOff
if sMin < 0 {
sMin = 0
}
sMin := max(s-e.maxMatchOff, 0)
for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch {
repIndex--
start--
@@ -735,10 +720,7 @@ encodeLoop:
l := e.matchlen(s+4, t+4, src) + 4
// Extend backwards
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
}
tMin := max(s-e.maxMatchOff, 0)
for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
s--
t--

View File

@@ -238,10 +238,7 @@ func (d *frameDec) reset(br byteBuffer) error {
if d.WindowSize == 0 && d.SingleSegment {
// We may not need window in this case.
d.WindowSize = d.FrameContentSize
if d.WindowSize < MinWindowSize {
d.WindowSize = MinWindowSize
}
d.WindowSize = max(d.FrameContentSize, MinWindowSize)
if d.WindowSize > d.o.maxDecodedSize {
if debugDecoder {
printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize)

View File

@@ -149,7 +149,7 @@ func (s *fseEncoder) buildCTable() error {
if v > largeLimit {
s.zeroBits = true
}
for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ {
for range v {
tableSymbol[position] = symbol
position = (position + step) & tableMask
for position > highThreshold {

View File

@@ -231,10 +231,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
out := s.out
maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
maxBlockSize = s.windowSize
}
maxBlockSize := min(s.windowSize, maxCompressedBlockSize)
if debugDecoder {
println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream")

View File

@@ -79,10 +79,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
br := s.br
maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
maxBlockSize = s.windowSize
}
maxBlockSize := min(s.windowSize, maxCompressedBlockSize)
ctx := decodeSyncAsmContext{
llTable: s.litLengths.fse.dt[:maxTablesize],
@@ -237,10 +234,7 @@ func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmC
func (s *sequenceDecs) decode(seqs []seqVals) error {
br := s.br
maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
maxBlockSize = s.windowSize
}
maxBlockSize := min(s.windowSize, maxCompressedBlockSize)
ctx := decodeAsmContext{
llTable: s.litLengths.fse.dt[:maxTablesize],

View File

@@ -0,0 +1,56 @@
// Copyright 2025+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
//go:build go1.24
package zstd
import (
"errors"
"runtime"
"sync"
"weak"
)
var weakMu sync.Mutex
var simpleEnc weak.Pointer[Encoder]
var simpleDec weak.Pointer[Decoder]
// EncodeTo appends the encoded data from src to dst.
func EncodeTo(dst []byte, src []byte) []byte {
weakMu.Lock()
enc := simpleEnc.Value()
if enc == nil {
var err error
enc, err = NewWriter(nil, WithEncoderConcurrency(runtime.NumCPU()), WithWindowSize(1<<20), WithLowerEncoderMem(true), WithZeroFrames(true))
if err != nil {
panic("failed to create simple encoder: " + err.Error())
}
simpleEnc = weak.Make(enc)
}
weakMu.Unlock()
return enc.EncodeAll(src, dst)
}
// DecodeTo appends the decoded data from src to dst.
// The maximum decoded size is 1GiB,
// not including what may already be in dst.
func DecodeTo(dst []byte, src []byte) ([]byte, error) {
weakMu.Lock()
dec := simpleDec.Value()
if dec == nil {
var err error
dec, err = NewReader(nil, WithDecoderConcurrency(runtime.NumCPU()), WithDecoderLowmem(true), WithDecoderMaxMemory(1<<30))
if err != nil {
weakMu.Unlock()
return nil, errors.New("failed to create simple decoder: " + err.Error())
}
runtime.SetFinalizer(dec, func(d *Decoder) {
d.Close()
})
simpleDec = weak.Make(dec)
}
weakMu.Unlock()
return dec.DecodeAll(src, dst)
}
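
The new file (go1.24-only) adds one-shot helpers that share a lazily created Encoder and Decoder through weak.Pointer: the GC may reclaim the shared object while it is idle, and the mutex-guarded nil check rebuilds it on next use; the decoder additionally gets a finalizer so its resources are released via Close. A minimal usage sketch (import path assumed):

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd" // assumed home of the new helpers
)

func main() {
	payload := []byte("hello, weak-pointer zstd helpers")

	// EncodeTo appends a compressed frame to dst (nil here).
	compressed := zstd.EncodeTo(nil, payload)

	// DecodeTo appends decompressed data to dst; it returns an
	// error since input may be corrupt.
	plain, err := zstd.DecodeTo(nil, compressed)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plain) == string(payload)) // true
}

Because only weak references are held, an idle program does not pin the shared encoder's buffers in memory; the trade-off is that a collection between calls forces a full rebuild.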

View File

@@ -257,7 +257,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
if !r.readFull(r.buf[:len(snappyMagicBody)], false) {
return written, r.err
}
for i := 0; i < len(snappyMagicBody); i++ {
for i := range len(snappyMagicBody) {
if r.buf[i] != snappyMagicBody[i] {
println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i)
r.err = ErrSnappyCorrupt

View File

@@ -19,7 +19,7 @@ const ZipMethodWinZip = 93
const ZipMethodPKWare = 20
// zipReaderPool is the default reader pool.
var zipReaderPool = sync.Pool{New: func() interface{} {
var zipReaderPool = sync.Pool{New: func() any {
z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1))
if err != nil {
panic(err)

View File

@@ -98,13 +98,13 @@ var (
ErrDecoderNilInput = errors.New("nil input provided as reader")
)
func println(a ...interface{}) {
func println(a ...any) {
if debug || debugDecoder || debugEncoder {
log.Println(a...)
}
}
func printf(format string, a ...interface{}) {
func printf(format string, a ...any) {
if debug || debugDecoder || debugEncoder {
log.Printf(format, a...)
}