Skip to content

Commit

Permalink
Use sync.Pool for chunk buffer
Browse files Browse the repository at this point in the history
This reduces the number of GC cycles and improves performance.
  • Loading branch information
tobbe76 committed Sep 2, 2024
1 parent 59d7a46 commit 933d7a2
Showing 1 changed file with 16 additions and 1 deletion.
17 changes: 16 additions & 1 deletion cache/disk/casblob/casblob.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ import (
"io"
"log"
"os"
"sync"

"github.com/buchgr/bazel-remote/v2/cache/disk/zstdimpl"
)
Expand Down Expand Up @@ -508,6 +509,14 @@ func (b *readCloserWrapper) Close() error {
return f.Close()
}

// sync pool to reuse large byte buffer between multiple go routines
var chunkBufferPool = &sync.Pool{
New: func() any {
b := make([]byte, defaultChunkSize, defaultChunkSize)

Check failure on line 515 in cache/disk/casblob/casblob.go

View workflow job for this annotation

GitHub Actions / golangci-lint

S1019: should use make([]byte, defaultChunkSize) instead (gosimple)
return &b
},
}

// Read from r and write to f, using CompressionType t.
// Return the size on disk or an error if something went wrong.
func WriteAndClose(zstd zstdimpl.ZstdImpl, r io.Reader, f *os.File, t CompressionType, hash string, size int64) (int64, error) {
Expand Down Expand Up @@ -577,7 +586,13 @@ func WriteAndClose(zstd zstdimpl.ZstdImpl, r io.Reader, f *os.File, t Compressio
remainingRawData := size
var numRead int

uncompressedChunk := make([]byte, chunkSize)
chunkBufferPtr := chunkBufferPool.Get().(*[]byte)
defer func() {
if chunkBufferPtr != nil {
chunkBufferPool.Put(chunkBufferPtr)
}
}()
uncompressedChunk := *chunkBufferPtr

hasher := sha256.New()

Expand Down

0 comments on commit 933d7a2

Please sign in to comment.