Use syncpool for chunk buffer
This reduces the number of GCs and improves performance.
tobbe76 committed Sep 5, 2024
1 parent 59d7a46 commit f7549a5
Showing 1 changed file with 10 additions and 1 deletion.
cache/disk/casblob/casblob.go (10 additions, 1 deletion)
@@ -10,6 +10,7 @@ import (
 	"io"
 	"log"
 	"os"
+	"sync"
 
 	"github.com/buchgr/bazel-remote/v2/cache/disk/zstdimpl"
 )
@@ -508,6 +509,13 @@ func (b *readCloserWrapper) Close() error {
 	return f.Close()
 }
 
+// sync pool to reuse large byte buffer between multiple go routines
+var chunkBufferPool = &sync.Pool{
+	New: func() any {
+		return make([]byte, defaultChunkSize, defaultChunkSize)
Check failure (golangci-lint, GitHub Actions) at cache/disk/casblob/casblob.go line 515 — S1019: should use make([]byte, defaultChunkSize) instead (gosimple)
+	},
+}
 
 // Read from r and write to f, using CompressionType t.
 // Return the size on disk or an error if something went wrong.
 func WriteAndClose(zstd zstdimpl.ZstdImpl, r io.Reader, f *os.File, t CompressionType, hash string, size int64) (int64, error) {
@@ -577,7 +585,8 @@ func WriteAndClose(zstd zstdimpl.ZstdImpl, r io.Reader, f *os.File, t CompressionType, hash string, size int64) (int64, error) {
 	remainingRawData := size
 	var numRead int
 
-	uncompressedChunk := make([]byte, chunkSize)
+	uncompressedChunk := chunkBufferPool.Get().([]byte)
+	defer chunkBufferPool.Put(uncompressedChunk)

Check failure (golangci-lint, GitHub Actions) at cache/disk/casblob/casblob.go line 589 — SA6002: argument should be pointer-like to avoid allocations (staticcheck)

 	hasher := sha256.New()

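The two lint failures above flag follow-up work: S1019 notes the redundant capacity argument to make, and SA6002 warns that putting a bare []byte into a sync.Pool copies the slice header into an interface value on every Put, costing an allocation. Below is a minimal sketch of how both findings could be resolved together by storing *[]byte in the pool. This is an illustration, not code from this commit, and the defaultChunkSize value is assumed.

```go
package main

import (
	"fmt"
	"sync"
)

// Assumed value for illustration; the real constant is defined in
// cache/disk/casblob/casblob.go.
const defaultChunkSize = 1024 * 1024

// Storing *[]byte keeps Get/Put allocation-free: a pointer fits in an
// interface value directly, whereas a []byte would be copied to the
// heap on every Put (staticcheck SA6002).
var chunkBufferPool = sync.Pool{
	New: func() any {
		// make([]byte, n) already yields cap == n, so the explicit
		// capacity argument is redundant (gosimple S1019).
		buf := make([]byte, defaultChunkSize)
		return &buf
	},
}

func main() {
	bufp := chunkBufferPool.Get().(*[]byte)
	defer chunkBufferPool.Put(bufp)

	uncompressedChunk := *bufp
	fmt.Println(len(uncompressedChunk)) // 1048576
}
```

The caller holds on to the pointer and returns it to the pool directly, rather than re-taking the address of a local slice, so no new *[]byte is allocated on the Put path.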
