From f7549a5726c8ce6e82e016c1560dd89597850aa2 Mon Sep 17 00:00:00 2001
From: tobbe76
Date: Mon, 2 Sep 2024 09:40:01 +0200
Subject: [PATCH] Use sync.Pool for chunk buffer

This reduces the number of GC cycles and improves performance.
---
 cache/disk/casblob/casblob.go | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/cache/disk/casblob/casblob.go b/cache/disk/casblob/casblob.go
index c6f73d67c..2d4179523 100644
--- a/cache/disk/casblob/casblob.go
+++ b/cache/disk/casblob/casblob.go
@@ -10,6 +10,7 @@ import (
 	"io"
 	"log"
 	"os"
+	"sync"
 
 	"github.com/buchgr/bazel-remote/v2/cache/disk/zstdimpl"
 )
@@ -508,6 +509,13 @@ func (b *readCloserWrapper) Close() error {
 	return f.Close()
 }
 
+// sync.Pool to reuse large byte buffers between multiple goroutines.
+var chunkBufferPool = &sync.Pool{
+	New: func() any {
+		return make([]byte, defaultChunkSize)
+	},
+}
+
 // Read from r and write to f, using CompressionType t.
 // Return the size on disk or an error if something went wrong.
 func WriteAndClose(zstd zstdimpl.ZstdImpl, r io.Reader, f *os.File, t CompressionType, hash string, size int64) (int64, error) {
@@ -577,7 +585,8 @@ func WriteAndClose(zstd zstdimpl.ZstdImpl, r io.Reader, f *os.File, t Compressio
 	remainingRawData := size
 	var numRead int
 
-	uncompressedChunk := make([]byte, chunkSize)
+	uncompressedChunk := chunkBufferPool.Get().([]byte)
+	defer chunkBufferPool.Put(uncompressedChunk)
 
 	hasher := sha256.New()
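
For readers unfamiliar with the pattern, here is a minimal, self-contained sketch of what the change does, outside the bazel-remote codebase. Only the chunkBufferPool declaration mirrors the patch; the defaultChunkSize value and the consumeChunks function are assumptions for illustration. A sync.Pool amortizes allocations across goroutines: Get returns a previously released buffer when one is available (calling New otherwise), and Put hands it back for reuse, so the garbage collector no longer has to reclaim one large buffer per WriteAndClose call.

package main

import (
	"fmt"
	"sync"
)

// Assumed value for illustration; in casblob.go, defaultChunkSize is an
// existing constant.
const defaultChunkSize = 1024 * 1024

// chunkBufferPool hands out reusable chunk-sized buffers so that
// concurrent writers do not allocate, and the GC does not have to
// reclaim, a fresh buffer on every call. This declaration mirrors the
// one added by the patch.
var chunkBufferPool = &sync.Pool{
	New: func() any {
		return make([]byte, defaultChunkSize)
	},
}

// consumeChunks is a hypothetical stand-in for WriteAndClose: it borrows
// a buffer from the pool, processes the input through it chunk by chunk,
// and returns the buffer to the pool when it exits.
func consumeChunks(data []byte) int {
	buf := chunkBufferPool.Get().([]byte)
	defer chunkBufferPool.Put(buf)

	total := 0
	for len(data) > 0 {
		n := copy(buf, data)
		data = data[n:]
		total += n
	}
	return total
}

func main() {
	fmt.Println(consumeChunks(make([]byte, 3*defaultChunkSize+7)))
}

One caveat, not raised in the patch itself: staticcheck's SA6002 check flags storing a bare slice in a sync.Pool, because the slice header is boxed into an interface value on every Put, which is itself a small allocation. Pooling a *[]byte avoids that cost; the approach above accepts it in exchange for a simpler call site.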