author	Patrick Steinhardt <ps@pks.im>	2024-04-08 14:17:04 +0200
committer	Junio C Hamano <gitster@pobox.com>	2024-04-15 10:36:09 -0700
commit	ce1f213cc91cf545736048f28117fe1de89b8134 (patch)
tree	3e8401f9a1682045caa99e8e94211da7f9716eab
parent	15a60b747e4f0e0d11353f8e89bc9ce7b36c5512 (diff)
reftable/block: reuse `zstream` state on inflation
When calling `inflateInit()` and `inflate()`, the zlib library will
allocate several data structures for the underlying `zstream` to keep
track of various information. Thus, when inflating repeatedly, it is
possible to optimize memory allocation patterns by reusing the `zstream`
and then calling `inflateReset()` on it to prepare it for the next chunk
of data to inflate.

This is exactly what the reftable code is doing: when iterating through
reflogs we need to potentially inflate many log blocks, but we discard
the `zstream` every single time. Instead, as we reuse the `block_reader`
for each of the blocks anyway, we can initialize the `zstream` once and
then reuse it for subsequent inflations. Refactor the code to do so,
which leads to a significant reduction in the number of allocations.

The following measurements were done when iterating through 1 million
reflog entries. Before:

    HEAP SUMMARY:
        in use at exit: 13,473 bytes in 122 blocks
      total heap usage: 23,028 allocs, 22,906 frees, 162,813,552 bytes allocated

After:

    HEAP SUMMARY:
        in use at exit: 13,473 bytes in 122 blocks
      total heap usage: 302 allocs, 180 frees, 88,352 bytes allocated

Signed-off-by: Patrick Steinhardt <ps@pks.im>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
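As a standalone sketch of this reuse pattern against plain zlib (the
helper below and its error handling are illustrative only, not the
reftable code itself):

    #include <stdlib.h>
    #include <zlib.h>

    /*
     * One-time inflateInit(), then inflateReset() per buffer. The
     * z_stream and its internal state are allocated on first use and
     * kept alive across calls.
     */
    static int inflate_one(z_stream **zs, const unsigned char *src, size_t src_len,
                           unsigned char *dst, size_t dst_len)
    {
            int err;

            if (!*zs) {
                    *zs = calloc(1, sizeof(**zs));
                    if (!*zs)
                            return -1;
                    err = inflateInit(*zs);   /* allocates internal state once */
            } else {
                    err = inflateReset(*zs);  /* reuses the allocated state */
            }
            if (err != Z_OK)
                    return -1;

            (*zs)->next_in = (unsigned char *)src;
            (*zs)->avail_in = (uInt)src_len;
            (*zs)->next_out = dst;
            (*zs)->avail_out = (uInt)dst_len;

            /* Both sizes are known up front, so one Z_FINISH call suffices. */
            return inflate(*zs, Z_FINISH) == Z_STREAM_END ? 0 : -1;
    }

The matching inflateEnd() and free() then happen exactly once, at
release time, which is where block_reader_release() below puts them.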
 reftable/block.c  | 25 +++++++++++++++----------
 reftable/block.h  |  3 +++
 reftable/reader.c |  1 +
 3 files changed, 19 insertions(+), 10 deletions(-)
diff --git a/reftable/block.c b/reftable/block.c
index 435922b569..c6c4a68ea1 100644
--- a/reftable/block.c
+++ b/reftable/block.c
@@ -198,7 +198,6 @@ int block_reader_init(struct block_reader *br, struct reftable_block *block,
 		uint32_t block_header_skip = 4 + header_off;
 		uLong dst_len = sz - block_header_skip;
 		uLong src_len = block->len - block_header_skip;
-		z_stream stream = {0};
 
 		/* Log blocks specify the *uncompressed* size in their header. */
 		REFTABLE_ALLOC_GROW(br->uncompressed_data, sz,
@@ -207,16 +206,21 @@ int block_reader_init(struct block_reader *br, struct reftable_block *block,
 		/* Copy over the block header verbatim. It's not compressed. */
 		memcpy(br->uncompressed_data, block->data, block_header_skip);
 
-		err = inflateInit(&stream);
+		if (!br->zstream) {
+			REFTABLE_CALLOC_ARRAY(br->zstream, 1);
+			err = inflateInit(br->zstream);
+		} else {
+			err = inflateReset(br->zstream);
+		}
 		if (err != Z_OK) {
 			err = REFTABLE_ZLIB_ERROR;
 			goto done;
 		}
 
-		stream.next_in = block->data + block_header_skip;
-		stream.avail_in = src_len;
-		stream.next_out = br->uncompressed_data + block_header_skip;
-		stream.avail_out = dst_len;
+		br->zstream->next_in = block->data + block_header_skip;
+		br->zstream->avail_in = src_len;
+		br->zstream->next_out = br->uncompressed_data + block_header_skip;
+		br->zstream->avail_out = dst_len;
 
 		/*
 		 * We know both input as well as output size, and we know that
@@ -225,15 +229,14 @@ int block_reader_init(struct block_reader *br, struct reftable_block *block,
 		 * here to instruct zlib to inflate the data in one go, which
 		 * is more efficient than using `Z_NO_FLUSH`.
 		 */
-		err = inflate(&stream, Z_FINISH);
-		inflateEnd(&stream);
+		err = inflate(br->zstream, Z_FINISH);
 		if (err != Z_STREAM_END) {
 			err = REFTABLE_ZLIB_ERROR;
 			goto done;
 		}
 		err = 0;
 
-		if (stream.total_out + block_header_skip != sz) {
+		if (br->zstream->total_out + block_header_skip != sz) {
 			err = REFTABLE_FORMAT_ERROR;
 			goto done;
 		}
@@ -242,7 +245,7 @@ int block_reader_init(struct block_reader *br, struct reftable_block *block,
 		reftable_block_done(block);
 		block->data = br->uncompressed_data;
 		block->len = sz;
-		full_block_size = src_len + block_header_skip - stream.avail_in;
+		full_block_size = src_len + block_header_skip - br->zstream->avail_in;
 	} else if (full_block_size == 0) {
 		full_block_size = sz;
 	} else if (sz < full_block_size && sz < block->len &&
@@ -275,6 +278,8 @@ done:
 
 void block_reader_release(struct block_reader *br)
 {
+	inflateEnd(br->zstream);
+	reftable_free(br->zstream);
 	reftable_free(br->uncompressed_data);
 	reftable_block_done(&br->block);
 }
diff --git a/reftable/block.h b/reftable/block.h
index 12414eb642..c1bd1892cb 100644
--- a/reftable/block.h
+++ b/reftable/block.h
@@ -56,6 +56,8 @@ int block_writer_finish(struct block_writer *w);
 /* clears out internally allocated block_writer members. */
 void block_writer_release(struct block_writer *bw);
 
+struct z_stream_s;
+
 /* Read a block. */
 struct block_reader {
 	/* offset of the block header; nonzero for the first block in a
@@ -67,6 +69,7 @@ struct block_reader {
 	int hash_size;
 
 	/* Uncompressed data for log entries. */
+	struct z_stream_s *zstream;
 	unsigned char *uncompressed_data;
 	size_t uncompressed_cap;
diff --git a/reftable/reader.c b/reftable/reader.c
index aacd5f1337..481dff10d4 100644
--- a/reftable/reader.c
+++ b/reftable/reader.c
@@ -459,6 +459,7 @@ static int reader_seek_linear(struct table_iter *ti,
 	 * we would not do a linear search there anymore.
 	 */
 	memset(&next.br.block, 0, sizeof(next.br.block));
+	next.br.zstream = NULL;
 	next.br.uncompressed_data = NULL;
 	next.br.uncompressed_cap = 0;
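One detail of the block.h hunk is worth spelling out: `z_stream` is a
typedef for `struct z_stream_s`, so forward-declaring the struct tag
lets the header hold an opaque pointer without pulling in zlib.h; only
block.c, which actually drives the stream, needs the full definition.
A minimal illustration of the technique, with hypothetical names:

    /* example_reader.h: usable without including zlib.h. */
    struct z_stream_s;

    struct example_reader {
            /* Lazily allocated on first inflation; NULL until then. */
            struct z_stream_s *zstream;
    };

Because the pointer starts out NULL (reader.c clears it explicitly
above), block_reader_release() may run for a reader that never inflated
a log block. That is fine: inflateEnd() rejects a NULL stream with
Z_STREAM_ERROR instead of crashing, and freeing a NULL pointer is a
no-op.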