author    Patrick Steinhardt <ps@pks.im>  2024-04-08 14:24:35 +0200
committer Junio C Hamano <gitster@pobox.com>  2024-04-08 17:01:42 -0700
commit    a155ab2bf4e16b8fe1e0644d9d57d2c62eecd452 (patch)
tree      92df875b5d1038d64f149f70d12259ac459eeb72
parent    8aaeffe3b5b85f996ac82ec4b0b0e538f4b5a0ac (diff)
reftable/block: reuse zstream when writing log blocks
While most reftable blocks are written to disk as-is, blocks for log
records are compressed with zlib. To compress them we use `compress2()`,
which is a simple wrapper around the more complex `zstream` interface
that would otherwise require multiple function invocations. One downside
of this simplicity is that `compress2()` reallocates the internal state
of the `zstream` interface on every single invocation. Consequently, as
we call `compress2()` for every single log block we are about to write,
this leads to significant memory allocation churn.

Refactor the code so that the block writer reuses a `zstream`. This
significantly reduces the number of bytes allocated when writing many
refs in a single transaction, as demonstrated by the following benchmark
that writes 100k refs in a single transaction.

Before:

    HEAP SUMMARY:
        in use at exit: 671,931 bytes in 151 blocks
      total heap usage: 22,631,887 allocs, 22,631,736 frees, 1,854,670,793 bytes allocated

After:

    HEAP SUMMARY:
        in use at exit: 671,931 bytes in 151 blocks
      total heap usage: 22,620,528 allocs, 22,620,377 frees, 1,245,549,984 bytes allocated

Signed-off-by: Patrick Steinhardt <ps@pks.im>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
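[Editor's note: the pattern the patch adopts can be sketched outside the
reftable code. The following standalone program is illustrative only;
`compress_block()` and its buffers are hypothetical and not part of this
commit, which uses REFTABLE_CALLOC_ARRAY and reftable error codes
instead. The state is allocated once by `deflateInit()`, rewound with
`deflateReset()` per block, sized via `deflateBound()`, and driven to
completion with a single `deflate(..., Z_FINISH)` call.]

	#include <stdlib.h>
	#include <string.h>
	#include <zlib.h>

	/* Compress `src` into a freshly allocated buffer, reusing `zs`. */
	static int compress_block(z_stream *zs, const unsigned char *src,
				  uLong src_len, unsigned char **out,
				  uLong *out_len)
	{
		unsigned char *buf;
		uLong bound;

		/* Rewind the stream instead of tearing down and reallocating. */
		if (deflateReset(zs) != Z_OK)
			return -1;

		/* Upper bound on the deflated size for this stream's setup. */
		bound = deflateBound(zs, src_len);
		buf = malloc(bound);
		if (!buf)
			return -1;

		zs->next_in = (unsigned char *)src;
		zs->avail_in = src_len;
		zs->next_out = buf;
		zs->avail_out = bound;

		/*
		 * Given at least `bound` bytes of output space, deflate()
		 * with Z_FINISH compresses everything in one call and
		 * returns Z_STREAM_END.
		 */
		if (deflate(zs, Z_FINISH) != Z_STREAM_END) {
			free(buf);
			return -1;
		}

		*out = buf;
		*out_len = zs->total_out;
		return 0;
	}

	int main(void)
	{
		z_stream zs = { .zalloc = Z_NULL, .zfree = Z_NULL, .opaque = Z_NULL };
		unsigned char block[4096];
		int i;

		memset(block, 'r', sizeof(block));
		if (deflateInit(&zs, 9) != Z_OK) /* allocate internal state once */
			return 1;

		for (i = 0; i < 1000; i++) { /* one call per log block */
			unsigned char *out;
			uLong out_len;

			if (compress_block(&zs, block, sizeof(block), &out, &out_len))
				return 1;
			free(out);
		}

		deflateEnd(&zs); /* release the shared state at the very end */
		return 0;
	}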
-rw-r--r--  reftable/block.c  80
-rw-r--r--  reftable/block.h  1
2 files changed, 53 insertions, 28 deletions
diff --git a/reftable/block.c b/reftable/block.c
index e2a2cee58d..9129305515 100644
--- a/reftable/block.c
+++ b/reftable/block.c
@@ -76,6 +76,10 @@ void block_writer_init(struct block_writer *bw, uint8_t typ, uint8_t *buf,
 	bw->entries = 0;
 	bw->restart_len = 0;
 	bw->last_key.len = 0;
+	if (!bw->zstream) {
+		REFTABLE_CALLOC_ARRAY(bw->zstream, 1);
+		deflateInit(bw->zstream, 9);
+	}
 }
 
 uint8_t block_writer_type(struct block_writer *bw)
@@ -139,39 +143,57 @@ int block_writer_finish(struct block_writer *w)
 	w->next += 2;
 	put_be24(w->buf + 1 + w->header_off, w->next);
 
+	/*
+	 * Log records are stored zlib-compressed. Note that the compression
+	 * also spans over the restart points we have just written.
+	 */
 	if (block_writer_type(w) == BLOCK_TYPE_LOG) {
 		int block_header_skip = 4 + w->header_off;
-		uLongf src_len = w->next - block_header_skip;
-		uLongf dest_cap = src_len * 1.001 + 12;
-		uint8_t *compressed;
-
-		REFTABLE_ALLOC_ARRAY(compressed, dest_cap);
-
-		while (1) {
-			uLongf out_dest_len = dest_cap;
-			int zresult = compress2(compressed, &out_dest_len,
-						w->buf + block_header_skip,
-						src_len, 9);
-			if (zresult == Z_BUF_ERROR && dest_cap < LONG_MAX) {
-				dest_cap *= 2;
-				compressed =
-					reftable_realloc(compressed, dest_cap);
-				if (compressed)
-					continue;
-			}
-
-			if (Z_OK != zresult) {
-				reftable_free(compressed);
-				return REFTABLE_ZLIB_ERROR;
-			}
-
-			memcpy(w->buf + block_header_skip, compressed,
-			       out_dest_len);
-			w->next = out_dest_len + block_header_skip;
+		uLongf src_len = w->next - block_header_skip, compressed_len;
+		unsigned char *compressed;
+		int ret;
+
+		ret = deflateReset(w->zstream);
+		if (ret != Z_OK)
+			return REFTABLE_ZLIB_ERROR;
+
+		/*
+		 * Precompute the upper bound of how many bytes the compressed
+		 * data may end up with. Combined with `Z_FINISH`, `deflate()`
+		 * is guaranteed to return `Z_STREAM_END`.
+		 */
+		compressed_len = deflateBound(w->zstream, src_len);
+		REFTABLE_ALLOC_ARRAY(compressed, compressed_len);
+
+		w->zstream->next_out = compressed;
+		w->zstream->avail_out = compressed_len;
+		w->zstream->next_in = w->buf + block_header_skip;
+		w->zstream->avail_in = src_len;
+
+		/*
+		 * We want to perform all compression in a single step, which
+		 * is why we can pass Z_FINISH here. As we have precomputed the
+		 * deflated buffer's size via `deflateBound()` this function is
+		 * guaranteed to succeed according to the zlib documentation.
+		 */
+		ret = deflate(w->zstream, Z_FINISH);
+		if (ret != Z_STREAM_END) {
 			reftable_free(compressed);
-			break;
+			return REFTABLE_ZLIB_ERROR;
 		}
+
+		/*
+		 * Overwrite the uncompressed data we have already written and
+		 * adjust the `next` pointer to point right after the
+		 * compressed data.
+		 */
+		memcpy(w->buf + block_header_skip, compressed,
+		       w->zstream->total_out);
+		w->next = w->zstream->total_out + block_header_skip;
+
+		reftable_free(compressed);
 	}
+
 	return w->next;
 }
@@ -425,6 +447,8 @@ done:
 
 void block_writer_release(struct block_writer *bw)
 {
+	deflateEnd(bw->zstream);
+	FREE_AND_NULL(bw->zstream);
 	FREE_AND_NULL(bw->restarts);
 	strbuf_release(&bw->last_key);
 	/* the block is not owned. */
diff --git a/reftable/block.h b/reftable/block.h
index 47acc62c0a..1375957fc8 100644
--- a/reftable/block.h
+++ b/reftable/block.h
@@ -18,6 +18,7 @@ https://developers.google.com/open-source/licenses/bsd
  * allocation overhead.
  */
 struct block_writer {
+	z_stream *zstream;
 	uint8_t *buf;
 	uint32_t block_size;
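
[Editor's note: the `deflateBound()`/`Z_FINISH` contract the new code
relies on can be sanity-checked with a small round trip. This harness is
illustrative only and not part of the commit; all sizes are arbitrary.]

	#include <assert.h>
	#include <string.h>
	#include <zlib.h>

	int main(void)
	{
		z_stream zs = { .zalloc = Z_NULL, .zfree = Z_NULL, .opaque = Z_NULL };
		unsigned char src[1024], zbuf[2048], back[1024];
		uLongf back_len = sizeof(back);

		memset(src, 'x', sizeof(src));
		assert(deflateInit(&zs, 9) == Z_OK);
		/* The output buffer is at least as large as the bound. */
		assert(deflateBound(&zs, sizeof(src)) <= sizeof(zbuf));

		zs.next_in = src;
		zs.avail_in = sizeof(src);
		zs.next_out = zbuf;
		zs.avail_out = sizeof(zbuf);

		/* Guaranteed to finish in one step given the bounded buffer. */
		assert(deflate(&zs, Z_FINISH) == Z_STREAM_END);

		/* Inflating the result must reproduce the input exactly. */
		assert(uncompress(back, &back_len, zbuf, zs.total_out) == Z_OK);
		assert(back_len == sizeof(src));
		assert(!memcmp(back, src, sizeof(src)));

		deflateEnd(&zs);
		return 0;
	}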