path: root/reftable/stack.c
author	Patrick Steinhardt <ps@pks.im>	2024-08-23 16:12:57 +0200
committer	Junio C Hamano <gitster@pobox.com>	2024-08-23 08:04:48 -0700
commit	85da2a2ab62e24a8b9ff183fe3a451b445632487 (patch)
tree	5c0f0af60471344da322772bb1c1af06082325e8 /reftable/stack.c
parent	1302ed68d4f5452242d47ac609b06b793a18ea0b (diff)
download	git-85da2a2ab62e24a8b9ff183fe3a451b445632487.tar.gz
reftable/stack: fix segfault when reload with reused readers fails
It is expected that reloading the stack can fail with concurrent writers, e.g. because a table that we just wanted to read got compacted away in the meantime. In case we decided to reuse readers, though, a failed reload causes a segfault: we unconditionally release all new readers, including the reused ones. As those are still referenced by the current stack, we will eventually try to dereference the already-freed readers.

Fix this bug by temporarily incrementing the refcount of reused readers.

Signed-off-by: Patrick Steinhardt <ps@pks.im>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
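To illustrate the pattern the fix relies on, here is a minimal standalone sketch (not the reftable code itself): a reader borrowed from the old stack is pinned with an extra reference before the fallible reload, so the error path, which releases everything in the new reader list, cannot free it while the old stack still points at it. All names below (struct reader, reader_incref, reader_decref, reload_once) are illustrative stand-ins, not the actual reftable API.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical refcounted reader; a stand-in for struct reftable_reader. */
struct reader {
	int refcount;
	const char *name;
};

static struct reader *reader_new(const char *name)
{
	struct reader *r = calloc(1, sizeof(*r));
	r->refcount = 1;
	r->name = name;
	return r;
}

static void reader_incref(struct reader *r)
{
	r->refcount++;
}

static void reader_decref(struct reader *r)
{
	if (--r->refcount == 0)
		free(r);
}

/*
 * Rebuild the list of readers, reusing `reused` from the old stack. On
 * error, the cleanup below releases every entry of the new list --
 * including the reused one -- so it has to be pinned first.
 */
static int reload_once(struct reader *reused, int simulate_failure)
{
	struct reader *new_readers[1];
	size_t new_len = 0, i;
	int err = 0;

	/* Keep the borrowed reader alive across the fallible reload. */
	reader_incref(reused);
	new_readers[new_len++] = reused;

	if (simulate_failure) {
		err = -1;
		goto done;
	}

	/*
	 * Success: drop the temporary reference again and hand the list
	 * over, so the cleanup below no longer touches it.
	 */
	reader_decref(reused);
	new_len = 0;

done:
	for (i = 0; i < new_len; i++)
		reader_decref(new_readers[i]);
	return err;
}

int main(void)
{
	struct reader *r = reader_new("table");

	if (reload_once(r, 1) < 0)
		printf("reload failed; %s still valid (refcount=%d)\n",
		       r->name, r->refcount);

	reader_decref(r); /* the old stack's own reference */
	return 0;
}

Without the extra incref, the cleanup loop on the error path would drop the reader to a refcount of zero and free it even though the old stack still holds a pointer to it, which is exactly the use-after-free the commit fixes.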
Diffstat (limited to 'reftable/stack.c')
-rw-r--r--	reftable/stack.c	23
1 file changed, 23 insertions, 0 deletions
diff --git a/reftable/stack.c b/reftable/stack.c
index 0247222258..ce0a35216b 100644
--- a/reftable/stack.c
+++ b/reftable/stack.c
@@ -226,6 +226,8 @@ static int reftable_stack_reload_once(struct reftable_stack *st,
 {
 	size_t cur_len = !st->merged ? 0 : st->merged->readers_len;
 	struct reftable_reader **cur = stack_copy_readers(st, cur_len);
+	struct reftable_reader **reused = NULL;
+	size_t reused_len = 0, reused_alloc = 0;
 	size_t names_len = names_length(names);
 	struct reftable_reader **new_readers =
 		reftable_calloc(names_len, sizeof(*new_readers));
@@ -245,6 +247,18 @@ static int reftable_stack_reload_once(struct reftable_stack *st,
 		if (cur[i] && 0 == strcmp(cur[i]->name, name)) {
 			rd = cur[i];
 			cur[i] = NULL;
+
+			/*
+			 * When reloading the stack fails, we end up
+			 * releasing all new readers. This also
+			 * includes the reused readers, even though
+			 * they are still in use by the old stack. We
+			 * thus need to keep them alive here, which we
+			 * do by bumping their refcount.
+			 */
+			REFTABLE_ALLOC_GROW(reused, reused_len + 1, reused_alloc);
+			reused[reused_len++] = rd;
+			reftable_reader_incref(rd);
 			break;
 		}
 	}
@@ -301,10 +315,19 @@ static int reftable_stack_reload_once(struct reftable_stack *st,
 	new_readers = NULL;
 	new_readers_len = 0;
 
+	/*
+	 * Decrement the refcount of reused readers again. This only needs to
+	 * happen on the successful case, because on the unsuccessful one we
+	 * decrement their refcount via `new_readers`.
+	 */
+	for (i = 0; i < reused_len; i++)
+		reftable_reader_decref(reused[i]);
+
 done:
 	for (i = 0; i < new_readers_len; i++)
 		reftable_reader_decref(new_readers[i]);
 	reftable_free(new_readers);
+	reftable_free(reused);
 	reftable_free(cur);
 	strbuf_release(&table_path);
 	return err;