| field | value | date |
|---|---|---|
| author | Patrick Steinhardt <ps@pks.im> | 2024-08-08 16:06:39 +0200 |
| committer | Junio C Hamano <gitster@pobox.com> | 2024-08-08 10:14:42 -0700 |
| commit | 558f6fbeb1f194116231218b3e7496ceef4b9618 (patch) | |
| tree | 378c0cdbc17bddb6555ce881553cb2ace567354b /reftable/stack.c | |
| parent | 5f0ed603a1653f2394c468814bde4b0dca2cff45 (diff) | |
| download | git-558f6fbeb1f194116231218b3e7496ceef4b9618.tar.gz | |
reftable/stack: simplify tracking of table locks
When compacting tables, we store the locks of all tables we are about to
compact in the `table_locks` array. As we currently only ever compact all
tables in the user-provided range or none, we simply track those locks via
the indices of the respective tables in the merged stack.

This is about to change though, as we will introduce a mode where auto
compaction gracefully handles the case of already-locked files. In that mode
we may end up compacting only a subset of the user-supplied range of tables,
so the indices of the compacted tables will no longer necessarily match the
indices of their locks.

Refactor the code such that we track the number of acquired locks via a
separate variable. The resulting code is expected to perform the same, but
it makes the described change easier to implement.
Signed-off-by: Patrick Steinhardt <ps@pks.im>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
Diffstat (limited to 'reftable/stack.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | reftable/stack.c | 14 |

1 file changed, 7 insertions(+), 7 deletions(-)
```diff
diff --git a/reftable/stack.c b/reftable/stack.c
index e5959d2c76..07e7ffc6b9 100644
--- a/reftable/stack.c
+++ b/reftable/stack.c
@@ -1016,7 +1016,7 @@ static int stack_compact_range(struct reftable_stack *st,
 	struct lock_file *table_locks = NULL;
 	struct tempfile *new_table = NULL;
 	int is_empty_table = 0, err = 0;
-	size_t i;
+	size_t i, nlocks = 0;
 
 	if (first > last || (!expiry && first == last)) {
 		err = 0;
@@ -1051,7 +1051,7 @@ static int stack_compact_range(struct reftable_stack *st,
 	for (i = first; i <= last; i++) {
 		stack_filename(&table_name, st, reader_name(st->readers[i]));
 
-		err = hold_lock_file_for_update(&table_locks[i - first],
+		err = hold_lock_file_for_update(&table_locks[nlocks],
 						table_name.buf, LOCK_NO_DEREF);
 		if (err < 0) {
 			if (errno == EEXIST)
@@ -1066,7 +1066,7 @@ static int stack_compact_range(struct reftable_stack *st,
 		 * run into file descriptor exhaustion when we compress a lot
 		 * of tables.
 		 */
-		err = close_lock_file_gently(&table_locks[i - first]);
+		err = close_lock_file_gently(&table_locks[nlocks++]);
 		if (err < 0) {
 			err = REFTABLE_IO_ERROR;
 			goto done;
 		}
@@ -1183,8 +1183,8 @@ static int stack_compact_range(struct reftable_stack *st,
 	 * Delete the old tables. They may still be in use by concurrent
 	 * readers, so it is expected that unlinking tables may fail.
	 */
-	for (i = first; i <= last; i++) {
-		struct lock_file *table_lock = &table_locks[i - first];
+	for (i = 0; i < nlocks; i++) {
+		struct lock_file *table_lock = &table_locks[i];
 		char *table_path = get_locked_file_path(table_lock);
 		unlink(table_path);
 		free(table_path);
@@ -1192,8 +1192,8 @@
 
 done:
 	rollback_lock_file(&tables_list_lock);
-	for (i = first; table_locks && i <= last; i++)
-		rollback_lock_file(&table_locks[i - first]);
+	for (i = 0; table_locks && i < nlocks; i++)
+		rollback_lock_file(&table_locks[i]);
 	reftable_free(table_locks);
 	delete_tempfile(&new_table);
```
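
To make the bookkeeping pattern adopted here concrete, below is a minimal,
self-contained C sketch of the same idea. It is not the reftable code:
`try_acquire()` and `release()` are hypothetical stand-ins for
`hold_lock_file_for_update()` and `rollback_lock_file()`, and the simulated
failure on one table is invented for demonstration. The point is that each
acquired lock is appended at slot `nlocks` instead of being placed at
`i - first`, so cleanup touches exactly the locks that were taken, even when
the loop stops partway through the range:

```c
#include <stdio.h>
#include <stdlib.h>

struct lock {
	int held;
};

/*
 * Hypothetical stand-in for hold_lock_file_for_update(). To mimic the
 * upcoming graceful-handling mode, pretend table 4 is already locked
 * by a concurrent writer.
 */
static int try_acquire(struct lock *l, size_t table_index)
{
	if (table_index == 4)
		return -1;
	l->held = 1;
	printf("locked table %zu\n", table_index);
	return 0;
}

/* Hypothetical stand-in for rollback_lock_file(). */
static void release(struct lock *l)
{
	if (l->held) {
		l->held = 0;
		printf("released a lock\n");
	}
}

int main(void)
{
	size_t first = 2, last = 5;
	size_t i, nlocks = 0;
	struct lock *locks = calloc(last - first + 1, sizeof(*locks));
	if (!locks)
		return 1;

	for (i = first; i <= last; i++) {
		/*
		 * Append at slot `nlocks`, not `i - first`: if acquisition
		 * stops early, the filled slots are exactly [0, nlocks).
		 */
		if (try_acquire(&locks[nlocks], i) < 0)
			break;
		nlocks++;
	}

	/* Cleanup iterates over the locks actually taken, no more. */
	for (i = 0; i < nlocks; i++)
		release(&locks[i]);

	free(locks);
	return 0;
}
```

With index-based tracking, the cleanup loops would have to assume every slot
in the range was filled; counting acquisitions in `nlocks` removes that
assumption at the cost of one extra variable, which is exactly the trade-off
the commit message describes.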
