From 1cfd187fc12b1c82e4e0d0c86c580ee1e2d7e0ba Mon Sep 17 00:00:00 2001 From: Seyi Kuforiji Date: Thu, 24 Jul 2025 15:28:36 +0100 Subject: t/unit-tests: convert reftable stack test to use clar Adapt reftable stack test file to use clar by using clar assertions where necessary. This marks the end of all unit tests migrated away from the `unit-tests/t-*.c` pattern, there are no longer any files matching that glob. Remove the sanity check for `t-*.c` files to prevent Meson configuration errors during CI and local builds. Signed-off-by: Seyi Kuforiji Signed-off-by: Junio C Hamano --- t/unit-tests/u-reftable-stack.c | 1331 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 1331 insertions(+) create mode 100644 t/unit-tests/u-reftable-stack.c (limited to 't/unit-tests/u-reftable-stack.c') diff --git a/t/unit-tests/u-reftable-stack.c b/t/unit-tests/u-reftable-stack.c new file mode 100644 index 0000000000..0e3e3f4f4a --- /dev/null +++ b/t/unit-tests/u-reftable-stack.c @@ -0,0 +1,1331 @@ +/* +Copyright 2020 Google LLC + +Use of this source code is governed by a BSD-style +license that can be found in the LICENSE file or at +https://developers.google.com/open-source/licenses/bsd +*/ + +#define DISABLE_SIGN_COMPARE_WARNINGS + +#include "unit-test.h" +#include "dir.h" +#include "lib-reftable-clar.h" +#include "reftable/merged.h" +#include "reftable/reftable-error.h" +#include "reftable/stack.h" +#include "reftable/table.h" +#include "strbuf.h" +#include "tempfile.h" +#include + +static void clear_dir(const char *dirname) +{ + struct strbuf path = REFTABLE_BUF_INIT; + strbuf_addstr(&path, dirname); + remove_dir_recursively(&path, 0); + strbuf_release(&path); +} + +static int count_dir_entries(const char *dirname) +{ + DIR *dir = opendir(dirname); + int len = 0; + struct dirent *d; + if (!dir) + return 0; + + while ((d = readdir(dir))) { + /* + * Besides skipping over "." and "..", we also need to + * skip over other files that have a leading ".". This + * is due to behaviour of NFS, which will rename files + * to ".nfs*" to emulate delete-on-last-close. + * + * In any case this should be fine as the reftable + * library will never write files with leading dots + * anyway. + */ + if (starts_with(d->d_name, ".")) + continue; + len++; + } + closedir(dir); + return len; +} + +/* + * Work linenumber into the tempdir, so we can see which tests forget to + * cleanup. + */ +static char *get_tmp_template(int linenumber) +{ + const char *tmp = getenv("TMPDIR"); + static char template[1024]; + snprintf(template, sizeof(template) - 1, "%s/stack_test-%d.XXXXXX", + tmp ? 
tmp : "/tmp", linenumber); + return template; +} + +static char *get_tmp_dir(int linenumber) +{ + char *dir = get_tmp_template(linenumber); + cl_assert(mkdtemp(dir) != NULL); + return dir; +} + +void test_reftable_stack__read_file(void) +{ + char *fn = get_tmp_template(__LINE__); + struct tempfile *tmp = mks_tempfile(fn); + int fd = get_tempfile_fd(tmp); + char out[1024] = "line1\n\nline2\nline3"; + int n, err; + char **names = NULL; + const char *want[] = { "line1", "line2", "line3" }; + + cl_assert(fd > 0); + n = write_in_full(fd, out, strlen(out)); + cl_assert_equal_i(n, strlen(out)); + err = close(fd); + cl_assert(err >= 0); + + err = read_lines(fn, &names); + cl_assert(!err); + + for (size_t i = 0; names[i]; i++) + cl_assert_equal_s(want[i], names[i]); + free_names(names); + (void) remove(fn); + delete_tempfile(&tmp); +} + +static int write_test_ref(struct reftable_writer *wr, void *arg) +{ + struct reftable_ref_record *ref = arg; + cl_assert_equal_i(reftable_writer_set_limits(wr, + ref->update_index, ref->update_index), 0); + return reftable_writer_add_ref(wr, ref); +} + +static void write_n_ref_tables(struct reftable_stack *st, + size_t n) +{ + int disable_auto_compact; + + disable_auto_compact = st->opts.disable_auto_compact; + st->opts.disable_auto_compact = 1; + + for (size_t i = 0; i < n; i++) { + struct reftable_ref_record ref = { + .update_index = reftable_stack_next_update_index(st), + .value_type = REFTABLE_REF_VAL1, + }; + char buf[128]; + + snprintf(buf, sizeof(buf), "refs/heads/branch-%04"PRIuMAX, (uintmax_t)i); + ref.refname = buf; + cl_reftable_set_hash(ref.value.val1, i, REFTABLE_HASH_SHA1); + + cl_assert_equal_i(reftable_stack_add(st, + &write_test_ref, &ref), 0); + } + + st->opts.disable_auto_compact = disable_auto_compact; +} + +struct write_log_arg { + struct reftable_log_record *log; + uint64_t update_index; +}; + +static int write_test_log(struct reftable_writer *wr, void *arg) +{ + struct write_log_arg *wla = arg; + + cl_assert_equal_i(reftable_writer_set_limits(wr, + wla->update_index, + wla->update_index), 0); + return reftable_writer_add_log(wr, wla->log); +} + +void test_reftable_stack__add_one(void) +{ + char *dir = get_tmp_dir(__LINE__); + struct reftable_buf scratch = REFTABLE_BUF_INIT; + int mask = umask(002); + struct reftable_write_options opts = { + .default_permissions = 0660, + }; + struct reftable_stack *st = NULL; + struct reftable_ref_record ref = { + .refname = (char *) "HEAD", + .update_index = 1, + .value_type = REFTABLE_REF_SYMREF, + .value.symref = (char *) "master", + }; + struct reftable_ref_record dest = { 0 }; + struct stat stat_result = { 0 }; + int err; + + err = reftable_new_stack(&st, dir, &opts); + cl_assert(!err); + + err = reftable_stack_add(st, write_test_ref, &ref); + cl_assert(!err); + + err = reftable_stack_read_ref(st, ref.refname, &dest); + cl_assert(!err); + cl_assert(reftable_ref_record_equal(&ref, &dest, + REFTABLE_HASH_SIZE_SHA1)); + cl_assert(st->tables_len > 0); + +#ifndef GIT_WINDOWS_NATIVE + cl_assert_equal_i(reftable_buf_addstr(&scratch, dir), 0); + cl_assert_equal_i(reftable_buf_addstr(&scratch, + "/tables.list"), 0); + cl_assert_equal_i(stat(scratch.buf, &stat_result), 0); + cl_assert_equal_i((stat_result.st_mode & 0777), + opts.default_permissions); + + reftable_buf_reset(&scratch); + cl_assert_equal_i(reftable_buf_addstr(&scratch, dir), 0); + cl_assert_equal_i(reftable_buf_addstr(&scratch, "/"), 0); + /* do not try at home; not an external API for reftable. 
*/ + cl_assert(!reftable_buf_addstr(&scratch, st->tables[0]->name)); + err = stat(scratch.buf, &stat_result); + cl_assert(!err); + cl_assert_equal_i((stat_result.st_mode & 0777), + opts.default_permissions); +#else + (void) stat_result; +#endif + + reftable_ref_record_release(&dest); + reftable_stack_destroy(st); + reftable_buf_release(&scratch); + clear_dir(dir); + umask(mask); +} + +void test_reftable_stack__uptodate(void) +{ + struct reftable_write_options opts = { 0 }; + struct reftable_stack *st1 = NULL; + struct reftable_stack *st2 = NULL; + char *dir = get_tmp_dir(__LINE__); + + struct reftable_ref_record ref1 = { + .refname = (char *) "HEAD", + .update_index = 1, + .value_type = REFTABLE_REF_SYMREF, + .value.symref = (char *) "master", + }; + struct reftable_ref_record ref2 = { + .refname = (char *) "branch2", + .update_index = 2, + .value_type = REFTABLE_REF_SYMREF, + .value.symref = (char *) "master", + }; + + + /* simulate multi-process access to the same stack + by creating two stacks for the same directory. + */ + cl_assert_equal_i(reftable_new_stack(&st1, dir, &opts), 0); + cl_assert_equal_i(reftable_new_stack(&st2, dir, &opts), 0); + cl_assert_equal_i(reftable_stack_add(st1, write_test_ref, + &ref1), 0); + cl_assert_equal_i(reftable_stack_add(st2, write_test_ref, + &ref2), REFTABLE_OUTDATED_ERROR); + cl_assert_equal_i(reftable_stack_reload(st2), 0); + cl_assert_equal_i(reftable_stack_add(st2, write_test_ref, + &ref2), 0); + reftable_stack_destroy(st1); + reftable_stack_destroy(st2); + clear_dir(dir); +} + +void test_reftable_stack__transaction_api(void) +{ + char *dir = get_tmp_dir(__LINE__); + struct reftable_write_options opts = { 0 }; + struct reftable_stack *st = NULL; + struct reftable_addition *add = NULL; + + struct reftable_ref_record ref = { + .refname = (char *) "HEAD", + .update_index = 1, + .value_type = REFTABLE_REF_SYMREF, + .value.symref = (char *) "master", + }; + struct reftable_ref_record dest = { 0 }; + + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); + + reftable_addition_destroy(add); + + cl_assert_equal_i(reftable_stack_new_addition(&add, st, 0), 0); + cl_assert_equal_i(reftable_addition_add(add, write_test_ref, + &ref), 0); + cl_assert_equal_i(reftable_addition_commit(add), 0); + + reftable_addition_destroy(add); + + cl_assert_equal_i(reftable_stack_read_ref(st, ref.refname, + &dest), 0); + cl_assert_equal_i(REFTABLE_REF_SYMREF, dest.value_type); + cl_assert(reftable_ref_record_equal(&ref, &dest, + REFTABLE_HASH_SIZE_SHA1) != 0); + + reftable_ref_record_release(&dest); + reftable_stack_destroy(st); + clear_dir(dir); +} + +void test_reftable_stack__transaction_with_reload(void) +{ + char *dir = get_tmp_dir(__LINE__); + struct reftable_stack *st1 = NULL, *st2 = NULL; + struct reftable_addition *add = NULL; + struct reftable_ref_record refs[2] = { + { + .refname = (char *) "refs/heads/a", + .update_index = 1, + .value_type = REFTABLE_REF_VAL1, + .value.val1 = { '1' }, + }, + { + .refname = (char *) "refs/heads/b", + .update_index = 2, + .value_type = REFTABLE_REF_VAL1, + .value.val1 = { '1' }, + }, + }; + struct reftable_ref_record ref = { 0 }; + + cl_assert_equal_i(reftable_new_stack(&st1, dir, NULL), 0); + cl_assert_equal_i(reftable_new_stack(&st2, dir, NULL), 0); + cl_assert_equal_i(reftable_stack_new_addition(&add, st1, 0), 0); + cl_assert_equal_i(reftable_addition_add(add, write_test_ref, + &refs[0]), 0); + cl_assert_equal_i(reftable_addition_commit(add), 0); + reftable_addition_destroy(add); + + /* + * The second stack is now outdated, 
which we should notice. We do not + * create the addition and lock the stack by default, but allow the + * reload to happen when REFTABLE_STACK_NEW_ADDITION_RELOAD is set. + */ + cl_assert_equal_i(reftable_stack_new_addition(&add, st2, 0), + REFTABLE_OUTDATED_ERROR); + cl_assert_equal_i(reftable_stack_new_addition(&add, st2, + REFTABLE_STACK_NEW_ADDITION_RELOAD), 0); + cl_assert_equal_i(reftable_addition_add(add, write_test_ref, + &refs[1]), 0); + cl_assert_equal_i(reftable_addition_commit(add), 0); + reftable_addition_destroy(add); + + for (size_t i = 0; i < ARRAY_SIZE(refs); i++) { + cl_assert_equal_i(reftable_stack_read_ref(st2, + refs[i].refname, &ref) , 0); + cl_assert(reftable_ref_record_equal(&refs[i], &ref, + REFTABLE_HASH_SIZE_SHA1) != 0); + } + + reftable_ref_record_release(&ref); + reftable_stack_destroy(st1); + reftable_stack_destroy(st2); + clear_dir(dir); +} + +void test_reftable_stack__transaction_api_performs_auto_compaction(void) +{ + char *dir = get_tmp_dir(__LINE__); + struct reftable_write_options opts = {0}; + struct reftable_addition *add = NULL; + struct reftable_stack *st = NULL; + size_t n = 20; + + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); + + for (size_t i = 0; i <= n; i++) { + struct reftable_ref_record ref = { + .update_index = reftable_stack_next_update_index(st), + .value_type = REFTABLE_REF_SYMREF, + .value.symref = (char *) "master", + }; + char name[100]; + + snprintf(name, sizeof(name), "branch%04"PRIuMAX, (uintmax_t)i); + ref.refname = name; + + /* + * Disable auto-compaction for all but the last runs. Like this + * we can ensure that we indeed honor this setting and have + * better control over when exactly auto compaction runs. + */ + st->opts.disable_auto_compact = i != n; + + cl_assert_equal_i(reftable_stack_new_addition(&add, + st, 0), 0); + cl_assert_equal_i(reftable_addition_add(add, + write_test_ref, &ref), 0); + cl_assert_equal_i(reftable_addition_commit(add), 0); + + reftable_addition_destroy(add); + + /* + * The stack length should grow continuously for all runs where + * auto compaction is disabled. When enabled, we should merge + * all tables in the stack. + */ + if (i != n) + cl_assert_equal_i(st->merged->tables_len, i + 1); + else + cl_assert_equal_i(st->merged->tables_len, 1); + } + + reftable_stack_destroy(st); + clear_dir(dir); +} + +void test_reftable_stack__auto_compaction_fails_gracefully(void) +{ + struct reftable_ref_record ref = { + .refname = (char *) "refs/heads/master", + .update_index = 1, + .value_type = REFTABLE_REF_VAL1, + .value.val1 = {0x01}, + }; + struct reftable_write_options opts = { 0 }; + struct reftable_stack *st; + struct reftable_buf table_path = REFTABLE_BUF_INIT; + char *dir = get_tmp_dir(__LINE__); + int err; + + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); + cl_assert_equal_i(reftable_stack_add(st, write_test_ref, + &ref), 0); + cl_assert_equal_i(st->merged->tables_len, 1); + cl_assert_equal_i(st->stats.attempts, 0); + cl_assert_equal_i(st->stats.failures, 0); + + /* + * Lock the newly written table such that it cannot be compacted. + * Adding a new table to the stack should not be impacted by this, even + * though auto-compaction will now fail. 
+ */ + cl_assert(!reftable_buf_addstr(&table_path, dir)); + cl_assert(!reftable_buf_addstr(&table_path, "/")); + cl_assert(!reftable_buf_addstr(&table_path, + st->tables[0]->name)); + cl_assert(!reftable_buf_addstr(&table_path, ".lock")); + write_file_buf(table_path.buf, "", 0); + + ref.update_index = 2; + err = reftable_stack_add(st, write_test_ref, &ref); + cl_assert(!err); + cl_assert_equal_i(st->merged->tables_len, 2); + cl_assert_equal_i(st->stats.attempts, 1); + cl_assert_equal_i(st->stats.failures, 1); + + reftable_stack_destroy(st); + reftable_buf_release(&table_path); + clear_dir(dir); +} + +static int write_error(struct reftable_writer *wr UNUSED, void *arg) +{ + return *((int *)arg); +} + +void test_reftable_stack__update_index_check(void) +{ + char *dir = get_tmp_dir(__LINE__); + struct reftable_write_options opts = { 0 }; + struct reftable_stack *st = NULL; + struct reftable_ref_record ref1 = { + .refname = (char *) "name1", + .update_index = 1, + .value_type = REFTABLE_REF_SYMREF, + .value.symref = (char *) "master", + }; + struct reftable_ref_record ref2 = { + .refname = (char *) "name2", + .update_index = 1, + .value_type = REFTABLE_REF_SYMREF, + .value.symref = (char *) "master", + }; + + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); + cl_assert_equal_i(reftable_stack_add(st, write_test_ref, + &ref1), 0); + cl_assert_equal_i(reftable_stack_add(st, write_test_ref, + &ref2), REFTABLE_API_ERROR); + reftable_stack_destroy(st); + clear_dir(dir); +} + +void test_reftable_stack__lock_failure(void) +{ + char *dir = get_tmp_dir(__LINE__); + struct reftable_write_options opts = { 0 }; + struct reftable_stack *st = NULL; + int i; + + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); + for (i = -1; i != REFTABLE_EMPTY_TABLE_ERROR; i--) + cl_assert_equal_i(reftable_stack_add(st, write_error, + &i), i); + + reftable_stack_destroy(st); + clear_dir(dir); +} + +void test_reftable_stack__add(void) +{ + struct reftable_write_options opts = { + .exact_log_message = 1, + .default_permissions = 0660, + .disable_auto_compact = 1, + }; + struct reftable_stack *st = NULL; + char *dir = get_tmp_dir(__LINE__); + struct reftable_ref_record refs[2] = { 0 }; + struct reftable_log_record logs[2] = { 0 }; + struct reftable_buf path = REFTABLE_BUF_INIT; + struct stat stat_result; + size_t i, N = ARRAY_SIZE(refs); + int err; + + err = reftable_new_stack(&st, dir, &opts); + cl_assert(!err); + + for (i = 0; i < N; i++) { + char buf[256]; + snprintf(buf, sizeof(buf), "branch%02"PRIuMAX, (uintmax_t)i); + refs[i].refname = xstrdup(buf); + refs[i].update_index = i + 1; + refs[i].value_type = REFTABLE_REF_VAL1; + cl_reftable_set_hash(refs[i].value.val1, i, + REFTABLE_HASH_SHA1); + + logs[i].refname = xstrdup(buf); + logs[i].update_index = N + i + 1; + logs[i].value_type = REFTABLE_LOG_UPDATE; + logs[i].value.update.email = xstrdup("identity@invalid"); + cl_reftable_set_hash(logs[i].value.update.new_hash, i, + REFTABLE_HASH_SHA1); + } + + for (i = 0; i < N; i++) + cl_assert_equal_i(reftable_stack_add(st, write_test_ref, + &refs[i]), 0); + + for (i = 0; i < N; i++) { + struct write_log_arg arg = { + .log = &logs[i], + .update_index = reftable_stack_next_update_index(st), + }; + cl_assert_equal_i(reftable_stack_add(st, write_test_log, + &arg), 0); + } + + cl_assert_equal_i(reftable_stack_compact_all(st, NULL), 0); + + for (i = 0; i < N; i++) { + struct reftable_ref_record dest = { 0 }; + + cl_assert_equal_i(reftable_stack_read_ref(st, + refs[i].refname, &dest), 0); + 
cl_assert(reftable_ref_record_equal(&dest, refs + i, + REFTABLE_HASH_SIZE_SHA1) != 0); + reftable_ref_record_release(&dest); + } + + for (i = 0; i < N; i++) { + struct reftable_log_record dest = { 0 }; + cl_assert_equal_i(reftable_stack_read_log(st, + refs[i].refname, &dest), 0); + cl_assert(reftable_log_record_equal(&dest, logs + i, + REFTABLE_HASH_SIZE_SHA1) != 0); + reftable_log_record_release(&dest); + } + +#ifndef GIT_WINDOWS_NATIVE + cl_assert_equal_i(reftable_buf_addstr(&path, dir), 0); + cl_assert_equal_i(reftable_buf_addstr(&path, "/tables.list"), 0); + cl_assert_equal_i(stat(path.buf, &stat_result), 0); + cl_assert_equal_i((stat_result.st_mode & 0777), opts.default_permissions); + + reftable_buf_reset(&path); + cl_assert_equal_i(reftable_buf_addstr(&path, dir), 0); + cl_assert_equal_i(reftable_buf_addstr(&path, "/"), 0); + /* do not try at home; not an external API for reftable. */ + cl_assert(!reftable_buf_addstr(&path, st->tables[0]->name)); + err = stat(path.buf, &stat_result); + cl_assert(!err); + cl_assert_equal_i((stat_result.st_mode & 0777), + opts.default_permissions); +#else + (void) stat_result; +#endif + + /* cleanup */ + reftable_stack_destroy(st); + for (i = 0; i < N; i++) { + reftable_ref_record_release(&refs[i]); + reftable_log_record_release(&logs[i]); + } + reftable_buf_release(&path); + clear_dir(dir); +} + +void test_reftable_stack__iterator(void) +{ + struct reftable_write_options opts = { 0 }; + struct reftable_stack *st = NULL; + char *dir = get_tmp_dir(__LINE__); + struct reftable_ref_record refs[10] = { 0 }; + struct reftable_log_record logs[10] = { 0 }; + struct reftable_iterator it = { 0 }; + size_t N = ARRAY_SIZE(refs), i; + int err; + + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); + + for (i = 0; i < N; i++) { + refs[i].refname = xstrfmt("branch%02"PRIuMAX, (uintmax_t)i); + refs[i].update_index = i + 1; + refs[i].value_type = REFTABLE_REF_VAL1; + cl_reftable_set_hash(refs[i].value.val1, i, + REFTABLE_HASH_SHA1); + + logs[i].refname = xstrfmt("branch%02"PRIuMAX, (uintmax_t)i); + logs[i].update_index = i + 1; + logs[i].value_type = REFTABLE_LOG_UPDATE; + logs[i].value.update.email = xstrdup("johndoe@invalid"); + logs[i].value.update.message = xstrdup("commit\n"); + cl_reftable_set_hash(logs[i].value.update.new_hash, i, + REFTABLE_HASH_SHA1); + } + + for (i = 0; i < N; i++) + cl_assert_equal_i(reftable_stack_add(st, + write_test_ref, &refs[i]), 0); + + for (i = 0; i < N; i++) { + struct write_log_arg arg = { + .log = &logs[i], + .update_index = reftable_stack_next_update_index(st), + }; + + cl_assert_equal_i(reftable_stack_add(st, + write_test_log, &arg), 0); + } + + reftable_stack_init_ref_iterator(st, &it); + reftable_iterator_seek_ref(&it, refs[0].refname); + for (i = 0; ; i++) { + struct reftable_ref_record ref = { 0 }; + + err = reftable_iterator_next_ref(&it, &ref); + if (err > 0) + break; + cl_assert(!err); + cl_assert(reftable_ref_record_equal(&ref, &refs[i], + REFTABLE_HASH_SIZE_SHA1) != 0); + reftable_ref_record_release(&ref); + } + cl_assert_equal_i(i, N); + + reftable_iterator_destroy(&it); + + cl_assert_equal_i(reftable_stack_init_log_iterator(st, &it), 0); + + reftable_iterator_seek_log(&it, logs[0].refname); + for (i = 0; ; i++) { + struct reftable_log_record log = { 0 }; + + err = reftable_iterator_next_log(&it, &log); + if (err > 0) + break; + cl_assert(!err); + cl_assert(reftable_log_record_equal(&log, &logs[i], + REFTABLE_HASH_SIZE_SHA1) != 0); + reftable_log_record_release(&log); + } + cl_assert_equal_i(i, N); + + 
reftable_stack_destroy(st); + reftable_iterator_destroy(&it); + for (i = 0; i < N; i++) { + reftable_ref_record_release(&refs[i]); + reftable_log_record_release(&logs[i]); + } + clear_dir(dir); +} + +void test_reftable_stack__log_normalize(void) +{ + struct reftable_write_options opts = { + 0, + }; + struct reftable_stack *st = NULL; + char *dir = get_tmp_dir(__LINE__); + struct reftable_log_record input = { + .refname = (char *) "branch", + .update_index = 1, + .value_type = REFTABLE_LOG_UPDATE, + .value = { + .update = { + .new_hash = { 1 }, + .old_hash = { 2 }, + }, + }, + }; + struct reftable_log_record dest = { + .update_index = 0, + }; + struct write_log_arg arg = { + .log = &input, + .update_index = 1, + }; + + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); + + input.value.update.message = (char *) "one\ntwo"; + cl_assert_equal_i(reftable_stack_add(st, write_test_log, + &arg), REFTABLE_API_ERROR); + + input.value.update.message = (char *) "one"; + cl_assert_equal_i(reftable_stack_add(st, write_test_log, + &arg), 0); + cl_assert_equal_i(reftable_stack_read_log(st, input.refname, + &dest), 0); + cl_assert_equal_s(dest.value.update.message, "one\n"); + + input.value.update.message = (char *) "two\n"; + arg.update_index = 2; + cl_assert_equal_i(reftable_stack_add(st, write_test_log, + &arg), 0); + cl_assert_equal_i(reftable_stack_read_log(st, input.refname, + &dest), 0); + cl_assert_equal_s(dest.value.update.message, "two\n"); + + /* cleanup */ + reftable_stack_destroy(st); + reftable_log_record_release(&dest); + clear_dir(dir); +} + +void test_reftable_stack__tombstone(void) +{ + char *dir = get_tmp_dir(__LINE__); + struct reftable_write_options opts = { 0 }; + struct reftable_stack *st = NULL; + struct reftable_ref_record refs[2] = { 0 }; + struct reftable_log_record logs[2] = { 0 }; + size_t i, N = ARRAY_SIZE(refs); + struct reftable_ref_record dest = { 0 }; + struct reftable_log_record log_dest = { 0 }; + + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); + + /* even entries add the refs, odd entries delete them. */ + for (i = 0; i < N; i++) { + const char *buf = "branch"; + refs[i].refname = xstrdup(buf); + refs[i].update_index = i + 1; + if (i % 2 == 0) { + refs[i].value_type = REFTABLE_REF_VAL1; + cl_reftable_set_hash(refs[i].value.val1, i, + REFTABLE_HASH_SHA1); + } + + logs[i].refname = xstrdup(buf); + /* + * update_index is part of the key so should be constant. + * The value itself should be less than the writer's upper + * limit. 
+ */ + logs[i].update_index = 1; + if (i % 2 == 0) { + logs[i].value_type = REFTABLE_LOG_UPDATE; + cl_reftable_set_hash(logs[i].value.update.new_hash, i, REFTABLE_HASH_SHA1); + logs[i].value.update.email = + xstrdup("identity@invalid"); + } + } + for (i = 0; i < N; i++) + cl_assert_equal_i(reftable_stack_add(st, write_test_ref, &refs[i]), 0); + + for (i = 0; i < N; i++) { + struct write_log_arg arg = { + .log = &logs[i], + .update_index = reftable_stack_next_update_index(st), + }; + cl_assert_equal_i(reftable_stack_add(st, + write_test_log, &arg), 0); + } + + cl_assert_equal_i(reftable_stack_read_ref(st, "branch", + &dest), 1); + reftable_ref_record_release(&dest); + + cl_assert_equal_i(reftable_stack_read_log(st, "branch", + &log_dest), 1); + reftable_log_record_release(&log_dest); + + cl_assert_equal_i(reftable_stack_compact_all(st, NULL), 0); + cl_assert_equal_i(reftable_stack_read_ref(st, "branch", + &dest), 1); + cl_assert_equal_i(reftable_stack_read_log(st, "branch", + &log_dest), 1); + reftable_ref_record_release(&dest); + reftable_log_record_release(&log_dest); + + /* cleanup */ + reftable_stack_destroy(st); + for (i = 0; i < N; i++) { + reftable_ref_record_release(&refs[i]); + reftable_log_record_release(&logs[i]); + } + clear_dir(dir); +} + +void test_reftable_stack__hash_id(void) +{ + char *dir = get_tmp_dir(__LINE__); + struct reftable_write_options opts = { 0 }; + struct reftable_stack *st = NULL; + + struct reftable_ref_record ref = { + .refname = (char *) "master", + .value_type = REFTABLE_REF_SYMREF, + .value.symref = (char *) "target", + .update_index = 1, + }; + struct reftable_write_options opts32 = { .hash_id = REFTABLE_HASH_SHA256 }; + struct reftable_stack *st32 = NULL; + struct reftable_write_options opts_default = { 0 }; + struct reftable_stack *st_default = NULL; + struct reftable_ref_record dest = { 0 }; + + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); + cl_assert_equal_i(reftable_stack_add(st, write_test_ref, + &ref), 0); + + /* can't read it with the wrong hash ID. */ + cl_assert_equal_i(reftable_new_stack(&st32, dir, + &opts32), REFTABLE_FORMAT_ERROR); + + /* check that we can read it back with default opts too. 
*/ + cl_assert_equal_i(reftable_new_stack(&st_default, dir, + &opts_default), 0); + cl_assert_equal_i(reftable_stack_read_ref(st_default, "master", + &dest), 0); + cl_assert(reftable_ref_record_equal(&ref, &dest, + REFTABLE_HASH_SIZE_SHA1) != 0); + reftable_ref_record_release(&dest); + reftable_stack_destroy(st); + reftable_stack_destroy(st_default); + clear_dir(dir); +} + +void test_reftable_stack__suggest_compaction_segment(void) +{ + uint64_t sizes[] = { 512, 64, 17, 16, 9, 9, 9, 16, 2, 16 }; + struct segment min = + suggest_compaction_segment(sizes, ARRAY_SIZE(sizes), 2); + cl_assert_equal_i(min.start, 1); + cl_assert_equal_i(min.end, 10); +} + +void test_reftable_stack__suggest_compaction_segment_nothing(void) +{ + uint64_t sizes[] = { 64, 32, 16, 8, 4, 2 }; + struct segment result = + suggest_compaction_segment(sizes, ARRAY_SIZE(sizes), 2); + cl_assert_equal_i(result.start, result.end); +} + +void test_reftable_stack__reflog_expire(void) +{ + char *dir = get_tmp_dir(__LINE__); + struct reftable_write_options opts = { 0 }; + struct reftable_stack *st = NULL; + struct reftable_log_record logs[20] = { 0 }; + size_t i, N = ARRAY_SIZE(logs) - 1; + struct reftable_log_expiry_config expiry = { + .time = 10, + }; + struct reftable_log_record log = { 0 }; + + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); + + for (i = 1; i <= N; i++) { + char buf[256]; + snprintf(buf, sizeof(buf), "branch%02"PRIuMAX, (uintmax_t)i); + + logs[i].refname = xstrdup(buf); + logs[i].update_index = i; + logs[i].value_type = REFTABLE_LOG_UPDATE; + logs[i].value.update.time = i; + logs[i].value.update.email = xstrdup("identity@invalid"); + cl_reftable_set_hash(logs[i].value.update.new_hash, i, + REFTABLE_HASH_SHA1); + } + + for (i = 1; i <= N; i++) { + struct write_log_arg arg = { + .log = &logs[i], + .update_index = reftable_stack_next_update_index(st), + }; + cl_assert_equal_i(reftable_stack_add(st, write_test_log, + &arg), 0); + } + + cl_assert_equal_i(reftable_stack_compact_all(st, NULL), 0); + cl_assert_equal_i(reftable_stack_compact_all(st, &expiry), 0); + cl_assert_equal_i(reftable_stack_read_log(st, logs[9].refname, + &log), 1); + cl_assert_equal_i(reftable_stack_read_log(st, logs[11].refname, + &log), 0); + + expiry.min_update_index = 15; + cl_assert_equal_i(reftable_stack_compact_all(st, &expiry), 0); + cl_assert_equal_i(reftable_stack_read_log(st, logs[14].refname, + &log), 1); + cl_assert_equal_i(reftable_stack_read_log(st, logs[16].refname, + &log), 0); + + /* cleanup */ + reftable_stack_destroy(st); + for (i = 0; i <= N; i++) + reftable_log_record_release(&logs[i]); + clear_dir(dir); + reftable_log_record_release(&log); +} + +static int write_nothing(struct reftable_writer *wr, void *arg UNUSED) +{ + cl_assert_equal_i(reftable_writer_set_limits(wr, 1, 1), 0); + return 0; +} + +void test_reftable_stack__empty_add(void) +{ + struct reftable_write_options opts = { 0 }; + struct reftable_stack *st = NULL; + char *dir = get_tmp_dir(__LINE__); + struct reftable_stack *st2 = NULL; + + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); + cl_assert_equal_i(reftable_stack_add(st, write_nothing, + NULL), 0); + cl_assert_equal_i(reftable_new_stack(&st2, dir, &opts), 0); + clear_dir(dir); + reftable_stack_destroy(st); + reftable_stack_destroy(st2); +} + +static int fastlogN(uint64_t sz, uint64_t N) +{ + int l = 0; + if (sz == 0) + return 0; + for (; sz; sz /= N) + l++; + return l - 1; +} + +void test_reftable_stack__auto_compaction(void) +{ + struct reftable_write_options opts = { + 
.disable_auto_compact = 1, + }; + struct reftable_stack *st = NULL; + char *dir = get_tmp_dir(__LINE__); + size_t i, N = 100; + int err; + + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); + + for (i = 0; i < N; i++) { + char name[100]; + struct reftable_ref_record ref = { + .refname = name, + .update_index = reftable_stack_next_update_index(st), + .value_type = REFTABLE_REF_SYMREF, + .value.symref = (char *) "master", + }; + snprintf(name, sizeof(name), "branch%04"PRIuMAX, (uintmax_t)i); + + err = reftable_stack_add(st, write_test_ref, &ref); + cl_assert(!err); + + err = reftable_stack_auto_compact(st); + cl_assert(!err); + cl_assert(i < 2 || st->merged->tables_len < 2 * fastlogN(i, 2)); + } + + cl_assert(reftable_stack_compaction_stats(st)->entries_written < + (uint64_t)(N * fastlogN(N, 2))); + + reftable_stack_destroy(st); + clear_dir(dir); +} + +void test_reftable_stack__auto_compaction_factor(void) +{ + struct reftable_write_options opts = { + .auto_compaction_factor = 5, + }; + struct reftable_stack *st = NULL; + char *dir = get_tmp_dir(__LINE__); + size_t N = 100; + int err; + + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); + + for (size_t i = 0; i < N; i++) { + char name[20]; + struct reftable_ref_record ref = { + .refname = name, + .update_index = reftable_stack_next_update_index(st), + .value_type = REFTABLE_REF_VAL1, + }; + xsnprintf(name, sizeof(name), "branch%04"PRIuMAX, (uintmax_t)i); + + err = reftable_stack_add(st, &write_test_ref, &ref); + cl_assert(!err); + + cl_assert(i < 5 || st->merged->tables_len < 5 * fastlogN(i, 5)); + } + + reftable_stack_destroy(st); + clear_dir(dir); +} + +void test_reftable_stack__auto_compaction_with_locked_tables(void) +{ + struct reftable_write_options opts = { + .disable_auto_compact = 1, + }; + struct reftable_stack *st = NULL; + struct reftable_buf buf = REFTABLE_BUF_INIT; + char *dir = get_tmp_dir(__LINE__); + int err; + + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); + + write_n_ref_tables(st, 5); + cl_assert_equal_i(st->merged->tables_len, 5); + + /* + * Given that all tables we have written should be roughly the same + * size, we expect that auto-compaction will want to compact all of the + * tables. Locking any of the tables will keep it from doing so. + */ + cl_assert(!reftable_buf_addstr(&buf, dir)); + cl_assert(!reftable_buf_addstr(&buf, "/")); + cl_assert(!reftable_buf_addstr(&buf, st->tables[2]->name)); + cl_assert(!reftable_buf_addstr(&buf, ".lock")); + write_file_buf(buf.buf, "", 0); + + /* + * When parts of the stack are locked, then auto-compaction does a best + * effort compaction of those tables which aren't locked. So while this + * would in theory compact all tables, due to the preexisting lock we + * only compact the newest two tables. 
+ */ + err = reftable_stack_auto_compact(st); + cl_assert(!err); + cl_assert_equal_i(st->stats.failures, 0); + cl_assert_equal_i(st->merged->tables_len, 4); + + reftable_stack_destroy(st); + reftable_buf_release(&buf); + clear_dir(dir); +} + +void test_reftable_stack__add_performs_auto_compaction(void) +{ + struct reftable_write_options opts = { 0 }; + struct reftable_stack *st = NULL; + char *dir = get_tmp_dir(__LINE__); + size_t i, n = 20; + + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); + + for (i = 0; i <= n; i++) { + struct reftable_ref_record ref = { + .update_index = reftable_stack_next_update_index(st), + .value_type = REFTABLE_REF_SYMREF, + .value.symref = (char *) "master", + }; + char buf[128]; + + /* + * Disable auto-compaction for all but the last runs. Like this + * we can ensure that we indeed honor this setting and have + * better control over when exactly auto compaction runs. + */ + st->opts.disable_auto_compact = i != n; + + snprintf(buf, sizeof(buf), "branch-%04"PRIuMAX, (uintmax_t)i); + ref.refname = buf; + + cl_assert_equal_i(reftable_stack_add(st, + write_test_ref, &ref), 0); + + /* + * The stack length should grow continuously for all runs where + * auto compaction is disabled. When enabled, we should merge + * all tables in the stack. + */ + if (i != n) + cl_assert_equal_i(st->merged->tables_len, i + 1); + else + cl_assert_equal_i(st->merged->tables_len, 1); + } + + reftable_stack_destroy(st); + clear_dir(dir); +} + +void test_reftable_stack__compaction_with_locked_tables(void) +{ + struct reftable_write_options opts = { + .disable_auto_compact = 1, + }; + struct reftable_stack *st = NULL; + struct reftable_buf buf = REFTABLE_BUF_INIT; + char *dir = get_tmp_dir(__LINE__); + int err; + + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); + + write_n_ref_tables(st, 3); + cl_assert_equal_i(st->merged->tables_len, 3); + + /* Lock one of the tables that we're about to compact. */ + cl_assert(!reftable_buf_addstr(&buf, dir)); + cl_assert(!reftable_buf_addstr(&buf, "/")); + cl_assert(!reftable_buf_addstr(&buf, st->tables[1]->name)); + cl_assert(!reftable_buf_addstr(&buf, ".lock")); + write_file_buf(buf.buf, "", 0); + + /* + * Compaction is expected to fail given that we were not able to + * compact all tables. + */ + err = reftable_stack_compact_all(st, NULL); + cl_assert_equal_i(err, REFTABLE_LOCK_ERROR); + cl_assert_equal_i(st->stats.failures, 1); + cl_assert_equal_i(st->merged->tables_len, 3); + + reftable_stack_destroy(st); + reftable_buf_release(&buf); + clear_dir(dir); +} + +void test_reftable_stack__compaction_concurrent(void) +{ + struct reftable_write_options opts = { 0 }; + struct reftable_stack *st1 = NULL, *st2 = NULL; + char *dir = get_tmp_dir(__LINE__); + + cl_assert_equal_i(reftable_new_stack(&st1, dir, &opts), 0); + write_n_ref_tables(st1, 3); + + cl_assert_equal_i(reftable_new_stack(&st2, dir, &opts), 0); + cl_assert_equal_i(reftable_stack_compact_all(st1, NULL), 0); + + reftable_stack_destroy(st1); + reftable_stack_destroy(st2); + + cl_assert_equal_i(count_dir_entries(dir), 2); + clear_dir(dir); +} + +static void unclean_stack_close(struct reftable_stack *st) +{ + /* break abstraction boundary to simulate unclean shutdown. 
*/ + for (size_t i = 0; i < st->tables_len; i++) + reftable_table_decref(st->tables[i]); + st->tables_len = 0; + REFTABLE_FREE_AND_NULL(st->tables); +} + +void test_reftable_stack__compaction_concurrent_clean(void) +{ + struct reftable_write_options opts = { 0 }; + struct reftable_stack *st1 = NULL, *st2 = NULL, *st3 = NULL; + char *dir = get_tmp_dir(__LINE__); + + cl_assert_equal_i(reftable_new_stack(&st1, dir, &opts), 0); + write_n_ref_tables(st1, 3); + + cl_assert_equal_i(reftable_new_stack(&st2, dir, &opts), 0); + cl_assert_equal_i(reftable_stack_compact_all(st1, NULL), 0); + + unclean_stack_close(st1); + unclean_stack_close(st2); + + cl_assert_equal_i(reftable_new_stack(&st3, dir, &opts), 0); + cl_assert_equal_i(reftable_stack_clean(st3), 0); + cl_assert_equal_i(count_dir_entries(dir), 2); + + reftable_stack_destroy(st1); + reftable_stack_destroy(st2); + reftable_stack_destroy(st3); + + clear_dir(dir); +} + +void test_reftable_stack__read_across_reload(void) +{ + struct reftable_write_options opts = { 0 }; + struct reftable_stack *st1 = NULL, *st2 = NULL; + struct reftable_ref_record rec = { 0 }; + struct reftable_iterator it = { 0 }; + char *dir = get_tmp_dir(__LINE__); + int err; + + /* Create a first stack and set up an iterator for it. */ + cl_assert_equal_i(reftable_new_stack(&st1, dir, &opts), 0); + write_n_ref_tables(st1, 2); + cl_assert_equal_i(st1->merged->tables_len, 2); + reftable_stack_init_ref_iterator(st1, &it); + cl_assert_equal_i(reftable_iterator_seek_ref(&it, ""), 0); + + /* Set up a second stack for the same directory and compact it. */ + err = reftable_new_stack(&st2, dir, &opts); + cl_assert(!err); + cl_assert_equal_i(st2->merged->tables_len, 2); + err = reftable_stack_compact_all(st2, NULL); + cl_assert(!err); + cl_assert_equal_i(st2->merged->tables_len, 1); + + /* + * Verify that we can continue to use the old iterator even after we + * have reloaded its stack. + */ + err = reftable_stack_reload(st1); + cl_assert(!err); + cl_assert_equal_i(st1->merged->tables_len, 1); + err = reftable_iterator_next_ref(&it, &rec); + cl_assert(!err); + cl_assert_equal_s(rec.refname, "refs/heads/branch-0000"); + err = reftable_iterator_next_ref(&it, &rec); + cl_assert(!err); + cl_assert_equal_s(rec.refname, "refs/heads/branch-0001"); + err = reftable_iterator_next_ref(&it, &rec); + cl_assert(err > 0); + + reftable_ref_record_release(&rec); + reftable_iterator_destroy(&it); + reftable_stack_destroy(st1); + reftable_stack_destroy(st2); + clear_dir(dir); +} + +void test_reftable_stack__reload_with_missing_table(void) +{ + struct reftable_write_options opts = { 0 }; + struct reftable_stack *st = NULL; + struct reftable_ref_record rec = { 0 }; + struct reftable_iterator it = { 0 }; + struct reftable_buf table_path = REFTABLE_BUF_INIT, content = REFTABLE_BUF_INIT; + char *dir = get_tmp_dir(__LINE__); + int err; + + /* Create a first stack and set up an iterator for it. */ + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); + write_n_ref_tables(st, 2); + cl_assert_equal_i(st->merged->tables_len, 2); + reftable_stack_init_ref_iterator(st, &it); + cl_assert_equal_i(reftable_iterator_seek_ref(&it, ""), 0); + + /* + * Update the tables.list file with some garbage data, while reusing + * our old tables. This should trigger a partial reload of the stack, + * where we try to reuse our old tables. 
+ */ + cl_assert(!reftable_buf_addstr(&content, st->tables[0]->name)); + cl_assert(!reftable_buf_addstr(&content, "\n")); + cl_assert(!reftable_buf_addstr(&content, st->tables[1]->name)); + cl_assert(!reftable_buf_addstr(&content, "\n")); + cl_assert(!reftable_buf_addstr(&content, "garbage\n")); + cl_assert(!reftable_buf_addstr(&table_path, st->list_file)); + cl_assert(!reftable_buf_addstr(&table_path, ".lock")); + write_file_buf(table_path.buf, content.buf, content.len); + cl_assert_equal_i(rename(table_path.buf, st->list_file), 0); + + err = reftable_stack_reload(st); + cl_assert_equal_i(err, -4); + cl_assert_equal_i(st->merged->tables_len, 2); + + /* + * Even though the reload has failed, we should be able to continue + * using the iterator. + */ + cl_assert_equal_i(reftable_iterator_next_ref(&it, &rec), 0); + cl_assert_equal_s(rec.refname, "refs/heads/branch-0000"); + cl_assert_equal_i(reftable_iterator_next_ref(&it, &rec), 0); + cl_assert_equal_s(rec.refname, "refs/heads/branch-0001"); + cl_assert(reftable_iterator_next_ref(&it, &rec) > 0); + + reftable_ref_record_release(&rec); + reftable_iterator_destroy(&it); + reftable_stack_destroy(st); + reftable_buf_release(&table_path); + reftable_buf_release(&content); + clear_dir(dir); +} + +static int write_limits_after_ref(struct reftable_writer *wr, void *arg) +{ + struct reftable_ref_record *ref = arg; + cl_assert_equal_i(reftable_writer_set_limits(wr, + ref->update_index, ref->update_index), 0); + cl_assert_equal_i(reftable_writer_add_ref(wr, ref), 0); + return reftable_writer_set_limits(wr, ref->update_index, ref->update_index); +} + +void test_reftable_stack__invalid_limit_updates(void) +{ + struct reftable_ref_record ref = { + .refname = (char *) "HEAD", + .update_index = 1, + .value_type = REFTABLE_REF_SYMREF, + .value.symref = (char *) "master", + }; + struct reftable_write_options opts = { + .default_permissions = 0660, + }; + struct reftable_addition *add = NULL; + char *dir = get_tmp_dir(__LINE__); + struct reftable_stack *st = NULL; + + cl_assert_equal_i(reftable_new_stack(&st, dir, &opts), 0); + + reftable_addition_destroy(add); + + cl_assert_equal_i(reftable_stack_new_addition(&add, st, 0), 0); + + /* + * write_limits_after_ref also updates the update indexes after adding + * the record. This should cause an err to be returned, since the limits + * must be set at the start. + */ + cl_assert_equal_i(reftable_addition_add(add, + write_limits_after_ref, &ref), REFTABLE_API_ERROR); + + reftable_addition_destroy(add); + reftable_stack_destroy(st); + clear_dir(dir); +} -- cgit 1.2.3-korg From 9bbc981c6f27e00d050d4f70b0293f56e39b019a Mon Sep 17 00:00:00 2001 From: Seyi Kuforiji Date: Thu, 24 Jul 2025 15:28:37 +0100 Subject: t/unit-tests: finalize migration of reftable-related tests The old `lib-reftable.{c,h}` implemented helper functions for our homegrown unit-testing framework. As part of migrating reftable-related tests to the Clar framework, Clar-specific versions of these functions in `lib-reftable-clar.{c,h}` were introduced. Now that all test files using these helpers have been converted to Clar, we can safely remove the original `lib-reftable.{c,h}` and rename the Clar- specific versions back to `lib-reftable.{c,h}`. This restores a clean and consistent naming scheme for shared test utilities. Finally, update our build system to reflect the changes made and remove redundant code related to the reftable tests and our old homegrown unit-testing setup. 
`test-lib.{c,h}` remains unchanged in our build system as some files particularly `t/helper/test-example-tap.c` depends on it in order to run, and removing that would be beyond the scope of this patch. Signed-off-by: Seyi Kuforiji Signed-off-by: Junio C Hamano --- Makefile | 6 +-- t/Makefile | 1 - t/meson.build | 21 +------- t/unit-tests/lib-reftable-clar.c | 101 ------------------------------------ t/unit-tests/lib-reftable-clar.h | 20 ------- t/unit-tests/lib-reftable.c | 35 +++++++------ t/unit-tests/lib-reftable.h | 15 +++--- t/unit-tests/u-reftable-basics.c | 2 +- t/unit-tests/u-reftable-block.c | 2 +- t/unit-tests/u-reftable-merged.c | 2 +- t/unit-tests/u-reftable-pq.c | 2 +- t/unit-tests/u-reftable-readwrite.c | 2 +- t/unit-tests/u-reftable-record.c | 2 +- t/unit-tests/u-reftable-stack.c | 4 +- t/unit-tests/u-reftable-table.c | 2 +- 15 files changed, 38 insertions(+), 179 deletions(-) delete mode 100644 t/unit-tests/lib-reftable-clar.c delete mode 100644 t/unit-tests/lib-reftable-clar.h (limited to 't/unit-tests/u-reftable-stack.c') diff --git a/Makefile b/Makefile index cbbb2ec05a..25e6558b7b 100644 --- a/Makefile +++ b/Makefile @@ -1380,13 +1380,11 @@ CLAR_TEST_SUITES += u-urlmatch-normalization CLAR_TEST_PROG = $(UNIT_TEST_BIN)/unit-tests$(X) CLAR_TEST_OBJS = $(patsubst %,$(UNIT_TEST_DIR)/%.o,$(CLAR_TEST_SUITES)) CLAR_TEST_OBJS += $(UNIT_TEST_DIR)/clar/clar.o -CLAR_TEST_OBJS += $(UNIT_TEST_DIR)/unit-test.o CLAR_TEST_OBJS += $(UNIT_TEST_DIR)/lib-oid.o -CLAR_TEST_OBJS += $(UNIT_TEST_DIR)/lib-reftable-clar.o +CLAR_TEST_OBJS += $(UNIT_TEST_DIR)/lib-reftable.o +CLAR_TEST_OBJS += $(UNIT_TEST_DIR)/unit-test.o -UNIT_TEST_PROGS = $(patsubst %,$(UNIT_TEST_BIN)/%$X,$(UNIT_TEST_PROGRAMS)) UNIT_TEST_OBJS += $(UNIT_TEST_DIR)/test-lib.o -UNIT_TEST_OBJS += $(UNIT_TEST_DIR)/lib-reftable.o # xdiff and reftable libs may in turn depend on what is in libgit.a GITLIBS = common-main.o $(LIB_FILE) $(XDIFF_LIB) $(REFTABLE_LIB) $(LIB_FILE) diff --git a/t/Makefile b/t/Makefile index 791e0a0978..757674e727 100644 --- a/t/Makefile +++ b/t/Makefile @@ -125,7 +125,6 @@ check-meson: @mkdir -p mesontmp && \ printf "%s\n" \ "integration_tests t[0-9][0-9][0-9][0-9]-*.sh" \ - "unit_test_programs unit-tests/t-*.c" \ "clar_test_suites unit-tests/u-*.c" | \ while read -r variable pattern; do \ awk "/^$$variable = \[\$$/ {flag=1 ; next } /^]$$/ { flag=0 } flag { gsub(/^ \047/, \"\"); gsub(/\047,\$$/, \"\"); print }" meson.build >mesontmp/meson.txt && \ diff --git a/t/meson.build b/t/meson.build index fcbb659263..be96a9639f 100644 --- a/t/meson.build +++ b/t/meson.build @@ -28,7 +28,7 @@ clar_sources = [ 'unit-tests/clar/clar.c', 'unit-tests/unit-test.c', 'unit-tests/lib-oid.c', - 'unit-tests/lib-reftable-clar.c' + 'unit-tests/lib-reftable.c' ] clar_decls_h = custom_target( @@ -62,25 +62,6 @@ clar_unit_tests = executable('unit-tests', ) test('unit-tests', clar_unit_tests) -unit_test_programs = [ -] - -foreach unit_test_program : unit_test_programs - unit_test_name = fs.stem(unit_test_program) - unit_test = executable(unit_test_name, - sources: [ - 'unit-tests/test-lib.c', - 'unit-tests/lib-reftable.c', - unit_test_program, - ], - dependencies: [libgit_commonmain], - ) - test(unit_test_name, unit_test, - workdir: meson.current_source_dir(), - timeout: 0, - ) -endforeach - subdir('helper') integration_tests = [ diff --git a/t/unit-tests/lib-reftable-clar.c b/t/unit-tests/lib-reftable-clar.c deleted file mode 100644 index 64e40a106e..0000000000 --- a/t/unit-tests/lib-reftable-clar.c +++ /dev/null @@ -1,101 +0,0 @@ 
-#include "unit-test.h" -#include "lib-reftable-clar.h" -#include "hex.h" -#include "parse-options.h" -#include "reftable/constants.h" -#include "reftable/writer.h" -#include "strbuf.h" -#include "string-list.h" -#include "strvec.h" - -void cl_reftable_set_hash(uint8_t *p, int i, enum reftable_hash id) -{ - memset(p, (uint8_t)i, hash_size(id)); -} - -static ssize_t strbuf_writer_write(void *b, const void *data, size_t sz) -{ - strbuf_add(b, data, sz); - return sz; -} - -static int strbuf_writer_flush(void *arg UNUSED) -{ - return 0; -} - -struct reftable_writer *cl_reftable_strbuf_writer(struct reftable_buf *buf, - struct reftable_write_options *opts) -{ - struct reftable_writer *writer; - int ret = reftable_writer_new(&writer, &strbuf_writer_write, &strbuf_writer_flush, - buf, opts); - cl_assert(!ret); - return writer; -} - -void cl_reftable_write_to_buf(struct reftable_buf *buf, - struct reftable_ref_record *refs, - size_t nrefs, - struct reftable_log_record *logs, - size_t nlogs, - struct reftable_write_options *_opts) -{ - struct reftable_write_options opts = { 0 }; - const struct reftable_stats *stats; - struct reftable_writer *writer; - uint64_t min = 0xffffffff; - uint64_t max = 0; - int ret; - - if (_opts) - opts = *_opts; - - for (size_t i = 0; i < nrefs; i++) { - uint64_t ui = refs[i].update_index; - if (ui > max) - max = ui; - if (ui < min) - min = ui; - } - for (size_t i = 0; i < nlogs; i++) { - uint64_t ui = logs[i].update_index; - if (ui > max) - max = ui; - if (ui < min) - min = ui; - } - - writer = cl_reftable_strbuf_writer(buf, &opts); - ret = reftable_writer_set_limits(writer, min, max); - cl_assert_equal_i(ret, 0); - - if (nrefs) { - ret = reftable_writer_add_refs(writer, refs, nrefs); - cl_assert_equal_i(ret, 0); - } - - if (nlogs) { - ret = reftable_writer_add_logs(writer, logs, nlogs); - cl_assert_equal_i(ret, 0); - } - - ret = reftable_writer_close(writer); - cl_assert_equal_i(ret, 0); - - stats = reftable_writer_stats(writer); - for (size_t i = 0; i < (size_t)stats->ref_stats.blocks; i++) { - size_t off = i * (opts.block_size ? opts.block_size - : DEFAULT_BLOCK_SIZE); - if (!off) - off = header_size(opts.hash_id == REFTABLE_HASH_SHA256 ? 
2 : 1); - cl_assert(buf->buf[off] == 'r'); - } - - if (nrefs) - cl_assert(stats->ref_stats.blocks > 0); - if (nlogs) - cl_assert(stats->log_stats.blocks > 0); - - reftable_writer_free(writer); -} diff --git a/t/unit-tests/lib-reftable-clar.h b/t/unit-tests/lib-reftable-clar.h deleted file mode 100644 index b562648973..0000000000 --- a/t/unit-tests/lib-reftable-clar.h +++ /dev/null @@ -1,20 +0,0 @@ -#include "git-compat-util.h" -#include "clar/clar.h" -#include "clar-decls.h" -#include "git-compat-util.h" -#include "reftable/reftable-writer.h" -#include "strbuf.h" - -struct reftable_buf; - -void cl_reftable_set_hash(uint8_t *p, int i, enum reftable_hash id); - -struct reftable_writer *cl_reftable_strbuf_writer(struct reftable_buf *buf, - struct reftable_write_options *opts); - -void cl_reftable_write_to_buf(struct reftable_buf *buf, - struct reftable_ref_record *refs, - size_t nrecords, - struct reftable_log_record *logs, - size_t nlogs, - struct reftable_write_options *opts); diff --git a/t/unit-tests/lib-reftable.c b/t/unit-tests/lib-reftable.c index 8a69612266..fdb5b11a20 100644 --- a/t/unit-tests/lib-reftable.c +++ b/t/unit-tests/lib-reftable.c @@ -1,12 +1,14 @@ -#define DISABLE_SIGN_COMPARE_WARNINGS - +#include "unit-test.h" #include "lib-reftable.h" -#include "test-lib.h" +#include "hex.h" +#include "parse-options.h" #include "reftable/constants.h" #include "reftable/writer.h" #include "strbuf.h" +#include "string-list.h" +#include "strvec.h" -void t_reftable_set_hash(uint8_t *p, int i, enum reftable_hash id) +void cl_reftable_set_hash(uint8_t *p, int i, enum reftable_hash id) { memset(p, (uint8_t)i, hash_size(id)); } @@ -22,17 +24,17 @@ static int strbuf_writer_flush(void *arg UNUSED) return 0; } -struct reftable_writer *t_reftable_strbuf_writer(struct reftable_buf *buf, +struct reftable_writer *cl_reftable_strbuf_writer(struct reftable_buf *buf, struct reftable_write_options *opts) { struct reftable_writer *writer; int ret = reftable_writer_new(&writer, &strbuf_writer_write, &strbuf_writer_flush, buf, opts); - check(!ret); + cl_assert(!ret); return writer; } -void t_reftable_write_to_buf(struct reftable_buf *buf, +void cl_reftable_write_to_buf(struct reftable_buf *buf, struct reftable_ref_record *refs, size_t nrefs, struct reftable_log_record *logs, @@ -64,35 +66,36 @@ void t_reftable_write_to_buf(struct reftable_buf *buf, min = ui; } - writer = t_reftable_strbuf_writer(buf, &opts); - reftable_writer_set_limits(writer, min, max); + writer = cl_reftable_strbuf_writer(buf, &opts); + ret = reftable_writer_set_limits(writer, min, max); + cl_assert(!ret); if (nrefs) { ret = reftable_writer_add_refs(writer, refs, nrefs); - check_int(ret, ==, 0); + cl_assert_equal_i(ret, 0); } if (nlogs) { ret = reftable_writer_add_logs(writer, logs, nlogs); - check_int(ret, ==, 0); + cl_assert_equal_i(ret, 0); } ret = reftable_writer_close(writer); - check_int(ret, ==, 0); + cl_assert_equal_i(ret, 0); stats = reftable_writer_stats(writer); - for (size_t i = 0; i < stats->ref_stats.blocks; i++) { + for (size_t i = 0; i < (size_t)stats->ref_stats.blocks; i++) { size_t off = i * (opts.block_size ? opts.block_size : DEFAULT_BLOCK_SIZE); if (!off) off = header_size(opts.hash_id == REFTABLE_HASH_SHA256 ? 
2 : 1); - check_char(buf->buf[off], ==, 'r'); + cl_assert(buf->buf[off] == 'r'); } if (nrefs) - check_int(stats->ref_stats.blocks, >, 0); + cl_assert(stats->ref_stats.blocks > 0); if (nlogs) - check_int(stats->log_stats.blocks, >, 0); + cl_assert(stats->log_stats.blocks > 0); reftable_writer_free(writer); } diff --git a/t/unit-tests/lib-reftable.h b/t/unit-tests/lib-reftable.h index e4c360fa7e..d7e6d3136f 100644 --- a/t/unit-tests/lib-reftable.h +++ b/t/unit-tests/lib-reftable.h @@ -1,21 +1,20 @@ -#ifndef LIB_REFTABLE_H -#define LIB_REFTABLE_H - +#include "git-compat-util.h" +#include "clar/clar.h" +#include "clar-decls.h" #include "git-compat-util.h" #include "reftable/reftable-writer.h" +#include "strbuf.h" struct reftable_buf; -void t_reftable_set_hash(uint8_t *p, int i, enum reftable_hash id); +void cl_reftable_set_hash(uint8_t *p, int i, enum reftable_hash id); -struct reftable_writer *t_reftable_strbuf_writer(struct reftable_buf *buf, +struct reftable_writer *cl_reftable_strbuf_writer(struct reftable_buf *buf, struct reftable_write_options *opts); -void t_reftable_write_to_buf(struct reftable_buf *buf, +void cl_reftable_write_to_buf(struct reftable_buf *buf, struct reftable_ref_record *refs, size_t nrecords, struct reftable_log_record *logs, size_t nlogs, struct reftable_write_options *opts); - -#endif diff --git a/t/unit-tests/u-reftable-basics.c b/t/unit-tests/u-reftable-basics.c index f105c22d15..a0471083e7 100644 --- a/t/unit-tests/u-reftable-basics.c +++ b/t/unit-tests/u-reftable-basics.c @@ -7,7 +7,7 @@ license that can be found in the LICENSE file or at */ #include "unit-test.h" -#include "lib-reftable-clar.h" +#include "lib-reftable.h" #include "reftable/basics.h" struct integer_needle_lesseq_args { diff --git a/t/unit-tests/u-reftable-block.c b/t/unit-tests/u-reftable-block.c index 97a1348563..f4bded7d26 100644 --- a/t/unit-tests/u-reftable-block.c +++ b/t/unit-tests/u-reftable-block.c @@ -7,7 +7,7 @@ license that can be found in the LICENSE file or at */ #include "unit-test.h" -#include "lib-reftable-clar.h" +#include "lib-reftable.h" #include "reftable/block.h" #include "reftable/blocksource.h" #include "reftable/constants.h" diff --git a/t/unit-tests/u-reftable-merged.c b/t/unit-tests/u-reftable-merged.c index 62b02baa14..54cb7fc2a7 100644 --- a/t/unit-tests/u-reftable-merged.c +++ b/t/unit-tests/u-reftable-merged.c @@ -7,7 +7,7 @@ license that can be found in the LICENSE file or at */ #include "unit-test.h" -#include "lib-reftable-clar.h" +#include "lib-reftable.h" #include "reftable/blocksource.h" #include "reftable/constants.h" #include "reftable/merged.h" diff --git a/t/unit-tests/u-reftable-pq.c b/t/unit-tests/u-reftable-pq.c index d55792ca6f..f8a28f6e07 100644 --- a/t/unit-tests/u-reftable-pq.c +++ b/t/unit-tests/u-reftable-pq.c @@ -7,7 +7,7 @@ license that can be found in the LICENSE file or at */ #include "unit-test.h" -#include "lib-reftable-clar.h" +#include "lib-reftable.h" #include "reftable/constants.h" #include "reftable/pq.h" #include "strbuf.h" diff --git a/t/unit-tests/u-reftable-readwrite.c b/t/unit-tests/u-reftable-readwrite.c index 9aac74a011..4d8c4be5f1 100644 --- a/t/unit-tests/u-reftable-readwrite.c +++ b/t/unit-tests/u-reftable-readwrite.c @@ -9,7 +9,7 @@ license that can be found in the LICENSE file or at #define DISABLE_SIGN_COMPARE_WARNINGS #include "unit-test.h" -#include "lib-reftable-clar.h" +#include "lib-reftable.h" #include "reftable/basics.h" #include "reftable/blocksource.h" #include "reftable/reftable-error.h" diff --git 
a/t/unit-tests/u-reftable-record.c b/t/unit-tests/u-reftable-record.c index 650b0aa0d3..6c8c0d5374 100644 --- a/t/unit-tests/u-reftable-record.c +++ b/t/unit-tests/u-reftable-record.c @@ -7,7 +7,7 @@ */ #include "unit-test.h" -#include "lib-reftable-clar.h" +#include "lib-reftable.h" #include "reftable/basics.h" #include "reftable/constants.h" #include "reftable/record.h" diff --git a/t/unit-tests/u-reftable-stack.c b/t/unit-tests/u-reftable-stack.c index 0e3e3f4f4a..e4ea57138e 100644 --- a/t/unit-tests/u-reftable-stack.c +++ b/t/unit-tests/u-reftable-stack.c @@ -10,7 +10,7 @@ license that can be found in the LICENSE file or at #include "unit-test.h" #include "dir.h" -#include "lib-reftable-clar.h" +#include "lib-reftable.h" #include "reftable/merged.h" #include "reftable/reftable-error.h" #include "reftable/stack.h" @@ -497,7 +497,7 @@ void test_reftable_stack__add(void) struct reftable_buf path = REFTABLE_BUF_INIT; struct stat stat_result; size_t i, N = ARRAY_SIZE(refs); - int err; + int err = 0; err = reftable_new_stack(&st, dir, &opts); cl_assert(!err); diff --git a/t/unit-tests/u-reftable-table.c b/t/unit-tests/u-reftable-table.c index 839107d8a4..14fae8b199 100644 --- a/t/unit-tests/u-reftable-table.c +++ b/t/unit-tests/u-reftable-table.c @@ -1,5 +1,5 @@ #include "unit-test.h" -#include "lib-reftable-clar.h" +#include "lib-reftable.h" #include "reftable/blocksource.h" #include "reftable/constants.h" #include "reftable/iter.h" -- cgit 1.2.3-korg
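
For readers following the conversion, the assertion mapping used throughout the new `u-reftable-*.c` files (visible in the `lib-reftable.c` hunk above) boils down to a handful of substitutions. The sketch below is illustrative only: the suite name "example", the file it would live in (`u-example.c`), and the values are hypothetical and are not part of either patch.

#include "unit-test.h"

/*
 * Hypothetical clar test in u-example.c: functions named
 * test_<suite>__<name> in a u-<suite>.c file are picked up by the
 * clar test-suite generation, matching the naming used by
 * u-reftable-stack.c above (test_reftable_stack__*).
 */
void test_example__assertion_mapping(void)
{
	int ret = 0;
	int blocks = 3;
	char block_type = 'r';

	/* was: check(!ret); */
	cl_assert(!ret);
	/* was: check_int(ret, ==, 0); */
	cl_assert_equal_i(ret, 0);
	/* was: check_int(blocks, >, 0); */
	cl_assert(blocks > 0);
	/* was: check_char(block_type, ==, 'r'); */
	cl_assert(block_type == 'r');
}

Equality checks map onto the typed cl_assert_equal_* helpers, while relational and character checks collapse into plain cl_assert() expressions, which is exactly the pattern the lib-reftable.c hunk shows.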