Diffstat (limited to 'builtin')
-rw-r--r--  builtin/am.c                    5
-rw-r--r--  builtin/apply.c                 2
-rw-r--r--  builtin/backfill.c              6
-rw-r--r--  builtin/blame.c                 6
-rw-r--r--  builtin/cat-file.c             62
-rw-r--r--  builtin/checkout.c              6
-rw-r--r--  builtin/clone.c                14
-rw-r--r--  builtin/commit-graph.c         20
-rw-r--r--  builtin/commit-tree.c           4
-rw-r--r--  builtin/config.c               12
-rw-r--r--  builtin/count-objects.c         6
-rw-r--r--  builtin/describe.c              5
-rw-r--r--  builtin/diff.c                  2
-rw-r--r--  builtin/difftool.c              4
-rw-r--r--  builtin/fast-export.c          10
-rw-r--r--  builtin/fast-import.c          49
-rw-r--r--  builtin/fetch.c               169
-rw-r--r--  builtin/fsck.c                 31
-rw-r--r--  builtin/gc.c                  441
-rw-r--r--  builtin/grep.c                 26
-rw-r--r--  builtin/hash-object.c           4
-rw-r--r--  builtin/index-pack.c           31
-rw-r--r--  builtin/log.c                  23
-rw-r--r--  builtin/ls-files.c              4
-rw-r--r--  builtin/ls-remote.c             2
-rw-r--r--  builtin/ls-tree.c               6
-rw-r--r--  builtin/merge-file.c            2
-rw-r--r--  builtin/merge-tree.c           14
-rw-r--r--  builtin/merge.c                66
-rw-r--r--  builtin/mktag.c                 6
-rw-r--r--  builtin/mktree.c               10
-rw-r--r--  builtin/multi-pack-index.c      6
-rw-r--r--  builtin/notes.c                 8
-rw-r--r--  builtin/pack-objects.c        259
-rw-r--r--  builtin/pack-redundant.c       12
-rw-r--r--  builtin/patch-id.c              2
-rw-r--r--  builtin/prune.c                30
-rw-r--r--  builtin/pull.c                  5
-rw-r--r--  builtin/rebase.c                1
-rw-r--r--  builtin/receive-pack.c        109
-rw-r--r--  builtin/remote.c               28
-rw-r--r--  builtin/repack.c              196
-rw-r--r--  builtin/replace.c              12
-rw-r--r--  builtin/rev-list.c              8
-rw-r--r--  builtin/send-pack.c             9
-rw-r--r--  builtin/shortlog.c              2
-rw-r--r--  builtin/show-index.c            2
-rw-r--r--  builtin/show-ref.c              6
-rw-r--r--  builtin/stash.c               480
-rw-r--r--  builtin/submodule--helper.c   129
-rw-r--r--  builtin/tag.c                  10
-rw-r--r--  builtin/unpack-file.c           4
-rw-r--r--  builtin/unpack-objects.c       12
-rw-r--r--  builtin/update-index.c          6
-rw-r--r--  builtin/update-ref.c           25
-rw-r--r--  builtin/write-tree.c            1
56 files changed, 1637 insertions(+), 773 deletions(-)
diff --git a/builtin/am.c b/builtin/am.c
index e32a3b4c97..c9d925f7b9 100644
--- a/builtin/am.c
+++ b/builtin/am.c
@@ -1000,7 +1000,7 @@ static void am_setup(struct am_state *state, enum patch_format patch_format,
if (!patch_format) {
fprintf_ln(stderr, _("Patch format detection failed."));
- exit(128);
+ die(NULL);
}
if (mkdir(state->dir, 0777) < 0 && errno != EEXIST)
@@ -1178,7 +1178,7 @@ static void NORETURN die_user_resolve(const struct am_state *state)
strbuf_release(&sb);
}
- exit(128);
+ die(NULL);
}
/**
@@ -2406,6 +2406,7 @@ int cmd_am(int argc,
.type = OPTION_CALLBACK,
.long_name = "show-current-patch",
.value = &resume_mode,
+ .precision = sizeof(resume_mode),
.argh = "(diff|raw)",
.help = N_("show the patch being applied"),
.flags = PARSE_OPT_CMDMODE | PARSE_OPT_OPTARG | PARSE_OPT_NONEG | PARSE_OPT_LITERAL_ARGHELP,
diff --git a/builtin/apply.c b/builtin/apply.c
index a1e20c593d..d642a40251 100644
--- a/builtin/apply.c
+++ b/builtin/apply.c
@@ -29,7 +29,7 @@ int cmd_apply(int argc,
* cf. https://lore.kernel.org/git/xmqqcypfcmn4.fsf@gitster.g/
*/
if (!the_hash_algo)
- repo_set_hash_algo(the_repository, GIT_HASH_SHA1);
+ repo_set_hash_algo(the_repository, GIT_HASH_DEFAULT);
argc = apply_parse_options(argc, argv,
&state, &force_apply, &options,
diff --git a/builtin/backfill.c b/builtin/backfill.c
index fa82ad2f6f..80056abe47 100644
--- a/builtin/backfill.c
+++ b/builtin/backfill.c
@@ -13,7 +13,7 @@
#include "tree.h"
#include "tree-walk.h"
#include "object.h"
-#include "object-store.h"
+#include "odb.h"
#include "oid-array.h"
#include "oidset.h"
#include "promisor-remote.h"
@@ -67,8 +67,8 @@ static int fill_missing_blobs(const char *path UNUSED,
return 0;
for (size_t i = 0; i < list->nr; i++) {
- if (!has_object(ctx->repo, &list->oid[i],
- OBJECT_INFO_FOR_PREFETCH))
+ if (!odb_has_object(ctx->repo->objects, &list->oid[i],
+ OBJECT_INFO_FOR_PREFETCH))
oid_array_append(&ctx->current_batch, &list->oid[i]);
}
diff --git a/builtin/blame.c b/builtin/blame.c
index 944952e30e..91586e6852 100644
--- a/builtin/blame.c
+++ b/builtin/blame.c
@@ -28,7 +28,7 @@
#include "line-log.h"
#include "progress.h"
#include "object-name.h"
-#include "object-store.h"
+#include "odb.h"
#include "pager.h"
#include "blame.h"
#include "refs.h"
@@ -837,7 +837,7 @@ static int is_a_rev(const char *name)
if (repo_get_oid(the_repository, name, &oid))
return 0;
- return OBJ_NONE < oid_object_info(the_repository, &oid, NULL);
+ return OBJ_NONE < odb_read_object_info(the_repository->objects, &oid, NULL);
}
static int peel_to_commit_oid(struct object_id *oid_ret, void *cbdata)
@@ -848,7 +848,7 @@ static int peel_to_commit_oid(struct object_id *oid_ret, void *cbdata)
oidcpy(&oid, oid_ret);
while (1) {
struct object *obj;
- int kind = oid_object_info(r, &oid, NULL);
+ int kind = odb_read_object_info(r->objects, &oid, NULL);
if (kind == OBJ_COMMIT) {
oidcpy(oid_ret, &oid);
return 0;
diff --git a/builtin/cat-file.c b/builtin/cat-file.c
index 4b23fcecbd..2492a0b6f3 100644
--- a/builtin/cat-file.c
+++ b/builtin/cat-file.c
@@ -24,7 +24,7 @@
#include "pack-bitmap.h"
#include "object-file.h"
#include "object-name.h"
-#include "object-store.h"
+#include "odb.h"
#include "replace-object.h"
#include "promisor-remote.h"
#include "mailmap.h"
@@ -74,7 +74,7 @@ static int filter_object(const char *path, unsigned mode,
{
enum object_type type;
- *buf = repo_read_object_file(the_repository, oid, &type, size);
+ *buf = odb_read_object(the_repository->objects, oid, &type, size);
if (!*buf)
return error(_("cannot read object %s '%s'"),
oid_to_hex(oid), path);
@@ -132,7 +132,7 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name)
switch (opt) {
case 't':
oi.typep = &type;
- if (oid_object_info_extended(the_repository, &oid, &oi, flags) < 0)
+ if (odb_read_object_info_extended(the_repository->objects, &oid, &oi, flags) < 0)
die("git cat-file: could not get object info");
printf("%s\n", type_name(type));
ret = 0;
@@ -146,7 +146,7 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name)
oi.contentp = (void**)&buf;
}
- if (oid_object_info_extended(the_repository, &oid, &oi, flags) < 0)
+ if (odb_read_object_info_extended(the_repository->objects, &oid, &oi, flags) < 0)
die("git cat-file: could not get object info");
if (use_mailmap && (type == OBJ_COMMIT || type == OBJ_TAG)) {
@@ -160,8 +160,8 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name)
goto cleanup;
case 'e':
- ret = !has_object(the_repository, &oid,
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR);
+ ret = !odb_has_object(the_repository->objects, &oid,
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR);
goto cleanup;
case 'w':
@@ -180,7 +180,7 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name)
/* else fallthrough */
case 'p':
- type = oid_object_info(the_repository, &oid, NULL);
+ type = odb_read_object_info(the_repository->objects, &oid, NULL);
if (type < 0)
die("Not a valid object name %s", obj_name);
@@ -197,8 +197,8 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name)
ret = stream_blob(&oid);
goto cleanup;
}
- buf = repo_read_object_file(the_repository, &oid, &type,
- &size);
+ buf = odb_read_object(the_repository->objects, &oid,
+ &type, &size);
if (!buf)
die("Cannot read object %s", obj_name);
@@ -217,11 +217,10 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name)
if (exp_type_id == OBJ_BLOB) {
struct object_id blob_oid;
- if (oid_object_info(the_repository, &oid, NULL) == OBJ_TAG) {
- char *buffer = repo_read_object_file(the_repository,
- &oid,
- &type,
- &size);
+ if (odb_read_object_info(the_repository->objects,
+ &oid, NULL) == OBJ_TAG) {
+ char *buffer = odb_read_object(the_repository->objects,
+ &oid, &type, &size);
const char *target;
if (!buffer)
@@ -235,7 +234,8 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name)
} else
oidcpy(&blob_oid, &oid);
- if (oid_object_info(the_repository, &blob_oid, NULL) == OBJ_BLOB) {
+ if (odb_read_object_info(the_repository->objects,
+ &blob_oid, NULL) == OBJ_BLOB) {
ret = stream_blob(&blob_oid);
goto cleanup;
}
@@ -246,8 +246,8 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name)
* fall-back to the usual case.
*/
}
- buf = read_object_with_reference(the_repository, &oid,
- exp_type_id, &size, NULL);
+ buf = odb_read_object_peeled(the_repository->objects, &oid,
+ exp_type_id, &size, NULL);
if (use_mailmap) {
size_t s = size;
@@ -295,7 +295,7 @@ struct expand_data {
/*
* After a mark_query run, this object_info is set up to be
- * passed to oid_object_info_extended. It will point to the data
+ * passed to odb_read_object_info_extended. It will point to the data
* elements above, so you can retrieve the response from there.
*/
struct object_info info;
@@ -406,10 +406,8 @@ static void print_object_or_die(struct batch_options *opt, struct expand_data *d
if (!textconv_object(the_repository,
data->rest, 0100644, oid,
1, &contents, &size))
- contents = repo_read_object_file(the_repository,
- oid,
- &type,
- &size);
+ contents = odb_read_object(the_repository->objects,
+ oid, &type, &size);
if (!contents)
die("could not convert '%s' %s",
oid_to_hex(oid), data->rest);
@@ -426,8 +424,8 @@ static void print_object_or_die(struct batch_options *opt, struct expand_data *d
unsigned long size;
void *contents;
- contents = repo_read_object_file(the_repository, oid, &type,
- &size);
+ contents = odb_read_object(the_repository->objects, oid,
+ &type, &size);
if (!contents)
die("object %s disappeared", oid_to_hex(oid));
@@ -489,12 +487,12 @@ static void batch_object_write(const char *obj_name,
data->info.sizep = &data->size;
if (pack)
- ret = packed_object_info(the_repository, pack, offset,
- &data->info);
+ ret = packed_object_info(the_repository, pack,
+ offset, &data->info);
else
- ret = oid_object_info_extended(the_repository,
- &data->oid, &data->info,
- OBJECT_INFO_LOOKUP_REPLACE);
+ ret = odb_read_object_info_extended(the_repository->objects,
+ &data->oid, &data->info,
+ OBJECT_INFO_LOOKUP_REPLACE);
if (ret < 0) {
if (data->mode == S_IFGITLINK)
report_object_status(opt, oid_to_hex(&data->oid), &data->oid, "submodule");
@@ -539,8 +537,8 @@ static void batch_object_write(const char *obj_name,
size_t s = data->size;
char *buf = NULL;
- buf = repo_read_object_file(the_repository, &data->oid, &data->type,
- &data->size);
+ buf = odb_read_object(the_repository->objects, &data->oid,
+ &data->type, &data->size);
if (!buf)
die(_("unable to read %s"), oid_to_hex(&data->oid));
buf = replace_idents_using_mailmap(buf, &s);
@@ -881,7 +879,7 @@ static int batch_objects(struct batch_options *opt)
/*
* Expand once with our special mark_query flag, which will prime the
- * object_info to be handed to oid_object_info_extended for each
+ * object_info to be handed to odb_read_object_info_extended for each
* object.
*/
data.mark_query = 1;
diff --git a/builtin/checkout.c b/builtin/checkout.c
index d185982f3a..0a90b86a72 100644
--- a/builtin/checkout.c
+++ b/builtin/checkout.c
@@ -20,7 +20,7 @@
#include "merge-ort-wrappers.h"
#include "object-file.h"
#include "object-name.h"
-#include "object-store.h"
+#include "odb.h"
#include "parse-options.h"
#include "path.h"
#include "preload-index.h"
@@ -838,7 +838,7 @@ static int merge_working_tree(const struct checkout_opts *opts,
init_tree_desc(&trees[0], &tree->object.oid,
tree->buffer, tree->size);
if (parse_tree(new_tree) < 0)
- exit(128);
+ die(NULL);
tree = new_tree;
init_tree_desc(&trees[1], &tree->object.oid,
tree->buffer, tree->size);
@@ -913,7 +913,7 @@ static int merge_working_tree(const struct checkout_opts *opts,
work,
old_tree);
if (ret < 0)
- exit(128);
+ die(NULL);
ret = reset_tree(new_tree,
opts, 0,
writeout_error, new_branch_info);
diff --git a/builtin/clone.c b/builtin/clone.c
index 91b9cd0d16..6d08abed37 100644
--- a/builtin/clone.c
+++ b/builtin/clone.c
@@ -25,7 +25,7 @@
#include "refs.h"
#include "refspec.h"
#include "object-file.h"
-#include "object-store.h"
+#include "odb.h"
#include "tree.h"
#include "tree-walk.h"
#include "unpack-trees.h"
@@ -171,7 +171,7 @@ static int add_one_reference(struct string_list_item *item, void *cb_data)
} else {
struct strbuf sb = STRBUF_INIT;
strbuf_addf(&sb, "%s/objects", ref_git);
- add_to_alternates_file(sb.buf);
+ odb_add_to_alternates_file(the_repository->objects, sb.buf);
strbuf_release(&sb);
}
@@ -212,12 +212,14 @@ static void copy_alternates(struct strbuf *src, const char *src_repo)
if (!line.len || line.buf[0] == '#')
continue;
if (is_absolute_path(line.buf)) {
- add_to_alternates_file(line.buf);
+ odb_add_to_alternates_file(the_repository->objects,
+ line.buf);
continue;
}
abs_path = mkpathdup("%s/objects/%s", src_repo, line.buf);
if (!normalize_path_copy(abs_path, abs_path))
- add_to_alternates_file(abs_path);
+ odb_add_to_alternates_file(the_repository->objects,
+ abs_path);
else
warning("skipping invalid relative alternate: %s/%s",
src_repo, line.buf);
@@ -352,7 +354,7 @@ static void clone_local(const char *src_repo, const char *dest_repo)
struct strbuf alt = STRBUF_INIT;
get_common_dir(&alt, src_repo);
strbuf_addstr(&alt, "/objects");
- add_to_alternates_file(alt.buf);
+ odb_add_to_alternates_file(the_repository->objects, alt.buf);
strbuf_release(&alt);
} else {
struct strbuf src = STRBUF_INIT;
@@ -504,7 +506,7 @@ static void write_followtags(const struct ref *refs, const char *msg)
continue;
if (ends_with(ref->name, "^{}"))
continue;
- if (!has_object(the_repository, &ref->old_oid, 0))
+ if (!odb_has_object(the_repository->objects, &ref->old_oid, 0))
continue;
refs_update_ref(get_main_ref_store(the_repository), msg,
ref->name, &ref->old_oid, NULL, 0,
diff --git a/builtin/commit-graph.c b/builtin/commit-graph.c
index ee48980248..25018a0b9d 100644
--- a/builtin/commit-graph.c
+++ b/builtin/commit-graph.c
@@ -6,7 +6,7 @@
#include "hex.h"
#include "parse-options.h"
#include "commit-graph.h"
-#include "object-store.h"
+#include "odb.h"
#include "progress.h"
#include "replace-object.h"
#include "strbuf.h"
@@ -66,7 +66,7 @@ static int graph_verify(int argc, const char **argv, const char *prefix,
struct repository *repo UNUSED)
{
struct commit_graph *graph = NULL;
- struct object_directory *odb = NULL;
+ struct odb_source *source = NULL;
char *graph_name;
char *chain_name;
enum { OPENED_NONE, OPENED_GRAPH, OPENED_CHAIN } opened = OPENED_NONE;
@@ -101,9 +101,9 @@ static int graph_verify(int argc, const char **argv, const char *prefix,
if (opts.progress)
flags |= COMMIT_GRAPH_WRITE_PROGRESS;
- odb = find_odb(the_repository, opts.obj_dir);
- graph_name = get_commit_graph_filename(odb);
- chain_name = get_commit_graph_chain_filename(odb);
+ source = odb_find_source(the_repository->objects, opts.obj_dir);
+ graph_name = get_commit_graph_filename(source);
+ chain_name = get_commit_graph_chain_filename(source);
if (open_commit_graph(graph_name, &fd, &st))
opened = OPENED_GRAPH;
else if (errno != ENOENT)
@@ -120,7 +120,7 @@ static int graph_verify(int argc, const char **argv, const char *prefix,
if (opened == OPENED_NONE)
return 0;
else if (opened == OPENED_GRAPH)
- graph = load_commit_graph_one_fd_st(the_repository, fd, &st, odb);
+ graph = load_commit_graph_one_fd_st(the_repository, fd, &st, source);
else
graph = load_commit_graph_chain_fd_st(the_repository, fd, &st,
&incomplete_chain);
@@ -221,7 +221,7 @@ static int graph_write(int argc, const char **argv, const char *prefix,
struct string_list pack_indexes = STRING_LIST_INIT_DUP;
struct strbuf buf = STRBUF_INIT;
struct oidset commits = OIDSET_INIT;
- struct object_directory *odb = NULL;
+ struct odb_source *source = NULL;
int result = 0;
enum commit_graph_write_flags flags = 0;
struct progress *progress = NULL;
@@ -289,10 +289,10 @@ static int graph_write(int argc, const char **argv, const char *prefix,
git_env_bool(GIT_TEST_COMMIT_GRAPH_CHANGED_PATHS, 0))
flags |= COMMIT_GRAPH_WRITE_BLOOM_FILTERS;
- odb = find_odb(the_repository, opts.obj_dir);
+ source = odb_find_source(the_repository->objects, opts.obj_dir);
if (opts.reachable) {
- if (write_commit_graph_reachable(odb, flags, &write_opts))
+ if (write_commit_graph_reachable(source, flags, &write_opts))
result = 1;
goto cleanup;
}
@@ -319,7 +319,7 @@ static int graph_write(int argc, const char **argv, const char *prefix,
stop_progress(&progress);
}
- if (write_commit_graph(odb,
+ if (write_commit_graph(source,
opts.stdin_packs ? &pack_indexes : NULL,
opts.stdin_commits ? &commits : NULL,
flags,
diff --git a/builtin/commit-tree.c b/builtin/commit-tree.c
index ad6b2c9320..31cfd9bd15 100644
--- a/builtin/commit-tree.c
+++ b/builtin/commit-tree.c
@@ -9,7 +9,7 @@
#include "gettext.h"
#include "hex.h"
#include "object-name.h"
-#include "object-store.h"
+#include "odb.h"
#include "commit.h"
#include "parse-options.h"
@@ -48,7 +48,7 @@ static int parse_parent_arg_callback(const struct option *opt,
if (repo_get_oid_commit(the_repository, arg, &oid))
die(_("not a valid object name %s"), arg);
- assert_oid_type(&oid, OBJ_COMMIT);
+ odb_assert_oid_type(the_repository->objects, &oid, OBJ_COMMIT);
new_parent(lookup_commit(the_repository, &oid), parents);
return 0;
}
diff --git a/builtin/config.c b/builtin/config.c
index f70d635477..5efe273010 100644
--- a/builtin/config.c
+++ b/builtin/config.c
@@ -17,9 +17,9 @@
static const char *const builtin_config_usage[] = {
N_("git config list [<file-option>] [<display-option>] [--includes]"),
- N_("git config get [<file-option>] [<display-option>] [--includes] [--all] [--regexp] [--value=<value>] [--fixed-value] [--default=<default>] <name>"),
- N_("git config set [<file-option>] [--type=<type>] [--all] [--value=<value>] [--fixed-value] <name> <value>"),
- N_("git config unset [<file-option>] [--all] [--value=<value>] [--fixed-value] <name>"),
+ N_("git config get [<file-option>] [<display-option>] [--includes] [--all] [--regexp] [--value=<pattern>] [--fixed-value] [--default=<default>] [--url=<url>] <name>"),
+ N_("git config set [<file-option>] [--type=<type>] [--all] [--value=<pattern>] [--fixed-value] <name> <value>"),
+ N_("git config unset [<file-option>] [--all] [--value=<pattern>] [--fixed-value] <name>"),
N_("git config rename-section [<file-option>] <old-name> <new-name>"),
N_("git config remove-section [<file-option>] <name>"),
N_("git config edit [<file-option>]"),
@@ -33,17 +33,17 @@ static const char *const builtin_config_list_usage[] = {
};
static const char *const builtin_config_get_usage[] = {
- N_("git config get [<file-option>] [<display-option>] [--includes] [--all] [--regexp=<regexp>] [--value=<value>] [--fixed-value] [--default=<default>] <name>"),
+ N_("git config get [<file-option>] [<display-option>] [--includes] [--all] [--regexp=<regexp>] [--value=<pattern>] [--fixed-value] [--default=<default>] <name>"),
NULL
};
static const char *const builtin_config_set_usage[] = {
- N_("git config set [<file-option>] [--type=<type>] [--comment=<message>] [--all] [--value=<value>] [--fixed-value] <name> <value>"),
+ N_("git config set [<file-option>] [--type=<type>] [--comment=<message>] [--all] [--value=<pattern>] [--fixed-value] <name> <value>"),
NULL
};
static const char *const builtin_config_unset_usage[] = {
- N_("git config unset [<file-option>] [--all] [--value=<value>] [--fixed-value] <name>"),
+ N_("git config unset [<file-option>] [--all] [--value=<pattern>] [--fixed-value] <name>"),
NULL
};
diff --git a/builtin/count-objects.c b/builtin/count-objects.c
index a88c0c9c09..f687647931 100644
--- a/builtin/count-objects.c
+++ b/builtin/count-objects.c
@@ -80,10 +80,10 @@ static int count_cruft(const char *basename UNUSED, const char *path,
return 0;
}
-static int print_alternate(struct object_directory *odb, void *data UNUSED)
+static int print_alternate(struct odb_source *alternate, void *data UNUSED)
{
printf("alternate: ");
- quote_c_style(odb->path, NULL, stdout, 0);
+ quote_c_style(alternate->path, NULL, stdout, 0);
putchar('\n');
return 0;
}
@@ -159,7 +159,7 @@ int cmd_count_objects(int argc,
printf("prune-packable: %lu\n", packed_loose);
printf("garbage: %lu\n", garbage);
printf("size-garbage: %s\n", garbage_buf.buf);
- foreach_alt_odb(print_alternate, NULL);
+ odb_for_each_alternate(the_repository->objects, print_alternate, NULL);
strbuf_release(&loose_buf);
strbuf_release(&pack_buf);
strbuf_release(&garbage_buf);
diff --git a/builtin/describe.c b/builtin/describe.c
index 2d50883b72..fbf305d762 100644
--- a/builtin/describe.c
+++ b/builtin/describe.c
@@ -19,7 +19,7 @@
#include "setup.h"
#include "strvec.h"
#include "run-command.h"
-#include "object-store.h"
+#include "odb.h"
#include "list-objects.h"
#include "commit-slab.h"
#include "wildmatch.h"
@@ -552,7 +552,8 @@ static void describe(const char *arg, int last_one)
if (cmit)
describe_commit(&oid, &sb);
- else if (oid_object_info(the_repository, &oid, NULL) == OBJ_BLOB)
+ else if (odb_read_object_info(the_repository->objects,
+ &oid, NULL) == OBJ_BLOB)
describe_blob(oid, &sb);
else
die(_("%s is neither a commit nor blob"), arg);
diff --git a/builtin/diff.c b/builtin/diff.c
index c6231edce4..eebffe36cc 100644
--- a/builtin/diff.c
+++ b/builtin/diff.c
@@ -483,7 +483,7 @@ int cmd_diff(int argc,
* configurable via a command line option.
*/
if (nongit)
- repo_set_hash_algo(the_repository, GIT_HASH_SHA1);
+ repo_set_hash_algo(the_repository, GIT_HASH_DEFAULT);
init_diff_ui_defaults();
git_config(git_diff_ui_config, NULL);
diff --git a/builtin/difftool.c b/builtin/difftool.c
index a3b64ce694..e4bc1f8316 100644
--- a/builtin/difftool.c
+++ b/builtin/difftool.c
@@ -30,7 +30,7 @@
#include "strbuf.h"
#include "lockfile.h"
#include "object-file.h"
-#include "object-store.h"
+#include "odb.h"
#include "dir.h"
#include "entry.h"
#include "setup.h"
@@ -320,7 +320,7 @@ static char *get_symlink(struct repository *repo,
} else {
enum object_type type;
unsigned long size;
- data = repo_read_object_file(repo, oid, &type, &size);
+ data = odb_read_object(repo->objects, oid, &type, &size);
if (!data)
die(_("could not read object %s for symlink %s"),
oid_to_hex(oid), path);
diff --git a/builtin/fast-export.c b/builtin/fast-export.c
index fcf6b00d5f..6a3a17a8cd 100644
--- a/builtin/fast-export.c
+++ b/builtin/fast-export.c
@@ -14,7 +14,7 @@
#include "refs.h"
#include "refspec.h"
#include "object-file.h"
-#include "object-store.h"
+#include "odb.h"
#include "commit.h"
#include "object.h"
#include "tag.h"
@@ -323,7 +323,7 @@ static void export_blob(const struct object_id *oid)
object = (struct object *)lookup_blob(the_repository, oid);
eaten = 0;
} else {
- buf = repo_read_object_file(the_repository, oid, &type, &size);
+ buf = odb_read_object(the_repository->objects, oid, &type, &size);
if (!buf)
die("could not read blob %s", oid_to_hex(oid));
if (check_object_signature(the_repository, oid, buf, size,
@@ -869,8 +869,8 @@ static void handle_tag(const char *name, struct tag *tag)
return;
}
- buf = repo_read_object_file(the_repository, &tag->object.oid, &type,
- &size);
+ buf = odb_read_object(the_repository->objects, &tag->object.oid,
+ &type, &size);
if (!buf)
die("could not read tag %s", oid_to_hex(&tag->object.oid));
message = memmem(buf, size, "\n\n", 2);
@@ -1200,7 +1200,7 @@ static void import_marks(char *input_file, int check_exists)
if (last_idnum < mark)
last_idnum = mark;
- type = oid_object_info(the_repository, &oid, NULL);
+ type = odb_read_object_info(the_repository->objects, &oid, NULL);
if (type < 0)
die("object not found: %s", oid_to_hex(&oid));
diff --git a/builtin/fast-import.c b/builtin/fast-import.c
index b2839c5f43..b1389c5921 100644
--- a/builtin/fast-import.c
+++ b/builtin/fast-import.c
@@ -24,7 +24,7 @@
#include "packfile.h"
#include "object-file.h"
#include "object-name.h"
-#include "object-store.h"
+#include "odb.h"
#include "mem-pool.h"
#include "commit-reach.h"
#include "khash.h"
@@ -763,7 +763,8 @@ static void start_packfile(void)
struct packed_git *p;
int pack_fd;
- pack_fd = odb_mkstemp(&tmp_file, "pack/tmp_pack_XXXXXX");
+ pack_fd = odb_mkstemp(the_repository->objects, &tmp_file,
+ "pack/tmp_pack_XXXXXX");
FLEX_ALLOC_STR(p, pack_name, tmp_file.buf);
strbuf_release(&tmp_file);
@@ -1264,7 +1265,7 @@ static void load_tree(struct tree_entry *root)
die("Can't load tree %s", oid_to_hex(oid));
} else {
enum object_type type;
- buf = repo_read_object_file(the_repository, oid, &type, &size);
+ buf = odb_read_object(the_repository->objects, oid, &type, &size);
if (!buf || type != OBJ_TREE)
die("Can't load tree %s", oid_to_hex(oid));
}
@@ -1755,8 +1756,8 @@ static void insert_object_entry(struct mark_set **s, struct object_id *oid, uint
struct object_entry *e;
e = find_object(oid);
if (!e) {
- enum object_type type = oid_object_info(the_repository,
- oid, NULL);
+ enum object_type type = odb_read_object_info(the_repository->objects,
+ oid, NULL);
if (type < 0)
die("object not found: %s", oid_to_hex(oid));
e = insert_object(oid);
@@ -2415,8 +2416,8 @@ static void file_change_m(const char *p, struct branch *b)
enum object_type expected = S_ISDIR(mode) ?
OBJ_TREE: OBJ_BLOB;
enum object_type type = oe ? oe->type :
- oid_object_info(the_repository, &oid,
- NULL);
+ odb_read_object_info(the_repository->objects,
+ &oid, NULL);
if (type < 0)
die("%s not found: %s",
S_ISDIR(mode) ? "Tree" : "Blob",
@@ -2534,10 +2535,9 @@ static void note_change_n(const char *p, struct branch *b, unsigned char *old_fa
oidcpy(&commit_oid, &commit_oe->idx.oid);
} else if (!repo_get_oid(the_repository, p, &commit_oid)) {
unsigned long size;
- char *buf = read_object_with_reference(the_repository,
- &commit_oid,
- OBJ_COMMIT, &size,
- &commit_oid);
+ char *buf = odb_read_object_peeled(the_repository->objects,
+ &commit_oid, OBJ_COMMIT, &size,
+ &commit_oid);
if (!buf || size < the_hash_algo->hexsz + 6)
die("Not a valid commit: %s", p);
free(buf);
@@ -2552,7 +2552,7 @@ static void note_change_n(const char *p, struct branch *b, unsigned char *old_fa
die("Not a blob (actually a %s): %s",
type_name(oe->type), command_buf.buf);
} else if (!is_null_oid(&oid)) {
- enum object_type type = oid_object_info(the_repository, &oid,
+ enum object_type type = odb_read_object_info(the_repository->objects, &oid,
NULL);
if (type < 0)
die("Blob not found: %s", command_buf.buf);
@@ -2603,9 +2603,8 @@ static void parse_from_existing(struct branch *b)
unsigned long size;
char *buf;
- buf = read_object_with_reference(the_repository,
- &b->oid, OBJ_COMMIT, &size,
- &b->oid);
+ buf = odb_read_object_peeled(the_repository->objects, &b->oid,
+ OBJ_COMMIT, &size, &b->oid);
parse_from_commit(b, buf, size);
free(buf);
}
@@ -2698,10 +2697,9 @@ static struct hash_list *parse_merge(unsigned int *count)
oidcpy(&n->oid, &oe->idx.oid);
} else if (!repo_get_oid(the_repository, from, &n->oid)) {
unsigned long size;
- char *buf = read_object_with_reference(the_repository,
- &n->oid,
- OBJ_COMMIT,
- &size, &n->oid);
+ char *buf = odb_read_object_peeled(the_repository->objects,
+ &n->oid, OBJ_COMMIT,
+ &size, &n->oid);
if (!buf || size < the_hash_algo->hexsz + 6)
die("Not a valid commit: %s", from);
free(buf);
@@ -2894,7 +2892,8 @@ static void parse_new_tag(const char *arg)
} else if (!repo_get_oid(the_repository, from, &oid)) {
struct object_entry *oe = find_object(&oid);
if (!oe) {
- type = oid_object_info(the_repository, &oid, NULL);
+ type = odb_read_object_info(the_repository->objects,
+ &oid, NULL);
if (type < 0)
die("Not a valid object: %s", from);
} else
@@ -3000,7 +2999,7 @@ static void cat_blob(struct object_entry *oe, struct object_id *oid)
char *buf;
if (!oe || oe->pack_id == MAX_PACK_ID) {
- buf = repo_read_object_file(the_repository, oid, &type, &size);
+ buf = odb_read_object(the_repository->objects, oid, &type, &size);
} else {
type = oe->type;
buf = gfi_unpack_entry(oe, &size);
@@ -3084,8 +3083,8 @@ static struct object_entry *dereference(struct object_entry *oe,
const unsigned hexsz = the_hash_algo->hexsz;
if (!oe) {
- enum object_type type = oid_object_info(the_repository, oid,
- NULL);
+ enum object_type type = odb_read_object_info(the_repository->objects,
+ oid, NULL);
if (type < 0)
die("object not found: %s", oid_to_hex(oid));
/* cache it! */
@@ -3108,8 +3107,8 @@ static struct object_entry *dereference(struct object_entry *oe,
buf = gfi_unpack_entry(oe, &size);
} else {
enum object_type unused;
- buf = repo_read_object_file(the_repository, oid, &unused,
- &size);
+ buf = odb_read_object(the_repository->objects, oid,
+ &unused, &size);
}
if (!buf)
die("Can't load object %s", oid_to_hex(oid));
diff --git a/builtin/fetch.c b/builtin/fetch.c
index 40a0e8d244..87a0cca799 100644
--- a/builtin/fetch.c
+++ b/builtin/fetch.c
@@ -14,7 +14,7 @@
#include "refs.h"
#include "refspec.h"
#include "object-name.h"
-#include "object-store.h"
+#include "odb.h"
#include "oidset.h"
#include "oid-array.h"
#include "commit.h"
@@ -366,9 +366,9 @@ static void find_non_local_tags(const struct ref *refs,
*/
if (ends_with(ref->name, "^{}")) {
if (item &&
- !has_object(the_repository, &ref->old_oid, 0) &&
+ !odb_has_object(the_repository->objects, &ref->old_oid, 0) &&
!oidset_contains(&fetch_oids, &ref->old_oid) &&
- !has_object(the_repository, &item->oid, 0) &&
+ !odb_has_object(the_repository->objects, &item->oid, 0) &&
!oidset_contains(&fetch_oids, &item->oid))
clear_item(item);
item = NULL;
@@ -382,7 +382,7 @@ static void find_non_local_tags(const struct ref *refs,
* fetch.
*/
if (item &&
- !has_object(the_repository, &item->oid, 0) &&
+ !odb_has_object(the_repository->objects, &item->oid, 0) &&
!oidset_contains(&fetch_oids, &item->oid))
clear_item(item);
@@ -403,7 +403,7 @@ static void find_non_local_tags(const struct ref *refs,
* checked to see if it needs fetching.
*/
if (item &&
- !has_object(the_repository, &item->oid, 0) &&
+ !odb_has_object(the_repository->objects, &item->oid, 0) &&
!oidset_contains(&fetch_oids, &item->oid))
clear_item(item);
@@ -640,9 +640,6 @@ static struct ref *get_ref_map(struct remote *remote,
return ref_map;
}
-#define STORE_REF_ERROR_OTHER 1
-#define STORE_REF_ERROR_DF_CONFLICT 2
-
static int s_update_ref(const char *action,
struct ref *ref,
struct ref_transaction *transaction,
@@ -650,7 +647,6 @@ static int s_update_ref(const char *action,
{
char *msg;
char *rla = getenv("GIT_REFLOG_ACTION");
- struct ref_transaction *our_transaction = NULL;
struct strbuf err = STRBUF_INIT;
int ret;
@@ -660,43 +656,10 @@ static int s_update_ref(const char *action,
rla = default_rla.buf;
msg = xstrfmt("%s: %s", rla, action);
- /*
- * If no transaction was passed to us, we manage the transaction
- * ourselves. Otherwise, we trust the caller to handle the transaction
- * lifecycle.
- */
- if (!transaction) {
- transaction = our_transaction = ref_store_transaction_begin(get_main_ref_store(the_repository),
- 0, &err);
- if (!transaction) {
- ret = STORE_REF_ERROR_OTHER;
- goto out;
- }
- }
-
ret = ref_transaction_update(transaction, ref->name, &ref->new_oid,
check_old ? &ref->old_oid : NULL,
NULL, NULL, 0, msg, &err);
- if (ret) {
- ret = STORE_REF_ERROR_OTHER;
- goto out;
- }
-
- if (our_transaction) {
- switch (ref_transaction_commit(our_transaction, &err)) {
- case 0:
- break;
- case REF_TRANSACTION_ERROR_NAME_CONFLICT:
- ret = STORE_REF_ERROR_DF_CONFLICT;
- goto out;
- default:
- ret = STORE_REF_ERROR_OTHER;
- goto out;
- }
- }
-out:
- ref_transaction_free(our_transaction);
if (ret)
error("%s", err.buf);
strbuf_release(&err);
@@ -910,8 +873,8 @@ static int update_local_ref(struct ref *ref,
struct commit *current = NULL, *updated;
int fast_forward = 0;
- if (!has_object(the_repository, &ref->new_oid,
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
+ if (!odb_has_object(the_repository->objects, &ref->new_oid,
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
die(_("object %s not found"), oid_to_hex(&ref->new_oid));
if (oideq(&ref->old_oid, &ref->new_oid)) {
@@ -992,7 +955,7 @@ static int update_local_ref(struct ref *ref,
fast_forward = repo_in_merge_bases(the_repository, current,
updated);
if (fast_forward < 0)
- exit(128);
+ die(NULL);
forced_updates_ms += (getnanotime() - t_before) / 1000000;
} else {
fast_forward = 1;
@@ -1139,7 +1102,6 @@ N_("it took %.2f seconds to check forced updates; you can use\n"
"to avoid this check\n");
static int store_updated_refs(struct display_state *display_state,
- const char *remote_name,
int connectivity_checked,
struct ref_transaction *transaction, struct ref *ref_map,
struct fetch_head *fetch_head,
@@ -1277,11 +1239,6 @@ static int store_updated_refs(struct display_state *display_state,
}
}
- if (rc & STORE_REF_ERROR_DF_CONFLICT)
- error(_("some local refs could not be updated; try running\n"
- " 'git remote prune %s' to remove any old, conflicting "
- "branches"), remote_name);
-
if (advice_enabled(ADVICE_FETCH_SHOW_FORCED_UPDATES)) {
if (!config->show_forced_updates) {
warning(_(warn_show_forced_updates));
@@ -1330,7 +1287,8 @@ static int check_exist_and_connected(struct ref *ref_map)
* we need all direct targets to exist.
*/
for (r = rm; r; r = r->next) {
- if (!has_object(the_repository, &r->old_oid, HAS_OBJECT_RECHECK_PACKED))
+ if (!odb_has_object(the_repository->objects, &r->old_oid,
+ HAS_OBJECT_RECHECK_PACKED))
return -1;
}
@@ -1365,9 +1323,8 @@ static int fetch_and_consume_refs(struct display_state *display_state,
}
trace2_region_enter("fetch", "consume_refs", the_repository);
- ret = store_updated_refs(display_state, transport->remote->name,
- connectivity_checked, transaction, ref_map,
- fetch_head, config);
+ ret = store_updated_refs(display_state, connectivity_checked,
+ transaction, ref_map, fetch_head, config);
trace2_region_leave("fetch", "consume_refs", the_repository);
out:
@@ -1383,9 +1340,10 @@ static int prune_refs(struct display_state *display_state,
int result = 0;
struct ref *ref, *stale_refs = get_stale_heads(rs, ref_map);
struct strbuf err = STRBUF_INIT;
- const char *dangling_msg = dry_run
- ? _(" (%s will become dangling)")
- : _(" (%s has become dangling)");
+ struct string_list refnames = STRING_LIST_INIT_NODUP;
+
+ for (ref = stale_refs; ref; ref = ref->next)
+ string_list_append(&refnames, ref->name);
if (!dry_run) {
if (transaction) {
@@ -1396,15 +1354,9 @@ static int prune_refs(struct display_state *display_state,
goto cleanup;
}
} else {
- struct string_list refnames = STRING_LIST_INIT_NODUP;
-
- for (ref = stale_refs; ref; ref = ref->next)
- string_list_append(&refnames, ref->name);
-
result = refs_delete_refs(get_main_ref_store(the_repository),
"fetch: prune", &refnames,
0);
- string_list_clear(&refnames, 0);
}
}
@@ -1416,12 +1368,14 @@ static int prune_refs(struct display_state *display_state,
_("(none)"), ref->name,
&ref->new_oid, &ref->old_oid,
summary_width);
- refs_warn_dangling_symref(get_main_ref_store(the_repository),
- stderr, dangling_msg, ref->name);
}
+ string_list_sort(&refnames);
+ refs_warn_dangling_symrefs(get_main_ref_store(the_repository),
+ stderr, " ", dry_run, &refnames);
}
cleanup:
+ string_list_clear(&refnames, 0);
strbuf_release(&err);
free_refs(stale_refs);
return result;
@@ -1485,7 +1439,7 @@ static void add_negotiation_tips(struct git_transport_options *smart_options)
struct object_id oid;
if (repo_get_oid(the_repository, s, &oid))
die(_("%s is not a valid object"), s);
- if (!has_object(the_repository, &oid, 0))
+ if (!odb_has_object(the_repository->objects, &oid, 0))
die(_("the object %s does not exist"), s);
oid_array_append(oids, &oid);
continue;
@@ -1687,6 +1641,36 @@ cleanup:
return result;
}
+struct ref_rejection_data {
+ int *retcode;
+ int conflict_msg_shown;
+ const char *remote_name;
+};
+
+static void ref_transaction_rejection_handler(const char *refname,
+ const struct object_id *old_oid UNUSED,
+ const struct object_id *new_oid UNUSED,
+ const char *old_target UNUSED,
+ const char *new_target UNUSED,
+ enum ref_transaction_error err,
+ void *cb_data)
+{
+ struct ref_rejection_data *data = cb_data;
+
+ if (err == REF_TRANSACTION_ERROR_NAME_CONFLICT && !data->conflict_msg_shown) {
+ error(_("some local refs could not be updated; try running\n"
+ " 'git remote prune %s' to remove any old, conflicting "
+ "branches"), data->remote_name);
+ data->conflict_msg_shown = 1;
+ } else {
+ const char *reason = ref_transaction_error_msg(err);
+
+ error(_("fetching ref %s failed: %s"), refname, reason);
+ }
+
+ *data->retcode = 1;
+}
+
static int do_fetch(struct transport *transport,
struct refspec *rs,
const struct fetch_config *config)
@@ -1807,6 +1791,24 @@ static int do_fetch(struct transport *transport,
retcode = 1;
}
+ /*
+ * If not atomic, we can still use batched updates, which would be much
+ * more performant. We don't initiate the transaction before pruning,
+ * since pruning must be an independent step, to avoid F/D conflicts.
+ *
+ * TODO: if reference transactions gain logical conflict resolution, we
+ * can delete and create refs (with F/D conflicts) in the same transaction
+ * and this can be moved above the 'prune_refs()' block.
+ */
+ if (!transaction) {
+ transaction = ref_store_transaction_begin(get_main_ref_store(the_repository),
+ REF_TRANSACTION_ALLOW_FAILURE, &err);
+ if (!transaction) {
+ retcode = -1;
+ goto cleanup;
+ }
+ }
+
if (fetch_and_consume_refs(&display_state, transport, transaction, ref_map,
&fetch_head, config)) {
retcode = 1;
@@ -1838,16 +1840,31 @@ static int do_fetch(struct transport *transport,
free_refs(tags_ref_map);
}
- if (transaction) {
- if (retcode)
- goto cleanup;
+ if (retcode)
+ goto cleanup;
- retcode = ref_transaction_commit(transaction, &err);
+ retcode = ref_transaction_commit(transaction, &err);
+ if (retcode) {
+ /*
+ * Explicitly handle transaction cleanup to avoid
+ * aborting an already closed transaction.
+ */
+ ref_transaction_free(transaction);
+ transaction = NULL;
+ goto cleanup;
+ }
+
+ if (!atomic_fetch) {
+ struct ref_rejection_data data = {
+ .retcode = &retcode,
+ .conflict_msg_shown = 0,
+ .remote_name = transport->remote->name,
+ };
+
+ ref_transaction_for_each_rejected_update(transaction,
+ ref_transaction_rejection_handler,
+ &data);
if (retcode) {
- /*
- * Explicitly handle transaction cleanup to avoid
- * aborting an already closed transaction.
- */
ref_transaction_free(transaction);
transaction = NULL;
goto cleanup;
@@ -2653,7 +2670,7 @@ int cmd_fetch(int argc,
commit_graph_flags |= COMMIT_GRAPH_WRITE_PROGRESS;
trace2_region_enter("fetch", "write-commit-graph", the_repository);
- write_commit_graph_reachable(the_repository->objects->odb,
+ write_commit_graph_reachable(the_repository->objects->sources,
commit_graph_flags,
NULL);
trace2_region_leave("fetch", "write-commit-graph", the_repository);
diff --git a/builtin/fsck.c b/builtin/fsck.c
index e7d96a9c8e..0084cf7400 100644
--- a/builtin/fsck.c
+++ b/builtin/fsck.c
@@ -17,7 +17,7 @@
#include "packfile.h"
#include "object-file.h"
#include "object-name.h"
-#include "object-store.h"
+#include "odb.h"
#include "path.h"
#include "read-cache-ll.h"
#include "replace-object.h"
@@ -71,7 +71,8 @@ static const char *printable_type(const struct object_id *oid,
const char *ret;
if (type == OBJ_NONE)
- type = oid_object_info(the_repository, oid, NULL);
+ type = odb_read_object_info(the_repository->objects,
+ oid, NULL);
ret = type_name(type);
if (!ret)
@@ -160,7 +161,7 @@ static int mark_object(struct object *obj, enum object_type type,
return 0;
if (!(obj->flags & HAS_OBJ)) {
- if (parent && !has_object(the_repository, &obj->oid, 1)) {
+ if (parent && !odb_has_object(the_repository->objects, &obj->oid, 1)) {
printf_ln(_("broken link from %7s %s\n"
" to %7s %s"),
printable_type(&parent->oid, parent->type),
@@ -232,8 +233,8 @@ static void mark_unreachable_referents(const struct object_id *oid)
* (and we want to avoid parsing blobs).
*/
if (obj->type == OBJ_NONE) {
- enum object_type type = oid_object_info(the_repository,
- &obj->oid, NULL);
+ enum object_type type = odb_read_object_info(the_repository->objects,
+ &obj->oid, NULL);
if (type > 0)
object_as_type(obj, type, 0);
}
@@ -956,7 +957,7 @@ int cmd_fsck(int argc,
struct repository *repo UNUSED)
{
int i;
- struct object_directory *odb;
+ struct odb_source *source;
/* fsck knows how to handle missing promisor objects */
fetch_if_missing = 0;
@@ -997,9 +998,9 @@ int cmd_fsck(int argc,
for_each_packed_object(the_repository,
mark_packed_for_connectivity, NULL, 0);
} else {
- prepare_alt_odb(the_repository);
- for (odb = the_repository->objects->odb; odb; odb = odb->next)
- fsck_object_dir(odb->path);
+ odb_prepare_alternates(the_repository->objects);
+ for (source = the_repository->objects->sources; source; source = source->next)
+ fsck_object_dir(source->path);
if (check_full) {
struct packed_git *p;
@@ -1108,12 +1109,12 @@ int cmd_fsck(int argc,
if (the_repository->settings.core_commit_graph) {
struct child_process commit_graph_verify = CHILD_PROCESS_INIT;
- prepare_alt_odb(the_repository);
- for (odb = the_repository->objects->odb; odb; odb = odb->next) {
+ odb_prepare_alternates(the_repository->objects);
+ for (source = the_repository->objects->sources; source; source = source->next) {
child_process_init(&commit_graph_verify);
commit_graph_verify.git_cmd = 1;
strvec_pushl(&commit_graph_verify.args, "commit-graph",
- "verify", "--object-dir", odb->path, NULL);
+ "verify", "--object-dir", source->path, NULL);
if (show_progress)
strvec_push(&commit_graph_verify.args, "--progress");
else
@@ -1126,12 +1127,12 @@ int cmd_fsck(int argc,
if (the_repository->settings.core_multi_pack_index) {
struct child_process midx_verify = CHILD_PROCESS_INIT;
- prepare_alt_odb(the_repository);
- for (odb = the_repository->objects->odb; odb; odb = odb->next) {
+ odb_prepare_alternates(the_repository->objects);
+ for (source = the_repository->objects->sources; source; source = source->next) {
child_process_init(&midx_verify);
midx_verify.git_cmd = 1;
strvec_pushl(&midx_verify.args, "multi-pack-index",
- "verify", "--object-dir", odb->path, NULL);
+ "verify", "--object-dir", source->path, NULL);
if (show_progress)
strvec_push(&midx_verify.args, "--progress");
else
diff --git a/builtin/gc.c b/builtin/gc.c
index 7dc94f243d..fab8f4dd4f 100644
--- a/builtin/gc.c
+++ b/builtin/gc.c
@@ -251,7 +251,24 @@ static enum schedule_priority parse_schedule(const char *value)
return SCHEDULE_NONE;
}
+enum maintenance_task_label {
+ TASK_PREFETCH,
+ TASK_LOOSE_OBJECTS,
+ TASK_INCREMENTAL_REPACK,
+ TASK_GC,
+ TASK_COMMIT_GRAPH,
+ TASK_PACK_REFS,
+ TASK_REFLOG_EXPIRE,
+ TASK_WORKTREE_PRUNE,
+ TASK_RERERE_GC,
+
+ /* Leave as final value */
+ TASK__COUNT
+};
+
struct maintenance_run_opts {
+ enum maintenance_task_label *tasks;
+ size_t tasks_nr, tasks_alloc;
int auto_flag;
int detach;
int quiet;
@@ -261,6 +278,11 @@ struct maintenance_run_opts {
.detach = -1, \
}
+static void maintenance_run_opts_release(struct maintenance_run_opts *opts)
+{
+ free(opts->tasks);
+}
+
static int pack_refs_condition(UNUSED struct gc_config *cfg)
{
/*
@@ -517,7 +539,7 @@ static uint64_t total_ram(void)
return total;
}
#elif defined(HAVE_BSD_SYSCTL) && (defined(HW_MEMSIZE) || defined(HW_PHYSMEM) || defined(HW_PHYSMEM64))
- int64_t physical_memory;
+ uint64_t physical_memory;
int mib[2];
size_t length;
@@ -529,9 +551,16 @@ static uint64_t total_ram(void)
# else
mib[1] = HW_PHYSMEM;
# endif
- length = sizeof(int64_t);
- if (!sysctl(mib, 2, &physical_memory, &length, NULL, 0))
+ length = sizeof(physical_memory);
+ if (!sysctl(mib, 2, &physical_memory, &length, NULL, 0)) {
+ if (length == 4) {
+ uint32_t mem;
+
+ if (!sysctl(mib, 2, &mem, &length, NULL, 0))
+ physical_memory = mem;
+ }
return physical_memory;
+ }
#elif defined(GIT_WINDOWS_NATIVE)
MEMORYSTATUSEX memInfo;
@@ -796,22 +825,14 @@ done:
return ret;
}
-static void gc_before_repack(struct maintenance_run_opts *opts,
- struct gc_config *cfg)
+static int gc_foreground_tasks(struct maintenance_run_opts *opts,
+ struct gc_config *cfg)
{
- /*
- * We may be called twice, as both the pre- and
- * post-daemonized phases will call us, but running these
- * commands more than once is pointless and wasteful.
- */
- static int done = 0;
- if (done++)
- return;
-
if (cfg->pack_refs && maintenance_task_pack_refs(opts, cfg))
- die(FAILED_RUN, "pack-refs");
+ return error(FAILED_RUN, "pack-refs");
if (cfg->prune_reflogs && maintenance_task_reflog_expire(opts, cfg))
- die(FAILED_RUN, "reflog");
+ return error(FAILED_RUN, "reflog");
+ return 0;
}
int cmd_gc(int argc,
@@ -820,12 +841,12 @@ int cmd_gc(int argc,
struct repository *repo UNUSED)
{
int aggressive = 0;
- int quiet = 0;
int force = 0;
const char *name;
pid_t pid;
int daemonized = 0;
int keep_largest_pack = -1;
+ int skip_foreground_tasks = 0;
timestamp_t dummy;
struct maintenance_run_opts opts = MAINTENANCE_RUN_OPTS_INIT;
struct gc_config cfg = GC_CONFIG_INIT;
@@ -833,7 +854,7 @@ int cmd_gc(int argc,
const char *prune_expire_arg = prune_expire_sentinel;
int ret;
struct option builtin_gc_options[] = {
- OPT__QUIET(&quiet, N_("suppress progress reporting")),
+ OPT__QUIET(&opts.quiet, N_("suppress progress reporting")),
{
.type = OPTION_STRING,
.long_name = "prune",
@@ -858,6 +879,8 @@ int cmd_gc(int argc,
N_("repack all other packs except the largest pack")),
OPT_STRING(0, "expire-to", &cfg.repack_expire_to, N_("dir"),
N_("pack prefix to store a pack containing pruned objects")),
+ OPT_HIDDEN_BOOL(0, "skip-foreground-tasks", &skip_foreground_tasks,
+ N_("skip maintenance tasks typically done in the foreground")),
OPT_END()
};
@@ -893,7 +916,7 @@ int cmd_gc(int argc,
if (cfg.aggressive_window > 0)
strvec_pushf(&repack, "--window=%d", cfg.aggressive_window);
}
- if (quiet)
+ if (opts.quiet)
strvec_push(&repack, "-q");
if (opts.auto_flag) {
@@ -908,7 +931,7 @@ int cmd_gc(int argc,
goto out;
}
- if (!quiet) {
+ if (!opts.quiet) {
if (opts.detach > 0)
fprintf(stderr, _("Auto packing the repository in background for optimum performance.\n"));
else
@@ -941,13 +964,16 @@ int cmd_gc(int argc,
goto out;
}
- if (lock_repo_for_gc(force, &pid)) {
- ret = 0;
- goto out;
- }
+ if (!skip_foreground_tasks) {
+ if (lock_repo_for_gc(force, &pid)) {
+ ret = 0;
+ goto out;
+ }
- gc_before_repack(&opts, &cfg); /* dies on failure */
- delete_tempfile(&pidfile);
+ if (gc_foreground_tasks(&opts, &cfg) < 0)
+ die(NULL);
+ delete_tempfile(&pidfile);
+ }
/*
* failure to daemonize is ok, we'll continue
@@ -976,9 +1002,10 @@ int cmd_gc(int argc,
free(path);
}
- gc_before_repack(&opts, &cfg);
+ if (opts.detach <= 0 && !skip_foreground_tasks)
+ gc_foreground_tasks(&opts, &cfg);
- if (!repository_format_precious_objects) {
+ if (!the_repository->repository_format_precious_objects) {
struct child_process repack_cmd = CHILD_PROCESS_INIT;
repack_cmd.git_cmd = 1;
@@ -993,7 +1020,7 @@ int cmd_gc(int argc,
strvec_pushl(&prune_cmd.args, "prune", "--expire", NULL);
/* run `git prune` even if using cruft packs */
strvec_push(&prune_cmd.args, cfg.prune_expire);
- if (quiet)
+ if (opts.quiet)
strvec_push(&prune_cmd.args, "--no-progress");
if (repo_has_promisor_remote(the_repository))
strvec_push(&prune_cmd.args,
@@ -1020,8 +1047,8 @@ int cmd_gc(int argc,
}
if (the_repository->settings.gc_write_commit_graph == 1)
- write_commit_graph_reachable(the_repository->objects->odb,
- !quiet && !daemonized ? COMMIT_GRAPH_WRITE_PROGRESS : 0,
+ write_commit_graph_reachable(the_repository->objects->sources,
+ !opts.quiet && !daemonized ? COMMIT_GRAPH_WRITE_PROGRESS : 0,
NULL);
if (opts.auto_flag && too_many_loose_objects(&cfg))
@@ -1035,6 +1062,7 @@ int cmd_gc(int argc,
}
out:
+ maintenance_run_opts_release(&opts);
gc_config_release(&cfg);
return 0;
}
@@ -1082,7 +1110,7 @@ static int dfs_on_ref(const char *refname UNUSED,
if (!peel_iterated_oid(the_repository, oid, &peeled))
oid = &peeled;
- if (oid_object_info(the_repository, oid, NULL) != OBJ_COMMIT)
+ if (odb_read_object_info(the_repository->objects, oid, NULL) != OBJ_COMMIT)
return 0;
commit = lookup_commit(the_repository, oid);
@@ -1211,8 +1239,14 @@ static int maintenance_task_prefetch(struct maintenance_run_opts *opts,
return 0;
}
-static int maintenance_task_gc(struct maintenance_run_opts *opts,
- struct gc_config *cfg UNUSED)
+static int maintenance_task_gc_foreground(struct maintenance_run_opts *opts,
+ struct gc_config *cfg)
+{
+ return gc_foreground_tasks(opts, cfg);
+}
+
+static int maintenance_task_gc_background(struct maintenance_run_opts *opts,
+ struct gc_config *cfg UNUSED)
{
struct child_process child = CHILD_PROCESS_INIT;
@@ -1226,6 +1260,7 @@ static int maintenance_task_gc(struct maintenance_run_opts *opts,
else
strvec_push(&child.args, "--no-quiet");
strvec_push(&child.args, "--no-detach");
+ strvec_push(&child.args, "--skip-foreground-tasks");
return run_command(&child);
}
@@ -1273,7 +1308,7 @@ static int loose_object_auto_condition(struct gc_config *cfg UNUSED)
if (loose_object_auto_limit < 0)
return 1;
- return for_each_loose_file_in_objdir(the_repository->objects->odb->path,
+ return for_each_loose_file_in_objdir(the_repository->objects->sources->path,
loose_object_count,
NULL, NULL, &count);
}
@@ -1308,7 +1343,7 @@ static int pack_loose(struct maintenance_run_opts *opts)
* Do not start pack-objects process
* if there are no loose objects.
*/
- if (!for_each_loose_file_in_objdir(r->objects->odb->path,
+ if (!for_each_loose_file_in_objdir(r->objects->sources->path,
bail_on_loose,
NULL, NULL, NULL))
return 0;
@@ -1320,7 +1355,7 @@ static int pack_loose(struct maintenance_run_opts *opts)
strvec_push(&pack_proc.args, "--quiet");
else
strvec_push(&pack_proc.args, "--no-quiet");
- strvec_pushf(&pack_proc.args, "%s/pack/loose", r->objects->odb->path);
+ strvec_pushf(&pack_proc.args, "%s/pack/loose", r->objects->sources->path);
pack_proc.in = -1;
@@ -1348,7 +1383,7 @@ static int pack_loose(struct maintenance_run_opts *opts)
else if (data.batch_size > 0)
data.batch_size--; /* Decrease for equality on limit. */
- for_each_loose_file_in_objdir(r->objects->odb->path,
+ for_each_loose_file_in_objdir(r->objects->sources->path,
write_loose_object_to_stdin,
NULL,
NULL,
@@ -1513,107 +1548,120 @@ static int maintenance_task_incremental_repack(struct maintenance_run_opts *opts
return 0;
}
-typedef int maintenance_task_fn(struct maintenance_run_opts *opts,
- struct gc_config *cfg);
-
-/*
- * An auto condition function returns 1 if the task should run
- * and 0 if the task should NOT run. See needs_to_gc() for an
- * example.
- */
-typedef int maintenance_auto_fn(struct gc_config *cfg);
+typedef int (*maintenance_task_fn)(struct maintenance_run_opts *opts,
+ struct gc_config *cfg);
+typedef int (*maintenance_auto_fn)(struct gc_config *cfg);
struct maintenance_task {
const char *name;
- maintenance_task_fn *fn;
- maintenance_auto_fn *auto_condition;
- unsigned enabled:1;
-
- enum schedule_priority schedule;
- /* -1 if not selected. */
- int selected_order;
-};
+ /*
+ * Work that will be executed before detaching. This should not include
+ * tasks that may run for an extended amount of time as it does cause
+ * auto-maintenance to block until foreground tasks have been run.
+ */
+ maintenance_task_fn foreground;
-enum maintenance_task_label {
- TASK_PREFETCH,
- TASK_LOOSE_OBJECTS,
- TASK_INCREMENTAL_REPACK,
- TASK_GC,
- TASK_COMMIT_GRAPH,
- TASK_PACK_REFS,
- TASK_REFLOG_EXPIRE,
- TASK_WORKTREE_PRUNE,
- TASK_RERERE_GC,
+ /*
+ * Work that will be executed after detaching. When not detaching the
+ * work will be run in the foreground, as well.
+ */
+ maintenance_task_fn background;
- /* Leave as final value */
- TASK__COUNT
+ /*
+ * An auto condition function returns 1 if the task should run and 0 if
+ * the task should NOT run. See needs_to_gc() for an example.
+ */
+ maintenance_auto_fn auto_condition;
};
-static struct maintenance_task tasks[] = {
+static const struct maintenance_task tasks[] = {
[TASK_PREFETCH] = {
- "prefetch",
- maintenance_task_prefetch,
+ .name = "prefetch",
+ .background = maintenance_task_prefetch,
},
[TASK_LOOSE_OBJECTS] = {
- "loose-objects",
- maintenance_task_loose_objects,
- loose_object_auto_condition,
+ .name = "loose-objects",
+ .background = maintenance_task_loose_objects,
+ .auto_condition = loose_object_auto_condition,
},
[TASK_INCREMENTAL_REPACK] = {
- "incremental-repack",
- maintenance_task_incremental_repack,
- incremental_repack_auto_condition,
+ .name = "incremental-repack",
+ .background = maintenance_task_incremental_repack,
+ .auto_condition = incremental_repack_auto_condition,
},
[TASK_GC] = {
- "gc",
- maintenance_task_gc,
- need_to_gc,
- 1,
+ .name = "gc",
+ .foreground = maintenance_task_gc_foreground,
+ .background = maintenance_task_gc_background,
+ .auto_condition = need_to_gc,
},
[TASK_COMMIT_GRAPH] = {
- "commit-graph",
- maintenance_task_commit_graph,
- should_write_commit_graph,
+ .name = "commit-graph",
+ .background = maintenance_task_commit_graph,
+ .auto_condition = should_write_commit_graph,
},
[TASK_PACK_REFS] = {
- "pack-refs",
- maintenance_task_pack_refs,
- pack_refs_condition,
+ .name = "pack-refs",
+ .foreground = maintenance_task_pack_refs,
+ .auto_condition = pack_refs_condition,
},
[TASK_REFLOG_EXPIRE] = {
- "reflog-expire",
- maintenance_task_reflog_expire,
- reflog_expire_condition,
+ .name = "reflog-expire",
+ .foreground = maintenance_task_reflog_expire,
+ .auto_condition = reflog_expire_condition,
},
[TASK_WORKTREE_PRUNE] = {
- "worktree-prune",
- maintenance_task_worktree_prune,
- worktree_prune_condition,
+ .name = "worktree-prune",
+ .background = maintenance_task_worktree_prune,
+ .auto_condition = worktree_prune_condition,
},
[TASK_RERERE_GC] = {
- "rerere-gc",
- maintenance_task_rerere_gc,
- rerere_gc_condition,
+ .name = "rerere-gc",
+ .background = maintenance_task_rerere_gc,
+ .auto_condition = rerere_gc_condition,
},
};
-static int compare_tasks_by_selection(const void *a_, const void *b_)
+enum task_phase {
+ TASK_PHASE_FOREGROUND,
+ TASK_PHASE_BACKGROUND,
+};
+
+static int maybe_run_task(const struct maintenance_task *task,
+ struct repository *repo,
+ struct maintenance_run_opts *opts,
+ struct gc_config *cfg,
+ enum task_phase phase)
{
- const struct maintenance_task *a = a_;
- const struct maintenance_task *b = b_;
+ int foreground = (phase == TASK_PHASE_FOREGROUND);
+ maintenance_task_fn fn = foreground ? task->foreground : task->background;
+ const char *region = foreground ? "maintenance foreground" : "maintenance";
+ int ret = 0;
- return b->selected_order - a->selected_order;
+ if (!fn)
+ return 0;
+ if (opts->auto_flag &&
+ (!task->auto_condition || !task->auto_condition(cfg)))
+ return 0;
+
+ trace2_region_enter(region, task->name, repo);
+ if (fn(opts, cfg)) {
+ error(_("task '%s' failed"), task->name);
+ ret = 1;
+ }
+ trace2_region_leave(region, task->name, repo);
+
+ return ret;
}
static int maintenance_run_tasks(struct maintenance_run_opts *opts,
struct gc_config *cfg)
{
- int i, found_selected = 0;
int result = 0;
struct lock_file lk;
struct repository *r = the_repository;
- char *lock_path = xstrfmt("%s/maintenance", r->objects->odb->path);
+ char *lock_path = xstrfmt("%s/maintenance", r->objects->sources->path);
if (hold_lock_file_for_update(&lk, lock_path, LOCK_NO_DEREF) < 0) {
/*
@@ -1631,6 +1679,11 @@ static int maintenance_run_tasks(struct maintenance_run_opts *opts,
}
free(lock_path);
+ for (size_t i = 0; i < opts->tasks_nr; i++)
+ if (maybe_run_task(&tasks[opts->tasks[i]], r, opts, cfg,
+ TASK_PHASE_FOREGROUND))
+ result = 1;
+
/* Failure to daemonize is ok, we'll continue in foreground. */
if (opts->detach > 0) {
trace2_region_enter("maintenance", "detach", the_repository);
@@ -1638,120 +1691,138 @@ static int maintenance_run_tasks(struct maintenance_run_opts *opts,
trace2_region_leave("maintenance", "detach", the_repository);
}
- for (i = 0; !found_selected && i < TASK__COUNT; i++)
- found_selected = tasks[i].selected_order >= 0;
-
- if (found_selected)
- QSORT(tasks, TASK__COUNT, compare_tasks_by_selection);
-
- for (i = 0; i < TASK__COUNT; i++) {
- if (found_selected && tasks[i].selected_order < 0)
- continue;
-
- if (!found_selected && !tasks[i].enabled)
- continue;
-
- if (opts->auto_flag &&
- (!tasks[i].auto_condition ||
- !tasks[i].auto_condition(cfg)))
- continue;
-
- if (opts->schedule && tasks[i].schedule < opts->schedule)
- continue;
-
- trace2_region_enter("maintenance", tasks[i].name, r);
- if (tasks[i].fn(opts, cfg)) {
- error(_("task '%s' failed"), tasks[i].name);
+ for (size_t i = 0; i < opts->tasks_nr; i++)
+ if (maybe_run_task(&tasks[opts->tasks[i]], r, opts, cfg,
+ TASK_PHASE_BACKGROUND))
result = 1;
- }
- trace2_region_leave("maintenance", tasks[i].name, r);
- }
rollback_lock_file(&lk);
return result;
}
-static void initialize_maintenance_strategy(void)
+struct maintenance_strategy {
+ struct {
+ int enabled;
+ enum schedule_priority schedule;
+ } tasks[TASK__COUNT];
+};
+
+static const struct maintenance_strategy none_strategy = { 0 };
+static const struct maintenance_strategy default_strategy = {
+ .tasks = {
+ [TASK_GC].enabled = 1,
+ },
+};
+static const struct maintenance_strategy incremental_strategy = {
+ .tasks = {
+ [TASK_COMMIT_GRAPH].enabled = 1,
+ [TASK_COMMIT_GRAPH].schedule = SCHEDULE_HOURLY,
+ [TASK_PREFETCH].enabled = 1,
+ [TASK_PREFETCH].schedule = SCHEDULE_HOURLY,
+ [TASK_INCREMENTAL_REPACK].enabled = 1,
+ [TASK_INCREMENTAL_REPACK].schedule = SCHEDULE_DAILY,
+ [TASK_LOOSE_OBJECTS].enabled = 1,
+ [TASK_LOOSE_OBJECTS].schedule = SCHEDULE_DAILY,
+ [TASK_PACK_REFS].enabled = 1,
+ [TASK_PACK_REFS].schedule = SCHEDULE_WEEKLY,
+ },
+};
+
+static void initialize_task_config(struct maintenance_run_opts *opts,
+ const struct string_list *selected_tasks)
{
+ struct strbuf config_name = STRBUF_INIT;
+ struct maintenance_strategy strategy;
const char *config_str;
- if (git_config_get_string_tmp("maintenance.strategy", &config_str))
- return;
+ /*
+ * In case the user has asked us to run tasks explicitly, we only use
+ * those specified tasks. Specifically, we do _not_ want to consult the
+ * config or maintenance strategy.
+ */
+ if (selected_tasks->nr) {
+ for (size_t i = 0; i < selected_tasks->nr; i++) {
+ enum maintenance_task_label label = (intptr_t)selected_tasks->items[i].util;
+ ALLOC_GROW(opts->tasks, opts->tasks_nr + 1, opts->tasks_alloc);
+ opts->tasks[opts->tasks_nr++] = label;
+ }
- if (!strcasecmp(config_str, "incremental")) {
- tasks[TASK_GC].schedule = SCHEDULE_NONE;
- tasks[TASK_COMMIT_GRAPH].enabled = 1;
- tasks[TASK_COMMIT_GRAPH].schedule = SCHEDULE_HOURLY;
- tasks[TASK_PREFETCH].enabled = 1;
- tasks[TASK_PREFETCH].schedule = SCHEDULE_HOURLY;
- tasks[TASK_INCREMENTAL_REPACK].enabled = 1;
- tasks[TASK_INCREMENTAL_REPACK].schedule = SCHEDULE_DAILY;
- tasks[TASK_LOOSE_OBJECTS].enabled = 1;
- tasks[TASK_LOOSE_OBJECTS].schedule = SCHEDULE_DAILY;
- tasks[TASK_PACK_REFS].enabled = 1;
- tasks[TASK_PACK_REFS].schedule = SCHEDULE_WEEKLY;
+ return;
}
-}
-static void initialize_task_config(int schedule)
-{
- int i;
- struct strbuf config_name = STRBUF_INIT;
+ /*
+ * Otherwise, the strategy depends on whether we run as part of a
+ * scheduled job or not:
+ *
+ * - Scheduled maintenance does not perform any housekeeping by
+ * default, but requires the user to pick a maintenance strategy.
+ *
+ * - Unscheduled maintenance uses our default strategy.
+ *
+ * Both of these are affected by the gitconfig though, which may
+ * override specific aspects of our strategy.
+ */
+ if (opts->schedule) {
+ strategy = none_strategy;
- if (schedule)
- initialize_maintenance_strategy();
+ if (!git_config_get_string_tmp("maintenance.strategy", &config_str)) {
+ if (!strcasecmp(config_str, "incremental"))
+ strategy = incremental_strategy;
+ }
+ } else {
+ strategy = default_strategy;
+ }
- for (i = 0; i < TASK__COUNT; i++) {
+ for (size_t i = 0; i < TASK__COUNT; i++) {
int config_value;
- char *config_str;
strbuf_reset(&config_name);
strbuf_addf(&config_name, "maintenance.%s.enabled",
tasks[i].name);
-
if (!git_config_get_bool(config_name.buf, &config_value))
- tasks[i].enabled = config_value;
-
- strbuf_reset(&config_name);
- strbuf_addf(&config_name, "maintenance.%s.schedule",
- tasks[i].name);
+ strategy.tasks[i].enabled = config_value;
+ if (!strategy.tasks[i].enabled)
+ continue;
- if (!git_config_get_string(config_name.buf, &config_str)) {
- tasks[i].schedule = parse_schedule(config_str);
- free(config_str);
+ if (opts->schedule) {
+ strbuf_reset(&config_name);
+ strbuf_addf(&config_name, "maintenance.%s.schedule",
+ tasks[i].name);
+ if (!git_config_get_string_tmp(config_name.buf, &config_str))
+ strategy.tasks[i].schedule = parse_schedule(config_str);
+ if (strategy.tasks[i].schedule < opts->schedule)
+ continue;
}
+
+ ALLOC_GROW(opts->tasks, opts->tasks_nr + 1, opts->tasks_alloc);
+ opts->tasks[opts->tasks_nr++] = i;
}
strbuf_release(&config_name);
}
-static int task_option_parse(const struct option *opt UNUSED,
+static int task_option_parse(const struct option *opt,
const char *arg, int unset)
{
- int i, num_selected = 0;
- struct maintenance_task *task = NULL;
+ struct string_list *selected_tasks = opt->value;
+ size_t i;
BUG_ON_OPT_NEG(unset);
- for (i = 0; i < TASK__COUNT; i++) {
- if (tasks[i].selected_order >= 0)
- num_selected++;
- if (!strcasecmp(tasks[i].name, arg)) {
- task = &tasks[i];
- }
- }
-
- if (!task) {
+ for (i = 0; i < TASK__COUNT; i++)
+ if (!strcasecmp(tasks[i].name, arg))
+ break;
+ if (i >= TASK__COUNT) {
error(_("'%s' is not a valid task"), arg);
return 1;
}
- if (task->selected_order >= 0) {
+ if (unsorted_string_list_has_string(selected_tasks, arg)) {
error(_("task '%s' cannot be selected multiple times"), arg);
return 1;
}
- task->selected_order = num_selected + 1;
+ string_list_append(selected_tasks, arg)->util = (void *)(intptr_t)i;
return 0;
}
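The strategy tables earlier in this gc.c hunk replace the imperative initialize_maintenance_strategy(): each strategy is now a plain per-task table of enabled/schedule values built with designated initializers, and initialize_task_config() simply filters that table against the configured frequency. Below is a minimal self-contained sketch of the same selection idea; the task names, schedule values, and table contents are invented for illustration and are not git's.

#include <stdio.h>

/* Invented task names and schedules; the layout mirrors the strategy tables above. */
enum task { TASK_GC, TASK_COMMIT_GRAPH, TASK_PREFETCH, TASK__COUNT };
enum schedule { SCHEDULE_NONE, SCHEDULE_HOURLY, SCHEDULE_DAILY };

struct strategy {
    struct { int enabled; enum schedule schedule; } tasks[TASK__COUNT];
};

static const struct strategy incremental = {
    .tasks = {
        [TASK_COMMIT_GRAPH] = { .enabled = 1, .schedule = SCHEDULE_HOURLY },
        [TASK_PREFETCH]     = { .enabled = 1, .schedule = SCHEDULE_HOURLY },
    },
};

int main(void)
{
    static const char *names[TASK__COUNT] = { "gc", "commit-graph", "prefetch" };
    enum schedule run_frequency = SCHEDULE_HOURLY;

    /* Keep only tasks that are enabled and due at this frequency. */
    for (int i = 0; i < TASK__COUNT; i++) {
        if (!incremental.tasks[i].enabled)
            continue;
        if (incremental.tasks[i].schedule < run_frequency)
            continue;
        printf("would run: %s\n", names[i]);
    }
    return 0;
}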
@@ -1759,8 +1830,8 @@ static int task_option_parse(const struct option *opt UNUSED,
static int maintenance_run(int argc, const char **argv, const char *prefix,
struct repository *repo UNUSED)
{
- int i;
struct maintenance_run_opts opts = MAINTENANCE_RUN_OPTS_INIT;
+ struct string_list selected_tasks = STRING_LIST_INIT_DUP;
struct gc_config cfg = GC_CONFIG_INIT;
struct option builtin_maintenance_run_options[] = {
OPT_BOOL(0, "auto", &opts.auto_flag,
@@ -1772,7 +1843,7 @@ static int maintenance_run(int argc, const char **argv, const char *prefix,
maintenance_opt_schedule),
OPT_BOOL(0, "quiet", &opts.quiet,
N_("do not report progress or other information over stderr")),
- OPT_CALLBACK_F(0, "task", NULL, N_("task"),
+ OPT_CALLBACK_F(0, "task", &selected_tasks, N_("task"),
N_("run a specific task"),
PARSE_OPT_NONEG, task_option_parse),
OPT_END()
@@ -1781,25 +1852,27 @@ static int maintenance_run(int argc, const char **argv, const char *prefix,
opts.quiet = !isatty(2);
- for (i = 0; i < TASK__COUNT; i++)
- tasks[i].selected_order = -1;
-
argc = parse_options(argc, argv, prefix,
builtin_maintenance_run_options,
builtin_maintenance_run_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
- if (opts.auto_flag && opts.schedule)
- die(_("use at most one of --auto and --schedule=<frequency>"));
+ die_for_incompatible_opt2(opts.auto_flag, "--auto",
+ opts.schedule, "--schedule=");
+ die_for_incompatible_opt2(selected_tasks.nr, "--task=",
+ opts.schedule, "--schedule=");
gc_config(&cfg);
- initialize_task_config(opts.schedule);
+ initialize_task_config(&opts, &selected_tasks);
if (argc != 0)
usage_with_options(builtin_maintenance_run_usage,
builtin_maintenance_run_options);
ret = maintenance_run_tasks(&opts, &cfg);
+
+ string_list_clear(&selected_tasks, 0);
+ maintenance_run_opts_release(&opts);
gc_config_release(&cfg);
return ret;
}
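maintenance_run_tasks() above now drives every selected task through maybe_run_task() twice: the foreground callbacks run before the process detaches, the background callbacks afterwards, and a task is silently skipped in any phase where its callback is NULL. A stripped-down sketch of that dispatch follows; the task table and callbacks are invented stand-ins, not git's actual tasks.

#include <stdio.h>

enum phase { PHASE_FOREGROUND, PHASE_BACKGROUND };

typedef int (*task_fn)(void);

/* Invented tasks: each phase callback may be NULL, as in the real task table. */
struct task {
    const char *name;
    task_fn foreground;
    task_fn background;
};

static int pack_refs(void) { puts("pack-refs runs before detaching"); return 0; }
static int gc(void)        { puts("gc runs after detaching");         return 0; }

static int maybe_run(const struct task *t, enum phase p)
{
    task_fn fn = (p == PHASE_FOREGROUND) ? t->foreground : t->background;

    if (!fn)
        return 0; /* nothing to do for this task in this phase */
    if (fn()) {
        fprintf(stderr, "task '%s' failed\n", t->name);
        return 1;
    }
    return 0;
}

int main(void)
{
    struct task tasks[] = {
        { "pack-refs", pack_refs, NULL },
        { "gc",        NULL,      gc   },
    };
    int result = 0;

    for (size_t i = 0; i < 2; i++)       /* foreground phase */
        result |= maybe_run(&tasks[i], PHASE_FOREGROUND);
    /* ... the real code daemonizes here ... */
    for (size_t i = 0; i < 2; i++)       /* background phase */
        result |= maybe_run(&tasks[i], PHASE_BACKGROUND);
    return result;
}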
@@ -3085,7 +3158,7 @@ static int update_background_schedule(const struct maintenance_start_opts *opts,
unsigned int i;
int result = 0;
struct lock_file lk;
- char *lock_path = xstrfmt("%s/schedule", the_repository->objects->odb->path);
+ char *lock_path = xstrfmt("%s/schedule", the_repository->objects->sources->path);
if (hold_lock_file_for_update(&lk, lock_path, LOCK_NO_DEREF) < 0) {
if (errno == EEXIST)
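task_option_parse() earlier in this gc.c diff records each --task selection by stashing the task's enum index in the string_list item's util pointer through an intptr_t cast, and initialize_task_config() later casts it back. The following self-contained sketch shows just that round-trip; the item struct is a stand-in for git's string_list item, not the real type.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for a list item carrying an opaque util pointer. */
struct item {
    const char *string;
    void *util;
};

enum task { TASK_GC, TASK_PREFETCH, TASK__COUNT };

int main(void)
{
    struct item selected = { .string = "prefetch", .util = NULL };

    /* Store the enum index in the pointer, as task_option_parse() does. */
    selected.util = (void *)(intptr_t)TASK_PREFETCH;

    /* Later, recover it with the inverse cast. */
    enum task label = (enum task)(intptr_t)selected.util;
    printf("%s selects task index %d\n", selected.string, (int)label);
    return 0;
}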
diff --git a/builtin/grep.c b/builtin/grep.c
index 3ce574a605..39273d9c0f 100644
--- a/builtin/grep.c
+++ b/builtin/grep.c
@@ -26,7 +26,7 @@
#include "submodule-config.h"
#include "object-file.h"
#include "object-name.h"
-#include "object-store.h"
+#include "odb.h"
#include "packfile.h"
#include "pager.h"
#include "path.h"
@@ -462,7 +462,7 @@ static int grep_submodule(struct grep_opt *opt,
/*
* NEEDSWORK: repo_read_gitmodules() might call
- * add_to_alternates_memory() via config_from_gitmodules(). This
+ * odb_add_to_alternates_memory() via config_from_gitmodules(). This
* operation causes a race condition with concurrent object readings
* performed by the worker threads. That's why we need obj_read_lock()
* here. It should be removed once it's no longer necessary to add the
@@ -505,7 +505,8 @@ static int grep_submodule(struct grep_opt *opt,
* lazily registered as alternates when needed (and except in an
* unexpected code interaction, it won't be needed).
*/
- add_submodule_odb_by_path(subrepo->objects->odb->path);
+ odb_add_submodule_source_by_path(the_repository->objects,
+ subrepo->objects->sources->path);
obj_read_unlock();
memcpy(&subopt, opt, sizeof(subopt));
@@ -519,11 +520,9 @@ static int grep_submodule(struct grep_opt *opt,
struct strbuf base = STRBUF_INIT;
obj_read_lock();
- object_type = oid_object_info(subrepo, oid, NULL);
+ object_type = odb_read_object_info(subrepo->objects, oid, NULL);
obj_read_unlock();
- data = read_object_with_reference(subrepo,
- oid, OBJ_TREE,
- &size, NULL);
+ data = odb_read_object_peeled(subrepo->objects, oid, OBJ_TREE, &size, NULL);
if (!data)
die(_("unable to read tree (%s)"), oid_to_hex(oid));
@@ -572,8 +571,8 @@ static int grep_cache(struct grep_opt *opt,
void *data;
unsigned long size;
- data = repo_read_object_file(the_repository, &ce->oid,
- &type, &size);
+ data = odb_read_object(the_repository->objects, &ce->oid,
+ &type, &size);
if (!data)
die(_("unable to read tree %s"), oid_to_hex(&ce->oid));
init_tree_desc(&tree, &ce->oid, data, size);
@@ -665,8 +664,8 @@ static int grep_tree(struct grep_opt *opt, const struct pathspec *pathspec,
void *data;
unsigned long size;
- data = repo_read_object_file(the_repository,
- &entry.oid, &type, &size);
+ data = odb_read_object(the_repository->objects,
+ &entry.oid, &type, &size);
if (!data)
die(_("unable to read tree (%s)"),
oid_to_hex(&entry.oid));
@@ -704,9 +703,8 @@ static int grep_object(struct grep_opt *opt, const struct pathspec *pathspec,
struct strbuf base;
int hit, len;
- data = read_object_with_reference(opt->repo,
- &obj->oid, OBJ_TREE,
- &size, NULL);
+ data = odb_read_object_peeled(opt->repo->objects, &obj->oid,
+ OBJ_TREE, &size, NULL);
if (!data)
die(_("unable to read tree (%s)"), oid_to_hex(&obj->oid));
diff --git a/builtin/hash-object.c b/builtin/hash-object.c
index 6a99ec250d..ddf281413a 100644
--- a/builtin/hash-object.c
+++ b/builtin/hash-object.c
@@ -11,7 +11,7 @@
#include "gettext.h"
#include "hex.h"
#include "object-file.h"
-#include "object-store.h"
+#include "odb.h"
#include "blob.h"
#include "quote.h"
#include "parse-options.h"
@@ -104,7 +104,7 @@ int cmd_hash_object(int argc,
prefix = setup_git_directory_gently(&nongit);
if (nongit && !the_hash_algo)
- repo_set_hash_algo(the_repository, GIT_HASH_SHA1);
+ repo_set_hash_algo(the_repository, GIT_HASH_DEFAULT);
if (vpath && prefix) {
vpath_free = prefix_filename(prefix, vpath);
diff --git a/builtin/index-pack.c b/builtin/index-pack.c
index bb7925bd29..0a5c8a1ac8 100644
--- a/builtin/index-pack.c
+++ b/builtin/index-pack.c
@@ -21,7 +21,7 @@
#include "packfile.h"
#include "pack-revindex.h"
#include "object-file.h"
-#include "object-store.h"
+#include "odb.h"
#include "oid-array.h"
#include "oidset.h"
#include "path.h"
@@ -260,7 +260,8 @@ static unsigned check_object(struct object *obj)
if (!(obj->flags & FLAG_CHECKED)) {
unsigned long size;
- int type = oid_object_info(the_repository, &obj->oid, &size);
+ int type = odb_read_object_info(the_repository->objects,
+ &obj->oid, &size);
if (type <= 0)
die(_("did not receive expected object %s"),
oid_to_hex(&obj->oid));
@@ -362,7 +363,7 @@ static const char *open_pack_file(const char *pack_name)
input_fd = 0;
if (!pack_name) {
struct strbuf tmp_file = STRBUF_INIT;
- output_fd = odb_mkstemp(&tmp_file,
+ output_fd = odb_mkstemp(the_repository->objects, &tmp_file,
"pack/tmp_pack_XXXXXX");
pack_name = strbuf_detach(&tmp_file, NULL);
} else {
@@ -892,8 +893,8 @@ static void sha1_object(const void *data, struct object_entry *obj_entry,
if (startup_info->have_repository) {
read_lock();
- collision_test_needed = has_object(the_repository, oid,
- HAS_OBJECT_FETCH_PROMISOR);
+ collision_test_needed = odb_has_object(the_repository->objects, oid,
+ HAS_OBJECT_FETCH_PROMISOR);
read_unlock();
}
@@ -908,13 +909,13 @@ static void sha1_object(const void *data, struct object_entry *obj_entry,
enum object_type has_type;
unsigned long has_size;
read_lock();
- has_type = oid_object_info(the_repository, oid, &has_size);
+ has_type = odb_read_object_info(the_repository->objects, oid, &has_size);
if (has_type < 0)
die(_("cannot read existing object info %s"), oid_to_hex(oid));
if (has_type != type || has_size != size)
die(_("SHA1 COLLISION FOUND WITH %s !"), oid_to_hex(oid));
- has_data = repo_read_object_file(the_repository, oid,
- &has_type, &has_size);
+ has_data = odb_read_object(the_repository->objects, oid,
+ &has_type, &has_size);
read_unlock();
if (!data)
data = new_data = get_data_from_pack(obj_entry);
@@ -1501,9 +1502,9 @@ static void fix_unresolved_deltas(struct hashfile *f)
struct oid_array to_fetch = OID_ARRAY_INIT;
for (i = 0; i < nr_ref_deltas; i++) {
struct ref_delta_entry *d = sorted_by_pos[i];
- if (!oid_object_info_extended(the_repository, &d->oid,
- NULL,
- OBJECT_INFO_FOR_PREFETCH))
+ if (!odb_read_object_info_extended(the_repository->objects,
+ &d->oid, NULL,
+ OBJECT_INFO_FOR_PREFETCH))
continue;
oid_array_append(&to_fetch, &d->oid);
}
@@ -1520,8 +1521,8 @@ static void fix_unresolved_deltas(struct hashfile *f)
if (objects[d->obj_no].real_type != OBJ_REF_DELTA)
continue;
- data = repo_read_object_file(the_repository, &d->oid, &type,
- &size);
+ data = odb_read_object(the_repository->objects, &d->oid,
+ &type, &size);
if (!data)
continue;
@@ -1829,7 +1830,7 @@ static void repack_local_links(void)
oidset_iter_init(&outgoing_links, &iter);
while ((oid = oidset_iter_next(&iter))) {
struct object_info info = OBJECT_INFO_INIT;
- if (oid_object_info_extended(the_repository, oid, &info, 0))
+ if (odb_read_object_info_extended(the_repository->objects, oid, &info, 0))
/* Missing; assume it is a promisor object */
continue;
if (info.whence == OI_PACKED && info.u.packed.pack->pack_promisor)
@@ -2034,7 +2035,7 @@ int cmd_index_pack(int argc,
* choice but to guess the object hash.
*/
if (!the_repository->hash_algo)
- repo_set_hash_algo(the_repository, GIT_HASH_SHA1);
+ repo_set_hash_algo(the_repository, GIT_HASH_DEFAULT);
opts.flags &= ~(WRITE_REV | WRITE_REV_VERIFY);
if (rev_index) {
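The sha1_object() hunk above preserves index-pack's hash-collision defence: when an object with the same ID already exists locally, its type and size are compared first, and its full contents are then read for a byte-wise comparison (the comparison itself falls outside this hunk). A minimal sketch of that shape of check, independent of git's object machinery and with an invented OBJ_BLOB constant:

#include <stdio.h>
#include <string.h>

#define OBJ_BLOB 3 /* illustrative constant */

/* Return 0 if the incoming object matches the existing one byte for byte,
 * -1 if the two differ despite sharing an object ID. */
static int check_collision(const void *incoming, size_t incoming_len, int incoming_type,
                           const void *existing, size_t existing_len, int existing_type)
{
    if (incoming_type != existing_type)
        return -1;
    if (incoming_len != existing_len)
        return -1;
    return memcmp(incoming, existing, incoming_len) ? -1 : 0;
}

int main(void)
{
    const char incoming[] = "blob contents";
    const char existing[] = "blob contents";

    if (check_collision(incoming, sizeof(incoming), OBJ_BLOB,
                        existing, sizeof(existing), OBJ_BLOB))
        fprintf(stderr, "COLLISION FOUND\n");
    else
        puts("objects are identical");
    return 0;
}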
diff --git a/builtin/log.c b/builtin/log.c
index b450cd3bde..24a57c20a4 100644
--- a/builtin/log.c
+++ b/builtin/log.c
@@ -15,7 +15,7 @@
#include "hex.h"
#include "refs.h"
#include "object-name.h"
-#include "object-store.h"
+#include "odb.h"
#include "pager.h"
#include "color.h"
#include "commit.h"
@@ -113,6 +113,15 @@ struct log_config {
int fmt_patch_name_max;
char *fmt_pretty;
char *default_date_mode;
+
+#ifndef WITH_BREAKING_CHANGES
+ /*
+ * Note: git_log_config() does not touch this member and that
+ * is very deliberate. This member is only to be used to
+ * resurrect whatchanged that is deprecated.
+ */
+ int i_still_use_this;
+#endif
};
static void log_config_init(struct log_config *cfg)
@@ -267,6 +276,10 @@ static void cmd_log_init_finish(int argc, const char **argv, const char *prefix,
OPT__QUIET(&quiet, N_("suppress diff output")),
OPT_BOOL(0, "source", &source, N_("show source")),
OPT_BOOL(0, "use-mailmap", &mailmap, N_("use mail map file")),
+#ifndef WITH_BREAKING_CHANGES
+ OPT_HIDDEN_BOOL(0, "i-still-use-this", &cfg->i_still_use_this,
+ "<use this deprecated command>"),
+#endif
OPT_ALIAS(0, "mailmap", "use-mailmap"),
OPT_CALLBACK_F(0, "clear-decorations", NULL, NULL,
N_("clear all previously-defined decoration filters"),
@@ -633,6 +646,7 @@ static int git_log_config(const char *var, const char *value,
return git_diff_ui_config(var, value, ctx, cb);
}
+#ifndef WITH_BREAKING_CHANGES
int cmd_whatchanged(int argc,
const char **argv,
const char *prefix,
@@ -656,6 +670,10 @@ int cmd_whatchanged(int argc,
opt.def = "HEAD";
opt.revarg_opt = REVARG_COMMITTISH;
cmd_log_init(argc, argv, prefix, &rev, &opt, &cfg);
+
+ if (!cfg.i_still_use_this)
+ you_still_use_that("git whatchanged");
+
if (!rev.diffopt.output_format)
rev.diffopt.output_format = DIFF_FORMAT_RAW;
@@ -665,6 +683,7 @@ int cmd_whatchanged(int argc,
log_config_release(&cfg);
return ret;
}
+#endif
static void show_tagger(const char *buf, struct rev_info *rev)
{
@@ -714,7 +733,7 @@ static int show_tag_object(const struct object_id *oid, struct rev_info *rev)
{
unsigned long size;
enum object_type type;
- char *buf = repo_read_object_file(the_repository, oid, &type, &size);
+ char *buf = odb_read_object(the_repository->objects, oid, &type, &size);
unsigned long offset = 0;
if (!buf)
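cmd_whatchanged() above is now compiled only without WITH_BREAKING_CHANGES and refuses to run unless the hidden --i-still-use-this escape hatch is given, funnelling users through you_still_use_that(). Here is a self-contained sketch of that style of deprecation gate; the helper name and message text below are illustrative, not git's.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative gate: refuse to run a deprecated command without the escape hatch. */
static void require_escape_hatch(int i_still_use_this, const char *cmd)
{
    if (i_still_use_this)
        return;
    fprintf(stderr, "'%s' is deprecated; pass --i-still-use-this to keep using it\n",
            cmd);
    exit(128);
}

int main(int argc, char **argv)
{
    int i_still_use_this = 0;

    for (int i = 1; i < argc; i++)
        if (!strcmp(argv[i], "--i-still-use-this"))
            i_still_use_this = 1;

    require_escape_hatch(i_still_use_this, "git whatchanged");
    puts("running the deprecated command anyway");
    return 0;
}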
diff --git a/builtin/ls-files.c b/builtin/ls-files.c
index be74f0a03b..ff975e7be0 100644
--- a/builtin/ls-files.c
+++ b/builtin/ls-files.c
@@ -25,7 +25,7 @@
#include "setup.h"
#include "sparse-index.h"
#include "submodule.h"
-#include "object-store.h"
+#include "odb.h"
#include "hex.h"
@@ -251,7 +251,7 @@ static void expand_objectsize(struct repository *repo, struct strbuf *line,
{
if (type == OBJ_BLOB) {
unsigned long size;
- if (oid_object_info(repo, oid, &size) < 0)
+ if (odb_read_object_info(repo->objects, oid, &size) < 0)
die(_("could not get object info about '%s'"),
oid_to_hex(oid));
if (padded)
diff --git a/builtin/ls-remote.c b/builtin/ls-remote.c
index 01a4d4daa1..df09000b30 100644
--- a/builtin/ls-remote.c
+++ b/builtin/ls-remote.c
@@ -112,7 +112,7 @@ int cmd_ls_remote(int argc,
* depending on what object hash the remote uses.
*/
if (!the_repository->hash_algo)
- repo_set_hash_algo(the_repository, GIT_HASH_SHA1);
+ repo_set_hash_algo(the_repository, GIT_HASH_DEFAULT);
packet_trace_identity("ls-remote");
diff --git a/builtin/ls-tree.c b/builtin/ls-tree.c
index 8aafc30ca4..4d616dd528 100644
--- a/builtin/ls-tree.c
+++ b/builtin/ls-tree.c
@@ -10,7 +10,7 @@
#include "gettext.h"
#include "hex.h"
#include "object-name.h"
-#include "object-store.h"
+#include "odb.h"
#include "tree.h"
#include "path.h"
#include "quote.h"
@@ -27,7 +27,7 @@ static void expand_objectsize(struct strbuf *line, const struct object_id *oid,
{
if (type == OBJ_BLOB) {
unsigned long size;
- if (oid_object_info(the_repository, oid, &size) < 0)
+ if (odb_read_object_info(the_repository->objects, oid, &size) < 0)
die(_("could not get object info about '%s'"),
oid_to_hex(oid));
if (padded)
@@ -217,7 +217,7 @@ static int show_tree_long(const struct object_id *oid, struct strbuf *base,
if (type == OBJ_BLOB) {
unsigned long size;
- if (oid_object_info(the_repository, oid, &size) == OBJ_BAD)
+ if (odb_read_object_info(the_repository->objects, oid, &size) == OBJ_BAD)
xsnprintf(size_text, sizeof(size_text), "BAD");
else
xsnprintf(size_text, sizeof(size_text),
diff --git a/builtin/merge-file.c b/builtin/merge-file.c
index 2b16b10d2c..9464f27562 100644
--- a/builtin/merge-file.c
+++ b/builtin/merge-file.c
@@ -7,7 +7,7 @@
#include "hex.h"
#include "object-file.h"
#include "object-name.h"
-#include "object-store.h"
+#include "odb.h"
#include "config.h"
#include "gettext.h"
#include "setup.h"
diff --git a/builtin/merge-tree.c b/builtin/merge-tree.c
index 7f41665dfd..cf8b06cadc 100644
--- a/builtin/merge-tree.c
+++ b/builtin/merge-tree.c
@@ -10,7 +10,7 @@
#include "commit-reach.h"
#include "merge-ort.h"
#include "object-name.h"
-#include "object-store.h"
+#include "odb.h"
#include "parse-options.h"
#include "blob.h"
#include "merge-blobs.h"
@@ -75,9 +75,9 @@ static void *result(struct merge_list *entry, unsigned long *size)
const char *path = entry->path;
if (!entry->stage)
- return repo_read_object_file(the_repository,
- &entry->blob->object.oid, &type,
- size);
+ return odb_read_object(the_repository->objects,
+ &entry->blob->object.oid, &type,
+ size);
base = NULL;
if (entry->stage == 1) {
base = entry->blob;
@@ -100,9 +100,9 @@ static void *origin(struct merge_list *entry, unsigned long *size)
enum object_type type;
while (entry) {
if (entry->stage == 2)
- return repo_read_object_file(the_repository,
- &entry->blob->object.oid,
- &type, size);
+ return odb_read_object(the_repository->objects,
+ &entry->blob->object.oid,
+ &type, size);
entry = entry->link;
}
return NULL;
diff --git a/builtin/merge.c b/builtin/merge.c
index ce90e52fe4..18b22c0a26 100644
--- a/builtin/merge.c
+++ b/builtin/merge.c
@@ -69,7 +69,10 @@ static const char * const builtin_merge_usage[] = {
NULL
};
-static int show_diffstat = 1, shortlog_len = -1, squash;
+#define MERGE_SHOW_DIFFSTAT 1
+#define MERGE_SHOW_COMPACTSUMMARY 2
+
+static int show_diffstat = MERGE_SHOW_DIFFSTAT, shortlog_len = -1, squash;
static int option_commit = -1;
static int option_edit = -1;
static int allow_trivial = 1, have_message, verify_signatures;
@@ -243,12 +246,28 @@ static int option_parse_strategy(const struct option *opt UNUSED,
return 0;
}
+static int option_parse_compact_summary(const struct option *opt,
+ const char *name UNUSED, int unset)
+{
+ int *setting = opt->value;
+
+ if (unset)
+ *setting = 0;
+ else
+ *setting = MERGE_SHOW_COMPACTSUMMARY;
+ return 0;
+}
+
static struct option builtin_merge_options[] = {
OPT_SET_INT('n', NULL, &show_diffstat,
N_("do not show a diffstat at the end of the merge"), 0),
OPT_BOOL(0, "stat", &show_diffstat,
N_("show a diffstat at the end of the merge")),
OPT_BOOL(0, "summary", &show_diffstat, N_("(synonym to --stat)")),
+ OPT_CALLBACK_F(0, "compact-summary", &show_diffstat, N_("compact-summary"),
+ N_("show a compact-summary at the end of the merge"),
+ PARSE_OPT_NOARG,
+ option_parse_compact_summary),
{
.type = OPTION_INTEGER,
.long_name = "log",
@@ -494,8 +513,19 @@ static void finish(struct commit *head_commit,
struct diff_options opts;
repo_diff_setup(the_repository, &opts);
init_diffstat_widths(&opts);
- opts.output_format |=
- DIFF_FORMAT_SUMMARY | DIFF_FORMAT_DIFFSTAT;
+
+ switch (show_diffstat) {
+ case MERGE_SHOW_DIFFSTAT: /* 1 */
+ opts.output_format |=
+ DIFF_FORMAT_SUMMARY | DIFF_FORMAT_DIFFSTAT;
+ break;
+ case MERGE_SHOW_COMPACTSUMMARY: /* 2 */
+ opts.output_format |= DIFF_FORMAT_DIFFSTAT;
+ opts.flags.stat_with_summary = 1;
+ break;
+ default:
+ break;
+ }
opts.detect_rename = DIFF_DETECT_RENAME;
diff_setup_done(&opts);
diff_tree_oid(head, new_head, "", &opts);
@@ -643,7 +673,35 @@ static int git_merge_config(const char *k, const char *v,
}
if (!strcmp(k, "merge.diffstat") || !strcmp(k, "merge.stat")) {
- show_diffstat = git_config_bool(k, v);
+ int val = git_parse_maybe_bool_text(v);
+ switch (val) {
+ case 0:
+ show_diffstat = 0;
+ break;
+ case 1:
+ show_diffstat = MERGE_SHOW_DIFFSTAT;
+ break;
+ default:
+ if (!strcmp(v, "compact"))
+ show_diffstat = MERGE_SHOW_COMPACTSUMMARY;
+ /*
+ * We do not need to have an explicit
+ *
+ * else if (!strcmp(v, "diffstat"))
+ * show_diffstat = MERGE_SHOW_DIFFSTAT;
+ *
+ * here, because the catch-all uses the
+ * diffstat style anyway.
+ */
+ else
+ /*
+ * A setting from a future? It is not an
+ * error grave enough to fail the command;
+ * proceed using the default one.
+ */
+ show_diffstat = MERGE_SHOW_DIFFSTAT;
+ break;
+ }
} else if (!strcmp(k, "merge.verifysignatures")) {
verify_signatures = git_config_bool(k, v);
} else if (!strcmp(k, "pull.twohead")) {
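git_merge_config() above turns merge.diffstat / merge.stat into a tri-state: a boolean false disables the stat, a boolean true selects the classic diffstat, the string "compact" selects the compact summary, and any unknown value falls back to the diffstat. A stand-alone sketch of that parse follows; the toy bool parser stands in for git_parse_maybe_bool_text() and is not the real implementation.

#include <stdio.h>
#include <string.h>

enum { SHOW_NONE = 0, SHOW_DIFFSTAT = 1, SHOW_COMPACTSUMMARY = 2 };

/* Toy stand-in for git's maybe-bool parser: 0, 1, or -1 for "not a bool". */
static int parse_maybe_bool(const char *v)
{
    if (!strcmp(v, "false") || !strcmp(v, "no") || !strcmp(v, "0"))
        return 0;
    if (!strcmp(v, "true") || !strcmp(v, "yes") || !strcmp(v, "1"))
        return 1;
    return -1;
}

static int parse_merge_stat(const char *v)
{
    switch (parse_maybe_bool(v)) {
    case 0:
        return SHOW_NONE;
    case 1:
        return SHOW_DIFFSTAT;
    default:
        if (!strcmp(v, "compact"))
            return SHOW_COMPACTSUMMARY;
        return SHOW_DIFFSTAT; /* unknown value: keep the classic diffstat */
    }
}

int main(void)
{
    const char *values[] = { "true", "false", "compact", "fancy" };

    for (size_t i = 0; i < 4; i++)
        printf("merge.stat=%s -> %d\n", values[i], parse_merge_stat(values[i]));
    return 0;
}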
diff --git a/builtin/mktag.c b/builtin/mktag.c
index 1b1dc0263e..27e649736c 100644
--- a/builtin/mktag.c
+++ b/builtin/mktag.c
@@ -6,7 +6,7 @@
#include "strbuf.h"
#include "replace-object.h"
#include "object-file.h"
-#include "object-store.h"
+#include "odb.h"
#include "fsck.h"
#include "config.h"
@@ -54,8 +54,8 @@ static int verify_object_in_tag(struct object_id *tagged_oid, int *tagged_type)
void *buffer;
const struct object_id *repl;
- buffer = repo_read_object_file(the_repository, tagged_oid, &type,
- &size);
+ buffer = odb_read_object(the_repository->objects, tagged_oid,
+ &type, &size);
if (!buffer)
die(_("could not read tagged object '%s'"),
oid_to_hex(tagged_oid));
diff --git a/builtin/mktree.c b/builtin/mktree.c
index 4b47803467..81df7f6099 100644
--- a/builtin/mktree.c
+++ b/builtin/mktree.c
@@ -12,7 +12,7 @@
#include "tree.h"
#include "parse-options.h"
#include "object-file.h"
-#include "object-store.h"
+#include "odb.h"
static struct treeent {
unsigned mode;
@@ -124,10 +124,10 @@ static void mktree_line(char *buf, int nul_term_line, int allow_missing)
/* Check the type of object identified by oid without fetching objects */
oi.typep = &obj_type;
- if (oid_object_info_extended(the_repository, &oid, &oi,
- OBJECT_INFO_LOOKUP_REPLACE |
- OBJECT_INFO_QUICK |
- OBJECT_INFO_SKIP_FETCH_OBJECT) < 0)
+ if (odb_read_object_info_extended(the_repository->objects, &oid, &oi,
+ OBJECT_INFO_LOOKUP_REPLACE |
+ OBJECT_INFO_QUICK |
+ OBJECT_INFO_SKIP_FETCH_OBJECT) < 0)
obj_type = -1;
if (obj_type < 0) {
diff --git a/builtin/multi-pack-index.c b/builtin/multi-pack-index.c
index 69a9750732..aa25b06f9d 100644
--- a/builtin/multi-pack-index.c
+++ b/builtin/multi-pack-index.c
@@ -7,7 +7,7 @@
#include "midx.h"
#include "strbuf.h"
#include "trace2.h"
-#include "object-store.h"
+#include "odb.h"
#include "replace-object.h"
#include "repository.h"
@@ -294,8 +294,8 @@ int cmd_multi_pack_index(int argc,
if (the_repository &&
the_repository->objects &&
- the_repository->objects->odb)
- opts.object_dir = xstrdup(the_repository->objects->odb->path);
+ the_repository->objects->sources)
+ opts.object_dir = xstrdup(the_repository->objects->sources->path);
argc = parse_options(argc, argv, prefix, options,
builtin_multi_pack_index_usage, 0);
diff --git a/builtin/notes.c b/builtin/notes.c
index a3f433ca4c..a9529b1696 100644
--- a/builtin/notes.c
+++ b/builtin/notes.c
@@ -16,7 +16,7 @@
#include "notes.h"
#include "object-file.h"
#include "object-name.h"
-#include "object-store.h"
+#include "odb.h"
#include "path.h"
#include "pretty.h"
@@ -152,7 +152,7 @@ static void copy_obj_to_fd(int fd, const struct object_id *oid)
{
unsigned long size;
enum object_type type;
- char *buf = repo_read_object_file(the_repository, oid, &type, &size);
+ char *buf = odb_read_object(the_repository->objects, oid, &type, &size);
if (buf) {
if (size)
write_or_die(fd, buf, size);
@@ -319,7 +319,7 @@ static int parse_reuse_arg(const struct option *opt, const char *arg, int unset)
strbuf_init(&msg->buf, 0);
if (repo_get_oid(the_repository, arg, &object))
die(_("failed to resolve '%s' as a valid ref."), arg);
- if (!(value = repo_read_object_file(the_repository, &object, &type, &len)))
+ if (!(value = odb_read_object(the_repository->objects, &object, &type, &len)))
die(_("failed to read object '%s'."), arg);
if (type != OBJ_BLOB) {
strbuf_release(&msg->buf);
@@ -722,7 +722,7 @@ static int append_edit(int argc, const char **argv, const char *prefix,
unsigned long size;
enum object_type type;
struct strbuf buf = STRBUF_INIT;
- char *prev_buf = repo_read_object_file(the_repository, note, &type, &size);
+ char *prev_buf = odb_read_object(the_repository->objects, note, &type, &size);
if (!prev_buf)
die(_("unable to read %s"), oid_to_hex(note));
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index 67941c8a60..067b9e322a 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -32,7 +32,7 @@
#include "list.h"
#include "packfile.h"
#include "object-file.h"
-#include "object-store.h"
+#include "odb.h"
#include "replace-object.h"
#include "dir.h"
#include "midx.h"
@@ -284,6 +284,12 @@ static struct oidmap configured_exclusions;
static struct oidset excluded_by_config;
static int name_hash_version = -1;
+enum stdin_packs_mode {
+ STDIN_PACKS_MODE_NONE,
+ STDIN_PACKS_MODE_STANDARD,
+ STDIN_PACKS_MODE_FOLLOW,
+};
+
/**
* Check whether the name_hash_version chosen by user input is appropriate,
* and also validate whether it is compatible with other features.
@@ -349,13 +355,13 @@ static void *get_delta(struct object_entry *entry)
void *buf, *base_buf, *delta_buf;
enum object_type type;
- buf = repo_read_object_file(the_repository, &entry->idx.oid, &type,
- &size);
+ buf = odb_read_object(the_repository->objects, &entry->idx.oid,
+ &type, &size);
if (!buf)
die(_("unable to read %s"), oid_to_hex(&entry->idx.oid));
- base_buf = repo_read_object_file(the_repository,
- &DELTA(entry)->idx.oid, &type,
- &base_size);
+ base_buf = odb_read_object(the_repository->objects,
+ &DELTA(entry)->idx.oid, &type,
+ &base_size);
if (!base_buf)
die("unable to read %s",
oid_to_hex(&DELTA(entry)->idx.oid));
@@ -518,9 +524,9 @@ static unsigned long write_no_reuse_object(struct hashfile *f, struct object_ent
&size, NULL)) != NULL)
buf = NULL;
else {
- buf = repo_read_object_file(the_repository,
- &entry->idx.oid, &type,
- &size);
+ buf = odb_read_object(the_repository->objects,
+ &entry->idx.oid, &type,
+ &size);
if (!buf)
die(_("unable to read %s"),
oid_to_hex(&entry->idx.oid));
@@ -1907,7 +1913,7 @@ static struct pbase_tree_cache *pbase_tree_get(const struct object_id *oid)
/* Did not find one. Either we got a bogus request or
* we need to read and perhaps cache.
*/
- data = repo_read_object_file(the_repository, oid, &type, &size);
+ data = odb_read_object(the_repository->objects, oid, &type, &size);
if (!data)
return NULL;
if (type != OBJ_TREE) {
@@ -2067,8 +2073,8 @@ static void add_preferred_base(struct object_id *oid)
if (window <= num_preferred_base++)
return;
- data = read_object_with_reference(the_repository, oid,
- OBJ_TREE, &size, &tree_oid);
+ data = odb_read_object_peeled(the_repository->objects, oid,
+ OBJ_TREE, &size, &tree_oid);
if (!data)
return;
@@ -2166,10 +2172,10 @@ static void prefetch_to_pack(uint32_t object_index_start) {
for (i = object_index_start; i < to_pack.nr_objects; i++) {
struct object_entry *entry = to_pack.objects + i;
- if (!oid_object_info_extended(the_repository,
- &entry->idx.oid,
- NULL,
- OBJECT_INFO_FOR_PREFETCH))
+ if (!odb_read_object_info_extended(the_repository->objects,
+ &entry->idx.oid,
+ NULL,
+ OBJECT_INFO_FOR_PREFETCH))
continue;
oid_array_append(&to_fetch, &entry->idx.oid);
}
@@ -2310,19 +2316,19 @@ static void check_object(struct object_entry *entry, uint32_t object_index)
/*
* No choice but to fall back to the recursive delta walk
- * with oid_object_info() to find about the object type
+ * with odb_read_object_info() to find about the object type
* at this point...
*/
give_up:
unuse_pack(&w_curs);
}
- if (oid_object_info_extended(the_repository, &entry->idx.oid, &oi,
- OBJECT_INFO_SKIP_FETCH_OBJECT | OBJECT_INFO_LOOKUP_REPLACE) < 0) {
+ if (odb_read_object_info_extended(the_repository->objects, &entry->idx.oid, &oi,
+ OBJECT_INFO_SKIP_FETCH_OBJECT | OBJECT_INFO_LOOKUP_REPLACE) < 0) {
if (repo_has_promisor_remote(the_repository)) {
prefetch_to_pack(object_index);
- if (oid_object_info_extended(the_repository, &entry->idx.oid, &oi,
- OBJECT_INFO_SKIP_FETCH_OBJECT | OBJECT_INFO_LOOKUP_REPLACE) < 0)
+ if (odb_read_object_info_extended(the_repository->objects, &entry->idx.oid, &oi,
+ OBJECT_INFO_SKIP_FETCH_OBJECT | OBJECT_INFO_LOOKUP_REPLACE) < 0)
type = -1;
} else {
type = -1;
@@ -2396,12 +2402,13 @@ static void drop_reused_delta(struct object_entry *entry)
if (packed_object_info(the_repository, IN_PACK(entry), entry->in_pack_offset, &oi) < 0) {
/*
* We failed to get the info from this pack for some reason;
- * fall back to oid_object_info, which may find another copy.
+ * fall back to odb_read_object_info, which may find another copy.
* And if that fails, the error will be recorded in oe_type(entry)
* and dealt with in prepare_pack().
*/
oe_set_type(entry,
- oid_object_info(the_repository, &entry->idx.oid, &size));
+ odb_read_object_info(the_repository->objects,
+ &entry->idx.oid, &size));
} else {
oe_set_type(entry, type);
}
@@ -2689,7 +2696,8 @@ unsigned long oe_get_size_slow(struct packing_data *pack,
if (e->type_ != OBJ_OFS_DELTA && e->type_ != OBJ_REF_DELTA) {
packing_data_lock(&to_pack);
- if (oid_object_info(the_repository, &e->idx.oid, &size) < 0)
+ if (odb_read_object_info(the_repository->objects,
+ &e->idx.oid, &size) < 0)
die(_("unable to get size of %s"),
oid_to_hex(&e->idx.oid));
packing_data_unlock(&to_pack);
@@ -2772,9 +2780,9 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
/* Load data if not already done */
if (!trg->data) {
packing_data_lock(&to_pack);
- trg->data = repo_read_object_file(the_repository,
- &trg_entry->idx.oid, &type,
- &sz);
+ trg->data = odb_read_object(the_repository->objects,
+ &trg_entry->idx.oid, &type,
+ &sz);
packing_data_unlock(&to_pack);
if (!trg->data)
die(_("object %s cannot be read"),
@@ -2787,9 +2795,9 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
}
if (!src->data) {
packing_data_lock(&to_pack);
- src->data = repo_read_object_file(the_repository,
- &src_entry->idx.oid, &type,
- &sz);
+ src->data = odb_read_object(the_repository->objects,
+ &src_entry->idx.oid, &type,
+ &sz);
packing_data_unlock(&to_pack);
if (!src->data) {
if (src_entry->preferred_base) {
@@ -3725,7 +3733,6 @@ static int add_object_entry_from_pack(const struct object_id *oid,
return 0;
if (p) {
- struct rev_info *revs = _data;
struct object_info oi = OBJECT_INFO_INIT;
oi.typep = &type;
@@ -3733,6 +3740,7 @@ static int add_object_entry_from_pack(const struct object_id *oid,
die(_("could not get type of object %s in pack %s"),
oid_to_hex(oid), p->pack_name);
} else if (type == OBJ_COMMIT) {
+ struct rev_info *revs = _data;
/*
* commits in included packs are used as starting points for the
* subsequent revision walk
@@ -3748,32 +3756,48 @@ static int add_object_entry_from_pack(const struct object_id *oid,
return 0;
}
-static void show_commit_pack_hint(struct commit *commit UNUSED,
- void *data UNUSED)
+static void show_object_pack_hint(struct object *object, const char *name,
+ void *data)
{
- /* nothing to do; commits don't have a namehash */
+ enum stdin_packs_mode mode = *(enum stdin_packs_mode *)data;
+ if (mode == STDIN_PACKS_MODE_FOLLOW) {
+ if (object->type == OBJ_BLOB &&
+ !has_object(the_repository, &object->oid, 0))
+ return;
+ add_object_entry(&object->oid, object->type, name, 0);
+ } else {
+ struct object_entry *oe = packlist_find(&to_pack, &object->oid);
+ if (!oe)
+ return;
+
+ /*
+ * Our 'to_pack' list was constructed by iterating all
+ * objects packed in included packs, and so doesn't have
+ * a non-zero hash field that you would typically pick
+ * up during a reachability traversal.
+ *
+ * Make a best-effort attempt to fill in the ->hash and
+ * ->no_try_delta fields here in order to perhaps
+ * improve the delta selection process.
+ */
+ oe->hash = pack_name_hash_fn(name);
+ oe->no_try_delta = name && no_try_delta(name);
+
+ stdin_packs_hints_nr++;
+ }
}
-static void show_object_pack_hint(struct object *object, const char *name,
- void *data UNUSED)
+static void show_commit_pack_hint(struct commit *commit, void *data)
{
- struct object_entry *oe = packlist_find(&to_pack, &object->oid);
- if (!oe)
+ enum stdin_packs_mode mode = *(enum stdin_packs_mode *)data;
+
+ if (mode == STDIN_PACKS_MODE_FOLLOW) {
+ show_object_pack_hint((struct object *)commit, "", data);
return;
+ }
- /*
- * Our 'to_pack' list was constructed by iterating all objects packed in
- * included packs, and so doesn't have a non-zero hash field that you
- * would typically pick up during a reachability traversal.
- *
- * Make a best-effort attempt to fill in the ->hash and ->no_try_delta
- * here using a now in order to perhaps improve the delta selection
- * process.
- */
- oe->hash = pack_name_hash_fn(name);
- oe->no_try_delta = name && no_try_delta(name);
+ /* nothing to do; commits don't have a namehash */
- stdin_packs_hints_nr++;
}
static int pack_mtime_cmp(const void *_a, const void *_b)
@@ -3793,7 +3817,7 @@ static int pack_mtime_cmp(const void *_a, const void *_b)
return 0;
}
-static void read_packs_list_from_stdin(void)
+static void read_packs_list_from_stdin(struct rev_info *revs)
{
struct strbuf buf = STRBUF_INIT;
struct string_list include_packs = STRING_LIST_INIT_DUP;
@@ -3801,24 +3825,6 @@ static void read_packs_list_from_stdin(void)
struct string_list_item *item = NULL;
struct packed_git *p;
- struct rev_info revs;
-
- repo_init_revisions(the_repository, &revs, NULL);
- /*
- * Use a revision walk to fill in the namehash of objects in the include
- * packs. To save time, we'll avoid traversing through objects that are
- * in excluded packs.
- *
- * That may cause us to avoid populating all of the namehash fields of
- * all included objects, but our goal is best-effort, since this is only
- * an optimization during delta selection.
- */
- revs.no_kept_objects = 1;
- revs.keep_pack_cache_flags |= IN_CORE_KEEP_PACKS;
- revs.blob_objects = 1;
- revs.tree_objects = 1;
- revs.tag_objects = 1;
- revs.ignore_missing_links = 1;
while (strbuf_getline(&buf, stdin) != EOF) {
if (!buf.len)
@@ -3888,25 +3894,55 @@ static void read_packs_list_from_stdin(void)
struct packed_git *p = item->util;
for_each_object_in_pack(p,
add_object_entry_from_pack,
- &revs,
+ revs,
FOR_EACH_OBJECT_PACK_ORDER);
}
+ strbuf_release(&buf);
+ string_list_clear(&include_packs, 0);
+ string_list_clear(&exclude_packs, 0);
+}
+
+static void add_unreachable_loose_objects(struct rev_info *revs);
+
+static void read_stdin_packs(enum stdin_packs_mode mode, int rev_list_unpacked)
+{
+ struct rev_info revs;
+
+ repo_init_revisions(the_repository, &revs, NULL);
+ /*
+ * Use a revision walk to fill in the namehash of objects in the include
+ * packs. To save time, we'll avoid traversing through objects that are
+ * in excluded packs.
+ *
+ * That may cause us to avoid populating all of the namehash fields of
+ * all included objects, but our goal is best-effort, since this is only
+ * an optimization during delta selection.
+ */
+ revs.no_kept_objects = 1;
+ revs.keep_pack_cache_flags |= IN_CORE_KEEP_PACKS;
+ revs.blob_objects = 1;
+ revs.tree_objects = 1;
+ revs.tag_objects = 1;
+ revs.ignore_missing_links = 1;
+
+ /* avoids adding objects in excluded packs */
+ ignore_packed_keep_in_core = 1;
+ read_packs_list_from_stdin(&revs);
+ if (rev_list_unpacked)
+ add_unreachable_loose_objects(&revs);
+
if (prepare_revision_walk(&revs))
die(_("revision walk setup failed"));
traverse_commit_list(&revs,
show_commit_pack_hint,
show_object_pack_hint,
- NULL);
+ &mode);
trace2_data_intmax("pack-objects", the_repository, "stdin_packs_found",
stdin_packs_found_nr);
trace2_data_intmax("pack-objects", the_repository, "stdin_packs_hints",
stdin_packs_hints_nr);
-
- strbuf_release(&buf);
- string_list_clear(&include_packs, 0);
- string_list_clear(&exclude_packs, 0);
}
static void add_cruft_object_entry(const struct object_id *oid, enum object_type type,
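show_commit_pack_hint() and show_object_pack_hint() above now branch on the new stdin-packs mode: in 'follow' mode every reachable object is added to the pack (skipping blobs that are not present locally), while the standard mode merely back-fills name hashes for objects already chosen from the listed packs. A small self-contained sketch of dispatching one traversal callback on such a mode flag; the object struct and its fields are invented for illustration.

#include <stdio.h>

enum mode { MODE_STANDARD, MODE_FOLLOW };

/* Invented object record; the real callbacks receive git objects instead. */
struct obj {
    const char *name;
    int is_blob;
    int present_locally;
    int already_listed;
};

/* Traversal callback: behaviour depends on the mode passed through 'data'. */
static void object_hint(struct obj *o, void *data)
{
    enum mode m = *(enum mode *)data;

    if (m == MODE_FOLLOW) {
        if (o->is_blob && !o->present_locally)
            return; /* don't pack blobs we don't have locally */
        printf("add %s\n", o->name);
    } else {
        if (!o->already_listed)
            return; /* only annotate objects already chosen from the packs */
        printf("hint %s\n", o->name);
    }
}

int main(void)
{
    struct obj objs[] = {
        { "tree",         0, 1, 1 },
        { "missing-blob", 1, 0, 0 },
    };
    enum mode m = MODE_FOLLOW;

    for (size_t i = 0; i < 2; i++)
        object_hint(&objs[i], &m);
    return 0;
}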
@@ -4004,7 +4040,6 @@ static void mark_pack_kept_in_core(struct string_list *packs, unsigned keep)
}
}
-static void add_unreachable_loose_objects(void);
static void add_objects_in_unpacked_packs(void);
static void enumerate_cruft_objects(void)
@@ -4014,7 +4049,7 @@ static void enumerate_cruft_objects(void)
_("Enumerating cruft objects"), 0);
add_objects_in_unpacked_packs();
- add_unreachable_loose_objects();
+ add_unreachable_loose_objects(NULL);
stop_progress(&progress_state);
}
@@ -4197,7 +4232,7 @@ static void show_object__ma_allow_any(struct object *obj, const char *name, void
* Quietly ignore ALL missing objects. This avoids problems with
* staging them now and getting an odd error later.
*/
- if (!has_object(the_repository, &obj->oid, 0))
+ if (!odb_has_object(the_repository->objects, &obj->oid, 0))
return;
show_object(obj, name, data);
@@ -4211,7 +4246,7 @@ static void show_object__ma_allow_promisor(struct object *obj, const char *name,
* Quietly ignore EXPECTED missing objects. This avoids problems with
* staging them now and getting an odd error later.
*/
- if (!has_object(the_repository, &obj->oid, 0) &&
+ if (!odb_has_object(the_repository->objects, &obj->oid, 0) &&
is_promisor_object(to_pack.repo, &obj->oid))
return;
@@ -4292,9 +4327,10 @@ static void add_objects_in_unpacked_packs(void)
}
static int add_loose_object(const struct object_id *oid, const char *path,
- void *data UNUSED)
+ void *data)
{
- enum object_type type = oid_object_info(the_repository, oid, NULL);
+ struct rev_info *revs = data;
+ enum object_type type = odb_read_object_info(the_repository->objects, oid, NULL);
if (type < 0) {
warning(_("loose object at %s could not be examined"), path);
@@ -4314,6 +4350,10 @@ static int add_loose_object(const struct object_id *oid, const char *path,
} else {
add_object_entry(oid, type, "", 0);
}
+
+ if (revs && type == OBJ_COMMIT)
+ add_pending_oid(revs, NULL, oid, 0);
+
return 0;
}
@@ -4322,11 +4362,10 @@ static int add_loose_object(const struct object_id *oid, const char *path,
* add_object_entry will weed out duplicates, so we just add every
* loose object we find.
*/
-static void add_unreachable_loose_objects(void)
+static void add_unreachable_loose_objects(struct rev_info *revs)
{
for_each_loose_file_in_objdir(repo_get_object_directory(the_repository),
- add_loose_object,
- NULL, NULL, NULL);
+ add_loose_object, NULL, NULL, revs);
}
static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid)
@@ -4673,7 +4712,7 @@ static void get_object_list(struct rev_info *revs, int ac, const char **av)
if (keep_unreachable)
add_objects_in_unpacked_packs();
if (pack_loose_unreachable)
- add_unreachable_loose_objects();
+ add_unreachable_loose_objects(NULL);
if (unpack_unreachable)
loosen_unused_packed_objects();
@@ -4771,7 +4810,7 @@ static int option_parse_cruft_expiration(const struct option *opt UNUSED,
static int is_not_in_promisor_pack_obj(struct object *obj, void *data UNUSED)
{
struct object_info info = OBJECT_INFO_INIT;
- if (oid_object_info_extended(the_repository, &obj->oid, &info, 0))
+ if (odb_read_object_info_extended(the_repository->objects, &obj->oid, &info, 0))
BUG("should_include_obj should only be called on existing objects");
return info.whence != OI_PACKED || !info.u.packed.pack->pack_promisor;
}
@@ -4780,6 +4819,23 @@ static int is_not_in_promisor_pack(struct commit *commit, void *data) {
return is_not_in_promisor_pack_obj((struct object *) commit, data);
}
+static int parse_stdin_packs_mode(const struct option *opt, const char *arg,
+ int unset)
+{
+ enum stdin_packs_mode *mode = opt->value;
+
+ if (unset)
+ *mode = STDIN_PACKS_MODE_NONE;
+ else if (!arg || !*arg)
+ *mode = STDIN_PACKS_MODE_STANDARD;
+ else if (!strcmp(arg, "follow"))
+ *mode = STDIN_PACKS_MODE_FOLLOW;
+ else
+ die(_("invalid value for '%s': '%s'"), opt->long_name, arg);
+
+ return 0;
+}
+
int cmd_pack_objects(int argc,
const char **argv,
const char *prefix,
@@ -4790,7 +4846,7 @@ int cmd_pack_objects(int argc,
struct strvec rp = STRVEC_INIT;
int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0;
int rev_list_index = 0;
- int stdin_packs = 0;
+ enum stdin_packs_mode stdin_packs = STDIN_PACKS_MODE_NONE;
struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
struct list_objects_filter_options filter_options =
LIST_OBJECTS_FILTER_INIT;
@@ -4845,6 +4901,9 @@ int cmd_pack_objects(int argc,
OPT_SET_INT_F(0, "indexed-objects", &rev_list_index,
N_("include objects referred to by the index"),
1, PARSE_OPT_NONEG),
+ OPT_CALLBACK_F(0, "stdin-packs", &stdin_packs, N_("mode"),
+ N_("read packs from stdin"),
+ PARSE_OPT_OPTARG, parse_stdin_packs_mode),
OPT_BOOL(0, "stdin-packs", &stdin_packs,
N_("read packs from stdin")),
OPT_BOOL(0, "stdout", &pack_to_stdout,
@@ -5010,9 +5069,10 @@ int cmd_pack_objects(int argc,
strvec_push(&rp, "--unpacked");
}
- if (exclude_promisor_objects && exclude_promisor_objects_best_effort)
- die(_("options '%s' and '%s' cannot be used together"),
- "--exclude-promisor-objects", "--exclude-promisor-objects-best-effort");
+ die_for_incompatible_opt2(exclude_promisor_objects,
+ "--exclude-promisor-objects",
+ exclude_promisor_objects_best_effort,
+ "--exclude-promisor-objects-best-effort");
if (exclude_promisor_objects) {
use_internal_rev_list = 1;
fetch_if_missing = 0;
@@ -5050,13 +5110,14 @@ int cmd_pack_objects(int argc,
if (!pack_to_stdout && thin)
die(_("--thin cannot be used to build an indexable pack"));
- if (keep_unreachable && unpack_unreachable)
- die(_("options '%s' and '%s' cannot be used together"), "--keep-unreachable", "--unpack-unreachable");
+ die_for_incompatible_opt2(keep_unreachable, "--keep-unreachable",
+ unpack_unreachable, "--unpack-unreachable");
if (!rev_list_all || !rev_list_reflog || !rev_list_index)
unpack_unreachable_expiration = 0;
- if (stdin_packs && filter_options.choice)
- die(_("cannot use --filter with --stdin-packs"));
+ die_for_incompatible_opt2(stdin_packs, "--stdin-packs",
+ filter_options.choice, "--filter");
+
if (stdin_packs && use_internal_rev_list)
die(_("cannot use internal rev list with --stdin-packs"));
@@ -5064,8 +5125,8 @@ int cmd_pack_objects(int argc,
if (cruft) {
if (use_internal_rev_list)
die(_("cannot use internal rev list with --cruft"));
- if (stdin_packs)
- die(_("cannot use --stdin-packs with --cruft"));
+ die_for_incompatible_opt2(stdin_packs, "--stdin-packs",
+ cruft, "--cruft");
}
/*
@@ -5133,11 +5194,7 @@ int cmd_pack_objects(int argc,
progress_state = start_progress(the_repository,
_("Enumerating objects"), 0);
if (stdin_packs) {
- /* avoids adding objects in excluded packs */
- ignore_packed_keep_in_core = 1;
- read_packs_list_from_stdin();
- if (rev_list_unpacked)
- add_unreachable_loose_objects();
+ read_stdin_packs(stdin_packs, rev_list_unpacked);
} else if (cruft) {
read_cruft_objects();
} else if (!use_internal_rev_list) {
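parse_stdin_packs_mode() above maps an optional argument onto the new enum: --no-stdin-packs clears the mode, a bare --stdin-packs selects the standard behaviour, and --stdin-packs=follow enables follow mode, with anything else rejected. The sketch below reproduces just that mapping without parse-options; the function shape and error handling are illustrative.

#include <stdio.h>
#include <string.h>

enum stdin_packs_mode { MODE_NONE, MODE_STANDARD, MODE_FOLLOW };

/* Map an optional option argument onto the mode enum; -1 signals a bad value. */
static int parse_mode(const char *arg, int unset, enum stdin_packs_mode *mode)
{
    if (unset)
        *mode = MODE_NONE;
    else if (!arg || !*arg)
        *mode = MODE_STANDARD;
    else if (!strcmp(arg, "follow"))
        *mode = MODE_FOLLOW;
    else
        return -1;
    return 0;
}

int main(void)
{
    enum stdin_packs_mode mode;

    parse_mode(NULL, 0, &mode);     /* --stdin-packs        */
    printf("bare option -> %d\n", mode);
    parse_mode("follow", 0, &mode); /* --stdin-packs=follow */
    printf("follow      -> %d\n", mode);
    parse_mode(NULL, 1, &mode);     /* --no-stdin-packs     */
    printf("negated     -> %d\n", mode);
    if (parse_mode("bogus", 0, &mode) < 0)
        fprintf(stderr, "invalid value 'bogus'\n");
    return 0;
}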
diff --git a/builtin/pack-redundant.c b/builtin/pack-redundant.c
index 5d1fc78176..fe81c293e3 100644
--- a/builtin/pack-redundant.c
+++ b/builtin/pack-redundant.c
@@ -13,7 +13,7 @@
#include "hex.h"
#include "packfile.h"
-#include "object-store.h"
+#include "odb.h"
#include "strbuf.h"
#define BLKSIZE 512
@@ -625,14 +625,8 @@ int cmd_pack_redundant(int argc, const char **argv, const char *prefix UNUSED, s
break;
}
- if (!i_still_use_this) {
- fputs(_("'git pack-redundant' is nominated for removal.\n"
- "If you still use this command, please add an extra\n"
- "option, '--i-still-use-this', on the command line\n"
- "and let us know you still use it by sending an e-mail\n"
- "to <git@vger.kernel.org>. Thanks.\n"), stderr);
- die(_("refusing to run without --i-still-use-this"));
- }
+ if (!i_still_use_this)
+ you_still_use_that("git pack-redundant");
if (load_all_packs)
load_all();
diff --git a/builtin/patch-id.c b/builtin/patch-id.c
index cdef2ec10a..26f04b0335 100644
--- a/builtin/patch-id.c
+++ b/builtin/patch-id.c
@@ -254,7 +254,7 @@ int cmd_patch_id(int argc,
* the code that computes patch IDs to always use SHA1.
*/
if (!the_hash_algo)
- repo_set_hash_algo(the_repository, GIT_HASH_SHA1);
+ repo_set_hash_algo(the_repository, GIT_HASH_DEFAULT);
generate_id_list(opts ? opts > 1 : config.stable,
opts ? opts == 3 : config.verbatim);
diff --git a/builtin/prune.c b/builtin/prune.c
index e930caa0c0..d1c0ee1419 100644
--- a/builtin/prune.c
+++ b/builtin/prune.c
@@ -1,4 +1,3 @@
-#define USE_THE_REPOSITORY_VARIABLE
#define DISABLE_SIGN_COMPARE_WARNINGS
#include "builtin.h"
@@ -17,7 +16,7 @@
#include "replace-object.h"
#include "object-file.h"
#include "object-name.h"
-#include "object-store.h"
+#include "odb.h"
#include "shallow.h"
static const char * const prune_usage[] = {
@@ -64,7 +63,7 @@ static void perform_reachability_traversal(struct rev_info *revs)
return;
if (show_progress)
- progress = start_delayed_progress(the_repository,
+ progress = start_delayed_progress(revs->repo,
_("Checking connectivity"), 0);
mark_reachable_objects(revs, 1, expire, progress);
stop_progress(&progress);
@@ -78,7 +77,7 @@ static int is_object_reachable(const struct object_id *oid,
perform_reachability_traversal(revs);
- obj = lookup_object(the_repository, oid);
+ obj = lookup_object(revs->repo, oid);
return obj && (obj->flags & SEEN);
}
@@ -99,8 +98,8 @@ static int prune_object(const struct object_id *oid, const char *fullpath,
if (st.st_mtime > expire)
return 0;
if (show_only || verbose) {
- enum object_type type = oid_object_info(the_repository, oid,
- NULL);
+ enum object_type type =
+ odb_read_object_info(revs->repo->objects, oid, NULL);
printf("%s %s\n", oid_to_hex(oid),
(type > 0) ? type_name(type) : "unknown");
}
@@ -154,7 +153,7 @@ static void remove_temporary_files(const char *path)
int cmd_prune(int argc,
const char **argv,
const char *prefix,
- struct repository *repo UNUSED)
+ struct repository *repo)
{
struct rev_info revs;
int exclude_promisor_objects = 0;
@@ -173,20 +172,19 @@ int cmd_prune(int argc,
expire = TIME_MAX;
save_commit_buffer = 0;
disable_replace_refs();
- repo_init_revisions(the_repository, &revs, prefix);
argc = parse_options(argc, argv, prefix, options, prune_usage, 0);
- if (repository_format_precious_objects)
+ repo_init_revisions(repo, &revs, prefix);
+ if (repo->repository_format_precious_objects)
die(_("cannot prune in a precious-objects repo"));
while (argc--) {
struct object_id oid;
const char *name = *argv++;
- if (!repo_get_oid(the_repository, name, &oid)) {
- struct object *object = parse_object_or_die(the_repository, &oid,
- name);
+ if (!repo_get_oid(repo, name, &oid)) {
+ struct object *object = parse_object_or_die(repo, &oid, name);
add_pending_object(&revs, object, "");
}
else
@@ -200,16 +198,16 @@ int cmd_prune(int argc,
revs.exclude_promisor_objects = 1;
}
- for_each_loose_file_in_objdir(repo_get_object_directory(the_repository),
+ for_each_loose_file_in_objdir(repo_get_object_directory(repo),
prune_object, prune_cruft, prune_subdir, &revs);
prune_packed_objects(show_only ? PRUNE_PACKED_DRY_RUN : 0);
- remove_temporary_files(repo_get_object_directory(the_repository));
- s = mkpathdup("%s/pack", repo_get_object_directory(the_repository));
+ remove_temporary_files(repo_get_object_directory(repo));
+ s = mkpathdup("%s/pack", repo_get_object_directory(repo));
remove_temporary_files(s);
free(s);
- if (is_repository_shallow(the_repository)) {
+ if (is_repository_shallow(repo)) {
perform_reachability_traversal(&revs);
prune_shallow(show_only ? PRUNE_SHOW_ONLY : 0);
}
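The prune.c changes drop USE_THE_REPOSITORY_VARIABLE and instead thread the struct repository handed to cmd_prune() through every helper, reaching it via revs->repo where only a rev_info is available. A schematic sketch of that pattern with placeholder types (they are not git's definitions):

#include <stdio.h>

/* Placeholder types standing in for git's repository and rev_info. */
struct repository { const char *objdir; };
struct rev_info { struct repository *repo; };

/* Helpers take the repository (or something carrying it) explicitly... */
static void traverse(struct rev_info *revs)
{
    printf("walking objects in %s\n", revs->repo->objdir);
}

/* ...instead of reaching for a global 'the_repository'. */
static int cmd_prune_sketch(struct repository *repo)
{
    struct rev_info revs = { .repo = repo };

    traverse(&revs);
    return 0;
}

int main(void)
{
    struct repository repo = { .objdir = ".git/objects" };

    return cmd_prune_sketch(&repo);
}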
diff --git a/builtin/pull.c b/builtin/pull.c
index a1ebc6ad33..c593f324fe 100644
--- a/builtin/pull.c
+++ b/builtin/pull.c
@@ -143,6 +143,9 @@ static struct option pull_options[] = {
OPT_PASSTHRU(0, "summary", &opt_diffstat, NULL,
N_("(synonym to --stat)"),
PARSE_OPT_NOARG | PARSE_OPT_HIDDEN),
+ OPT_PASSTHRU(0, "compact-summary", &opt_diffstat, NULL,
+ N_("show a compact-summary at the end of the merge"),
+ PARSE_OPT_NOARG),
OPT_PASSTHRU(0, "log", &opt_log, N_("n"),
N_("add (at most <n>) entries from shortlog to merge commit message"),
PARSE_OPT_OPTARG),
@@ -487,7 +490,7 @@ static void NORETURN die_no_merge_candidates(const char *repo, const char **refs
} else
fprintf_ln(stderr, _("Your configuration specifies to merge with the ref '%s'\n"
"from the remote, but no such ref was fetched."),
- *curr_branch->merge_name);
+ curr_branch->merge[0]->src);
exit(1);
}
diff --git a/builtin/rebase.c b/builtin/rebase.c
index 2e8c4ee678..e90562a3b8 100644
--- a/builtin/rebase.c
+++ b/builtin/rebase.c
@@ -1128,6 +1128,7 @@ int cmd_rebase(int argc,
.short_name = 'n',
.long_name = "no-stat",
.value = &options.flags,
+ .precision = sizeof(options.flags),
.help = N_("do not show diffstat of what changed upstream"),
.flags = PARSE_OPT_NOARG,
.defval = REBASE_DIFFSTAT,
diff --git a/builtin/receive-pack.c b/builtin/receive-pack.c
index a317d6c278..7974d157eb 100644
--- a/builtin/receive-pack.c
+++ b/builtin/receive-pack.c
@@ -33,7 +33,7 @@
#include "packfile.h"
#include "object-file.h"
#include "object-name.h"
-#include "object-store.h"
+#include "odb.h"
#include "path.h"
#include "protocol.h"
#include "commit-reach.h"
@@ -359,7 +359,8 @@ static void write_head_info(void)
refs_for_each_fullref_in(get_main_ref_store(the_repository), "",
exclude_patterns, show_ref_cb, &seen);
- for_each_alternate_ref(show_one_alternate_ref, &seen);
+ odb_for_each_alternate_ref(the_repository->objects,
+ show_one_alternate_ref, &seen);
oidset_clear(&seen);
strvec_clear(&excludes_vector);
@@ -1508,8 +1509,8 @@ static const char *update(struct command *cmd, struct shallow_info *si)
}
if (!is_null_oid(new_oid) &&
- !has_object(the_repository, new_oid,
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR)) {
+ !odb_has_object(the_repository->objects, new_oid,
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR)) {
error("unpack should have generated %s, "
"but I can't find it!", oid_to_hex(new_oid));
ret = "bad pack";
@@ -1846,36 +1847,102 @@ static void BUG_if_skipped_connectivity_check(struct command *commands,
BUG_if_bug("connectivity check skipped???");
}
+static void ref_transaction_rejection_handler(const char *refname,
+ const struct object_id *old_oid UNUSED,
+ const struct object_id *new_oid UNUSED,
+ const char *old_target UNUSED,
+ const char *new_target UNUSED,
+ enum ref_transaction_error err,
+ void *cb_data)
+{
+ struct strmap *failed_refs = cb_data;
+
+ strmap_put(failed_refs, refname, (char *)ref_transaction_error_msg(err));
+}
+
static void execute_commands_non_atomic(struct command *commands,
struct shallow_info *si)
{
struct command *cmd;
struct strbuf err = STRBUF_INIT;
+ const char *reported_error = NULL;
+ struct strmap failed_refs = STRMAP_INIT;
- for (cmd = commands; cmd; cmd = cmd->next) {
- if (!should_process_cmd(cmd) || cmd->run_proc_receive)
- continue;
+ /*
+ * Reference updates where D/F conflicts shouldn't arise, because
+ * one reference is being deleted while the other is being created,
+ * are still treated as conflicts in batched updates. This is because
+ * we don't do conflict resolution inside a transaction. To
+ * mitigate this, delete references in a separate batch.
+ *
+ * NEEDSWORK: Add conflict resolution between deletion and creation
+ * of reference updates within a transaction. With that, we can
+ * combine the two phases.
+ */
+ enum processing_phase {
+ PHASE_DELETIONS,
+ PHASE_OTHERS
+ };
- transaction = ref_store_transaction_begin(get_main_ref_store(the_repository),
- 0, &err);
- if (!transaction) {
- rp_error("%s", err.buf);
- strbuf_reset(&err);
- cmd->error_string = "transaction failed to start";
- continue;
+ for (enum processing_phase phase = PHASE_DELETIONS; phase <= PHASE_OTHERS; phase++) {
+ for (cmd = commands; cmd; cmd = cmd->next) {
+ if (!should_process_cmd(cmd) || cmd->run_proc_receive)
+ continue;
+
+ if (phase == PHASE_DELETIONS && !is_null_oid(&cmd->new_oid))
+ continue;
+ else if (phase == PHASE_OTHERS && is_null_oid(&cmd->new_oid))
+ continue;
+
+ /*
+ * Lazily create a transaction only when we know there are
+ * updates to be added.
+ */
+ if (!transaction) {
+ transaction = ref_store_transaction_begin(get_main_ref_store(the_repository),
+ REF_TRANSACTION_ALLOW_FAILURE, &err);
+ if (!transaction) {
+ rp_error("%s", err.buf);
+ strbuf_reset(&err);
+ reported_error = "transaction failed to start";
+ goto failure;
+ }
+ }
+
+ cmd->error_string = update(cmd, si);
}
- cmd->error_string = update(cmd, si);
+ /* No transaction, so nothing to commit */
+ if (!transaction)
+ goto cleanup;
- if (!cmd->error_string
- && ref_transaction_commit(transaction, &err)) {
+ if (ref_transaction_commit(transaction, &err)) {
rp_error("%s", err.buf);
- strbuf_reset(&err);
- cmd->error_string = "failed to update ref";
+ reported_error = "failed to update refs";
+ goto failure;
}
+
+ ref_transaction_for_each_rejected_update(transaction,
+ ref_transaction_rejection_handler,
+ &failed_refs);
+
+ if (strmap_empty(&failed_refs))
+ goto cleanup;
+
+ failure:
+ for (cmd = commands; cmd; cmd = cmd->next) {
+ if (reported_error)
+ cmd->error_string = reported_error;
+ else if (strmap_contains(&failed_refs, cmd->ref_name))
+ cmd->error_string = strmap_get(&failed_refs, cmd->ref_name);
+ }
+
+ cleanup:
ref_transaction_free(transaction);
+ transaction = NULL;
+ strmap_clear(&failed_refs, 0);
+ strbuf_release(&err);
}
- strbuf_release(&err);
}
static void execute_commands_atomic(struct command *commands,
@@ -2136,7 +2203,7 @@ static struct command *read_head_info(struct packet_reader *reader,
use_push_options = 1;
hash = parse_feature_value(feature_list, "object-format", &len, NULL);
if (!hash) {
- hash = hash_algos[GIT_HASH_SHA1].name;
+ hash = hash_algos[GIT_HASH_SHA1_LEGACY].name;
len = strlen(hash);
}
if (xstrncmpz(the_hash_algo->name, hash, len))
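execute_commands_non_atomic() above now batches all updates of a push into one allow-failure transaction per phase, running deletions in their own batch before everything else to sidestep D/F conflicts, and then maps per-ref rejections back onto the individual commands. A compact stand-alone sketch of that two-phase batching; the command list, rejection rule, and error strings are invented for illustration.

#include <stdio.h>
#include <string.h>

/* Invented command record; real receive-pack commands carry much more state. */
struct cmd {
    const char *ref;
    int is_delete;
    const char *error;
};

/* Pretend to commit one batch; refs containing "bad" get rejected. */
static void commit_batch(struct cmd **batch, size_t nr)
{
    for (size_t i = 0; i < nr; i++)
        if (strstr(batch[i]->ref, "bad"))
            batch[i]->error = "rejected by transaction";
}

int main(void)
{
    struct cmd cmds[] = {
        { "refs/heads/old",     1, NULL },
        { "refs/heads/new",     0, NULL },
        { "refs/heads/bad/ref", 0, NULL },
    };
    enum { PHASE_DELETIONS, PHASE_OTHERS };

    /* Run deletions in their own batch so a deleted ref cannot
     * D/F-conflict with a creation batched alongside it. */
    for (int phase = PHASE_DELETIONS; phase <= PHASE_OTHERS; phase++) {
        struct cmd *batch[3];
        size_t nr = 0;

        for (size_t i = 0; i < 3; i++)
            if ((phase == PHASE_DELETIONS) == (cmds[i].is_delete != 0))
                batch[nr++] = &cmds[i];
        commit_batch(batch, nr);
    }

    for (size_t i = 0; i < 3; i++)
        printf("%s: %s\n", cmds[i].ref, cmds[i].error ? cmds[i].error : "ok");
    return 0;
}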
diff --git a/builtin/remote.c b/builtin/remote.c
index 0d6755bcb7..5dd6cbbaee 100644
--- a/builtin/remote.c
+++ b/builtin/remote.c
@@ -14,7 +14,7 @@
#include "rebase.h"
#include "refs.h"
#include "refspec.h"
-#include "object-store.h"
+#include "odb.h"
#include "strvec.h"
#include "commit-reach.h"
#include "progress.h"
@@ -157,6 +157,21 @@ static int parse_mirror_opt(const struct option *opt, const char *arg, int not)
return 0;
}
+static int check_remote_collision(struct remote *remote, void *data)
+{
+ const char *name = data;
+ const char *p;
+
+ if (skip_prefix(name, remote->name, &p) && *p == '/')
+ die(_("remote name '%s' is a subset of existing remote '%s'"),
+ name, remote->name);
+ if (skip_prefix(remote->name, name, &p) && *p == '/')
+ die(_("remote name '%s' is a superset of existing remote '%s'"),
+ name, remote->name);
+
+ return 0;
+}
+
static int add(int argc, const char **argv, const char *prefix,
struct repository *repo UNUSED)
{
@@ -208,6 +223,8 @@ static int add(int argc, const char **argv, const char *prefix,
if (!valid_remote_name(name))
die(_("'%s' is not a valid remote name"), name);
+ for_each_remote(check_remote_collision, (void *)name);
+
strbuf_addf(&buf, "remote.%s.url", name);
git_config_set(buf.buf, url);
@@ -454,8 +471,8 @@ static int get_push_ref_states(const struct ref *remote_refs,
info->status = PUSH_STATUS_UPTODATE;
else if (is_null_oid(&ref->old_oid))
info->status = PUSH_STATUS_CREATE;
- else if (has_object(the_repository, &ref->old_oid,
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR) &&
+ else if (odb_has_object(the_repository->objects, &ref->old_oid,
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR) &&
ref_newer(&ref->new_oid, &ref->old_oid))
info->status = PUSH_STATUS_FASTFORWARD;
else
@@ -1521,9 +1538,6 @@ static int prune_remote(const char *remote, int dry_run)
struct ref_states states = REF_STATES_INIT;
struct string_list refs_to_prune = STRING_LIST_INIT_NODUP;
struct string_list_item *item;
- const char *dangling_msg = dry_run
- ? _(" %s will become dangling!")
- : _(" %s has become dangling!");
get_remote_ref_states(remote, &states, GET_REF_STATES);
@@ -1555,7 +1569,7 @@ static int prune_remote(const char *remote, int dry_run)
}
refs_warn_dangling_symrefs(get_main_ref_store(the_repository),
- stdout, dangling_msg, &refs_to_prune);
+ stdout, " ", dry_run, &refs_to_prune);
string_list_clear(&refs_to_prune, 0);
free_remote_ref_states(&states);
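This file, like most hunks that follow, switches from the object-store.h helpers to their odb.h counterparts, passing the repository's object database explicitly. A minimal sketch of the call-shape change, using only identifiers that appear in these hunks (the flag choice is illustrative):

static int object_exists(struct repository *r, const struct object_id *oid)
{
	/* formerly: has_object(r, oid, HAS_OBJECT_RECHECK_PACKED) */
	return odb_has_object(r->objects, oid, HAS_OBJECT_RECHECK_PACKED);
}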
diff --git a/builtin/repack.c b/builtin/repack.c
index 5ddc6e7f95..75158d7776 100644
--- a/builtin/repack.c
+++ b/builtin/repack.c
@@ -17,7 +17,7 @@
#include "midx.h"
#include "packfile.h"
#include "prune-packed.h"
-#include "object-store.h"
+#include "odb.h"
#include "promisor-remote.h"
#include "shallow.h"
#include "pack.h"
@@ -39,6 +39,7 @@ static int write_bitmaps = -1;
static int use_delta_islands;
static int run_update_server_info = 1;
static char *packdir, *packtmp_name, *packtmp;
+static int midx_must_contain_cruft = 1;
static const char *const git_repack_usage[] = {
N_("git repack [-a] [-A] [-d] [-f] [-F] [-l] [-n] [-q] [-b] [-m]\n"
@@ -108,6 +109,10 @@ static int repack_config(const char *var, const char *value,
free(cruft_po_args->threads);
return git_config_string(&cruft_po_args->threads, var, value);
}
+ if (!strcmp(var, "repack.midxmustcontaincruft")) {
+ midx_must_contain_cruft = git_config_bool(var, value);
+ return 0;
+ }
return git_default_config(var, value, ctx, cb);
}
@@ -690,6 +695,77 @@ static void free_pack_geometry(struct pack_geometry *geometry)
free(geometry->pack);
}
+static int midx_has_unknown_packs(char **midx_pack_names,
+ size_t midx_pack_names_nr,
+ struct string_list *include,
+ struct pack_geometry *geometry,
+ struct existing_packs *existing)
+{
+ size_t i;
+
+ string_list_sort(include);
+
+ for (i = 0; i < midx_pack_names_nr; i++) {
+ const char *pack_name = midx_pack_names[i];
+
+ /*
+ * Determine whether or not each MIDX'd pack from the existing
+ * MIDX (if any) is represented in the new MIDX. For each pack
+ * in the MIDX, it must either be:
+ *
+ * - In the "include" list of packs to be included in the new
+ * MIDX. Note this function is called before the include
+ * list is populated with any cruft pack(s).
+ *
+ * - Below the geometric split line (if using pack geometry),
+ * indicating that the pack won't be included in the new
+ * MIDX, but its contents were rolled up as part of the
+ * geometric repack.
+ *
+ * - In the existing non-kept packs list (if not using pack
+ * geometry), and marked as non-deleted.
+ */
+ if (string_list_has_string(include, pack_name)) {
+ continue;
+ } else if (geometry) {
+ struct strbuf buf = STRBUF_INIT;
+ uint32_t j;
+
+ for (j = 0; j < geometry->split; j++) {
+ strbuf_reset(&buf);
+ strbuf_addstr(&buf, pack_basename(geometry->pack[j]));
+ strbuf_strip_suffix(&buf, ".pack");
+ strbuf_addstr(&buf, ".idx");
+
+ if (!strcmp(pack_name, buf.buf)) {
+ strbuf_release(&buf);
+ break;
+ }
+ }
+
+ strbuf_release(&buf);
+
+ if (j < geometry->split)
+ continue;
+ } else {
+ struct string_list_item *item;
+
+ item = string_list_lookup(&existing->non_kept_packs,
+ pack_name);
+ if (item && !pack_is_marked_for_deletion(item))
+ continue;
+ }
+
+ /*
+ * If we got to this point, the MIDX includes some pack that we
+ * don't know about.
+ */
+ return 1;
+ }
+
+ return 0;
+}
+
struct midx_snapshot_ref_data {
struct tempfile *f;
struct oidset seen;
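When pack geometry is in use, midx_has_unknown_packs() above compares MIDX pack names, which carry an .idx suffix, against on-disk pack basenames ending in .pack. An illustrative helper (not part of the patch) showing that normalization with the strbuf API:

static int idx_name_matches_pack(const char *idx_name, const char *pack_file_base)
{
	struct strbuf buf = STRBUF_INIT;
	int match;

	strbuf_addstr(&buf, pack_file_base);   /* e.g. "pack-1234.pack" */
	strbuf_strip_suffix(&buf, ".pack");
	strbuf_addstr(&buf, ".idx");           /* ->   "pack-1234.idx"  */
	match = !strcmp(idx_name, buf.buf);
	strbuf_release(&buf);
	return match;
}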
@@ -710,7 +786,7 @@ static int midx_snapshot_ref_one(const char *refname UNUSED,
if (oidset_insert(&data->seen, oid))
return 0; /* already seen */
- if (oid_object_info(the_repository, oid, NULL) != OBJ_COMMIT)
+ if (odb_read_object_info(the_repository->objects, oid, NULL) != OBJ_COMMIT)
return 0;
fprintf(data->f->fp, "%s%s\n", data->preferred ? "+" : "",
@@ -758,6 +834,8 @@ static void midx_snapshot_refs(struct tempfile *f)
static void midx_included_packs(struct string_list *include,
struct existing_packs *existing,
+ char **midx_pack_names,
+ size_t midx_pack_names_nr,
struct string_list *names,
struct pack_geometry *geometry)
{
@@ -811,26 +889,56 @@ static void midx_included_packs(struct string_list *include,
}
}
- for_each_string_list_item(item, &existing->cruft_packs) {
+ if (midx_must_contain_cruft ||
+ midx_has_unknown_packs(midx_pack_names, midx_pack_names_nr,
+ include, geometry, existing)) {
/*
- * When doing a --geometric repack, there is no need to check
- * for deleted packs, since we're by definition not doing an
- * ALL_INTO_ONE repack (hence no packs will be deleted).
- * Otherwise we must check for and exclude any packs which are
- * enqueued for deletion.
+ * If there are one or more unknown pack(s) present (see
+ * midx_has_unknown_packs() for what makes a pack
+ * "unknown") in the MIDX before the repack, keep them
+ * as they may be required to form a reachability
+ * closure if the MIDX is bitmapped.
*
- * So we could omit the conditional below in the --geometric
- * case, but doing so is unnecessary since no packs are marked
- * as pending deletion (since we only call
- * `mark_packs_for_deletion()` when doing an all-into-one
- * repack).
+ * For example, a cruft pack can be required to form a
+ * reachability closure if the MIDX is bitmapped and one
+ * or more of the bitmap's selected commits reaches a
+ * once-cruft object that was later made reachable.
*/
- if (pack_is_marked_for_deletion(item))
- continue;
+ for_each_string_list_item(item, &existing->cruft_packs) {
+ /*
+ * When doing a --geometric repack, there is no
+ * need to check for deleted packs, since we're
+ * by definition not doing an ALL_INTO_ONE
+ * repack (hence no packs will be deleted).
+ * Otherwise we must check for and exclude any
+ * packs which are enqueued for deletion.
+ *
+ * So we could omit the conditional below in the
+ * --geometric case, but doing so is unnecessary
+ * since no packs are marked as pending
+ * deletion (since we only call
+ * `mark_packs_for_deletion()` when doing an
+ * all-into-one repack).
+ */
+ if (pack_is_marked_for_deletion(item))
+ continue;
- strbuf_reset(&buf);
- strbuf_addf(&buf, "%s.idx", item->string);
- string_list_insert(include, buf.buf);
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "%s.idx", item->string);
+ string_list_insert(include, buf.buf);
+ }
+ } else {
+ /*
+ * Modern versions of Git (with the appropriate
+ * configuration setting) will write new copies of
+ * once-cruft objects when doing a --geometric repack.
+ *
+ * If the MIDX has no cruft pack, new packs written
+ * during a --geometric repack will not rely on the
+ * cruft pack to form a reachability closure, so we can
+ * avoid including them in the MIDX in that case.
+ */
+ ;
}
strbuf_release(&buf);
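Summarizing the hunk above: cruft packs are carried into the new MIDX either unconditionally (repack.midxMustContainCruft, default true) or, when that knob is set to false, only if the old MIDX references packs the new one would otherwise lose track of. A simplified sketch of the combined predicate; the real decision is the midx_has_unknown_packs() check shown earlier:

static int keep_cruft_packs_in_midx(int must_contain_cruft,
				    int midx_has_unknown)
{
	/*
	 * Dropping cruft packs from the MIDX is only safe when every pack
	 * the old MIDX knows about is still accounted for in the new one.
	 */
	return must_contain_cruft || midx_has_unknown;
}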
@@ -1145,6 +1253,8 @@ int cmd_repack(int argc,
struct tempfile *refs_snapshot = NULL;
int i, ext, ret;
int show_progress;
+ char **midx_pack_names = NULL;
+ size_t midx_pack_names_nr = 0;
/* variables to be filled by option parsing */
int delete_redundant = 0;
@@ -1240,7 +1350,7 @@ int cmd_repack(int argc,
po_args.depth = xstrdup_or_null(opt_depth);
po_args.threads = xstrdup_or_null(opt_threads);
- if (delete_redundant && repository_format_precious_objects)
+ if (delete_redundant && the_repository->repository_format_precious_objects)
die(_("cannot delete packs in a precious-objects repo"));
die_for_incompatible_opt3(unpack_unreachable || (pack_everything & LOOSEN_UNREACHABLE), "-A",
@@ -1261,7 +1371,8 @@ int cmd_repack(int argc,
if (write_bitmaps && !(pack_everything & ALL_INTO_ONE) && !write_midx)
die(_(incremental_bitmap_conflict_error));
- if (write_bitmaps && po_args.local && has_alt_odb(the_repository)) {
+ if (write_bitmaps && po_args.local &&
+ odb_has_alternates(the_repository->objects)) {
/*
* When asked to do a local repack, but we have
* packfiles that are inherited from an alternate, then
@@ -1361,7 +1472,10 @@ int cmd_repack(int argc,
!(pack_everything & PACK_CRUFT))
strvec_push(&cmd.args, "--pack-loose-unreachable");
} else if (geometry.split_factor) {
- strvec_push(&cmd.args, "--stdin-packs");
+ if (midx_must_contain_cruft)
+ strvec_push(&cmd.args, "--stdin-packs");
+ else
+ strvec_push(&cmd.args, "--stdin-packs=follow");
strvec_push(&cmd.args, "--unpacked");
} else {
strvec_push(&cmd.args, "--unpacked");
@@ -1401,8 +1515,25 @@ int cmd_repack(int argc,
if (ret)
goto cleanup;
- if (!names.nr && !po_args.quiet)
- printf_ln(_("Nothing new to pack."));
+ if (!names.nr) {
+ if (!po_args.quiet)
+ printf_ln(_("Nothing new to pack."));
+ /*
+ * If we didn't write any new packs, the non-cruft packs
+ * may refer to once-unreachable objects in the cruft
+ * pack(s).
+ *
+ * If there isn't already a MIDX, the one we write
+ * must include the cruft pack(s), in case the
+ * non-cruft pack(s) refer to once-cruft objects.
+ *
+ * If there is already a MIDX, we can punt here, since
+ * midx_has_unknown_packs() will make the decision for
+ * us.
+ */
+ if (!get_local_multi_pack_index(the_repository))
+ midx_must_contain_cruft = 1;
+ }
if (pack_everything & PACK_CRUFT) {
const char *pack_prefix = find_pack_prefix(packdir, packtmp);
@@ -1483,6 +1614,19 @@ int cmd_repack(int argc,
string_list_sort(&names);
+ if (get_local_multi_pack_index(the_repository)) {
+ struct multi_pack_index *m =
+ get_local_multi_pack_index(the_repository);
+
+ ALLOC_ARRAY(midx_pack_names,
+ m->num_packs + m->num_packs_in_base);
+
+ for (; m; m = m->base_midx)
+ for (uint32_t i = 0; i < m->num_packs; i++)
+ midx_pack_names[midx_pack_names_nr++] =
+ xstrdup(m->pack_names[i]);
+ }
+
close_object_store(the_repository->objects);
/*
@@ -1524,7 +1668,8 @@ int cmd_repack(int argc,
if (write_midx) {
struct string_list include = STRING_LIST_INIT_DUP;
- midx_included_packs(&include, &existing, &names, &geometry);
+ midx_included_packs(&include, &existing, midx_pack_names,
+ midx_pack_names_nr, &names, &geometry);
ret = write_midx_included_packs(&include, &geometry, &names,
refs_snapshot ? get_tempfile_path(refs_snapshot) : NULL,
@@ -1575,6 +1720,9 @@ cleanup:
string_list_clear(&names, 1);
existing_packs_release(&existing);
free_pack_geometry(&geometry);
+ for (size_t i = 0; i < midx_pack_names_nr; i++)
+ free(midx_pack_names[i]);
+ free(midx_pack_names);
pack_objects_args_release(&po_args);
pack_objects_args_release(&cruft_po_args);
diff --git a/builtin/replace.c b/builtin/replace.c
index 48c7c6a2d5..5ff2ab723c 100644
--- a/builtin/replace.c
+++ b/builtin/replace.c
@@ -19,7 +19,7 @@
#include "run-command.h"
#include "object-file.h"
#include "object-name.h"
-#include "object-store.h"
+#include "odb.h"
#include "replace-object.h"
#include "tag.h"
#include "wildmatch.h"
@@ -65,8 +65,8 @@ static int show_reference(const char *refname,
if (repo_get_oid(data->repo, refname, &object))
return error(_("failed to resolve '%s' as a valid ref"), refname);
- obj_type = oid_object_info(data->repo, &object, NULL);
- repl_type = oid_object_info(data->repo, oid, NULL);
+ obj_type = odb_read_object_info(data->repo->objects, &object, NULL);
+ repl_type = odb_read_object_info(data->repo->objects, oid, NULL);
printf("%s (%s) -> %s (%s)\n", refname, type_name(obj_type),
oid_to_hex(oid), type_name(repl_type));
@@ -185,8 +185,8 @@ static int replace_object_oid(const char *object_ref,
struct strbuf err = STRBUF_INIT;
int res = 0;
- obj_type = oid_object_info(the_repository, object, NULL);
- repl_type = oid_object_info(the_repository, repl, NULL);
+ obj_type = odb_read_object_info(the_repository->objects, object, NULL);
+ repl_type = odb_read_object_info(the_repository->objects, repl, NULL);
if (!force && obj_type != repl_type)
return error(_("Objects must be of the same type.\n"
"'%s' points to a replaced object of type '%s'\n"
@@ -334,7 +334,7 @@ static int edit_and_replace(const char *object_ref, int force, int raw)
if (repo_get_oid(the_repository, object_ref, &old_oid) < 0)
return error(_("not a valid object name: '%s'"), object_ref);
- type = oid_object_info(the_repository, &old_oid, NULL);
+ type = odb_read_object_info(the_repository->objects, &old_oid, NULL);
if (type < 0)
return error(_("unable to get object type for %s"),
oid_to_hex(&old_oid));
diff --git a/builtin/rev-list.c b/builtin/rev-list.c
index 0984b607bf..4d0c460f18 100644
--- a/builtin/rev-list.c
+++ b/builtin/rev-list.c
@@ -14,7 +14,7 @@
#include "object.h"
#include "object-name.h"
#include "object-file.h"
-#include "object-store.h"
+#include "odb.h"
#include "pack-bitmap.h"
#include "parse-options.h"
#include "log-tree.h"
@@ -110,7 +110,8 @@ static off_t get_object_disk_usage(struct object *obj)
off_t size;
struct object_info oi = OBJECT_INFO_INIT;
oi.disk_sizep = &size;
- if (oid_object_info_extended(the_repository, &obj->oid, &oi, 0) < 0)
+ if (odb_read_object_info_extended(the_repository->objects,
+ &obj->oid, &oi, 0) < 0)
die(_("unable to get disk usage of %s"), oid_to_hex(&obj->oid));
return size;
}
@@ -346,7 +347,8 @@ static void show_commit(struct commit *commit, void *data)
static int finish_object(struct object *obj, const char *name, void *cb_data)
{
struct rev_list_info *info = cb_data;
- if (oid_object_info_extended(the_repository, &obj->oid, NULL, 0) < 0) {
+ if (odb_read_object_info_extended(the_repository->objects,
+ &obj->oid, NULL, 0) < 0) {
finish_object__ma(obj, name);
return 1;
}
diff --git a/builtin/send-pack.c b/builtin/send-pack.c
index c6e0e9d051..28b69d26b4 100644
--- a/builtin/send-pack.c
+++ b/builtin/send-pack.c
@@ -304,9 +304,10 @@ int cmd_send_pack(int argc,
flags |= MATCH_REFS_MIRROR;
/* match them up */
- if (match_push_refs(local_refs, &remote_refs, &rs, flags))
- return -1;
-
+ if (match_push_refs(local_refs, &remote_refs, &rs, flags)) {
+ ret = -1;
+ goto cleanup;
+ }
if (!is_empty_cas(&cas))
apply_push_cas(&cas, remote, remote_refs);
@@ -339,10 +340,12 @@ int cmd_send_pack(int argc,
/* stable plumbing output; do not modify or localize */
fprintf(stderr, "Everything up-to-date\n");
+cleanup:
string_list_clear(&push_options, 0);
free_refs(remote_refs);
free_refs(local_refs);
refspec_clear(&rs);
+ oid_array_clear(&extra_have);
oid_array_clear(&shallow);
clear_cas_option(&cas);
return ret;
diff --git a/builtin/shortlog.c b/builtin/shortlog.c
index fe15e11497..60adc5e7a5 100644
--- a/builtin/shortlog.c
+++ b/builtin/shortlog.c
@@ -419,7 +419,7 @@ int cmd_shortlog(int argc,
* git/nongit so that we do not have to do this.
*/
if (nongit && !the_hash_algo)
- repo_set_hash_algo(the_repository, GIT_HASH_SHA1);
+ repo_set_hash_algo(the_repository, GIT_HASH_DEFAULT);
git_config(git_default_config, NULL);
shortlog_init(&log);
diff --git a/builtin/show-index.c b/builtin/show-index.c
index 9d4ecf5e7b..2c3e2940ce 100644
--- a/builtin/show-index.c
+++ b/builtin/show-index.c
@@ -47,7 +47,7 @@ int cmd_show_index(int argc,
* the index file passed in and use that instead.
*/
if (!the_hash_algo)
- repo_set_hash_algo(the_repository, GIT_HASH_SHA1);
+ repo_set_hash_algo(the_repository, GIT_HASH_DEFAULT);
hashsz = the_hash_algo->rawsz;
diff --git a/builtin/show-ref.c b/builtin/show-ref.c
index 623a52a45f..117709cb07 100644
--- a/builtin/show-ref.c
+++ b/builtin/show-ref.c
@@ -5,7 +5,7 @@
#include "hex.h"
#include "refs/refs-internal.h"
#include "object-name.h"
-#include "object-store.h"
+#include "odb.h"
#include "object.h"
#include "string-list.h"
#include "parse-options.h"
@@ -35,8 +35,8 @@ static void show_one(const struct show_one_options *opts,
const char *hex;
struct object_id peeled;
- if (!has_object(the_repository, oid,
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
+ if (!odb_has_object(the_repository->objects, oid,
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
die("git show-ref: bad ref %s (%s)", refname,
oid_to_hex(oid));
diff --git a/builtin/stash.c b/builtin/stash.c
index cfbd92852a..e2f95cc2eb 100644
--- a/builtin/stash.c
+++ b/builtin/stash.c
@@ -28,7 +28,10 @@
#include "log-tree.h"
#include "diffcore.h"
#include "reflog.h"
+#include "reflog-walk.h"
#include "add-interactive.h"
+#include "oid-array.h"
+#include "commit.h"
#define INCLUDE_ALL_FILES 2
@@ -56,6 +59,10 @@
" [-u | --include-untracked] [-a | --all] [<message>]")
#define BUILTIN_STASH_CREATE_USAGE \
N_("git stash create [<message>]")
+#define BUILTIN_STASH_EXPORT_USAGE \
+ N_("git stash export (--print | --to-ref <ref>) [<stash>...]")
+#define BUILTIN_STASH_IMPORT_USAGE \
+ N_("git stash import <commit>")
#define BUILTIN_STASH_CLEAR_USAGE \
"git stash clear"
@@ -71,6 +78,8 @@ static const char * const git_stash_usage[] = {
BUILTIN_STASH_CLEAR_USAGE,
BUILTIN_STASH_CREATE_USAGE,
BUILTIN_STASH_STORE_USAGE,
+ BUILTIN_STASH_EXPORT_USAGE,
+ BUILTIN_STASH_IMPORT_USAGE,
NULL
};
@@ -124,6 +133,16 @@ static const char * const git_stash_save_usage[] = {
NULL
};
+static const char * const git_stash_export_usage[] = {
+ BUILTIN_STASH_EXPORT_USAGE,
+ NULL
+};
+
+static const char * const git_stash_import_usage[] = {
+ BUILTIN_STASH_IMPORT_USAGE,
+ NULL
+};
+
static const char ref_stash[] = "refs/stash";
static struct strbuf stash_index_path = STRBUF_INIT;
@@ -132,6 +151,7 @@ static struct strbuf stash_index_path = STRBUF_INIT;
* b_commit is set to the base commit
* i_commit is set to the commit containing the index tree
* u_commit is set to the commit containing the untracked files tree
+ * c_commit is set to the first parent (chain commit) when importing and is otherwise unset
* w_tree is set to the working tree
* b_tree is set to the base tree
* i_tree is set to the index tree
@@ -142,6 +162,7 @@ struct stash_info {
struct object_id b_commit;
struct object_id i_commit;
struct object_id u_commit;
+ struct object_id c_commit;
struct object_id w_tree;
struct object_id b_tree;
struct object_id i_tree;
@@ -160,6 +181,33 @@ static void free_stash_info(struct stash_info *info)
strbuf_release(&info->revision);
}
+static int check_stash_topology(struct repository *r, struct commit *stash)
+{
+ struct commit *p1, *p2, *p3 = NULL;
+
+ /* stash must have two or three parents */
+ if (!stash->parents || !stash->parents->next ||
+ (stash->parents->next->next && stash->parents->next->next->next))
+ return -1;
+ p1 = stash->parents->item;
+ p2 = stash->parents->next->item;
+ if (stash->parents->next->next)
+ p3 = stash->parents->next->next->item;
+ if (repo_parse_commit(r, p1) || repo_parse_commit(r, p2) ||
+ (p3 && repo_parse_commit(r, p3)))
+ return -1;
+ /* p2 must have a single parent, p3 must have no parents */
+ if (!p2->parents || p2->parents->next || (p3 && p3->parents))
+ return -1;
+ if (repo_parse_commit(r, p2->parents->item))
+ return -1;
+ /* p2^1 must equal p1 */
+ if (!oideq(&p1->object.oid, &p2->parents->item->object.oid))
+ return -1;
+
+ return 0;
+}
+
static void assert_stash_like(struct stash_info *info, const char *revision)
{
if (get_oidf(&info->b_commit, "%s^1", revision) ||
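check_stash_topology() above encodes the shape of a regular stash commit: two or three parents, where the second parent (the index commit) has exactly one parent that must equal the first parent (the base), and the optional third parent (untracked files) has no parents at all. The pointer-chasing parent-count test at the top reads more naturally with an illustrative helper that is not in the patch:

static int commit_parent_count(const struct commit *c)
{
	int n = 0;
	const struct commit_list *p;

	for (p = c->parents; p; p = p->next)
		n++;
	return n;
}
/* check_stash_topology() requires 2 <= commit_parent_count(stash) <= 3. */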
@@ -169,6 +217,25 @@ static void assert_stash_like(struct stash_info *info, const char *revision)
die(_("'%s' is not a stash-like commit"), revision);
}
+static int parse_stash_revision(struct strbuf *revision, const char *commit, int quiet)
+{
+ strbuf_reset(revision);
+ if (!commit) {
+ if (!refs_ref_exists(get_main_ref_store(the_repository), ref_stash)) {
+ if (!quiet)
+ fprintf_ln(stderr, _("No stash entries found."));
+ return -1;
+ }
+
+ strbuf_addf(revision, "%s@{0}", ref_stash);
+ } else if (strspn(commit, "0123456789") == strlen(commit)) {
+ strbuf_addf(revision, "%s@{%s}", ref_stash, commit);
+ } else {
+ strbuf_addstr(revision, commit);
+ }
+ return 0;
+}
+
static int get_stash_info(struct stash_info *info, int argc, const char **argv)
{
int ret;
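parse_stash_revision() factors out the revision spelling that get_stash_info() previously did inline, so the new export code can reuse it. A usage sketch, assuming the static helper above:

static void example_resolve_stash(void)
{
	struct strbuf rev = STRBUF_INIT;

	if (!parse_stash_revision(&rev, "1", 1))
		printf("resolved to %s\n", rev.buf); /* "refs/stash@{1}" */
	strbuf_release(&rev);
}

Passing NULL selects refs/stash@{0}; a non-numeric argument is used verbatim, e.g. "stash@{2}" or any commit-ish.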
@@ -196,17 +263,9 @@ static int get_stash_info(struct stash_info *info, int argc, const char **argv)
if (argc == 1)
commit = argv[0];
- if (!commit) {
- if (!refs_ref_exists(get_main_ref_store(the_repository), ref_stash)) {
- fprintf_ln(stderr, _("No stash entries found."));
- return -1;
- }
-
- strbuf_addf(&info->revision, "%s@{0}", ref_stash);
- } else if (strspn(commit, "0123456789") == strlen(commit)) {
- strbuf_addf(&info->revision, "%s@{%s}", ref_stash, commit);
- } else {
- strbuf_addstr(&info->revision, commit);
+ strbuf_init(&info->revision, 0);
+ if (parse_stash_revision(&info->revision, commit, 0)) {
+ return -1;
}
revision = info->revision.buf;
@@ -1372,6 +1431,7 @@ static int do_create_stash(const struct pathspec *ps, struct strbuf *stash_msg_b
const char *head_short_sha1 = NULL;
const char *branch_ref = NULL;
const char *branch_name = "(no branch)";
+ char *branch_name_buf = NULL;
struct commit *head_commit = NULL;
struct commit_list *parents = NULL;
struct strbuf msg = STRBUF_INIT;
@@ -1404,8 +1464,12 @@ static int do_create_stash(const struct pathspec *ps, struct strbuf *stash_msg_b
branch_ref = refs_resolve_ref_unsafe(get_main_ref_store(the_repository),
"HEAD", 0, NULL, &flags);
- if (flags & REF_ISSYMREF)
- skip_prefix(branch_ref, "refs/heads/", &branch_name);
+
+ if (flags & REF_ISSYMREF) {
+ if (skip_prefix(branch_ref, "refs/heads/", &branch_name))
+ branch_name = branch_name_buf = xstrdup(branch_name);
+ }
+
head_short_sha1 = repo_find_unique_abbrev(the_repository,
&head_commit->object.oid,
DEFAULT_ABBREV);
@@ -1495,6 +1559,7 @@ done:
strbuf_release(&msg);
strbuf_release(&untracked_files);
free_commit_list(parents);
+ free(branch_name_buf);
return ret;
}
@@ -1789,11 +1854,15 @@ static int push_stash(int argc, const char **argv, const char *prefix,
int ret;
if (argc) {
- force_assume = !strcmp(argv[0], "-p");
+ int flags = PARSE_OPT_KEEP_DASHDASH;
+
+ if (push_assumed)
+ flags |= PARSE_OPT_STOP_AT_NON_OPTION;
+
argc = parse_options(argc, argv, prefix, options,
push_assumed ? git_stash_usage :
- git_stash_push_usage,
- PARSE_OPT_KEEP_DASHDASH);
+ git_stash_push_usage, flags);
+ force_assume |= patch_mode;
}
if (argc) {
@@ -1884,6 +1953,383 @@ static int save_stash(int argc, const char **argv, const char *prefix,
return ret;
}
+static int write_commit_with_parents(struct repository *r,
+ struct object_id *out,
+ const struct object_id *oid,
+ struct commit_list *parents)
+{
+ size_t author_len, committer_len;
+ struct commit *this;
+ const char *orig_author, *orig_committer;
+ char *author = NULL, *committer = NULL;
+ const char *buffer;
+ unsigned long bufsize;
+ const char *p;
+ struct strbuf msg = STRBUF_INIT;
+ int ret = 0;
+ struct ident_split id;
+
+ this = lookup_commit_reference(r, oid);
+ buffer = repo_get_commit_buffer(r, this, &bufsize);
+ orig_author = find_commit_header(buffer, "author", &author_len);
+ orig_committer = find_commit_header(buffer, "committer", &committer_len);
+
+ if (!orig_author || !orig_committer) {
+ ret = error(_("cannot parse commit %s"), oid_to_hex(oid));
+ goto out;
+ }
+
+ if (split_ident_line(&id, orig_author, author_len) < 0 ||
+ split_ident_line(&id, orig_committer, committer_len) < 0) {
+ ret = error(_("invalid author or committer for %s"), oid_to_hex(oid));
+ goto out;
+ }
+
+ p = strstr(buffer, "\n\n");
+ strbuf_addstr(&msg, "git stash: ");
+
+ if (p)
+ strbuf_add(&msg, p + 2, bufsize - (p + 2 - buffer));
+ strbuf_complete_line(&msg);
+
+ author = xmemdupz(orig_author, author_len);
+ committer = xmemdupz(orig_committer, committer_len);
+
+ if (commit_tree_extended(msg.buf, msg.len,
+ r->hash_algo->empty_tree, parents,
+ out, author, committer,
+ NULL, NULL)) {
+ ret = error(_("could not write commit"));
+ goto out;
+ }
+out:
+ strbuf_release(&msg);
+ repo_unuse_commit_buffer(r, this, buffer);
+ free(author);
+ free(committer);
+ return ret;
+}
+
+static int do_import_stash(struct repository *r, const char *rev)
+{
+ struct object_id chain;
+ int res = 0;
+ const char *buffer = NULL;
+ unsigned long bufsize;
+ struct commit *this = NULL;
+ struct commit_list *items = NULL, *cur;
+ char *msg = NULL;
+
+ if (repo_get_oid(r, rev, &chain))
+ return error(_("not a valid revision: %s"), rev);
+
+ this = lookup_commit_reference(r, &chain);
+ if (!this)
+ return error(_("not a commit: %s"), rev);
+
+ /*
+ * Walk the commit history, finding each stash entry, and load data into
+ * the array.
+ */
+ for (;;) {
+ const char *author, *committer;
+ size_t author_len, committer_len;
+ const char *p;
+ const char *expected = "git stash <git@stash> 1000684800 +0000";
+ const char *prefix = "git stash: ";
+ struct commit *stash;
+ struct tree *tree = repo_get_commit_tree(r, this);
+
+ if (!tree ||
+ !oideq(&tree->object.oid, r->hash_algo->empty_tree) ||
+ (this->parents &&
+ (!this->parents->next || this->parents->next->next))) {
+ res = error(_("%s is not a valid exported stash commit"),
+ oid_to_hex(&this->object.oid));
+ goto out;
+ }
+
+ buffer = repo_get_commit_buffer(r, this, &bufsize);
+
+ if (!this->parents) {
+ /*
+ * We don't have any parents. Make sure this is our
+ * root commit.
+ */
+ author = find_commit_header(buffer, "author", &author_len);
+ committer = find_commit_header(buffer, "committer", &committer_len);
+
+ if (!author || !committer) {
+ error(_("cannot parse commit %s"), oid_to_hex(&this->object.oid));
+ goto out;
+ }
+
+ if (author_len != strlen(expected) ||
+ committer_len != strlen(expected) ||
+ memcmp(author, expected, author_len) ||
+ memcmp(committer, expected, committer_len)) {
+ res = error(_("found root commit %s with invalid data"), oid_to_hex(&this->object.oid));
+ goto out;
+ }
+ break;
+ }
+
+ p = strstr(buffer, "\n\n");
+ if (!p) {
+ res = error(_("cannot parse commit %s"), oid_to_hex(&this->object.oid));
+ goto out;
+ }
+
+ p += 2;
+ if (((size_t)(bufsize - (p - buffer)) < strlen(prefix)) ||
+ memcmp(prefix, p, strlen(prefix))) {
+ res = error(_("found stash commit %s without expected prefix"), oid_to_hex(&this->object.oid));
+ goto out;
+ }
+
+ stash = this->parents->next->item;
+
+ if (repo_parse_commit(r, this->parents->item) ||
+ repo_parse_commit(r, stash)) {
+ res = error(_("cannot parse parents of commit: %s"),
+ oid_to_hex(&this->object.oid));
+ goto out;
+ }
+
+ if (check_stash_topology(r, stash)) {
+ res = error(_("%s does not look like a stash commit"),
+ oid_to_hex(&stash->object.oid));
+ goto out;
+ }
+
+ repo_unuse_commit_buffer(r, this, buffer);
+ buffer = NULL;
+ items = commit_list_insert(stash, &items);
+ this = this->parents->item;
+ }
+
+ /*
+ * Now, walk each entry, adding it to the stash as a normal stash
+ * commit.
+ */
+ for (cur = items; cur; cur = cur->next) {
+ const char *p;
+ struct object_id *oid;
+
+ this = cur->item;
+ oid = &this->object.oid;
+ buffer = repo_get_commit_buffer(r, this, &bufsize);
+ if (!buffer) {
+ res = error(_("cannot read commit buffer for %s"), oid_to_hex(oid));
+ goto out;
+ }
+
+ p = strstr(buffer, "\n\n");
+ if (!p) {
+ res = error(_("cannot parse commit %s"), oid_to_hex(oid));
+ goto out;
+ }
+
+ p += 2;
+ msg = xmemdupz(p, bufsize - (p - buffer));
+ repo_unuse_commit_buffer(r, this, buffer);
+ buffer = NULL;
+
+ if (do_store_stash(oid, msg, 1)) {
+ res = error(_("cannot save the stash for %s"), oid_to_hex(oid));
+ goto out;
+ }
+ FREE_AND_NULL(msg);
+ }
+out:
+ if (this && buffer)
+ repo_unuse_commit_buffer(r, this, buffer);
+ free_commit_list(items);
+ free(msg);
+
+ return res;
+}
+
+static int import_stash(int argc, const char **argv, const char *prefix,
+ struct repository *repo)
+{
+ struct option options[] = {
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options,
+ git_stash_import_usage,
+ PARSE_OPT_KEEP_DASHDASH);
+
+ if (argc != 1)
+ usage_msg_opt("a revision is required", git_stash_import_usage, options);
+
+ return do_import_stash(repo, argv[0]);
+}
+
+struct stash_entry_data {
+ struct repository *r;
+ struct commit_list **items;
+ size_t count;
+};
+
+static int collect_stash_entries(struct object_id *old_oid UNUSED,
+ struct object_id *new_oid,
+ const char *committer UNUSED,
+ timestamp_t timestamp UNUSED,
+ int tz UNUSED, const char *msg UNUSED,
+ void *cb_data)
+{
+ struct stash_entry_data *data = cb_data;
+ struct commit *stash;
+
+ data->count++;
+ stash = lookup_commit_reference(data->r, new_oid);
+ if (!stash || check_stash_topology(data->r, stash)) {
+ return error(_("%s does not look like a stash commit"),
+ oid_to_hex(new_oid));
+ }
+ data->items = commit_list_append(stash, data->items);
+ return 0;
+}
+
+static int do_export_stash(struct repository *r,
+ const char *ref,
+ int argc,
+ const char **argv)
+{
+ struct object_id base;
+ struct object_context unused;
+ struct commit *prev;
+ struct commit_list *items = NULL, **iter = &items, *cur;
+ int res = 0;
+ int i;
+ struct strbuf revision = STRBUF_INIT;
+ const char *author, *committer;
+
+ /*
+ * This is an arbitrary, fixed date, specifically the one used by git
+ * format-patch. The goal is merely to produce reproducible output.
+ */
+ prepare_fallback_ident("git stash", "git@stash");
+ author = fmt_ident("git stash", "git@stash", WANT_BLANK_IDENT,
+ "2001-09-17T00:00:00Z", 0);
+ committer = fmt_ident("git stash", "git@stash", WANT_BLANK_IDENT,
+ "2001-09-17T00:00:00Z", 0);
+
+ /* First, we create a single empty commit. */
+ if (commit_tree_extended("", 0, r->hash_algo->empty_tree, NULL,
+ &base, author, committer, NULL, NULL))
+ return error(_("unable to write base commit"));
+
+ prev = lookup_commit_reference(r, &base);
+
+ if (argc) {
+ /*
+ * Find each specified stash, and load data into the array.
+ */
+ for (i = 0; i < argc; i++) {
+ struct object_id oid;
+ struct commit *stash;
+
+ if (parse_stash_revision(&revision, argv[i], 1) ||
+ get_oid_with_context(r, revision.buf,
+ GET_OID_QUIETLY | GET_OID_GENTLY,
+ &oid, &unused)) {
+ res = error(_("unable to find stash entry %s"), argv[i]);
+ goto out;
+ }
+
+ stash = lookup_commit_reference(r, &oid);
+ if (!stash || check_stash_topology(r, stash)) {
+ res = error(_("%s does not look like a stash commit"),
+ revision.buf);
+ goto out;
+ }
+ iter = commit_list_append(stash, iter);
+ }
+ } else {
+ /*
+ * Walk the reflog, finding each stash entry, and load data into the
+ * array.
+ */
+ struct stash_entry_data cb_data = {
+ .r = r, .items = iter,
+ };
+ if (refs_for_each_reflog_ent_reverse(get_main_ref_store(r),
+ "refs/stash",
+ collect_stash_entries,
+ &cb_data) && cb_data.count)
+ goto out;
+ }
+
+ /*
+ * Now, create a set of commits identical to the regular stash commits,
+ * but where their first parents form a chain to our original empty
+ * base commit.
+ */
+ items = reverse_commit_list(items);
+ for (cur = items; cur; cur = cur->next) {
+ struct commit_list *parents = NULL;
+ struct commit_list **next = &parents;
+ struct object_id out;
+ struct commit *stash = cur->item;
+
+ next = commit_list_append(prev, next);
+ next = commit_list_append(stash, next);
+ res = write_commit_with_parents(r, &out, &stash->object.oid, parents);
+ free_commit_list(parents);
+ if (res)
+ goto out;
+ prev = lookup_commit_reference(r, &out);
+ }
+ if (ref)
+ refs_update_ref(get_main_ref_store(r), NULL, ref,
+ &prev->object.oid, NULL, 0, UPDATE_REFS_DIE_ON_ERR);
+ else
+ puts(oid_to_hex(&prev->object.oid));
+out:
+ strbuf_release(&revision);
+ free_commit_list(items);
+
+ return res;
+}
+
+enum export_action {
+ ACTION_NONE,
+ ACTION_PRINT,
+ ACTION_TO_REF,
+};
+
+static int export_stash(int argc,
+ const char **argv,
+ const char *prefix,
+ struct repository *repo)
+{
+ const char *ref = NULL;
+ enum export_action action = ACTION_NONE;
+ struct option options[] = {
+ OPT_CMDMODE(0, "print", &action,
+ N_("print the object ID instead of writing it to a ref"),
+ ACTION_PRINT),
+ OPT_STRING(0, "to-ref", &ref, "ref",
+ N_("save the data to the given ref")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options,
+ git_stash_export_usage,
+ PARSE_OPT_KEEP_DASHDASH);
+
+ if (ref && action == ACTION_NONE)
+ action = ACTION_TO_REF;
+
+ if (action == ACTION_NONE || (ref && action == ACTION_PRINT))
+ return error(_("exactly one of --print and --to-ref is required"));
+
+ return do_export_stash(repo, ref, argc, argv);
+}
+
int cmd_stash(int argc,
const char **argv,
const char *prefix,
@@ -1904,6 +2350,8 @@ int cmd_stash(int argc,
OPT_SUBCOMMAND("store", &fn, store_stash),
OPT_SUBCOMMAND("create", &fn, create_stash),
OPT_SUBCOMMAND("push", &fn, push_stash_unassumed),
+ OPT_SUBCOMMAND("export", &fn, export_stash),
+ OPT_SUBCOMMAND("import", &fn, import_stash),
OPT_SUBCOMMAND_F("save", &fn, save_stash, PARSE_OPT_NOCOMPLETE),
OPT_END()
};
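Tying the new subcommands together: git stash export writes a chain of synthetic commits with empty trees, each carrying a real stash commit as its second parent and the previous chain commit as its first parent, terminating in a base commit with a fixed ident; git stash import walks that chain back and re-stores each second parent. A sketch of the resulting shape and of the first-parent walk performed by do_import_stash(); the real loop also validates every commit, as shown above, and the helper below assumes the chain commits have already been parsed:

/*
 *   tip -> chain_N -> chain_N-1 -> ... -> base (empty tree, fixed ident)
 *             \            \
 *              stash_N      stash_N-1
 */
static int count_exported_entries(struct commit *tip)
{
	int n = 0;
	struct commit *c;

	/* every chain commit except the base carries a stash as 2nd parent */
	for (c = tip; c->parents; c = c->parents->item)
		n++;
	return n;
}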
diff --git a/builtin/submodule--helper.c b/builtin/submodule--helper.c
index 640c0059c3..d8a6fa47e5 100644
--- a/builtin/submodule--helper.c
+++ b/builtin/submodule--helper.c
@@ -28,7 +28,7 @@
#include "diff.h"
#include "object-file.h"
#include "object-name.h"
-#include "object-store.h"
+#include "odb.h"
#include "advice.h"
#include "branch.h"
#include "list-objects-filter-options.h"
@@ -41,61 +41,9 @@
typedef void (*each_submodule_fn)(const struct cache_entry *list_item,
void *cb_data);
-static int repo_get_default_remote(struct repository *repo, char **default_remote)
-{
- char *dest = NULL;
- struct strbuf sb = STRBUF_INIT;
- struct ref_store *store = get_main_ref_store(repo);
- const char *refname = refs_resolve_ref_unsafe(store, "HEAD", 0, NULL,
- NULL);
-
- if (!refname)
- return die_message(_("No such ref: %s"), "HEAD");
-
- /* detached HEAD */
- if (!strcmp(refname, "HEAD")) {
- *default_remote = xstrdup("origin");
- return 0;
- }
-
- if (!skip_prefix(refname, "refs/heads/", &refname))
- return die_message(_("Expecting a full ref name, got %s"),
- refname);
-
- strbuf_addf(&sb, "branch.%s.remote", refname);
- if (repo_config_get_string(repo, sb.buf, &dest))
- *default_remote = xstrdup("origin");
- else
- *default_remote = dest;
-
- strbuf_release(&sb);
- return 0;
-}
-
-static int get_default_remote_submodule(const char *module_path, char **default_remote)
-{
- struct repository subrepo;
- int ret;
-
- if (repo_submodule_init(&subrepo, the_repository, module_path,
- null_oid(the_hash_algo)) < 0)
- return die_message(_("could not get a repository handle for submodule '%s'"),
- module_path);
- ret = repo_get_default_remote(&subrepo, default_remote);
- repo_clear(&subrepo);
-
- return ret;
-}
-
static char *get_default_remote(void)
{
- char *default_remote;
- int code = repo_get_default_remote(the_repository, &default_remote);
-
- if (code)
- exit(code);
-
- return default_remote;
+ return xstrdup(repo_default_remote(the_repository));
}
static char *resolve_relative_url(const char *rel_url, const char *up_path, int quiet)
@@ -122,6 +70,46 @@ static char *resolve_relative_url(const char *rel_url, const char *up_path, int
return resolved_url;
}
+static int get_default_remote_submodule(const char *module_path, char **default_remote)
+{
+ const struct submodule *sub;
+ struct repository subrepo;
+ const char *remote_name = NULL;
+ char *url = NULL;
+
+ sub = submodule_from_path(the_repository, null_oid(the_hash_algo), module_path);
+ if (sub && sub->url) {
+ url = xstrdup(sub->url);
+
+ /* Possibly a url relative to parent */
+ if (starts_with_dot_dot_slash(url) ||
+ starts_with_dot_slash(url)) {
+ char *oldurl = url;
+
+ url = resolve_relative_url(oldurl, NULL, 1);
+ free(oldurl);
+ }
+ }
+
+ if (repo_submodule_init(&subrepo, the_repository, module_path,
+ null_oid(the_hash_algo)) < 0)
+ return die_message(_("could not get a repository handle for submodule '%s'"),
+ module_path);
+
+ /* Look up by URL first */
+ if (url)
+ remote_name = repo_remote_from_url(&subrepo, url);
+ if (!remote_name)
+ remote_name = repo_default_remote(&subrepo);
+
+ *default_remote = xstrdup(remote_name);
+
+ repo_clear(&subrepo);
+ free(url);
+
+ return 0;
+}
+
/* the result should be freed by the caller. */
static char *get_submodule_displaypath(const char *path, const char *prefix,
const char *super_prefix)
@@ -303,7 +291,7 @@ static void runcommand_in_submodule_cb(const struct cache_entry *list_item,
char *displaypath;
if (validate_submodule_path(path) < 0)
- exit(128);
+ die(NULL);
displaypath = get_submodule_displaypath(path, info->prefix,
info->super_prefix);
@@ -438,18 +426,6 @@ cleanup:
return ret;
}
-static int starts_with_dot_slash(const char *const path)
-{
- return path_match_flags(path, PATH_MATCH_STARTS_WITH_DOT_SLASH |
- PATH_MATCH_XPLATFORM);
-}
-
-static int starts_with_dot_dot_slash(const char *const path)
-{
- return path_match_flags(path, PATH_MATCH_STARTS_WITH_DOT_DOT_SLASH |
- PATH_MATCH_XPLATFORM);
-}
-
struct init_cb {
const char *prefix;
const char *super_prefix;
@@ -643,7 +619,7 @@ static void status_submodule(const char *path, const struct object_id *ce_oid,
};
if (validate_submodule_path(path) < 0)
- exit(128);
+ die(NULL);
if (!submodule_from_path(the_repository, null_oid(the_hash_algo), path))
die(_("no submodule mapping found in .gitmodules for path '%s'"),
@@ -1257,7 +1233,7 @@ static void sync_submodule(const char *path, const char *prefix,
return;
if (validate_submodule_path(path) < 0)
- exit(128);
+ die(NULL);
sub = submodule_from_path(the_repository, null_oid(the_hash_algo), path);
@@ -1402,7 +1378,7 @@ static void deinit_submodule(const char *path, const char *prefix,
char *sub_git_dir = xstrfmt("%s/.git", path);
if (validate_submodule_path(path) < 0)
- exit(128);
+ die(NULL);
sub = submodule_from_path(the_repository, null_oid(the_hash_algo), path);
@@ -1582,7 +1558,7 @@ static const char alternate_error_advice[] = N_(
);
static int add_possible_reference_from_superproject(
- struct object_directory *odb, void *sas_cb)
+ struct odb_source *alt_odb, void *sas_cb)
{
struct submodule_alternate_setup *sas = sas_cb;
size_t len;
@@ -1591,12 +1567,12 @@ static int add_possible_reference_from_superproject(
* If the alternate object store is another repository, try the
* standard layout with .git/(modules/<name>)+/objects
*/
- if (strip_suffix(odb->path, "/objects", &len)) {
+ if (strip_suffix(alt_odb->path, "/objects", &len)) {
struct repository alternate;
char *sm_alternate;
struct strbuf sb = STRBUF_INIT;
struct strbuf err = STRBUF_INIT;
- strbuf_add(&sb, odb->path, len);
+ strbuf_add(&sb, alt_odb->path, len);
if (repo_init(&alternate, sb.buf, NULL) < 0)
die(_("could not get a repository handle for gitdir '%s'"),
@@ -1668,7 +1644,8 @@ static void prepare_possible_alternates(const char *sm_name,
die(_("Value '%s' for submodule.alternateErrorStrategy is not recognized"), error_strategy);
if (!strcmp(sm_alternate, "superproject"))
- foreach_alt_odb(add_possible_reference_from_superproject, &sas);
+ odb_for_each_alternate(the_repository->objects,
+ add_possible_reference_from_superproject, &sas);
else if (!strcmp(sm_alternate, "no"))
; /* do nothing */
else
@@ -1724,7 +1701,7 @@ static int clone_submodule(const struct module_clone_data *clone_data,
char *to_free = NULL;
if (validate_submodule_path(clone_data_path) < 0)
- exit(128);
+ die(NULL);
if (!is_absolute_path(clone_data->path))
clone_data_path = to_free = xstrfmt("%s/%s", repo_get_work_tree(the_repository),
@@ -3526,7 +3503,7 @@ static int module_add(int argc, const char **argv, const char *prefix,
strip_dir_trailing_slashes(add_data.sm_path);
if (validate_submodule_path(add_data.sm_path) < 0)
- exit(128);
+ die(NULL);
die_on_index_match(add_data.sm_path, force);
die_on_repo_without_commits(add_data.sm_path);
diff --git a/builtin/tag.c b/builtin/tag.c
index 4742b27d16..46cbf892e3 100644
--- a/builtin/tag.c
+++ b/builtin/tag.c
@@ -19,7 +19,7 @@
#include "refs.h"
#include "object-file.h"
#include "object-name.h"
-#include "object-store.h"
+#include "odb.h"
#include "path.h"
#include "tag.h"
#include "parse-options.h"
@@ -244,7 +244,7 @@ static void write_tag_body(int fd, const struct object_id *oid)
struct strbuf payload = STRBUF_INIT;
struct strbuf signature = STRBUF_INIT;
- orig = buf = repo_read_object_file(the_repository, oid, &type, &size);
+ orig = buf = odb_read_object(the_repository->objects, oid, &type, &size);
if (!buf)
return;
if (parse_signature(buf, size, &payload, &signature)) {
@@ -304,7 +304,7 @@ static void create_tag(const struct object_id *object, const char *object_ref,
struct strbuf header = STRBUF_INIT;
int should_edit;
- type = oid_object_info(the_repository, object, NULL);
+ type = odb_read_object_info(the_repository->objects, object, NULL);
if (type <= OBJ_NONE)
die(_("bad object type."));
@@ -401,13 +401,13 @@ static void create_reflog_msg(const struct object_id *oid, struct strbuf *sb)
}
strbuf_addstr(sb, " (");
- type = oid_object_info(the_repository, oid, NULL);
+ type = odb_read_object_info(the_repository->objects, oid, NULL);
switch (type) {
default:
strbuf_addstr(sb, "object of unknown type");
break;
case OBJ_COMMIT:
- if ((buf = repo_read_object_file(the_repository, oid, &type, &size))) {
+ if ((buf = odb_read_object(the_repository->objects, oid, &type, &size))) {
subject_len = find_commit_subject(buf, &subject_start);
strbuf_insert(sb, sb->len, subject_start, subject_len);
} else {
diff --git a/builtin/unpack-file.c b/builtin/unpack-file.c
index e33acfc4ee..4360872ae0 100644
--- a/builtin/unpack-file.c
+++ b/builtin/unpack-file.c
@@ -4,7 +4,7 @@
#include "hex.h"
#include "object-file.h"
#include "object-name.h"
-#include "object-store.h"
+#include "odb.h"
static char *create_temp_file(struct object_id *oid)
{
@@ -14,7 +14,7 @@ static char *create_temp_file(struct object_id *oid)
unsigned long size;
int fd;
- buf = repo_read_object_file(the_repository, oid, &type, &size);
+ buf = odb_read_object(the_repository->objects, oid, &type, &size);
if (!buf || type != OBJ_BLOB)
die("unable to read blob object %s", oid_to_hex(oid));
diff --git a/builtin/unpack-objects.c b/builtin/unpack-objects.c
index e905d5f4e1..a69d59eb50 100644
--- a/builtin/unpack-objects.c
+++ b/builtin/unpack-objects.c
@@ -9,7 +9,7 @@
#include "git-zlib.h"
#include "hex.h"
#include "object-file.h"
-#include "object-store.h"
+#include "odb.h"
#include "object.h"
#include "delta.h"
#include "pack.h"
@@ -232,7 +232,7 @@ static int check_object(struct object *obj, enum object_type type,
if (!(obj->flags & FLAG_OPEN)) {
unsigned long size;
- int type = oid_object_info(the_repository, &obj->oid, &size);
+ int type = odb_read_object_info(the_repository->objects, &obj->oid, &size);
if (type != obj->type || type <= 0)
die("object of unexpected type");
obj->flags |= FLAG_WRITTEN;
@@ -449,8 +449,8 @@ static void unpack_delta_entry(enum object_type type, unsigned long delta_size,
delta_data = get_data(delta_size);
if (!delta_data)
return;
- if (has_object(the_repository, &base_oid,
- HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
+ if (odb_has_object(the_repository->objects, &base_oid,
+ HAS_OBJECT_RECHECK_PACKED | HAS_OBJECT_FETCH_PROMISOR))
; /* Ok we have this one */
else if (resolve_against_held(nr, &base_oid,
delta_data, delta_size))
@@ -516,8 +516,8 @@ static void unpack_delta_entry(enum object_type type, unsigned long delta_size,
if (resolve_against_held(nr, &base_oid, delta_data, delta_size))
return;
- base = repo_read_object_file(the_repository, &base_oid, &type,
- &base_size);
+ base = odb_read_object(the_repository->objects, &base_oid,
+ &type, &base_size);
if (!base) {
error("failed to read delta-pack base object %s",
oid_to_hex(&base_oid));
diff --git a/builtin/update-index.c b/builtin/update-index.c
index 538b619ba4..0c1d4ed55b 100644
--- a/builtin/update-index.c
+++ b/builtin/update-index.c
@@ -981,6 +981,7 @@ int cmd_update_index(int argc,
.type = OPTION_SET_INT,
.long_name = "assume-unchanged",
.value = &mark_valid_only,
+ .precision = sizeof(mark_valid_only),
.help = N_("mark files as \"not changing\""),
.flags = PARSE_OPT_NOARG | PARSE_OPT_NONEG,
.defval = MARK_FLAG,
@@ -989,6 +990,7 @@ int cmd_update_index(int argc,
.type = OPTION_SET_INT,
.long_name = "no-assume-unchanged",
.value = &mark_valid_only,
+ .precision = sizeof(mark_valid_only),
.help = N_("clear assumed-unchanged bit"),
.flags = PARSE_OPT_NOARG | PARSE_OPT_NONEG,
.defval = UNMARK_FLAG,
@@ -997,6 +999,7 @@ int cmd_update_index(int argc,
.type = OPTION_SET_INT,
.long_name = "skip-worktree",
.value = &mark_skip_worktree_only,
+ .precision = sizeof(mark_skip_worktree_only),
.help = N_("mark files as \"index-only\""),
.flags = PARSE_OPT_NOARG | PARSE_OPT_NONEG,
.defval = MARK_FLAG,
@@ -1005,6 +1008,7 @@ int cmd_update_index(int argc,
.type = OPTION_SET_INT,
.long_name = "no-skip-worktree",
.value = &mark_skip_worktree_only,
+ .precision = sizeof(mark_skip_worktree_only),
.help = N_("clear skip-worktree bit"),
.flags = PARSE_OPT_NOARG | PARSE_OPT_NONEG,
.defval = UNMARK_FLAG,
@@ -1079,6 +1083,7 @@ int cmd_update_index(int argc,
.type = OPTION_SET_INT,
.long_name = "fsmonitor-valid",
.value = &mark_fsmonitor_only,
+ .precision = sizeof(mark_fsmonitor_only),
.help = N_("mark files as fsmonitor valid"),
.flags = PARSE_OPT_NOARG | PARSE_OPT_NONEG,
.defval = MARK_FLAG,
@@ -1087,6 +1092,7 @@ int cmd_update_index(int argc,
.type = OPTION_SET_INT,
.long_name = "no-fsmonitor-valid",
.value = &mark_fsmonitor_only,
+ .precision = sizeof(mark_fsmonitor_only),
.help = N_("clear fsmonitor valid bit"),
.flags = PARSE_OPT_NOARG | PARSE_OPT_NONEG,
.defval = UNMARK_FLAG,
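The new .precision = sizeof(...) initializers in this file (and in am.c and write-tree.c elsewhere in this patch) record how wide the destination variable of an OPTION_SET_INT/OPTION_BIT option actually is, presumably so the parser no longer has to assume plain int. A representative definition following the same pattern; the option and variable names here are made up for illustration:

static int example_flag;

static struct option example_opt = {
	.type = OPTION_SET_INT,
	.long_name = "example-mark",
	.value = &example_flag,
	.precision = sizeof(example_flag), /* width of the target variable */
	.help = N_("set the example mark"),
	.flags = PARSE_OPT_NOARG | PARSE_OPT_NONEG,
	.defval = 1,
};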
diff --git a/builtin/update-ref.c b/builtin/update-ref.c
index 2b1e336ba1..1e6131e04a 100644
--- a/builtin/update-ref.c
+++ b/builtin/update-ref.c
@@ -575,30 +575,7 @@ static void print_rejected_refs(const char *refname,
void *cb_data UNUSED)
{
struct strbuf sb = STRBUF_INIT;
- const char *reason = "";
-
- switch (err) {
- case REF_TRANSACTION_ERROR_NAME_CONFLICT:
- reason = "refname conflict";
- break;
- case REF_TRANSACTION_ERROR_CREATE_EXISTS:
- reason = "reference already exists";
- break;
- case REF_TRANSACTION_ERROR_NONEXISTENT_REF:
- reason = "reference does not exist";
- break;
- case REF_TRANSACTION_ERROR_INCORRECT_OLD_VALUE:
- reason = "incorrect old value provided";
- break;
- case REF_TRANSACTION_ERROR_INVALID_NEW_VALUE:
- reason = "invalid new value provided";
- break;
- case REF_TRANSACTION_ERROR_EXPECTED_SYMREF:
- reason = "expected symref but found regular ref";
- break;
- default:
- reason = "unkown failure";
- }
+ const char *reason = ref_transaction_error_msg(err);
strbuf_addf(&sb, "rejected %s %s %s %s\n", refname,
new_oid ? oid_to_hex(new_oid) : new_target,
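The removed switch (whose default case even misspelled "unknown failure") is replaced by a shared ref_transaction_error_msg() helper. A hedged sketch of the kind of mapping such a helper provides, reusing only enum values and strings visible in the removed lines; the real table lives with the refs code:

static const char *example_error_msg(enum ref_transaction_error err)
{
	switch (err) {
	case REF_TRANSACTION_ERROR_CREATE_EXISTS:
		return "reference already exists";
	case REF_TRANSACTION_ERROR_NONEXISTENT_REF:
		return "reference does not exist";
	default:
		return "unknown failure";
	}
}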
diff --git a/builtin/write-tree.c b/builtin/write-tree.c
index 5a8dc377ec..cfec044710 100644
--- a/builtin/write-tree.c
+++ b/builtin/write-tree.c
@@ -35,6 +35,7 @@ int cmd_write_tree(int argc,
.type = OPTION_BIT,
.long_name = "ignore-cache-tree",
.value = &flags,
+ .precision = sizeof(flags),
.help = N_("only useful for debugging"),
.flags = PARSE_OPT_HIDDEN | PARSE_OPT_NOARG,
.defval = WRITE_TREE_IGNORE_CACHE_TREE,