Diffstat (limited to 'builtin/pack-objects.c')
| -rw-r--r-- | builtin/pack-objects.c | 140 |
1 file changed, 79 insertions, 61 deletions
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index 0fc0680b40..1c3b842651 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -1,4 +1,6 @@
 #define USE_THE_REPOSITORY_VARIABLE
+#define DISABLE_SIGN_COMPARE_WARNINGS
+
 #include "builtin.h"
 #include "environment.h"
 #include "gettext.h"
@@ -239,6 +241,7 @@ static enum {
 static uint16_t write_bitmap_options = BITMAP_OPT_HASH_CACHE;
 
 static int exclude_promisor_objects;
+static int exclude_promisor_objects_best_effort;
 
 static int use_delta_islands;
 
@@ -1100,78 +1103,64 @@ static void write_reused_pack_one(struct packed_git *reuse_packfile,
 static size_t write_reused_pack_verbatim(struct bitmapped_pack *reuse_packfile,
 					 struct hashfile *out,
-					 off_t pack_start,
 					 struct pack_window **w_curs)
 {
-	size_t pos = reuse_packfile->bitmap_pos;
+	size_t pos = 0;
 	size_t end;
 
-	if (pos % BITS_IN_EWORD) {
-		size_t word_pos = (pos / BITS_IN_EWORD);
-		size_t offset = pos % BITS_IN_EWORD;
-		size_t last;
-		eword_t word = reuse_packfile_bitmap->words[word_pos];
-
-		if (offset + reuse_packfile->bitmap_nr < BITS_IN_EWORD)
-			last = offset + reuse_packfile->bitmap_nr;
-		else
-			last = BITS_IN_EWORD;
-
-		for (; offset < last; offset++) {
-			if (word >> offset == 0)
-				return word_pos;
-			if (!bitmap_get(reuse_packfile_bitmap,
-					word_pos * BITS_IN_EWORD + offset))
-				return word_pos;
-		}
-
-		pos += BITS_IN_EWORD - (pos % BITS_IN_EWORD);
+	if (reuse_packfile->bitmap_pos) {
+		/*
+		 * We can't reuse whole chunks verbatim out of
+		 * non-preferred packs since we can't guarantee that
+		 * all duplicate objects were resolved in favor of
+		 * that pack.
+		 *
+		 * Even if we have a whole eword_t worth of bits that
+		 * could be reused, there may be objects between the
+		 * objects corresponding to the first and last bit of
+		 * that word which were selected from a different
+		 * pack, causing us to send duplicate or unwanted
+		 * objects.
+		 *
+		 * Handle non-preferred packs from within
+		 * write_reused_pack(), which inspects and reuses
+		 * individual bits.
+		 */
+		return reuse_packfile->bitmap_pos / BITS_IN_EWORD;
 	}
 
 	/*
-	 * Now we're going to copy as many whole eword_t's as possible.
-	 * "end" is the index of the last whole eword_t we copy, but
-	 * there may be additional bits to process. Those are handled
-	 * individually by write_reused_pack().
+	 * Only read through the last word whose bits all correspond
+	 * to objects in the given packfile, since we must stop at a
+	 * word boundary.
 	 *
-	 * Begin by advancing to the first word boundary in range of the
-	 * bit positions occupied by objects in "reuse_packfile". Then
-	 * pick the last word boundary in the same range. If we have at
-	 * least one word's worth of bits to process, continue on.
+	 * If there is no whole word to read (i.e. the packfile
+	 * contains fewer than BITS_IN_EWORD objects), then we'll
+	 * inspect bits one-by-one in write_reused_pack().
 	 */
-	end = reuse_packfile->bitmap_pos + reuse_packfile->bitmap_nr;
-	if (end % BITS_IN_EWORD)
-		end -= end % BITS_IN_EWORD;
-	if (pos >= end)
-		return reuse_packfile->bitmap_pos / BITS_IN_EWORD;
-
-	while (pos < end &&
-	       reuse_packfile_bitmap->words[pos / BITS_IN_EWORD] == (eword_t)~0)
-		pos += BITS_IN_EWORD;
+	end = reuse_packfile->bitmap_nr / BITS_IN_EWORD;
+	if (reuse_packfile_bitmap->word_alloc < end)
+		BUG("fewer words than expected in reuse_packfile_bitmap");
 
-	if (pos > end)
-		pos = end;
+	while (pos < end && reuse_packfile_bitmap->words[pos] == (eword_t)~0)
+		pos++;
 
-	if (reuse_packfile->bitmap_pos < pos) {
-		off_t pack_start_off = pack_pos_to_offset(reuse_packfile->p, 0);
-		off_t pack_end_off = pack_pos_to_offset(reuse_packfile->p,
-							pos - reuse_packfile->bitmap_pos);
+	if (pos) {
+		off_t to_write;
 
-		written += pos - reuse_packfile->bitmap_pos;
+		written = (pos * BITS_IN_EWORD);
+		to_write = pack_pos_to_offset(reuse_packfile->p, written)
+			- sizeof(struct pack_header);
 
 		/* We're recording one chunk, not one object. */
-		record_reused_object(pack_start_off,
-				     pack_start_off - (hashfile_total(out) - pack_start));
+		record_reused_object(sizeof(struct pack_header), 0);
 		hashflush(out);
 		copy_pack_data(out, reuse_packfile->p, w_curs,
-			       pack_start_off, pack_end_off - pack_start_off);
+			       sizeof(struct pack_header), to_write);
 
 		display_progress(progress_state, written);
 	}
 
-	if (pos % BITS_IN_EWORD)
-		BUG("attempted to jump past a word boundary to %"PRIuMAX,
-		    (uintmax_t)pos);
-	return pos / BITS_IN_EWORD;
+	return pos;
 }
 
 static void write_reused_pack(struct bitmapped_pack *reuse_packfile,
@@ -1183,8 +1172,7 @@ static void write_reused_pack(struct bitmapped_pack *reuse_packfile,
 	struct pack_window *w_curs = NULL;
 
 	if (allow_ofs_delta)
-		i = write_reused_pack_verbatim(reuse_packfile, f, pack_start,
-					       &w_curs);
+		i = write_reused_pack_verbatim(reuse_packfile, f, &w_curs);
 
 	for (; i < reuse_packfile_bitmap->word_alloc; ++i) {
 		eword_t word = reuse_packfile_bitmap->words[i];
@@ -1529,7 +1517,7 @@ static int want_found_object(const struct object_id *oid, int exclude,
 			return 0;
 		if (ignore_packed_keep_in_core && p->pack_keep_in_core)
 			return 0;
-		if (has_object_kept_pack(oid, flags))
+		if (has_object_kept_pack(p->repo, oid, flags))
 			return 0;
 	}
 
@@ -1556,7 +1544,7 @@ static int want_object_in_pack_one(struct packed_git *p,
 	if (p == *found_pack)
 		offset = *found_offset;
 	else
-		offset = find_pack_entry_one(oid->hash, p);
+		offset = find_pack_entry_one(oid, p);
 
 	if (offset) {
 		if (!*found_pack) {
@@ -3627,7 +3615,7 @@ static void show_cruft_commit(struct commit *commit, void *data)
 
 static int cruft_include_check_obj(struct object *obj, void *data UNUSED)
 {
-	return !has_object_kept_pack(&obj->oid, IN_CORE_KEEP_PACKS);
+	return !has_object_kept_pack(to_pack.repo, &obj->oid, IN_CORE_KEEP_PACKS);
 }
 
 static int cruft_include_check(struct commit *commit, void *data)
@@ -3858,7 +3846,8 @@ static void show_object__ma_allow_promisor(struct object *obj, const char *name,
 	 * Quietly ignore EXPECTED missing objects. This avoids problems with
 	 * staging them now and getting an odd error later.
 	 */
-	if (!has_object(the_repository, &obj->oid, 0) && is_promisor_object(&obj->oid))
+	if (!has_object(the_repository, &obj->oid, 0) &&
+	    is_promisor_object(to_pack.repo, &obj->oid))
 		return;
 
 	show_object(obj, name, data);
@@ -3927,7 +3916,9 @@ static int add_object_in_unpacked_pack(const struct object_id *oid,
 
 static void add_objects_in_unpacked_packs(void)
 {
-	if (for_each_packed_object(add_object_in_unpacked_pack, NULL,
+	if (for_each_packed_object(to_pack.repo,
+				   add_object_in_unpacked_pack,
+				   NULL,
 				   FOR_EACH_OBJECT_PACK_ORDER |
 				   FOR_EACH_OBJECT_LOCAL_ONLY |
 				   FOR_EACH_OBJECT_SKIP_IN_CORE_KEPT_PACKS |
@@ -3984,7 +3975,7 @@ static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid)
 	while (p) {
 		if ((!p->pack_local || p->pack_keep ||
 		     p->pack_keep_in_core) &&
-		    find_pack_entry_one(oid->hash, p)) {
+		    find_pack_entry_one(oid, p)) {
 			last_found = p;
 			return 1;
 		}
@@ -4312,6 +4303,18 @@ static int option_parse_cruft_expiration(const struct option *opt UNUSED,
 	return 0;
 }
 
+static int is_not_in_promisor_pack_obj(struct object *obj, void *data UNUSED)
+{
+	struct object_info info = OBJECT_INFO_INIT;
+	if (oid_object_info_extended(the_repository, &obj->oid, &info, 0))
+		BUG("should_include_obj should only be called on existing objects");
+	return info.whence != OI_PACKED || !info.u.packed.pack->pack_promisor;
+}
+
+static int is_not_in_promisor_pack(struct commit *commit, void *data) {
+	return is_not_in_promisor_pack_obj((struct object *) commit, data);
+}
+
 int cmd_pack_objects(int argc,
 		     const char **argv,
 		     const char *prefix,
@@ -4424,6 +4427,9 @@ int cmd_pack_objects(int argc,
 			     option_parse_missing_action),
 		OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects,
 			 N_("do not pack objects in promisor packfiles")),
+		OPT_BOOL(0, "exclude-promisor-objects-best-effort",
+			 &exclude_promisor_objects_best_effort,
+			 N_("implies --missing=allow-any")),
 		OPT_BOOL(0, "delta-islands", &use_delta_islands,
 			 N_("respect islands during delta compression")),
 		OPT_STRING_LIST(0, "uri-protocol", &uri_protocols,
@@ -4504,10 +4510,18 @@ int cmd_pack_objects(int argc,
 		strvec_push(&rp, "--unpacked");
 	}
 
+	if (exclude_promisor_objects && exclude_promisor_objects_best_effort)
+		die(_("options '%s' and '%s' cannot be used together"),
+		    "--exclude-promisor-objects", "--exclude-promisor-objects-best-effort");
 	if (exclude_promisor_objects) {
 		use_internal_rev_list = 1;
 		fetch_if_missing = 0;
 		strvec_push(&rp, "--exclude-promisor-objects");
+	} else if (exclude_promisor_objects_best_effort) {
+		use_internal_rev_list = 1;
+		fetch_if_missing = 0;
+		option_parse_missing_action(NULL, "allow-any", 0);
+		/* revs configured below */
 	}
 	if (unpack_unreachable || keep_unreachable || pack_loose_unreachable)
 		use_internal_rev_list = 1;
@@ -4627,6 +4641,10 @@ int cmd_pack_objects(int argc,
 
 		repo_init_revisions(the_repository, &revs, NULL);
 		list_objects_filter_copy(&revs.filter, &filter_options);
+		if (exclude_promisor_objects_best_effort) {
+			revs.include_check = is_not_in_promisor_pack;
+			revs.include_check_obj = is_not_in_promisor_pack_obj;
+		}
 		get_object_list(&revs, rp.nr, rp.v);
 		release_revisions(&revs);
 	}
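Note on the rewritten write_reused_pack_verbatim(): its core is the scan for leading all-ones eword_t entries. As long as a 64-bit word of the reuse bitmap equals (eword_t)~0, every object it covers can be streamed from the preferred pack as one contiguous chunk; the first partial word stops the scan, and everything after it is handled bit by bit in write_reused_pack(). A minimal standalone sketch of that scan, using plain C stand-ins rather than git's own types and helpers, might look like this:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t eword_t;
#define BITS_IN_EWORD 64

/*
 * Count how many leading words of "words" are entirely set to 1.
 * Objects covered by those words could be copied as one contiguous
 * chunk; the remainder must be inspected bit by bit.
 */
static size_t count_full_leading_words(const eword_t *words, size_t nr_words)
{
	size_t pos = 0;

	while (pos < nr_words && words[pos] == (eword_t)~0)
		pos++;

	return pos;
}

int main(void)
{
	/* First two words full, third word only partially set. */
	eword_t words[] = { ~(eword_t)0, ~(eword_t)0, 0x0fff };
	size_t full = count_full_leading_words(words, 3);

	printf("%zu whole words (%zu objects) reusable verbatim\n",
	       full, full * BITS_IN_EWORD);
	return 0;
}

This is why the function now returns a word index rather than an object count, and why non-preferred packs bail out early: their bitmap_pos offset means a full word no longer guarantees a contiguous, duplicate-free run in the pack.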

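Note on --exclude-promisor-objects-best-effort: unlike --exclude-promisor-objects, it implies --missing=allow-any and installs include_check/include_check_obj callbacks so the traversal simply skips objects that already live in a promisor pack. The following self-contained sketch mirrors only the predicate's decision; the struct names and the main() driver are invented for illustration, while the real code queries oid_object_info_extended() and pack_promisor as in the hunk above.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for git's packed_git / object_info. */
struct fake_pack { int pack_promisor; };
struct fake_object { const char *name; int packed; struct fake_pack *pack; };

/* Keep an object only if it is not stored in a promisor pack. */
static int is_not_in_promisor_pack(const struct fake_object *obj)
{
	return !obj->packed || !obj->pack->pack_promisor;
}

int main(void)
{
	struct fake_pack promisor = { 1 }, normal = { 0 };
	struct fake_object objs[] = {
		{ "loose-blob", 0, NULL },
		{ "local-commit", 1, &normal },
		{ "fetched-on-demand", 1, &promisor },
	};

	for (size_t i = 0; i < sizeof(objs) / sizeof(objs[0]); i++)
		printf("%s: %s\n", objs[i].name,
		       is_not_in_promisor_pack(&objs[i]) ? "pack" : "skip");
	return 0;
}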