209 files changed, 7347 insertions, 3118 deletions
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index cd1f52692a..831f4df56c 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -309,7 +309,7 @@ jobs: if: needs.ci-config.outputs.enabled == 'yes' env: jobname: StaticAnalysis - runs-on: ubuntu-18.04 + runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v2 - run: ci/install-dependencies.sh diff --git a/.gitignore b/.gitignore index 42fd7253b4..80b530bbed 100644 --- a/.gitignore +++ b/.gitignore @@ -53,6 +53,7 @@ /git-cvsimport /git-cvsserver /git-daemon +/git-diagnose /git-diff /git-diff-files /git-diff-index diff --git a/Documentation/RelNotes/2.37.3.txt b/Documentation/RelNotes/2.37.3.txt new file mode 100644 index 0000000000..d66689e598 --- /dev/null +++ b/Documentation/RelNotes/2.37.3.txt @@ -0,0 +1,46 @@ +Git 2.37.3 Release Notes +======================== + +This primarily is to backport various fixes accumulated on the 'master' +front since 2.37.2. + +Fixes since v2.37.2 +------------------- + + * The build procedure for Windows that uses CMake has been updated to + pick up the shell interpreter from local installation location. + + * Conditionally allow building Python interpreter on Windows + + * Fix to lstat() emulation on Windows. + + * Older gcc with -Wall complains about the universal zero initializer + "struct s = { 0 };" idiom, which makes developers' lives + inconvenient (as -Werror is enabled by DEVELOPER=YesPlease). The + build procedure has been tweaked to help these compilers. + + * Plug memory leaks in the failure code path in the "merge-ort" merge + strategy backend. + + * Avoid repeatedly running getconf to ask libc version in the test + suite, and instead just as it once per script. + + * Platform-specific code that determines if a directory is OK to use + as a repository has been taught to report more details, especially + on Windows. + + * "vimdiff3" regression has been corrected. + + * "git fsck" reads mode from tree objects but canonicalizes the mode + before passing it to the logic to check object sanity, which has + hid broken tree objects from the checking logic. This has been + corrected, but to help exiting projects with broken tree objects + that they cannot fix retroactively, the severity of anomalies this + code detects has been demoted to "info" for now. + + * Fixes to sparse index compatibility work for "reset" and "checkout" + commands. + + * Documentation for "git add --renormalize" has been improved. + +Also contains other minor documentation updates and code clean-ups. diff --git a/Documentation/RelNotes/2.37.4.txt b/Documentation/RelNotes/2.37.4.txt new file mode 100644 index 0000000000..732176376f --- /dev/null +++ b/Documentation/RelNotes/2.37.4.txt @@ -0,0 +1,31 @@ +Git 2.37.4 Release Notes +======================== + +This primarily is to backport various fixes accumulated on the 'master' +front since 2.37.3. + +Fixes since v2.37.3 +------------------- + + * An earlier optimization discarded a tree-object buffer that is + still in use, which has been corrected. + + * Fix deadlocks between main Git process and subprocess spawned via + the pipe_command() API, that can kill "git add -p" that was + reimplemented in C recently. + + * xcalloc(), imitating calloc(), takes "number of elements of the + array", and "size of a single element", in this order. A call that + does not follow this ordering has been corrected. + + * The preload-index codepath made copies of pathspec to give to + multiple threads, which were left leaked. 
+ + * Update the version of Ubuntu used for GitHub Actions CI from 18.04 + to 22.04. + + * The auto-stashed local changes created by "git merge --autostash" + was mixed into a conflicted state left in the working tree, which + has been corrected. + +Also contains other minor documentation updates and code clean-ups. diff --git a/Documentation/RelNotes/2.38.0.txt b/Documentation/RelNotes/2.38.0.txt index 4a08602e0d..311d9224c9 100644 --- a/Documentation/RelNotes/2.38.0.txt +++ b/Documentation/RelNotes/2.38.0.txt @@ -54,6 +54,37 @@ UI, Workflows & Features "human" to show the reported value in human-readable format, like "3.40MiB". + * The "diagnose" feature to create a zip archive for diagnostic + material has been lifted from "scalar" and made into a feature of + "git bugreport". + + * The namespaces used by "log --decorate" from "refs/" hierarchy by + default has been tightened. + + * "git rev-list --ancestry-path=C A..B" is a natural extension of + "git rev-list A..B"; instead of choosing a subset of A..B to those + that have ancestry relationship with A, it lets a subset with + ancestry relationship with C. + + * "scalar" now enables built-in fsmonitor on enlisted repositories, + when able. + + * The bash prompt (in contrib/) learned to optionally indicate when + the index is unmerged. + + * "git clone" command learned the "--bundle-uri" option to coordinate + with hosting sites the use of pre-prepared bundle files. + + * "git range-diff" learned to honor pathspec argument if given. + + * "git format-patch --from=<ident>" can be told to add an in-body + "From:" line even for commits that are authored by the given + <ident> with "--force-in-body-from"option. + + * The built-in fsmonitor refuses to work on a network mounted + repositories; a configuration knob for users to override this has + been introduced. + Performance, Internal Implementation, Development Support etc. @@ -115,6 +146,25 @@ Performance, Internal Implementation, Development Support etc. * The "bundle URI" design gets documented. + * The common ancestor negotiation exchange during a "git fetch" + session now leaves trace log. + + * Test portability improvements. + (merge 4d1d843be7 mt/rot13-in-c later to maint). + + * The "subcommand" mode is introduced to parse-options API and update + the command line parser of Git commands with subcommands. + + * The pack bitmap file gained a bitmap-lookup table to speed up + locating the necessary bitmap for a given commit. + + * The assembly version of SHA-1 implementation for PPC has been + removed. + + * The server side that responds to "git fetch" and "git clone" + request has been optimized by allowing it to send objects in its + object store without recomputing and validating the object names. + Fixes since v2.37 ----------------- @@ -148,11 +198,8 @@ Fixes since v2.37 * Recent update to vimdiff layout code has been made more robust against different end-user vim settings. - * Plug various memory leaks. - (merge ece3974ba6 ab/leakfix later to maint). - - * Plug various memory leaks in test-tool commands. - (merge f40a693450 ab/test-tool-leakfix later to maint). + * Plug various memory leaks, both in the main code and in test-tool + commands. * Fixes a long-standing corner case bug around directory renames in the merge-ort strategy. @@ -211,45 +258,34 @@ Fixes since v2.37 * Gitweb had legacy URL shortener that is specific to the way projects hosted on kernel.org used to (but no longer) work, which has been removed. - (merge 75707da4fa jr/gitweb-title-shortening later to maint). 
* Fix build procedure for Windows that uses CMake so that it can pick up the shell interpreter from local installation location. - (merge 476e54b1c6 ca/unignore-local-installation-on-windows later to maint). * Conditionally allow building Python interpreter on Windows - (merge 2f0623aaa7 js/mingw-with-python later to maint). * Fix to lstat() emulation on Windows. - (merge 82ba1191ff js/lstat-mingw-enotdir-fix later to maint). * Older gcc with -Wall complains about the universal zero initializer "struct s = { 0 };" idiom, which makes developers' lives inconvenient (as -Werror is enabled by DEVELOPER=YesPlease). The build procedure has been tweaked to help these compilers. - (merge b53a5f2416 jk/struct-zero-init-with-older-gcc later to maint). * Plug memory leaks in the failure code path in the "merge-ort" merge strategy backend. - (merge 1250dff32b js/ort-clean-up-after-failed-merge later to maint). * "git symbolic-ref symref non..sen..se" is now diagnosed as an error. - (merge 04ede97211 lt/symbolic-ref-sanity later to maint). - * A follow-up fix to a fix for a regression in 2.36. - (merge 99ddc24672 ab/hooks-regression-fix later to maint). + * A follow-up fix to a fix for a regression in 2.36 around hooks. * Avoid repeatedly running getconf to ask libc version in the test suite, and instead just as it once per script. - (merge a6a58f7801 pw/use-glibc-tunable-for-malloc-optim later to maint). * Platform-specific code that determines if a directory is OK to use as a repository has been taught to report more details, especially on Windows. - (merge 3f7207e2ea js/safe-directory-plus later to maint). * "vimdiff3" regression fix. - (merge 34133d9658 fc/vimdiff-layout-vimdiff3-fix later to maint). * "git fsck" reads mode from tree objects but canonicalizes the mode before passing it to the logic to check object sanity, which has @@ -257,12 +293,81 @@ Fixes since v2.37 corrected, but to help exiting projects with broken tree objects that they cannot fix retroactively, the severity of anomalies this code detects has been demoted to "info" for now. - (merge 4dd3b045f5 jk/fsck-tree-mode-bits-fix later to maint). * Fixes to sparse index compatibility work for "reset" and "checkout" commands. - (merge b15207b8cf vd/sparse-reset-checkout-fixes later to maint). + + * An earlier optimization discarded a tree-object buffer that is + still in use, which has been corrected. + (merge 1490d7d82d jk/is-promisor-object-keep-tree-in-use later to maint). + + * Fix deadlocks between main Git process and subprocess spawned via + the pipe_command() API, that can kill "git add -p" that was + reimplemented in C recently. + (merge 716c1f649e jk/pipe-command-nonblock later to maint). + + * The sequencer machinery translated messages left in the reflog by + mistake, which has been corrected. + + * xcalloc(), imitating calloc(), takes "number of elements of the + array", and "size of a single element", in this order. A call that + does not follow this ordering has been corrected. + (merge c4bbd9bb8f sg/xcalloc-cocci-fix later to maint). + + * The preload-index codepath made copies of pathspec to give to + multiple threads, which were left leaked. + (merge 23578904da ad/preload-plug-memleak later to maint). + + * Update the version of Ubuntu used for GitHub Actions CI from 18.04 + to 22.04. + (merge ef46584831 ds/github-actions-use-newer-ubuntu later to maint). + + * The auto-stashed local changes created by "git merge --autostash" + was mixed into a conflicted state left in the working tree, which + has been corrected. 
+ (merge d3a9295ada en/merge-unstash-only-on-clean-merge later to maint). + + * Multi-pack index got corrupted when preferred pack changed from one + pack to another in a certain way, which has been corrected. + (merge 99e4d084ff tb/midx-with-changing-preferred-pack-fix later to maint). + + * The clean-up of temporary files created via mks_tempfile_dt() was + racy and attempted to unlink() the leading directory when signals + are involved, which has been corrected. + (merge babe2e0559 rs/tempfile-cleanup-race-fix later to maint). + + * FreeBSD portability fix for "git maintenance" that spawns "crontab" + to schedule tasks. + (merge ee69e7884e bc/gc-crontab-fix later to maint). + + * Those who use diff-so-fancy as the diff-filter noticed a regression + or two in the code that parses the diff output in the built-in + version of "add -p", which has been corrected. + (merge 0a101676e5 js/add-p-diff-parsing-fix later to maint). + + * Segfault fix-up to an earlier fix to the topic to teach "git reset" + and "git checkout" work better in a sparse checkout. + (merge 037f8ea6d9 vd/sparse-reset-checkout-fixes later to maint). + + * "git diff --no-index A B" managed its the pathnames of its two + input files rather haphazardly, sometimes leaking them. The + command line argument processing has been straightened out to clean + it up. + (merge 2b43dd0eb5 rs/diff-no-index-cleanup later to maint). + + * "git rev-list --verify-objects" ought to inspect the contents of + objects and notice corrupted ones, but it didn't when the commit + graph is in use, which has been corrected. + (merge b27ccae34b jk/rev-list-verify-objects-fix later to maint). + + * More fixes to "add -p" + (merge 64ec8efb83 js/builtin-add-p-portability-fix later to maint). + + * The parser in the script interface to parse-options in "git + rev-parse" has been updated to diagnose a bogus input correctly. + (merge f20b9c36d0 ow/rev-parse-parseopt-fix later to maint). * Other code cleanup, docfix, build fix, etc. - (merge 94955d576b gc/git-reflog-doc-markup later to maint). - (merge efae7ce692 po/doc-add-renormalize later to maint). + (merge 77b9e85c0f vd/fix-perf-tests later to maint). + (merge 0682bc43f5 jk/test-crontab-fixes later to maint). + (merge b46dd1726c cc/doc-trailer-whitespace-rules later to maint). diff --git a/Documentation/config/format.txt b/Documentation/config/format.txt index fdbc06a4d2..c7303d8d9f 100644 --- a/Documentation/config/format.txt +++ b/Documentation/config/format.txt @@ -15,6 +15,10 @@ format.from:: different. If set to a non-boolean value, format-patch uses that value instead of your committer identity. Defaults to false. +format.forceInBodyFrom:: + Provides the default value for the `--[no-]force-in-body-from` + option to format-patch. Defaults to false. + format.numbered:: A boolean which can enable or disable sequence numbers in patch subjects. It defaults to "auto" which enables it only if there diff --git a/Documentation/config/log.txt b/Documentation/config/log.txt index 456eb07800..5250ba45fb 100644 --- a/Documentation/config/log.txt +++ b/Documentation/config/log.txt @@ -18,6 +18,11 @@ log.decorate:: names are shown. This is the same as the `--decorate` option of the `git log`. +log.initialDecorationSet:: + By default, `git log` only shows decorations for certain known ref + namespaces. If 'all' is specified, then show all refs as + decorations. + log.excludeDecoration:: Exclude the specified patterns from the log decorations. 
This is similar to the `--decorate-refs-exclude` command-line option, but diff --git a/Documentation/config/pack.txt b/Documentation/config/pack.txt index 3e581eab84..53093d9996 100644 --- a/Documentation/config/pack.txt +++ b/Documentation/config/pack.txt @@ -164,6 +164,13 @@ When writing a multi-pack reachability bitmap, no new namehashes are computed; instead, any namehashes stored in an existing bitmap are permuted into their appropriate location when writing a new bitmap. +pack.writeBitmapLookupTable:: + When true, Git will include a "lookup table" section in the + bitmap index (if one is written). This table is used to defer + loading individual bitmaps as late as possible. This can be + beneficial in repositories that have relatively large bitmap + indexes. Defaults to false. + pack.writeReverseIndex:: When true, git will write a corresponding .rev file (see: linkgit:gitformat-pack[5]) diff --git a/Documentation/git-bugreport.txt b/Documentation/git-bugreport.txt index d8817bf3ce..eca726e579 100644 --- a/Documentation/git-bugreport.txt +++ b/Documentation/git-bugreport.txt @@ -9,6 +9,7 @@ SYNOPSIS -------- [verse] 'git bugreport' [(-o | --output-directory) <path>] [(-s | --suffix) <format>] + [--diagnose[=<mode>]] DESCRIPTION ----------- @@ -31,6 +32,10 @@ The following information is captured automatically: - A list of enabled hooks - $SHELL +Additional information may be gathered into a separate zip archive using the +`--diagnose` option, and can be attached alongside the bugreport document to +provide additional context to readers. + This tool is invoked via the typical Git setup process, which means that in some cases, it might not be able to launch - for example, if a relevant config file is unreadable. In this kind of scenario, it may be helpful to manually gather @@ -49,6 +54,19 @@ OPTIONS named 'git-bugreport-<formatted suffix>'. This should take the form of a strftime(3) format string; the current local time will be used. +--no-diagnose:: +--diagnose[=<mode>]:: + Create a zip archive of supplemental information about the user's + machine, Git client, and repository state. The archive is written to the + same output directory as the bug report and is named + 'git-diagnostics-<formatted suffix>'. ++ +Without `mode` specified, the diagnostic archive will contain the default set of +statistics reported by `git diagnose`. An optional `mode` value may be specified +to change which information is included in the archive. See +linkgit:git-diagnose[1] for the list of valid values for `mode` and details +about their usage. + GIT --- Part of the linkgit:git[1] suite diff --git a/Documentation/git-clone.txt b/Documentation/git-clone.txt index 632bd1348e..d032d971dd 100644 --- a/Documentation/git-clone.txt +++ b/Documentation/git-clone.txt @@ -323,6 +323,13 @@ or `--mirror` is given) for `host.xz:foo/.git`). Cloning into an existing directory is only allowed if the directory is empty. +--bundle-uri=<uri>:: + Before fetching from the remote, fetch a bundle from the given + `<uri>` and unbundle the data into the local repository. The refs + in the bundle will be stored under the hidden `refs/bundle/*` + namespace. This option is incompatible with `--depth`, + `--shallow-since`, and `--shallow-exclude`. 
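A minimal usage sketch of the `--bundle-uri` clone option documented above; the URLs are hypothetical placeholders for a hosting site's pre-prepared bundle and the repository itself:

    $ git clone --bundle-uri=https://example.com/repo.bundle \
          https://example.com/repo.git
    # refs unbundled before the fetch are kept under the hidden namespace
    $ git -C repo for-each-ref refs/bundle/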
+ :git-clone: 1 include::urls.txt[] diff --git a/Documentation/git-diagnose.txt b/Documentation/git-diagnose.txt new file mode 100644 index 0000000000..3ec8cc7ad7 --- /dev/null +++ b/Documentation/git-diagnose.txt @@ -0,0 +1,65 @@ +git-diagnose(1) +================ + +NAME +---- +git-diagnose - Generate a zip archive of diagnostic information + +SYNOPSIS +-------- +[verse] +'git diagnose' [(-o | --output-directory) <path>] [(-s | --suffix) <format>] + [--mode=<mode>] + +DESCRIPTION +----------- +Collects detailed information about the user's machine, Git client, and +repository state and packages that information into a zip archive. The +generated archive can then, for example, be shared with the Git mailing list to +help debug an issue or serve as a reference for independent debugging. + +By default, the following information is captured in the archive: + + * 'git version --build-options' + * The path to the repository root + * The available disk space on the filesystem + * The name and size of each packfile, including those in alternate object + stores + * The total count of loose objects, as well as counts broken down by + `.git/objects` subdirectory + +Additional information can be collected by selecting a different diagnostic mode +using the `--mode` option. + +This tool differs from linkgit:git-bugreport[1] in that it collects much more +detailed information with a greater focus on reporting the size and data shape +of repository contents. + +OPTIONS +------- +-o <path>:: +--output-directory <path>:: + Place the resulting diagnostics archive in `<path>` instead of the + current directory. + +-s <format>:: +--suffix <format>:: + Specify an alternate suffix for the diagnostics archive name, to create + a file named 'git-diagnostics-<formatted suffix>'. This should take the + form of a strftime(3) format string; the current local time will be + used. + +--mode=(stats|all):: + Specify the type of diagnostics that should be collected. The default behavior + of 'git diagnose' is equivalent to `--mode=stats`. ++ +The `--mode=all` option collects everything included in `--mode=stats`, as well +as copies of `.git`, `.git/hooks`, `.git/info`, `.git/logs`, and +`.git/objects/info` directories. This additional information may be sensitive, +as it can be used to reconstruct the full contents of the diagnosed repository. +Users should exercise caution when sharing an archive generated with +`--mode=all`. + +GIT +--- +Part of the linkgit:git[1] suite diff --git a/Documentation/git-format-patch.txt b/Documentation/git-format-patch.txt index be797d7a28..dfcc7da4c2 100644 --- a/Documentation/git-format-patch.txt +++ b/Documentation/git-format-patch.txt @@ -275,6 +275,17 @@ header). Note also that `git send-email` already handles this transformation for you, and this option should not be used if you are feeding the result to `git send-email`. +--[no-]force-in-body-from:: + With the e-mail sender specified via the `--from` option, by + default, an in-body "From:" to identify the real author of + the commit is added at the top of the commit log message if + the sender is different from the author. With this option, + the in-body "From:" is added even when the sender and the + author have the same name and address, which may help if the + mailing list software mangles the sender's identity. + Defaults to the value of the `format.forceInBodyFrom` + configuration variable. + --add-header=<header>:: Add an arbitrary header to the email headers. 
This is in addition to any configured headers, and may be used multiple times. diff --git a/Documentation/git-interpret-trailers.txt b/Documentation/git-interpret-trailers.txt index 956a01d184..6d6197cd0a 100644 --- a/Documentation/git-interpret-trailers.txt +++ b/Documentation/git-interpret-trailers.txt @@ -60,10 +60,12 @@ non-whitespace lines before a line that starts with '---' (followed by a space or the end of the line). Such three minus signs start the patch part of the message. See also `--no-divider` below. -When reading trailers, there can be whitespaces after the -token, the separator and the value. There can also be whitespaces -inside the token and the value. The value may be split over multiple lines with -each subsequent line starting with whitespace, like the "folding" in RFC 822. +When reading trailers, there can be no whitespace before or inside the +token, but any number of regular space and tab characters are allowed +between the token and the separator. There can be whitespaces before, +inside or after the value. The value may be split over multiple lines +with each subsequent line starting with at least one whitespace, like +the "folding" in RFC 822. Note that 'trailers' do not follow and are not intended to follow many rules for RFC 822 headers. For example they do not follow diff --git a/Documentation/git-log.txt b/Documentation/git-log.txt index 20e87cecf4..b1285aee3c 100644 --- a/Documentation/git-log.txt +++ b/Documentation/git-log.txt @@ -45,13 +45,23 @@ OPTIONS --decorate-refs=<pattern>:: --decorate-refs-exclude=<pattern>:: - If no `--decorate-refs` is given, pretend as if all refs were - included. For each candidate, do not use it for decoration if it + For each candidate reference, do not use it for decoration if it matches any patterns given to `--decorate-refs-exclude` or if it doesn't match any of the patterns given to `--decorate-refs`. The `log.excludeDecoration` config option allows excluding refs from the decorations, but an explicit `--decorate-refs` pattern will override a match in `log.excludeDecoration`. ++ +If none of these options or config settings are given, then references are +used as decoration if they match `HEAD`, `refs/heads/`, `refs/remotes/`, +`refs/stash/`, or `refs/tags/`. + +--clear-decorations:: + When specified, this option clears all previous `--decorate-refs` + or `--decorate-refs-exclude` options and relaxes the default + decoration filter to include all references. This option is + assumed if the config value `log.initialDecorationSet` is set to + `all`. --source:: Print out the ref name given on the command line by which each diff --git a/Documentation/git-range-diff.txt b/Documentation/git-range-diff.txt index fe350d7f40..0b393715d7 100644 --- a/Documentation/git-range-diff.txt +++ b/Documentation/git-range-diff.txt @@ -12,6 +12,7 @@ SYNOPSIS [--no-dual-color] [--creation-factor=<factor>] [--left-only | --right-only] ( <range1> <range2> | <rev1>...<rev2> | <base> <rev1> <rev2> ) + [[--] <path>...] DESCRIPTION ----------- @@ -19,6 +20,9 @@ DESCRIPTION This command shows the differences between two versions of a patch series, or more generally, two commit ranges (ignoring merge commits). +In the presence of `<path>` arguments, these commit ranges are limited +accordingly. + To that end, it first finds pairs of commits from both commit ranges that correspond with each other. Two commits are said to correspond when the diff between their patches (i.e. 
the author information, the commit diff --git a/Documentation/gitremote-helpers.txt b/Documentation/gitremote-helpers.txt index 6f1e269ae4..ed8da428c9 100644 --- a/Documentation/gitremote-helpers.txt +++ b/Documentation/gitremote-helpers.txt @@ -168,6 +168,9 @@ Supported commands: 'list', 'import'. Can guarantee that when a clone is requested, the received pack is self contained and is connected. +'get':: + Can use the 'get' command to download a file from a given URI. + If a helper advertises 'connect', Git will use it if possible and fall back to another capability if the helper requests so when connecting (see the 'connect' command under COMMANDS). @@ -418,6 +421,12 @@ Supported if the helper has the "connect" capability. + Supported if the helper has the "stateless-connect" capability. +'get' <uri> <path>:: + Downloads the file from the given `<uri>` to the given `<path>`. If + `<path>.temp` exists, then Git assumes that the `.temp` file is a + partial download from a previous attempt and will resume the + download from that position. + If a fatal error occurs, the program writes the error message to stderr and exits. The caller should expect that a suitable error message has been printed if the child closes the connection without diff --git a/Documentation/rev-list-options.txt b/Documentation/rev-list-options.txt index bd08d18576..1837509566 100644 --- a/Documentation/rev-list-options.txt +++ b/Documentation/rev-list-options.txt @@ -392,12 +392,14 @@ Default mode:: merges from the resulting history, as there are no selected commits contributing to this merge. ---ancestry-path:: +--ancestry-path[=<commit>]:: When given a range of commits to display (e.g. 'commit1..commit2' - or 'commit2 {caret}commit1'), only display commits that exist - directly on the ancestry chain between the 'commit1' and - 'commit2', i.e. commits that are both descendants of 'commit1', - and ancestors of 'commit2'. + or 'commit2 {caret}commit1'), only display commits in that range + that are ancestors of <commit>, descendants of <commit>, or + <commit> itself. If no commit is specified, use 'commit1' (the + excluded part of the range) as <commit>. Can be passed multiple + times; if so, a commit is included if it is any of the commits + given or if it is an ancestor or descendant of one of them. A more detailed explanation follows. @@ -571,11 +573,10 @@ Note the major differences in `N`, `P`, and `Q` over `--full-history`: There is another simplification mode available: ---ancestry-path:: - Limit the displayed commits to those directly on the ancestry - chain between the ``from'' and ``to'' commits in the given commit - range. I.e. only display commits that are ancestor of the ``to'' - commit and descendants of the ``from'' commit. +--ancestry-path[=<commit>]:: + Limit the displayed commits to those which are an ancestor of + <commit>, or which are a descendant of <commit>, or are <commit> + itself. + As an example use case, consider the following commit history: + @@ -607,6 +608,29 @@ option does. Applied to the 'D..M' range, it results in: \ L--M ----------------------------------------------------------------------- ++ +We can also use `--ancestry-path=D` instead of `--ancestry-path` which +means the same thing when applied to the 'D..M' range but is just more +explicit. ++ +If we instead are interested in a given topic within this range, and all +commits affected by that topic, we may only want to view the subset of +`D..M` which contain that topic in their ancestry path. 
So, using +`--ancestry-path=H D..M` for example would result in: ++ +----------------------------------------------------------------------- + E + \ + G---H---I---J + \ + L--M +----------------------------------------------------------------------- ++ +Whereas `--ancestry-path=K D..M` would result in ++ +----------------------------------------------------------------------- + K---------------L--M +----------------------------------------------------------------------- Before discussing another option, `--show-pulls`, we need to create a new example history. @@ -662,7 +686,7 @@ Here, the merge commits `O` and `P` contribute extra noise, as they did not actually contribute a change to `file.txt`. They only merged a topic that was based on an older version of `file.txt`. This is a common issue in repositories using a workflow where many contributors work in -parallel and merge their topic branches along a single trunk: manu +parallel and merge their topic branches along a single trunk: many unrelated merges appear in the `--full-history` results. When using the `--simplify-merges` option, the commits `O` and `P` diff --git a/Documentation/technical/api-parse-options.txt b/Documentation/technical/api-parse-options.txt index acfd5dc1d8..c2a5e42914 100644 --- a/Documentation/technical/api-parse-options.txt +++ b/Documentation/technical/api-parse-options.txt @@ -8,7 +8,8 @@ Basics ------ The argument vector `argv[]` may usually contain mandatory or optional -'non-option arguments', e.g. a filename or a branch, and 'options'. +'non-option arguments', e.g. a filename or a branch, 'options', and +'subcommands'. Options are optional arguments that start with a dash and that allow to change the behavior of a command. @@ -48,6 +49,33 @@ The parse-options API allows: option, e.g. `-a -b --option -- --this-is-a-file` indicates that `--this-is-a-file` must not be processed as an option. +Subcommands are special in a couple of ways: + +* Subcommands only have long form, and they have no double dash prefix, no + negated form, and no description, and they don't take any arguments, and + can't be abbreviated. + +* There must be exactly one subcommand among the arguments, or zero if the + command has a default operation mode. + +* All arguments following the subcommand are considered to be arguments of + the subcommand, and, conversely, arguments meant for the subcommand may + not preceed the subcommand. + +Therefore, if the options array contains at least one subcommand and +`parse_options()` encounters the first dashless argument, it will either: + +* stop and return, if that dashless argument is a known subcommand, setting + `value` to the function pointer associated with that subcommand, storing + the name of the subcommand in argv[0], and leaving the rest of the + arguments unprocessed, or + +* stop and return, if it was invoked with the `PARSE_OPT_SUBCOMMAND_OPTIONAL` + flag and that dashless argument doesn't match any subcommands, leaving + `value` unchanged and the rest of the arguments unprocessed, or + +* show error and usage, and abort. + Steps to parse options ---------------------- @@ -90,8 +118,8 @@ Flags are the bitwise-or of: Keep the first argument, which contains the program name. It's removed from argv[] by default. -`PARSE_OPT_KEEP_UNKNOWN`:: - Keep unknown arguments instead of erroring out. This doesn't +`PARSE_OPT_KEEP_UNKNOWN_OPT`:: + Keep unknown options instead of erroring out. This doesn't work for all combinations of arguments as users might expect it to do. E.g. 
if the first argument in `--unknown --known` takes a value (which we can't know), the second one is @@ -101,6 +129,8 @@ Flags are the bitwise-or of: non-option, not as a value belonging to the unknown option, the parser early. That's why parse_options() errors out if both options are set. + Note that non-option arguments are always kept, even without + this flag. `PARSE_OPT_NO_INTERNAL_HELP`:: By default, parse_options() handles `-h`, `--help` and @@ -108,6 +138,13 @@ Flags are the bitwise-or of: turns it off and allows one to add custom handlers for these options, or to just leave them unknown. +`PARSE_OPT_SUBCOMMAND_OPTIONAL`:: + Don't error out when no subcommand is specified. + +Note that `PARSE_OPT_STOP_AT_NON_OPTION` is incompatible with subcommands; +while `PARSE_OPT_KEEP_DASHDASH` and `PARSE_OPT_KEEP_UNKNOWN_OPT` can only be +used with subcommands when combined with `PARSE_OPT_SUBCOMMAND_OPTIONAL`. + Data Structure -------------- @@ -236,10 +273,14 @@ There are some macros to easily define options: `OPT_CMDMODE(short, long, &int_var, description, enum_val)`:: Define an "operation mode" option, only one of which in the same group of "operating mode" options that share the same `int_var` - can be given by the user. `enum_val` is set to `int_var` when the + can be given by the user. `int_var` is set to `enum_val` when the option is used, but an error is reported if other "operating mode" option has already set its value to the same `int_var`. + In new commands consider using subcommands instead. +`OPT_SUBCOMMAND(long, &fn_ptr, subcommand_fn)`:: + Define a subcommand. `subcommand_fn` is put into `fn_ptr` when + this subcommand is used. The last element of the array must be `OPT_END()`. diff --git a/Documentation/technical/api-trace2.txt b/Documentation/technical/api-trace2.txt index 77a150b30e..2afa28bb5a 100644 --- a/Documentation/technical/api-trace2.txt +++ b/Documentation/technical/api-trace2.txt @@ -717,6 +717,7 @@ The "exec_id" field is a command-unique id and is only useful if the { "event":"def_param", ... + "scope":"global", "param":"core.abbrev", "value":"7" } @@ -1207,6 +1208,45 @@ at offset 508. This example also shows that thread names are assigned in a racy manner as each thread starts and allocates TLS storage. +Config (def param) Events:: + + Dump "interesting" config values to trace2 log. ++ +We can optionally emit configuration events, see +`trace2.configparams` in linkgit:git-config[1] for how to enable +it. ++ +---------------- +$ git config --system color.ui never +$ git config --global color.ui always +$ git config --local color.ui auto +$ git config --list --show-scope | grep 'color.ui' +system color.ui=never +global color.ui=always +local color.ui=auto +---------------- ++ +Then, mark the config `color.ui` as "interesting" config with +`GIT_TRACE2_CONFIG_PARAMS`: ++ +---------------- +$ export GIT_TRACE2_PERF_BRIEF=1 +$ export GIT_TRACE2_PERF=~/log.perf +$ export GIT_TRACE2_CONFIG_PARAMS=color.ui +$ git version +... +$ cat ~/log.perf +d0 | main | version | | | | | ... 
+d0 | main | start | | 0.001642 | | | /usr/local/bin/git version +d0 | main | cmd_name | | | | | version (version) +d0 | main | def_param | | | | scope:system | color.ui:never +d0 | main | def_param | | | | scope:global | color.ui:always +d0 | main | def_param | | | | scope:local | color.ui:auto +d0 | main | data | r0 | 0.002100 | 0.002100 | fsync | fsync/writeout-only:0 +d0 | main | data | r0 | 0.002126 | 0.002126 | fsync | fsync/hardware-flush:0 +d0 | main | exit | | 0.000470 | | | code:0 +d0 | main | atexit | | 0.000477 | | | code:0 +---------------- == Future Work === Relationship to the Existing Trace Api (api-trace.txt) diff --git a/Documentation/technical/bitmap-format.txt b/Documentation/technical/bitmap-format.txt index a85f58f515..c2e652b71a 100644 --- a/Documentation/technical/bitmap-format.txt +++ b/Documentation/technical/bitmap-format.txt @@ -72,6 +72,17 @@ MIDXs, both the bit-cache and rev-cache extensions are required. pack/MIDX. The format and meaning of the name-hash is described below. + ** {empty} + BITMAP_OPT_LOOKUP_TABLE (0x10): ::: + If present, the end of the bitmap file contains a table + containing a list of `N` <commit_pos, offset, xor_row> + triplets. The format and meaning of the table is described + below. ++ +NOTE: Unlike the xor_offset used to compress an individual bitmap, +`xor_row` stores an *absolute* index into the lookup table, not a location +relative to the current entry. + 4-byte entry count (network byte order): :: The total count of entries (bitmapped commits) in this bitmap index. @@ -216,3 +227,31 @@ Note that this hashing scheme is tied to the BITMAP_OPT_HASH_CACHE flag. If implementations want to choose a different hashing scheme, they are free to do so, but MUST allocate a new header flag (because comparing hashes made under two different schemes would be pointless). + +Commit lookup table +------------------- + +If the BITMAP_OPT_LOOKUP_TABLE flag is set, the last `N * (4 + 8 + 4)` +bytes (preceding the name-hash cache and trailing hash) of the `.bitmap` +file contains a lookup table specifying the information needed to get +the desired bitmap from the entries without parsing previous unnecessary +bitmaps. + +For a `.bitmap` containing `nr_entries` reachability bitmaps, the table +contains a list of `nr_entries` <commit_pos, offset, xor_row> triplets +(sorted in the ascending order of `commit_pos`). The content of i'th +triplet is - + + * {empty} + commit_pos (4 byte integer, network byte order): :: + It stores the object position of a commit (in the midx or pack + index). + + * {empty} + offset (8 byte integer, network byte order): :: + The offset from which that commit's bitmap can be read. + + * {empty} + xor_row (4 byte integer, network byte order): :: + The position of the triplet whose bitmap is used to compress + this one, or `0xffffffff` if no such bitmap exists. diff --git a/Documentation/technical/scalar.txt b/Documentation/technical/scalar.txt index 08bc09c225..0600150b3a 100644 --- a/Documentation/technical/scalar.txt +++ b/Documentation/technical/scalar.txt @@ -84,23 +84,23 @@ series have been accepted: - `scalar-diagnose`: The `scalar` command is taught the `diagnose` subcommand. -Roughly speaking (and subject to change), the following series are needed to -"finish" this initial version of Scalar: +- `scalar-generalize-diagnose`: Move the functionality of `scalar diagnose` + into `git diagnose` and `git bugreport --diagnose`. 
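A rough usage sketch of the generalized diagnostics commands mentioned in the bullet above, assuming default settings; the output directory is an arbitrary example, and note that `--mode=all` may capture sensitive repository contents:

    # statistics-only archive, written as git-diagnostics-<timestamp>.zip
    $ git diagnose --mode=stats -o /tmp/diag
    # bug report plus a companion diagnostics archive in the same directory
    $ git bugreport --diagnose -o /tmp/diag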
-- Finish Scalar features: Enable the built-in FSMonitor in Scalar enlistments - and implement `scalar help`. At the end of this series, Scalar should be - feature-complete from the perspective of a user. +- 'scalar-add-fsmonitor: Enable the built-in FSMonitor in Scalar + enlistments. At the end of this series, Scalar should be feature-complete + from the perspective of a user. -- Generalize features not specific to Scalar: In the spirit of making Scalar - configure only what is needed for large repo performance, move common - utilities into other parts of Git. Some of this will be internal-only, but one - major change will be generalizing `scalar diagnose` for use with any Git - repository. +Roughly speaking (and subject to change), the following series are needed to +"finish" this initial version of Scalar: - Move Scalar to toplevel: Move Scalar out of `contrib/` and into the root of - `git`, including updates to build and install it with the rest of Git. This - change will incorporate Scalar into the Git CI and test framework, as well as - expand regression and performance testing to ensure the tool is stable. + `git`. This includes a variety of related updates, including: + - building & installing Scalar in the Git root-level 'make [install]'. + - builing & testing Scalar as part of CI. + - moving and expanding test coverage of Scalar (including perf tests). + - implementing 'scalar help'/'git help scalar' to display scalar + documentation. Finally, there are two additional patch series that exist in Microsoft's fork of Git, but there is no current plan to upstream them. There are some interesting @@ -135,8 +135,7 @@ Issues of note: By default, git uses OpenSSL for SHA1 but it will use its own library (inspired by Mozilla's) with either NO_OPENSSL or - BLK_SHA1. Also included is a version optimized for PowerPC - (PPC_SHA1). + BLK_SHA1. - "libcurl" library is used for fetching and pushing repositories over http:// or https://, as well as by @@ -155,9 +155,6 @@ include shared.mak # Define BLK_SHA1 environment variable to make use of the bundled # optimized C SHA1 routine. # -# Define PPC_SHA1 environment variable when running make to make use of -# a bundled SHA1 routine optimized for PowerPC. -# # Define DC_SHA1 to unconditionally enable the collision-detecting sha1 # algorithm. This is slower, but may detect attempted collision attacks. # Takes priority over other *_SHA1 knobs. 
@@ -772,6 +769,7 @@ TEST_BUILTINS_OBJS += test-read-midx.o TEST_BUILTINS_OBJS += test-ref-store.o TEST_BUILTINS_OBJS += test-reftable.o TEST_BUILTINS_OBJS += test-regex.o +TEST_BUILTINS_OBJS += test-rot13-filter.o TEST_BUILTINS_OBJS += test-repository.o TEST_BUILTINS_OBJS += test-revision-walking.o TEST_BUILTINS_OBJS += test-run-command.o @@ -785,6 +783,7 @@ TEST_BUILTINS_OBJS += test-strcmp-offset.o TEST_BUILTINS_OBJS += test-string-list.o TEST_BUILTINS_OBJS += test-submodule-config.o TEST_BUILTINS_OBJS += test-submodule-nested-repo-config.o +TEST_BUILTINS_OBJS += test-submodule.o TEST_BUILTINS_OBJS += test-subprocess.o TEST_BUILTINS_OBJS += test-trace2.o TEST_BUILTINS_OBJS += test-urlmatch-normalization.o @@ -906,6 +905,7 @@ LIB_OBJS += blob.o LIB_OBJS += bloom.o LIB_OBJS += branch.o LIB_OBJS += bulk-checkin.o +LIB_OBJS += bundle-uri.o LIB_OBJS += bundle.o LIB_OBJS += cache-tree.o LIB_OBJS += cbtree.o @@ -918,6 +918,7 @@ LIB_OBJS += combine-diff.o LIB_OBJS += commit-graph.o LIB_OBJS += commit-reach.o LIB_OBJS += commit.o +LIB_OBJS += compat/nonblock.o LIB_OBJS += compat/obstack.o LIB_OBJS += compat/terminal.o LIB_OBJS += compat/zlib-uncompress2.o @@ -932,6 +933,7 @@ LIB_OBJS += ctype.o LIB_OBJS += date.o LIB_OBJS += decorate.o LIB_OBJS += delta-islands.o +LIB_OBJS += diagnose.o LIB_OBJS += diff-delta.o LIB_OBJS += diff-merges.o LIB_OBJS += diff-lib.o @@ -1152,6 +1154,7 @@ BUILTIN_OBJS += builtin/credential-cache.o BUILTIN_OBJS += builtin/credential-store.o BUILTIN_OBJS += builtin/credential.o BUILTIN_OBJS += builtin/describe.o +BUILTIN_OBJS += builtin/diagnose.o BUILTIN_OBJS += builtin/diff-files.o BUILTIN_OBJS += builtin/diff-index.o BUILTIN_OBJS += builtin/diff-tree.o @@ -1798,6 +1801,10 @@ ifdef APPLE_COMMON_CRYPTO SHA1_MAX_BLOCK_SIZE = 1024L*1024L*1024L endif +ifdef PPC_SHA1 +$(error the PPC_SHA1 flag has been removed along with the PowerPC-specific SHA-1 implementation.) +endif + ifdef OPENSSL_SHA1 EXTLIBS += $(LIB_4_CRYPTO) BASIC_CFLAGS += -DSHA1_OPENSSL @@ -1806,10 +1813,6 @@ ifdef BLK_SHA1 LIB_OBJS += block-sha1/sha1.o BASIC_CFLAGS += -DSHA1_BLK else -ifdef PPC_SHA1 - LIB_OBJS += ppc/sha1.o ppc/sha1ppc.o - BASIC_CFLAGS += -DSHA1_PPC -else ifdef APPLE_COMMON_CRYPTO COMPAT_CFLAGS += -DCOMMON_DIGEST_FOR_OPENSSL BASIC_CFLAGS += -DSHA1_APPLE @@ -1843,7 +1846,6 @@ endif endif endif endif -endif ifdef OPENSSL_SHA256 EXTLIBS += $(LIB_4_CRYPTO) @@ -2590,13 +2592,7 @@ missing_compdb_dir = compdb_args = endif -ASM_SRC := $(wildcard $(OBJECTS:o=S)) -ASM_OBJ := $(ASM_SRC:S=o) -C_OBJ := $(filter-out $(ASM_OBJ),$(OBJECTS)) - -$(C_OBJ): %.o: %.c GIT-CFLAGS $(missing_dep_dirs) $(missing_compdb_dir) - $(QUIET_CC)$(CC) -o $*.o -c $(dep_args) $(compdb_args) $(ALL_CFLAGS) $(EXTRA_CPPFLAGS) $< -$(ASM_OBJ): %.o: %.S GIT-CFLAGS $(missing_dep_dirs) $(missing_compdb_dir) +$(OBJECTS): %.o: %.c GIT-CFLAGS $(missing_dep_dirs) $(missing_compdb_dir) $(QUIET_CC)$(CC) -o $*.o -c $(dep_args) $(compdb_args) $(ALL_CFLAGS) $(EXTRA_CPPFLAGS) $< %.s: %.c GIT-CFLAGS FORCE @@ -3088,7 +3084,7 @@ t/helper/test-%$X: t/helper/test-%.o GIT-LDFLAGS $(GITLIBS) $(REFTABLE_TEST_LIB) check-sha1:: t/helper/test-tool$X t/helper/test-sha1.sh -SP_OBJ = $(patsubst %.o,%.sp,$(C_OBJ)) +SP_OBJ = $(patsubst %.o,%.sp,$(OBJECTS)) $(SP_OBJ): %.sp: %.c %.o $(QUIET_SP)cgcc -no-compile $(ALL_CFLAGS) $(EXTRA_CPPFLAGS) \ diff --git a/add-patch.c b/add-patch.c index 509ca04456..29f9456df3 100644 --- a/add-patch.c +++ b/add-patch.c @@ -238,6 +238,7 @@ struct hunk_header { * include the newline. 
*/ size_t extra_start, extra_end, colored_extra_start, colored_extra_end; + unsigned suppress_colored_line_range:1; }; struct hunk { @@ -358,15 +359,14 @@ static int parse_hunk_header(struct add_p_state *s, struct hunk *hunk) if (!eol) eol = s->colored.buf + s->colored.len; p = memmem(line, eol - line, "@@ -", 4); - if (!p) - return error(_("could not parse colored hunk header '%.*s'"), - (int)(eol - line), line); - p = memmem(p + 4, eol - p - 4, " @@", 3); - if (!p) - return error(_("could not parse colored hunk header '%.*s'"), - (int)(eol - line), line); + if (p && (p = memmem(p + 4, eol - p - 4, " @@", 3))) { + header->colored_extra_start = p + 3 - s->colored.buf; + } else { + /* could not parse colored hunk header, leave as-is */ + header->colored_extra_start = hunk->colored_start; + header->suppress_colored_line_range = 1; + } hunk->colored_start = eol - s->colored.buf + (*eol == '\n'); - header->colored_extra_start = p + 3 - s->colored.buf; header->colored_extra_end = hunk->colored_start; return 0; @@ -419,7 +419,8 @@ static int parse_diff(struct add_p_state *s, const struct pathspec *ps) } color_arg_index = args.nr; /* Use `--no-color` explicitly, just in case `diff.color = always`. */ - strvec_pushl(&args, "--no-color", "-p", "--", NULL); + strvec_pushl(&args, "--no-color", "--ignore-submodules=dirty", "-p", + "--", NULL); for (i = 0; i < ps->nr; i++) strvec_push(&args, ps->items[i].original); @@ -592,7 +593,10 @@ static int parse_diff(struct add_p_state *s, const struct pathspec *ps) if (colored_eol) colored_p = colored_eol + 1; else if (p != pend) - /* colored shorter than non-colored? */ + /* non-colored has more lines? */ + goto mismatched_output; + else if (colored_p == colored_pend) + /* last line has no matching colored one? */ goto mismatched_output; else colored_p = colored_pend; @@ -656,6 +660,15 @@ static void render_hunk(struct add_p_state *s, struct hunk *hunk, if (!colored) { p = s->plain.buf + header->extra_start; len = header->extra_end - header->extra_start; + } else if (header->suppress_colored_line_range) { + strbuf_add(out, + s->colored.buf + header->colored_extra_start, + header->colored_extra_end - + header->colored_extra_start); + + strbuf_add(out, s->colored.buf + hunk->colored_start, + hunk->colored_end - hunk->colored_start); + return; } else { strbuf_addstr(out, s->s.fraginfo_color); p = s->colored.buf + header->colored_extra_start; @@ -1547,7 +1560,7 @@ soft_increment: strbuf_remove(&s->answer, 0, 1); strbuf_trim(&s->answer); i = hunk_index - DISPLAY_HUNKS_LINES / 2; - if (i < file_diff->mode_change) + if (i < (int)file_diff->mode_change) i = file_diff->mode_change; while (s->answer.len == 0) { i = display_hunks(s, file_diff, i); @@ -1023,7 +1023,7 @@ static int path_matches(const char *pathname, int pathlen, } return match_pathname(pathname, pathlen - isdir, base, baselen, - pattern, prefix, pat->patternlen, pat->flags); + pattern, prefix, pat->patternlen); } static int macroexpand_one(struct all_attrs_item *all_attrs, int nr, int rem); diff --git a/block-sha1/sha1.c b/block-sha1/sha1.c index 5974cd7dd3..80cebd2756 100644 --- a/block-sha1/sha1.c +++ b/block-sha1/sha1.c @@ -28,10 +28,6 @@ * try to do the silly "optimize away loads" part because it won't * see what the value will be). * - * Ben Herrenschmidt reports that on PPC, the C version comes close - * to the optimized asm with this (ie on PPC you don't want that - * 'volatile', since there are lots of registers). 
- * * On ARM we get the best code generation by forcing a full memory barrier * between each SHA_ROUND, otherwise gcc happily get wild with spilling and * the stack frame size simply explode and performance goes down the drain. @@ -144,6 +144,7 @@ int cmd_credential_cache(int argc, const char **argv, const char *prefix); int cmd_credential_cache_daemon(int argc, const char **argv, const char *prefix); int cmd_credential_store(int argc, const char **argv, const char *prefix); int cmd_describe(int argc, const char **argv, const char *prefix); +int cmd_diagnose(int argc, const char **argv, const char *prefix); int cmd_diff_files(int argc, const char **argv, const char *prefix); int cmd_diff_index(int argc, const char **argv, const char *prefix); int cmd_diff(int argc, const char **argv, const char *prefix); diff --git a/builtin/archive.c b/builtin/archive.c index 7176b041b6..f094390ee0 100644 --- a/builtin/archive.c +++ b/builtin/archive.c @@ -75,7 +75,7 @@ static int run_remote_archiver(int argc, const char **argv, #define PARSE_OPT_KEEP_ALL ( PARSE_OPT_KEEP_DASHDASH | \ PARSE_OPT_KEEP_ARGV0 | \ - PARSE_OPT_KEEP_UNKNOWN | \ + PARSE_OPT_KEEP_UNKNOWN_OPT | \ PARSE_OPT_NO_INTERNAL_HELP ) int cmd_archive(int argc, const char **argv, const char *prefix) diff --git a/builtin/bisect--helper.c b/builtin/bisect--helper.c index aa975bd926..501245fac9 100644 --- a/builtin/bisect--helper.c +++ b/builtin/bisect--helper.c @@ -1326,7 +1326,7 @@ int cmd_bisect__helper(int argc, const char **argv, const char *prefix) argc = parse_options(argc, argv, prefix, options, git_bisect_helper_usage, - PARSE_OPT_KEEP_DASHDASH | PARSE_OPT_KEEP_UNKNOWN); + PARSE_OPT_KEEP_DASHDASH | PARSE_OPT_KEEP_UNKNOWN_OPT); if (!cmdmode) usage_with_options(git_bisect_helper_usage, options); diff --git a/builtin/blame.c b/builtin/blame.c index 02e39420b6..a9fe8cf7a6 100644 --- a/builtin/blame.c +++ b/builtin/blame.c @@ -920,6 +920,7 @@ int cmd_blame(int argc, const char **argv, const char *prefix) break; case PARSE_OPT_HELP: case PARSE_OPT_ERROR: + case PARSE_OPT_SUBCOMMAND: exit(129); case PARSE_OPT_COMPLETE: exit(0); diff --git a/builtin/bugreport.c b/builtin/bugreport.c index 9de32bc96e..530895be55 100644 --- a/builtin/bugreport.c +++ b/builtin/bugreport.c @@ -5,6 +5,7 @@ #include "compat/compiler.h" #include "hook.h" #include "hook-list.h" +#include "diagnose.h" static void get_system_info(struct strbuf *sys_info) @@ -59,7 +60,7 @@ static void get_populated_hooks(struct strbuf *hook_info, int nongit) } static const char * const bugreport_usage[] = { - N_("git bugreport [-o|--output-directory <file>] [-s|--suffix <format>]"), + N_("git bugreport [-o|--output-directory <file>] [-s|--suffix <format>] [--diagnose[=<mode>]"), NULL }; @@ -98,16 +99,21 @@ int cmd_bugreport(int argc, const char **argv, const char *prefix) int report = -1; time_t now = time(NULL); struct tm tm; + enum diagnose_mode diagnose = DIAGNOSE_NONE; char *option_output = NULL; char *option_suffix = "%Y-%m-%d-%H%M"; const char *user_relative_path = NULL; char *prefixed_filename; + size_t output_path_len; const struct option bugreport_options[] = { + OPT_CALLBACK_F(0, "diagnose", &diagnose, N_("mode"), + N_("create an additional zip archive of detailed diagnostics (default 'stats')"), + PARSE_OPT_OPTARG, option_parse_diagnose), OPT_STRING('o', "output-directory", &option_output, N_("path"), - N_("specify a destination for the bugreport file")), + N_("specify a destination for the bugreport file(s)")), OPT_STRING('s', "suffix", &option_suffix, N_("format"), - 
N_("specify a strftime format suffix for the filename")), + N_("specify a strftime format suffix for the filename(s)")), OPT_END() }; @@ -119,6 +125,7 @@ int cmd_bugreport(int argc, const char **argv, const char *prefix) option_output ? option_output : ""); strbuf_addstr(&report_path, prefixed_filename); strbuf_complete(&report_path, '/'); + output_path_len = report_path.len; strbuf_addstr(&report_path, "git-bugreport-"); strbuf_addftime(&report_path, option_suffix, localtime_r(&now, &tm), 0, 0); @@ -133,6 +140,20 @@ int cmd_bugreport(int argc, const char **argv, const char *prefix) report_path.buf); } + /* Prepare diagnostics, if requested */ + if (diagnose != DIAGNOSE_NONE) { + struct strbuf zip_path = STRBUF_INIT; + strbuf_add(&zip_path, report_path.buf, output_path_len); + strbuf_addstr(&zip_path, "git-diagnostics-"); + strbuf_addftime(&zip_path, option_suffix, localtime_r(&now, &tm), 0, 0); + strbuf_addstr(&zip_path, ".zip"); + + if (create_diagnostics_archive(&zip_path, diagnose)) + die_errno(_("unable to create diagnostics archive %s"), zip_path.buf); + + strbuf_release(&zip_path); + } + /* Prepare the report contents */ get_bug_template(&buffer); diff --git a/builtin/bundle.c b/builtin/bundle.c index 2adad545a2..e80efce3a4 100644 --- a/builtin/bundle.c +++ b/builtin/bundle.c @@ -195,30 +195,19 @@ cleanup: int cmd_bundle(int argc, const char **argv, const char *prefix) { + parse_opt_subcommand_fn *fn = NULL; struct option options[] = { + OPT_SUBCOMMAND("create", &fn, cmd_bundle_create), + OPT_SUBCOMMAND("verify", &fn, cmd_bundle_verify), + OPT_SUBCOMMAND("list-heads", &fn, cmd_bundle_list_heads), + OPT_SUBCOMMAND("unbundle", &fn, cmd_bundle_unbundle), OPT_END() }; - int result; argc = parse_options(argc, argv, prefix, options, builtin_bundle_usage, - PARSE_OPT_STOP_AT_NON_OPTION); + 0); packet_trace_identity("bundle"); - if (argc < 2) - usage_with_options(builtin_bundle_usage, options); - - else if (!strcmp(argv[0], "create")) - result = cmd_bundle_create(argc, argv, prefix); - else if (!strcmp(argv[0], "verify")) - result = cmd_bundle_verify(argc, argv, prefix); - else if (!strcmp(argv[0], "list-heads")) - result = cmd_bundle_list_heads(argc, argv, prefix); - else if (!strcmp(argv[0], "unbundle")) - result = cmd_bundle_unbundle(argc, argv, prefix); - else { - error(_("Unknown subcommand: %s"), argv[0]); - usage_with_options(builtin_bundle_usage, options); - } - return result ? 
1 : 0; + return !!fn(argc, argv, prefix); } diff --git a/builtin/clone.c b/builtin/clone.c index c4ff4643ec..e21d42dfee 100644 --- a/builtin/clone.c +++ b/builtin/clone.c @@ -34,6 +34,7 @@ #include "list-objects-filter-options.h" #include "hook.h" #include "bundle.h" +#include "bundle-uri.h" /* * Overall FIXMEs: @@ -77,6 +78,7 @@ static int option_filter_submodules = -1; /* unspecified */ static int config_filter_submodules = -1; /* unspecified */ static struct string_list server_options = STRING_LIST_INIT_NODUP; static int option_remote_submodules; +static const char *bundle_uri; static int recurse_submodules_cb(const struct option *opt, const char *arg, int unset) @@ -160,6 +162,8 @@ static struct option builtin_clone_options[] = { N_("any cloned submodules will use their remote-tracking branch")), OPT_BOOL(0, "sparse", &option_sparse_checkout, N_("initialize sparse-checkout file to include only files at root")), + OPT_STRING(0, "bundle-uri", &bundle_uri, + N_("uri"), N_("a URI for downloading bundles before fetching from origin remote")), OPT_END() }; @@ -933,6 +937,9 @@ int cmd_clone(int argc, const char **argv, const char *prefix) option_no_checkout = 1; } + if (bundle_uri && deepen) + die(_("--bundle-uri is incompatible with --depth, --shallow-since, and --shallow-exclude")); + repo_name = argv[0]; path = get_repo_path(repo_name, &is_bundle); @@ -1232,6 +1239,18 @@ int cmd_clone(int argc, const char **argv, const char *prefix) if (transport->smart_options && !deepen && !filter_options.choice) transport->smart_options->check_self_contained_and_connected = 1; + /* + * Before fetching from the remote, download and install bundle + * data from the --bundle-uri option. + */ + if (bundle_uri) { + /* At this point, we need the_repository to match the cloned repo. 
*/ + if (repo_init(the_repository, git_dir, work_tree)) + warning(_("failed to initialize the repo, skipping bundle URI")); + else if (fetch_bundle_uri(the_repository, bundle_uri)) + warning(_("failed to fetch objects from bundle URI '%s'"), + bundle_uri); + } strvec_push(&transport_ls_refs_options.ref_prefixes, "HEAD"); refspec_ref_prefixes(&remote->fetch, diff --git a/builtin/commit-graph.c b/builtin/commit-graph.c index aaa1b5b960..51557fe786 100644 --- a/builtin/commit-graph.c +++ b/builtin/commit-graph.c @@ -58,7 +58,7 @@ static struct option *add_common_options(struct option *to) return parse_options_concat(common_opts, to); } -static int graph_verify(int argc, const char **argv) +static int graph_verify(int argc, const char **argv, const char *prefix) { struct commit_graph *graph = NULL; struct object_directory *odb = NULL; @@ -80,7 +80,7 @@ static int graph_verify(int argc, const char **argv) trace2_cmd_mode("verify"); opts.progress = isatty(2); - argc = parse_options(argc, argv, NULL, + argc = parse_options(argc, argv, prefix, options, builtin_commit_graph_verify_usage, 0); if (argc) @@ -190,7 +190,7 @@ static int git_commit_graph_write_config(const char *var, const char *value, return 0; } -static int graph_write(int argc, const char **argv) +static int graph_write(int argc, const char **argv, const char *prefix) { struct string_list pack_indexes = STRING_LIST_INIT_DUP; struct strbuf buf = STRBUF_INIT; @@ -241,7 +241,7 @@ static int graph_write(int argc, const char **argv) git_config(git_commit_graph_write_config, &opts); - argc = parse_options(argc, argv, NULL, + argc = parse_options(argc, argv, prefix, options, builtin_commit_graph_write_usage, 0); if (argc) @@ -307,26 +307,22 @@ cleanup: int cmd_commit_graph(int argc, const char **argv, const char *prefix) { - struct option *builtin_commit_graph_options = common_opts; + parse_opt_subcommand_fn *fn = NULL; + struct option builtin_commit_graph_options[] = { + OPT_SUBCOMMAND("verify", &fn, graph_verify), + OPT_SUBCOMMAND("write", &fn, graph_write), + OPT_END(), + }; + struct option *options = parse_options_concat(builtin_commit_graph_options, common_opts); git_config(git_default_config, NULL); - argc = parse_options(argc, argv, prefix, - builtin_commit_graph_options, - builtin_commit_graph_usage, - PARSE_OPT_STOP_AT_NON_OPTION); - if (!argc) - goto usage; read_replace_refs = 0; save_commit_buffer = 0; - if (!strcmp(argv[0], "verify")) - return graph_verify(argc, argv); - else if (argc && !strcmp(argv[0], "write")) - return graph_write(argc, argv); + argc = parse_options(argc, argv, prefix, options, + builtin_commit_graph_usage, 0); + FREE_AND_NULL(options); - error(_("unrecognized subcommand: %s"), argv[0]); -usage: - usage_with_options(builtin_commit_graph_usage, - builtin_commit_graph_options); + return fn(argc, argv, prefix); } diff --git a/builtin/diagnose.c b/builtin/diagnose.c new file mode 100644 index 0000000000..cd260c2015 --- /dev/null +++ b/builtin/diagnose.c @@ -0,0 +1,61 @@ +#include "builtin.h" +#include "parse-options.h" +#include "diagnose.h" + +static const char * const diagnose_usage[] = { + N_("git diagnose [-o|--output-directory <path>] [-s|--suffix <format>] [--mode=<mode>]"), + NULL +}; + +int cmd_diagnose(int argc, const char **argv, const char *prefix) +{ + struct strbuf zip_path = STRBUF_INIT; + time_t now = time(NULL); + struct tm tm; + enum diagnose_mode mode = DIAGNOSE_STATS; + char *option_output = NULL; + char *option_suffix = "%Y-%m-%d-%H%M"; + char *prefixed_filename; + + const struct option 
diagnose_options[] = { + OPT_STRING('o', "output-directory", &option_output, N_("path"), + N_("specify a destination for the diagnostics archive")), + OPT_STRING('s', "suffix", &option_suffix, N_("format"), + N_("specify a strftime format suffix for the filename")), + OPT_CALLBACK_F(0, "mode", &mode, N_("(stats|all)"), + N_("specify the content of the diagnostic archive"), + PARSE_OPT_NONEG, option_parse_diagnose), + OPT_END() + }; + + argc = parse_options(argc, argv, prefix, diagnose_options, + diagnose_usage, 0); + + /* Prepare the path to put the result */ + prefixed_filename = prefix_filename(prefix, + option_output ? option_output : ""); + strbuf_addstr(&zip_path, prefixed_filename); + strbuf_complete(&zip_path, '/'); + + strbuf_addstr(&zip_path, "git-diagnostics-"); + strbuf_addftime(&zip_path, option_suffix, localtime_r(&now, &tm), 0, 0); + strbuf_addstr(&zip_path, ".zip"); + + switch (safe_create_leading_directories(zip_path.buf)) { + case SCLD_OK: + case SCLD_EXISTS: + break; + default: + die_errno(_("could not create leading directories for '%s'"), + zip_path.buf); + } + + /* Prepare diagnostics */ + if (create_diagnostics_archive(&zip_path, mode)) + die_errno(_("unable to create diagnostics archive %s"), + zip_path.buf); + + free(prefixed_filename); + strbuf_release(&zip_path); + return 0; +} diff --git a/builtin/difftool.c b/builtin/difftool.c index f780ebfd55..4b10ad1a36 100644 --- a/builtin/difftool.c +++ b/builtin/difftool.c @@ -716,7 +716,7 @@ int cmd_difftool(int argc, const char **argv, const char *prefix) symlinks = has_symlinks; argc = parse_options(argc, argv, prefix, builtin_difftool_options, - builtin_difftool_usage, PARSE_OPT_KEEP_UNKNOWN | + builtin_difftool_usage, PARSE_OPT_KEEP_UNKNOWN_OPT | PARSE_OPT_KEEP_DASHDASH); if (tool_help) diff --git a/builtin/env--helper.c b/builtin/env--helper.c index 27349098b0..ea04c16636 100644 --- a/builtin/env--helper.c +++ b/builtin/env--helper.c @@ -50,7 +50,7 @@ int cmd_env__helper(int argc, const char **argv, const char *prefix) }; argc = parse_options(argc, argv, prefix, opts, env__helper_usage, - PARSE_OPT_KEEP_UNKNOWN); + PARSE_OPT_KEEP_UNKNOWN_OPT); if (env_default && !*env_default) usage_with_options(env__helper_usage, opts); if (!cmdmode) diff --git a/builtin/fast-export.c b/builtin/fast-export.c index ab5b0ff610..3b3314e7b2 100644 --- a/builtin/fast-export.c +++ b/builtin/fast-export.c @@ -1221,7 +1221,7 @@ int cmd_fast_export(int argc, const char **argv, const char *prefix) revs.sources = &revision_sources; revs.rewrite_parents = 1; argc = parse_options(argc, argv, prefix, options, fast_export_usage, - PARSE_OPT_KEEP_ARGV0 | PARSE_OPT_KEEP_UNKNOWN); + PARSE_OPT_KEEP_ARGV0 | PARSE_OPT_KEEP_UNKNOWN_OPT); argc = setup_revisions(argc, argv, &revs, NULL); if (argc > 1) usage_with_options (fast_export_usage, options); diff --git a/builtin/fetch.c b/builtin/fetch.c index 7f5b0f4c2a..e6b926dba2 100644 --- a/builtin/fetch.c +++ b/builtin/fetch.c @@ -490,7 +490,9 @@ static void filter_prefetch_refspec(struct refspec *rs) continue; if (!rs->items[i].dst || (rs->items[i].src && - !strncmp(rs->items[i].src, "refs/tags/", 10))) { + !strncmp(rs->items[i].src, + ref_namespace[NAMESPACE_TAGS].ref, + strlen(ref_namespace[NAMESPACE_TAGS].ref)))) { int j; free(rs->items[i].src); @@ -506,7 +508,7 @@ static void filter_prefetch_refspec(struct refspec *rs) } old_dst = rs->items[i].dst; - strbuf_addstr(&new_dst, "refs/prefetch/"); + strbuf_addstr(&new_dst, ref_namespace[NAMESPACE_PREFETCH].ref); /* * If old_dst starts with "refs/", then place 
diff --git a/builtin/gc.c b/builtin/gc.c index 2d09321185..0accc02406 100644 --- a/builtin/gc.c +++ b/builtin/gc.c @@ -911,12 +911,6 @@ static int fetch_remote(struct remote *remote, void *cbdata) static int maintenance_task_prefetch(struct maintenance_run_opts *opts) { - git_config_set_multivar_gently("log.excludedecoration", - "refs/prefetch/", - "refs/prefetch/", - CONFIG_FLAGS_FIXED_VALUE | - CONFIG_FLAGS_MULTI_REPLACE); - if (for_each_remote(fetch_remote, opts)) { error(_("failed to prefetch remotes")); return 1; @@ -1466,14 +1460,28 @@ static char *get_maintpath(void) return strbuf_detach(&sb, NULL); } -static int maintenance_register(void) +static char const * const builtin_maintenance_register_usage[] = { + N_("git maintenance register"), + NULL +}; + +static int maintenance_register(int argc, const char **argv, const char *prefix) { + struct option options[] = { + OPT_END(), + }; int rc; char *config_value; struct child_process config_set = CHILD_PROCESS_INIT; struct child_process config_get = CHILD_PROCESS_INIT; char *maintpath = get_maintpath(); + argc = parse_options(argc, argv, prefix, options, + builtin_maintenance_register_usage, 0); + if (argc) + usage_with_options(builtin_maintenance_register_usage, + options); + /* Disable foreground maintenance */ git_config_set("maintenance.auto", "false"); @@ -1510,12 +1518,26 @@ done: return rc; } -static int maintenance_unregister(void) +static char const * const builtin_maintenance_unregister_usage[] = { + N_("git maintenance unregister"), + NULL +}; + +static int maintenance_unregister(int argc, const char **argv, const char *prefix) { + struct option options[] = { + OPT_END(), + }; int rc; struct child_process config_unset = CHILD_PROCESS_INIT; char *maintpath = get_maintpath(); + argc = parse_options(argc, argv, prefix, options, + builtin_maintenance_unregister_usage, 0); + if (argc) + usage_with_options(builtin_maintenance_unregister_usage, + options); + config_unset.git_cmd = 1; strvec_pushl(&config_unset.args, "config", "--global", "--unset", "--fixed-value", "maintenance.repo", maintpath, NULL); @@ -2066,6 +2088,7 @@ static int crontab_update_schedule(int run_maintenance, int fd) struct child_process crontab_edit = CHILD_PROCESS_INIT; FILE *cron_list, *cron_in; struct strbuf line = STRBUF_INIT; + struct tempfile *tmpedit = NULL; get_schedule_cmd(&cmd, NULL); strvec_split(&crontab_list.args, cmd); @@ -2080,6 +2103,17 @@ static int crontab_update_schedule(int run_maintenance, int fd) /* Ignore exit code, as an empty crontab will return error. */ finish_command(&crontab_list); + tmpedit = mks_tempfile_t(".git_cron_edit_tmpXXXXXX"); + if (!tmpedit) { + result = error(_("failed to create crontab temporary file")); + goto out; + } + cron_in = fdopen_tempfile(tmpedit, "w"); + if (!cron_in) { + result = error(_("failed to open temporary file")); + goto out; + } + /* * Read from the .lock file, filtering out the old * schedule while appending the new schedule. 
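A pattern repeated in these gc.c hunks is that maintenance subcommands that take no arguments (register, unregister, and later stop) now call parse_options() themselves instead of being invoked bare, so -h and stray arguments get the standard treatment. A minimal sketch of that boilerplate, using placeholder names (sketch_register, sketch_usage) and assuming the parse-options API as used in the hunks above:

    static const char * const sketch_usage[] = {
        N_("git maintenance register"),
        NULL
    };

    static int sketch_register(int argc, const char **argv, const char *prefix)
    {
        struct option options[] = {
            OPT_END(),
        };

        argc = parse_options(argc, argv, prefix, options, sketch_usage, 0);
        if (argc)   /* no positional arguments are accepted */
            usage_with_options(sketch_usage, options);

        /* ... the subcommand's actual work would go here ... */
        return 0;
    }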
@@ -2087,19 +2121,6 @@ static int crontab_update_schedule(int run_maintenance, int fd) cron_list = fdopen(fd, "r"); rewind(cron_list); - strvec_split(&crontab_edit.args, cmd); - crontab_edit.in = -1; - crontab_edit.git_cmd = 0; - - if (start_command(&crontab_edit)) - return error(_("failed to run 'crontab'; your system might not support 'cron'")); - - cron_in = fdopen(crontab_edit.in, "w"); - if (!cron_in) { - result = error(_("failed to open stdin of 'crontab'")); - goto done_editing; - } - while (!strbuf_getline_lf(&line, cron_list)) { if (!in_old_region && !strcmp(line.buf, BEGIN_LINE)) in_old_region = 1; @@ -2133,14 +2154,22 @@ static int crontab_update_schedule(int run_maintenance, int fd) } fflush(cron_in); - fclose(cron_in); - close(crontab_edit.in); -done_editing: + strvec_split(&crontab_edit.args, cmd); + strvec_push(&crontab_edit.args, get_tempfile_path(tmpedit)); + crontab_edit.git_cmd = 0; + + if (start_command(&crontab_edit)) { + result = error(_("failed to run 'crontab'; your system might not support 'cron'")); + goto out; + } + if (finish_command(&crontab_edit)) result = error(_("'crontab' died")); else fclose(cron_list); +out: + delete_tempfile(&tmpedit); return result; } @@ -2497,6 +2526,7 @@ static int maintenance_start(int argc, const char **argv, const char *prefix) PARSE_OPT_NONEG, maintenance_opt_scheduler), OPT_END() }; + const char *register_args[] = { "register", NULL }; argc = parse_options(argc, argv, prefix, options, builtin_maintenance_start_usage, 0); @@ -2506,34 +2536,46 @@ static int maintenance_start(int argc, const char **argv, const char *prefix) opts.scheduler = resolve_scheduler(opts.scheduler); validate_scheduler(opts.scheduler); - if (maintenance_register()) + if (maintenance_register(ARRAY_SIZE(register_args)-1, register_args, NULL)) warning(_("failed to add repo to global config")); return update_background_schedule(&opts, 1); } -static int maintenance_stop(void) +static const char *const builtin_maintenance_stop_usage[] = { + N_("git maintenance stop"), + NULL +}; + +static int maintenance_stop(int argc, const char **argv, const char *prefix) { + struct option options[] = { + OPT_END() + }; + argc = parse_options(argc, argv, prefix, options, + builtin_maintenance_stop_usage, 0); + if (argc) + usage_with_options(builtin_maintenance_stop_usage, options); return update_background_schedule(NULL, 0); } -static const char builtin_maintenance_usage[] = N_("git maintenance <subcommand> [<options>]"); +static const char * const builtin_maintenance_usage[] = { + N_("git maintenance <subcommand> [<options>]"), + NULL, +}; int cmd_maintenance(int argc, const char **argv, const char *prefix) { - if (argc < 2 || - (argc == 2 && !strcmp(argv[1], "-h"))) - usage(builtin_maintenance_usage); - - if (!strcmp(argv[1], "run")) - return maintenance_run(argc - 1, argv + 1, prefix); - if (!strcmp(argv[1], "start")) - return maintenance_start(argc - 1, argv + 1, prefix); - if (!strcmp(argv[1], "stop")) - return maintenance_stop(); - if (!strcmp(argv[1], "register")) - return maintenance_register(); - if (!strcmp(argv[1], "unregister")) - return maintenance_unregister(); - - die(_("invalid subcommand: %s"), argv[1]); + parse_opt_subcommand_fn *fn = NULL; + struct option builtin_maintenance_options[] = { + OPT_SUBCOMMAND("run", &fn, maintenance_run), + OPT_SUBCOMMAND("start", &fn, maintenance_start), + OPT_SUBCOMMAND("stop", &fn, maintenance_stop), + OPT_SUBCOMMAND("register", &fn, maintenance_register), + OPT_SUBCOMMAND("unregister", &fn, maintenance_unregister), + 
OPT_END(), + }; + + argc = parse_options(argc, argv, prefix, builtin_maintenance_options, + builtin_maintenance_usage, 0); + return fn(argc, argv, prefix); } diff --git a/builtin/hook.c b/builtin/hook.c index 54e5c6ec93..b6530d189a 100644 --- a/builtin/hook.c +++ b/builtin/hook.c @@ -67,18 +67,14 @@ usage: int cmd_hook(int argc, const char **argv, const char *prefix) { + parse_opt_subcommand_fn *fn = NULL; struct option builtin_hook_options[] = { + OPT_SUBCOMMAND("run", &fn, run), OPT_END(), }; argc = parse_options(argc, argv, NULL, builtin_hook_options, - builtin_hook_usage, PARSE_OPT_STOP_AT_NON_OPTION); - if (!argc) - goto usage; + builtin_hook_usage, 0); - if (!strcmp(argv[0], "run")) - return run(argc, argv, prefix); - -usage: - usage_with_options(builtin_hook_usage, builtin_hook_options); + return fn(argc, argv, prefix); } diff --git a/builtin/log.c b/builtin/log.c index 3f9a6e8cf2..ee19dc5d45 100644 --- a/builtin/log.c +++ b/builtin/log.c @@ -52,6 +52,7 @@ static int default_encode_email_headers = 1; static int decoration_style; static int decoration_given; static int use_mailmap_config = 1; +static unsigned int force_in_body_from; static const char *fmt_patch_subject_prefix = "PATCH"; static int fmt_patch_name_max = FORMAT_PATCH_NAME_MAX_DEFAULT; static const char *fmt_pretty; @@ -101,6 +102,20 @@ static int parse_decoration_style(const char *value) return -1; } +static int use_default_decoration_filter = 1; +static struct string_list decorate_refs_exclude = STRING_LIST_INIT_NODUP; +static struct string_list decorate_refs_exclude_config = STRING_LIST_INIT_NODUP; +static struct string_list decorate_refs_include = STRING_LIST_INIT_NODUP; + +static int clear_decorations_callback(const struct option *opt, + const char *arg, int unset) +{ + string_list_clear(&decorate_refs_include, 0); + string_list_clear(&decorate_refs_exclude, 0); + use_default_decoration_filter = 0; + return 0; +} + static int decorate_callback(const struct option *opt, const char *arg, int unset) { if (unset) @@ -162,18 +177,61 @@ static void cmd_log_init_defaults(struct rev_info *rev) parse_date_format(default_date_mode, &rev->date_mode); } +static void set_default_decoration_filter(struct decoration_filter *decoration_filter) +{ + int i; + char *value = NULL; + struct string_list *include = decoration_filter->include_ref_pattern; + const struct string_list *config_exclude = + git_config_get_value_multi("log.excludeDecoration"); + + if (config_exclude) { + struct string_list_item *item; + for_each_string_list_item(item, config_exclude) + string_list_append(decoration_filter->exclude_ref_config_pattern, + item->string); + } + + /* + * By default, decorate_all is disabled. Enable it if + * log.initialDecorationSet=all. Don't ever disable it by config, + * since the command-line takes precedent. + */ + if (use_default_decoration_filter && + !git_config_get_string("log.initialdecorationset", &value) && + !strcmp("all", value)) + use_default_decoration_filter = 0; + free(value); + + if (!use_default_decoration_filter || + decoration_filter->exclude_ref_pattern->nr || + decoration_filter->include_ref_pattern->nr || + decoration_filter->exclude_ref_config_pattern->nr) + return; + + /* + * No command-line or config options were given, so + * populate with sensible defaults. 
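The cmd_maintenance() and cmd_hook() rewrites just above are two instances of the conversion that runs through this whole patch: hand-rolled strcmp(argv[0], ...) dispatch is replaced by the parse-options subcommand API (parse_opt_subcommand_fn plus OPT_SUBCOMMAND). A condensed sketch of the pattern, assembled from the hunks in this patch rather than copied from any single one, with frotz/nitfol as placeholder subcommand names:

    #include "builtin.h"
    #include "parse-options.h"

    static const char * const example_usage[] = {
        N_("git example <subcommand> [<options>]"),
        NULL
    };

    /* placeholder subcommands; each keeps the cmd_*() signature */
    static int sub_frotz(int argc, const char **argv, const char *prefix)
    {
        return 0;
    }

    static int sub_nitfol(int argc, const char **argv, const char *prefix)
    {
        return 0;
    }

    int cmd_example(int argc, const char **argv, const char *prefix)
    {
        parse_opt_subcommand_fn *fn = NULL;
        struct option options[] = {
            OPT_SUBCOMMAND("frotz", &fn, sub_frotz),
            OPT_SUBCOMMAND("nitfol", &fn, sub_nitfol),
            OPT_END(),
        };

        /*
         * parse_options() recognizes the subcommand, records its handler
         * in "fn" and rejects anything unknown with a usage message, which
         * is why the old "unrecognized subcommand" / "goto usage" code in
         * these builtins goes away.
         */
        argc = parse_options(argc, argv, prefix, options, example_usage, 0);
        return fn(argc, argv, prefix);
    }

Because the subcommand is mandatory here (no PARSE_OPT_SUBCOMMAND_OPTIONAL flag), fn is set by the time parse_options() returns, so the converted builtins call it unconditionally.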
+ */ + for (i = 0; i < ARRAY_SIZE(ref_namespace); i++) { + if (!ref_namespace[i].decoration) + continue; + + string_list_append(include, ref_namespace[i].ref); + } +} + static void cmd_log_init_finish(int argc, const char **argv, const char *prefix, struct rev_info *rev, struct setup_revision_opt *opt) { struct userformat_want w; int quiet = 0, source = 0, mailmap; static struct line_opt_callback_data line_cb = {NULL, NULL, STRING_LIST_INIT_DUP}; - static struct string_list decorate_refs_exclude = STRING_LIST_INIT_NODUP; - static struct string_list decorate_refs_exclude_config = STRING_LIST_INIT_NODUP; - static struct string_list decorate_refs_include = STRING_LIST_INIT_NODUP; - struct decoration_filter decoration_filter = {&decorate_refs_include, - &decorate_refs_exclude, - &decorate_refs_exclude_config}; + struct decoration_filter decoration_filter = { + .exclude_ref_pattern = &decorate_refs_exclude, + .include_ref_pattern = &decorate_refs_include, + .exclude_ref_config_pattern = &decorate_refs_exclude_config, + }; static struct revision_sources revision_sources; const struct option builtin_log_options[] = { @@ -181,6 +239,10 @@ static void cmd_log_init_finish(int argc, const char **argv, const char *prefix, OPT_BOOL(0, "source", &source, N_("show source")), OPT_BOOL(0, "use-mailmap", &mailmap, N_("use mail map file")), OPT_ALIAS(0, "mailmap", "use-mailmap"), + OPT_CALLBACK_F(0, "clear-decorations", NULL, NULL, + N_("clear all previously-defined decoration filters"), + PARSE_OPT_NOARG | PARSE_OPT_NONEG, + clear_decorations_callback), OPT_STRING_LIST(0, "decorate-refs", &decorate_refs_include, N_("pattern"), N_("only decorate refs that match <pattern>")), OPT_STRING_LIST(0, "decorate-refs-exclude", &decorate_refs_exclude, @@ -199,7 +261,7 @@ static void cmd_log_init_finish(int argc, const char **argv, const char *prefix, mailmap = use_mailmap_config; argc = parse_options(argc, argv, prefix, builtin_log_options, builtin_log_usage, - PARSE_OPT_KEEP_ARGV0 | PARSE_OPT_KEEP_UNKNOWN | + PARSE_OPT_KEEP_ARGV0 | PARSE_OPT_KEEP_UNKNOWN_OPT | PARSE_OPT_KEEP_DASHDASH); if (quiet) @@ -265,16 +327,7 @@ static void cmd_log_init_finish(int argc, const char **argv, const char *prefix, } if (decoration_style || rev->simplify_by_decoration) { - const struct string_list *config_exclude = - repo_config_get_value_multi(the_repository, - "log.excludeDecoration"); - - if (config_exclude) { - struct string_list_item *item; - for_each_string_list_item(item, config_exclude) - string_list_append(&decorate_refs_exclude_config, - item->string); - } + set_default_decoration_filter(&decoration_filter); if (decoration_style) rev->show_decorations = 1; @@ -1007,6 +1060,10 @@ static int git_format_config(const char *var, const char *value, void *cb) from = NULL; return 0; } + if (!strcmp(var, "format.forceinbodyfrom")) { + force_in_body_from = git_config_bool(var, value); + return 0; + } if (!strcmp(var, "format.notes")) { int b = git_parse_maybe_bool(value); if (b < 0) @@ -1898,6 +1955,8 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix) N_("show changes against <refspec> in cover letter or single patch")), OPT_INTEGER(0, "creation-factor", &creation_factor, N_("percentage by which creation is weighted")), + OPT_BOOL(0, "force-in-body-from", &force_in_body_from, + N_("show in-body From: even if identical to the e-mail header")), OPT_END() }; @@ -1938,9 +1997,11 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix) */ argc = parse_options(argc, argv, prefix, 
builtin_format_patch_options, builtin_format_patch_usage, - PARSE_OPT_KEEP_ARGV0 | PARSE_OPT_KEEP_UNKNOWN | + PARSE_OPT_KEEP_ARGV0 | PARSE_OPT_KEEP_UNKNOWN_OPT | PARSE_OPT_KEEP_DASHDASH); + rev.force_in_body_from = force_in_body_from; + /* Make sure "0000-$sub.patch" gives non-negative length for $sub */ if (fmt_patch_name_max <= strlen("0000-") + strlen(fmt_patch_suffix)) fmt_patch_name_max = strlen("0000-") + strlen(fmt_patch_suffix); diff --git a/builtin/merge.c b/builtin/merge.c index f7c92c0e64..5900b81729 100644 --- a/builtin/merge.c +++ b/builtin/merge.c @@ -503,7 +503,8 @@ static void finish(struct commit *head_commit, /* Run a post-merge hook */ run_hooks_l("post-merge", squash ? "1" : "0", NULL); - apply_autostash(git_path_merge_autostash(the_repository)); + if (new_head) + apply_autostash(git_path_merge_autostash(the_repository)); strbuf_release(&reflog_message); } @@ -1692,7 +1693,7 @@ int cmd_merge(int argc, const char **argv, const char *prefix) if (save_state(&stash)) oidclr(&stash); - for (i = 0; !merge_was_ok && i < use_strategies_nr; i++) { + for (i = 0; i < use_strategies_nr; i++) { int ret, cnt; if (i) { printf(_("Rewinding the tree to pristine...\n")); @@ -1707,7 +1708,7 @@ int cmd_merge(int argc, const char **argv, const char *prefix) */ wt_strategy = use_strategies[i]->name; - ret = try_merge_strategy(use_strategies[i]->name, + ret = try_merge_strategy(wt_strategy, common, remoteheads, head_commit); /* @@ -1717,16 +1718,17 @@ int cmd_merge(int argc, const char **argv, const char *prefix) */ if (ret < 2) { if (!ret) { - if (option_commit) { - /* Automerge succeeded. */ - automerge_was_ok = 1; - break; - } + /* + * This strategy worked; no point in trying + * another. + */ merge_was_ok = 1; + best_strategy = wt_strategy; + break; } cnt = (use_strategies_nr > 1) ? evaluate_result() : 0; if (best_cnt <= 0 || cnt <= best_cnt) { - best_strategy = use_strategies[i]->name; + best_strategy = wt_strategy; best_cnt = cnt; } } @@ -1736,7 +1738,8 @@ int cmd_merge(int argc, const char **argv, const char *prefix) * If we have a resulting tree, that means the strategy module * auto resolved the merge cleanly. */ - if (automerge_was_ok) { + if (merge_was_ok && option_commit) { + automerge_was_ok = 1; ret = finish_automerge(head_commit, head_subsumed, common, remoteheads, &result_tree, wt_strategy); @@ -1781,6 +1784,8 @@ int cmd_merge(int argc, const char **argv, const char *prefix) "stopped before committing as requested\n")); else ret = suggest_conflicts(); + if (autostash) + printf(_("When finished, apply stashed changes with `git stash pop`\n")); done: if (!automerge_was_ok) { diff --git a/builtin/multi-pack-index.c b/builtin/multi-pack-index.c index b3a3f0a571..9b126d6ce0 100644 --- a/builtin/multi-pack-index.c +++ b/builtin/multi-pack-index.c @@ -87,6 +87,13 @@ static int git_multi_pack_index_write_config(const char *var, const char *value, opts.flags &= ~MIDX_WRITE_BITMAP_HASH_CACHE; } + if (!strcmp(var, "pack.writebitmaplookuptable")) { + if (git_config_bool(var, value)) + opts.flags |= MIDX_WRITE_BITMAP_LOOKUP_TABLE; + else + opts.flags &= ~MIDX_WRITE_BITMAP_LOOKUP_TABLE; + } + /* * We should never make a fall-back call to 'git_default_config', since * this was already called in 'cmd_multi_pack_index()'. 
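The multi-pack-index hunk directly above (and a matching pack-objects.c hunk later in this patch) teaches the config callbacks about pack.writeBitmapLookupTable, turning a boolean configuration value into a single flag bit. Sketched as a fragment with a placeholder flags word (opts.flags in multi-pack-index, write_bitmap_options in pack-objects):

    #include "builtin.h"
    #include "config.h"
    #include "midx.h"

    static unsigned int flags;  /* placeholder for the builtin's flag word */

    static int sketch_config(const char *var, const char *value, void *cb)
    {
        if (!strcmp(var, "pack.writebitmaplookuptable")) {
            if (git_config_bool(var, value))
                flags |= MIDX_WRITE_BITMAP_LOOKUP_TABLE;
            else
                flags &= ~MIDX_WRITE_BITMAP_LOOKUP_TABLE;
        }
        return 0;
    }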
@@ -104,7 +111,8 @@ static void read_packs_from_stdin(struct string_list *to) strbuf_release(&buf); } -static int cmd_multi_pack_index_write(int argc, const char **argv) +static int cmd_multi_pack_index_write(int argc, const char **argv, + const char *prefix) { struct option *options; static struct option builtin_multi_pack_index_write_options[] = { @@ -132,7 +140,7 @@ static int cmd_multi_pack_index_write(int argc, const char **argv) if (isatty(2)) opts.flags |= MIDX_PROGRESS; - argc = parse_options(argc, argv, NULL, + argc = parse_options(argc, argv, prefix, options, builtin_multi_pack_index_write_usage, 0); if (argc) @@ -160,7 +168,8 @@ static int cmd_multi_pack_index_write(int argc, const char **argv) opts.refs_snapshot, opts.flags); } -static int cmd_multi_pack_index_verify(int argc, const char **argv) +static int cmd_multi_pack_index_verify(int argc, const char **argv, + const char *prefix) { struct option *options; static struct option builtin_multi_pack_index_verify_options[] = { @@ -174,7 +183,7 @@ static int cmd_multi_pack_index_verify(int argc, const char **argv) if (isatty(2)) opts.flags |= MIDX_PROGRESS; - argc = parse_options(argc, argv, NULL, + argc = parse_options(argc, argv, prefix, options, builtin_multi_pack_index_verify_usage, 0); if (argc) @@ -186,7 +195,8 @@ static int cmd_multi_pack_index_verify(int argc, const char **argv) return verify_midx_file(the_repository, opts.object_dir, opts.flags); } -static int cmd_multi_pack_index_expire(int argc, const char **argv) +static int cmd_multi_pack_index_expire(int argc, const char **argv, + const char *prefix) { struct option *options; static struct option builtin_multi_pack_index_expire_options[] = { @@ -200,7 +210,7 @@ static int cmd_multi_pack_index_expire(int argc, const char **argv) if (isatty(2)) opts.flags |= MIDX_PROGRESS; - argc = parse_options(argc, argv, NULL, + argc = parse_options(argc, argv, prefix, options, builtin_multi_pack_index_expire_usage, 0); if (argc) @@ -212,7 +222,8 @@ static int cmd_multi_pack_index_expire(int argc, const char **argv) return expire_midx_packs(the_repository, opts.object_dir, opts.flags); } -static int cmd_multi_pack_index_repack(int argc, const char **argv) +static int cmd_multi_pack_index_repack(int argc, const char **argv, + const char *prefix) { struct option *options; static struct option builtin_multi_pack_index_repack_options[] = { @@ -229,7 +240,7 @@ static int cmd_multi_pack_index_repack(int argc, const char **argv) if (isatty(2)) opts.flags |= MIDX_PROGRESS; - argc = parse_options(argc, argv, NULL, + argc = parse_options(argc, argv, prefix, options, builtin_multi_pack_index_repack_usage, 0); @@ -247,7 +258,15 @@ int cmd_multi_pack_index(int argc, const char **argv, const char *prefix) { int res; - struct option *builtin_multi_pack_index_options = common_opts; + parse_opt_subcommand_fn *fn = NULL; + struct option builtin_multi_pack_index_options[] = { + OPT_SUBCOMMAND("repack", &fn, cmd_multi_pack_index_repack), + OPT_SUBCOMMAND("write", &fn, cmd_multi_pack_index_write), + OPT_SUBCOMMAND("verify", &fn, cmd_multi_pack_index_verify), + OPT_SUBCOMMAND("expire", &fn, cmd_multi_pack_index_expire), + OPT_END(), + }; + struct option *options = parse_options_concat(builtin_multi_pack_index_options, common_opts); git_config(git_default_config, NULL); @@ -256,31 +275,12 @@ int cmd_multi_pack_index(int argc, const char **argv, the_repository->objects->odb) opts.object_dir = xstrdup(the_repository->objects->odb->path); - argc = parse_options(argc, argv, prefix, - 
builtin_multi_pack_index_options, - builtin_multi_pack_index_usage, - PARSE_OPT_STOP_AT_NON_OPTION); - - if (!argc) - goto usage; - - if (!strcmp(argv[0], "repack")) - res = cmd_multi_pack_index_repack(argc, argv); - else if (!strcmp(argv[0], "write")) - res = cmd_multi_pack_index_write(argc, argv); - else if (!strcmp(argv[0], "verify")) - res = cmd_multi_pack_index_verify(argc, argv); - else if (!strcmp(argv[0], "expire")) - res = cmd_multi_pack_index_expire(argc, argv); - else { - error(_("unrecognized subcommand: %s"), argv[0]); - goto usage; - } + argc = parse_options(argc, argv, prefix, options, + builtin_multi_pack_index_usage, 0); + FREE_AND_NULL(options); + + res = fn(argc, argv, prefix); free(opts.object_dir); return res; - -usage: - usage_with_options(builtin_multi_pack_index_usage, - builtin_multi_pack_index_options); } diff --git a/builtin/notes.c b/builtin/notes.c index a3d0d15a22..be51f69225 100644 --- a/builtin/notes.c +++ b/builtin/notes.c @@ -994,17 +994,34 @@ static int get_ref(int argc, const char **argv, const char *prefix) int cmd_notes(int argc, const char **argv, const char *prefix) { - int result; const char *override_notes_ref = NULL; + parse_opt_subcommand_fn *fn = NULL; struct option options[] = { OPT_STRING(0, "ref", &override_notes_ref, N_("notes-ref"), N_("use notes from <notes-ref>")), + OPT_SUBCOMMAND("list", &fn, list), + OPT_SUBCOMMAND("add", &fn, add), + OPT_SUBCOMMAND("copy", &fn, copy), + OPT_SUBCOMMAND("append", &fn, append_edit), + OPT_SUBCOMMAND("edit", &fn, append_edit), + OPT_SUBCOMMAND("show", &fn, show), + OPT_SUBCOMMAND("merge", &fn, merge), + OPT_SUBCOMMAND("remove", &fn, remove_cmd), + OPT_SUBCOMMAND("prune", &fn, prune), + OPT_SUBCOMMAND("get-ref", &fn, get_ref), OPT_END() }; git_config(git_default_config, NULL); argc = parse_options(argc, argv, prefix, options, git_notes_usage, - PARSE_OPT_STOP_AT_NON_OPTION); + PARSE_OPT_SUBCOMMAND_OPTIONAL); + if (!fn) { + if (argc) { + error(_("unknown subcommand: `%s'"), argv[0]); + usage_with_options(git_notes_usage, options); + } + fn = list; + } if (override_notes_ref) { struct strbuf sb = STRBUF_INIT; @@ -1014,28 +1031,5 @@ int cmd_notes(int argc, const char **argv, const char *prefix) strbuf_release(&sb); } - if (argc < 1 || !strcmp(argv[0], "list")) - result = list(argc, argv, prefix); - else if (!strcmp(argv[0], "add")) - result = add(argc, argv, prefix); - else if (!strcmp(argv[0], "copy")) - result = copy(argc, argv, prefix); - else if (!strcmp(argv[0], "append") || !strcmp(argv[0], "edit")) - result = append_edit(argc, argv, prefix); - else if (!strcmp(argv[0], "show")) - result = show(argc, argv, prefix); - else if (!strcmp(argv[0], "merge")) - result = merge(argc, argv, prefix); - else if (!strcmp(argv[0], "remove")) - result = remove_cmd(argc, argv, prefix); - else if (!strcmp(argv[0], "prune")) - result = prune(argc, argv, prefix); - else if (!strcmp(argv[0], "get-ref")) - result = get_ref(argc, argv, prefix); - else { - result = error(_("unknown subcommand: %s"), argv[0]); - usage_with_options(git_notes_usage, options); - } - - return result ? 
1 : 0; + return !!fn(argc, argv, prefix); } diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c index 8fd2794a32..3658c05caf 100644 --- a/builtin/pack-objects.c +++ b/builtin/pack-objects.c @@ -3149,6 +3149,14 @@ static int git_pack_config(const char *k, const char *v, void *cb) else write_bitmap_options &= ~BITMAP_OPT_HASH_CACHE; } + + if (!strcmp(k, "pack.writebitmaplookuptable")) { + if (git_config_bool(k, v)) + write_bitmap_options |= BITMAP_OPT_LOOKUP_TABLE; + else + write_bitmap_options &= ~BITMAP_OPT_LOOKUP_TABLE; + } + if (!strcmp(k, "pack.usebitmaps")) { use_bitmap_index_default = git_config_bool(k, v); return 0; diff --git a/builtin/range-diff.c b/builtin/range-diff.c index 50318849d6..e2a74efb42 100644 --- a/builtin/range-diff.c +++ b/builtin/range-diff.c @@ -38,8 +38,10 @@ int cmd_range_diff(int argc, const char **argv, const char *prefix) OPT_END() }; struct option *options; - int res = 0; + int i, dash_dash = -1, res = 0; struct strbuf range1 = STRBUF_INIT, range2 = STRBUF_INIT; + struct object_id oid; + const char *three_dots = NULL; git_config(git_diff_ui_config, NULL); @@ -47,7 +49,7 @@ int cmd_range_diff(int argc, const char **argv, const char *prefix) options = parse_options_concat(range_diff_options, diffopt.parseopts); argc = parse_options(argc, argv, prefix, options, - builtin_range_diff_usage, 0); + builtin_range_diff_usage, PARSE_OPT_KEEP_DASHDASH); diff_setup_done(&diffopt); @@ -55,40 +57,91 @@ int cmd_range_diff(int argc, const char **argv, const char *prefix) if (!simple_color) diffopt.use_color = 1; - if (argc == 2) { - if (!is_range_diff_range(argv[0])) - die(_("not a commit range: '%s'"), argv[0]); - strbuf_addstr(&range1, argv[0]); + for (i = 0; i < argc; i++) + if (!strcmp(argv[i], "--")) { + dash_dash = i; + break; + } + + if (dash_dash == 3 || + (dash_dash < 0 && argc > 2 && + !get_oid_committish(argv[0], &oid) && + !get_oid_committish(argv[1], &oid) && + !get_oid_committish(argv[2], &oid))) { + if (dash_dash < 0) + ; /* already validated arguments */ + else if (get_oid_committish(argv[0], &oid)) + usage_msg_optf(_("not a revision: '%s'"), + builtin_range_diff_usage, options, + argv[0]); + else if (get_oid_committish(argv[1], &oid)) + usage_msg_optf(_("not a revision: '%s'"), + builtin_range_diff_usage, options, + argv[1]); + else if (get_oid_committish(argv[2], &oid)) + usage_msg_optf(_("not a revision: '%s'"), + builtin_range_diff_usage, options, + argv[2]); - if (!is_range_diff_range(argv[1])) - die(_("not a commit range: '%s'"), argv[1]); - strbuf_addstr(&range2, argv[1]); - } else if (argc == 3) { strbuf_addf(&range1, "%s..%s", argv[0], argv[1]); strbuf_addf(&range2, "%s..%s", argv[0], argv[2]); - } else if (argc == 1) { - const char *b = strstr(argv[0], "..."), *a = argv[0]; + + strvec_pushv(&other_arg, argv + + (dash_dash < 0 ? 3 : dash_dash)); + } else if (dash_dash == 2 || + (dash_dash < 0 && argc > 1 && + is_range_diff_range(argv[0]) && + is_range_diff_range(argv[1]))) { + if (dash_dash < 0) + ; /* already validated arguments */ + else if (!is_range_diff_range(argv[0])) + usage_msg_optf(_("not a commit range: '%s'"), + builtin_range_diff_usage, options, + argv[0]); + else if (!is_range_diff_range(argv[1])) + usage_msg_optf(_("not a commit range: '%s'"), + builtin_range_diff_usage, options, + argv[1]); + + strbuf_addstr(&range1, argv[0]); + strbuf_addstr(&range2, argv[1]); + + strvec_pushv(&other_arg, argv + + (dash_dash < 0 ? 
2 : dash_dash)); + } else if (dash_dash == 1 || + (dash_dash < 0 && argc > 0 && + (three_dots = strstr(argv[0], "...")))) { + const char *a, *b; int a_len; - if (!b) { - error(_("single arg format must be symmetric range")); - usage_with_options(builtin_range_diff_usage, options); - } + if (dash_dash < 0) + ; /* already validated arguments */ + else if (!(three_dots = strstr(argv[0], "..."))) + usage_msg_optf(_("not a symmetric range: '%s'"), + builtin_range_diff_usage, options, + argv[0]); - a_len = (int)(b - a); - if (!a_len) { + if (three_dots == argv[0]) { a = "HEAD"; a_len = strlen(a); + } else { + a = argv[0]; + a_len = (int)(three_dots - a); } - b += 3; - if (!*b) + + if (three_dots[3]) + b = three_dots + 3; + else b = "HEAD"; + strbuf_addf(&range1, "%s..%.*s", b, a_len, a); strbuf_addf(&range2, "%.*s..%s", a_len, a, b); - } else { - error(_("need two commit ranges")); - usage_with_options(builtin_range_diff_usage, options); - } + + strvec_pushv(&other_arg, argv + + (dash_dash < 0 ? 1 : dash_dash)); + } else + usage_msg_opt(_("need two commit ranges"), + builtin_range_diff_usage, options); FREE_AND_NULL(options); range_diff_opts.dual_color = simple_color < 1; diff --git a/builtin/reflog.c b/builtin/reflog.c index 8f2da0b65b..57c5c0d061 100644 --- a/builtin/reflog.c +++ b/builtin/reflog.c @@ -194,6 +194,8 @@ static int expire_unreachable_callback(const struct option *opt, { struct cmd_reflog_expire_cb *cmd = opt->value; + BUG_ON_OPT_NEG(unset); + if (parse_expiry_date(arg, &cmd->expire_unreachable)) die(_("invalid timestamp '%s' given to '--%s'"), arg, opt->long_name); @@ -208,6 +210,8 @@ static int expire_total_callback(const struct option *opt, { struct cmd_reflog_expire_cb *cmd = opt->value; + BUG_ON_OPT_NEG(unset); + if (parse_expiry_date(arg, &cmd->expire_total)) die(_("invalid timestamp '%s' given to '--%s'"), arg, opt->long_name); @@ -224,7 +228,7 @@ static int cmd_reflog_show(int argc, const char **argv, const char *prefix) parse_options(argc, argv, prefix, options, reflog_show_usage, PARSE_OPT_KEEP_DASHDASH | PARSE_OPT_KEEP_ARGV0 | - PARSE_OPT_KEEP_UNKNOWN); + PARSE_OPT_KEEP_UNKNOWN_OPT); return cmd_log_reflog(argc, argv, prefix); } @@ -405,40 +409,21 @@ static int cmd_reflog_exists(int argc, const char **argv, const char *prefix) int cmd_reflog(int argc, const char **argv, const char *prefix) { + parse_opt_subcommand_fn *fn = NULL; struct option options[] = { + OPT_SUBCOMMAND("show", &fn, cmd_reflog_show), + OPT_SUBCOMMAND("expire", &fn, cmd_reflog_expire), + OPT_SUBCOMMAND("delete", &fn, cmd_reflog_delete), + OPT_SUBCOMMAND("exists", &fn, cmd_reflog_exists), OPT_END() }; argc = parse_options(argc, argv, prefix, options, reflog_usage, + PARSE_OPT_SUBCOMMAND_OPTIONAL | PARSE_OPT_KEEP_DASHDASH | PARSE_OPT_KEEP_ARGV0 | - PARSE_OPT_KEEP_UNKNOWN | - PARSE_OPT_NO_INTERNAL_HELP); - - /* - * With "git reflog" we default to showing it. !argc is - * impossible with PARSE_OPT_KEEP_ARGV0. - */ - if (argc == 1) - goto log_reflog; - - if (!strcmp(argv[1], "-h")) - usage_with_options(reflog_usage, options); - else if (*argv[1] == '-') - goto log_reflog; - - if (!strcmp(argv[1], "show")) - return cmd_reflog_show(argc - 1, argv + 1, prefix); - else if (!strcmp(argv[1], "expire")) - return cmd_reflog_expire(argc - 1, argv + 1, prefix); - else if (!strcmp(argv[1], "delete")) - return cmd_reflog_delete(argc - 1, argv + 1, prefix); - else if (!strcmp(argv[1], "exists")) - return cmd_reflog_exists(argc - 1, argv + 1, prefix); - - /* - * Fall-through for e.g. 
"git reflog -1", "git reflog master", - * as well as the plain "git reflog" above goto above. - */ -log_reflog: - return cmd_log_reflog(argc, argv, prefix); + PARSE_OPT_KEEP_UNKNOWN_OPT); + if (fn) + return fn(argc - 1, argv + 1, prefix); + else + return cmd_log_reflog(argc, argv, prefix); } diff --git a/builtin/remote.c b/builtin/remote.c index 24cd809d24..985b845a18 100644 --- a/builtin/remote.c +++ b/builtin/remote.c @@ -150,7 +150,7 @@ static int parse_mirror_opt(const struct option *opt, const char *arg, int not) return 0; } -static int add(int argc, const char **argv) +static int add(int argc, const char **argv, const char *prefix) { int fetch = 0, fetch_tags = TAGS_DEFAULT; unsigned mirror = MIRROR_NONE; @@ -177,8 +177,8 @@ static int add(int argc, const char **argv) OPT_END() }; - argc = parse_options(argc, argv, NULL, options, builtin_remote_add_usage, - 0); + argc = parse_options(argc, argv, prefix, options, + builtin_remote_add_usage, 0); if (argc != 2) usage_with_options(builtin_remote_add_usage, options); @@ -683,7 +683,7 @@ static void handle_push_default(const char* old_name, const char* new_name) } -static int mv(int argc, const char **argv) +static int mv(int argc, const char **argv, const char *prefix) { int show_progress = isatty(2); struct option options[] = { @@ -698,7 +698,7 @@ static int mv(int argc, const char **argv) int i, refs_renamed_nr = 0, refspec_updated = 0; struct progress *progress = NULL; - argc = parse_options(argc, argv, NULL, options, + argc = parse_options(argc, argv, prefix, options, builtin_remote_rename_usage, 0); if (argc != 2) @@ -847,7 +847,7 @@ static int mv(int argc, const char **argv) return 0; } -static int rm(int argc, const char **argv) +static int rm(int argc, const char **argv, const char *prefix) { struct option options[] = { OPT_END() @@ -865,12 +865,14 @@ static int rm(int argc, const char **argv) cb_data.skipped = &skipped; cb_data.keep = &known_remotes; - if (argc != 2) + argc = parse_options(argc, argv, prefix, options, + builtin_remote_rm_usage, 0); + if (argc != 1) usage_with_options(builtin_remote_rm_usage, options); - remote = remote_get(argv[1]); + remote = remote_get(argv[0]); if (!remote_is_configured(remote, 1)) { - error(_("No such remote: '%s'"), argv[1]); + error(_("No such remote: '%s'"), argv[0]); exit(2); } @@ -1258,7 +1260,7 @@ static int show_all(void) return result; } -static int show(int argc, const char **argv) +static int show(int argc, const char **argv, const char *prefix) { int no_query = 0, result = 0, query_flag = 0; struct option options[] = { @@ -1267,7 +1269,8 @@ static int show(int argc, const char **argv) }; struct show_info info = SHOW_INFO_INIT; - argc = parse_options(argc, argv, NULL, options, builtin_remote_show_usage, + argc = parse_options(argc, argv, prefix, options, + builtin_remote_show_usage, 0); if (argc < 1) @@ -1361,7 +1364,7 @@ static int show(int argc, const char **argv) return result; } -static int set_head(int argc, const char **argv) +static int set_head(int argc, const char **argv, const char *prefix) { int i, opt_a = 0, opt_d = 0, result = 0; struct strbuf buf = STRBUF_INIT, buf2 = STRBUF_INIT; @@ -1374,8 +1377,8 @@ static int set_head(int argc, const char **argv) N_("delete refs/remotes/<name>/HEAD")), OPT_END() }; - argc = parse_options(argc, argv, NULL, options, builtin_remote_sethead_usage, - 0); + argc = parse_options(argc, argv, prefix, options, + builtin_remote_sethead_usage, 0); if (argc) strbuf_addf(&buf, "refs/remotes/%s/HEAD", argv[0]); @@ -1466,7 +1469,7 @@ static int 
prune_remote(const char *remote, int dry_run) return result; } -static int prune(int argc, const char **argv) +static int prune(int argc, const char **argv, const char *prefix) { int dry_run = 0, result = 0; struct option options[] = { @@ -1474,8 +1477,8 @@ static int prune(int argc, const char **argv) OPT_END() }; - argc = parse_options(argc, argv, NULL, options, builtin_remote_prune_usage, - 0); + argc = parse_options(argc, argv, prefix, options, + builtin_remote_prune_usage, 0); if (argc < 1) usage_with_options(builtin_remote_prune_usage, options); @@ -1495,7 +1498,7 @@ static int get_remote_default(const char *key, const char *value UNUSED, void *p return 0; } -static int update(int argc, const char **argv) +static int update(int argc, const char **argv, const char *prefix) { int i, prune = -1; struct option options[] = { @@ -1507,7 +1510,8 @@ static int update(int argc, const char **argv) int default_defined = 0; int retval; - argc = parse_options(argc, argv, NULL, options, builtin_remote_update_usage, + argc = parse_options(argc, argv, prefix, options, + builtin_remote_update_usage, PARSE_OPT_KEEP_ARGV0); strvec_push(&fetch_argv, "fetch"); @@ -1578,7 +1582,7 @@ static int set_remote_branches(const char *remotename, const char **branches, return 0; } -static int set_branches(int argc, const char **argv) +static int set_branches(int argc, const char **argv, const char *prefix) { int add_mode = 0; struct option options[] = { @@ -1586,7 +1590,7 @@ static int set_branches(int argc, const char **argv) OPT_END() }; - argc = parse_options(argc, argv, NULL, options, + argc = parse_options(argc, argv, prefix, options, builtin_remote_setbranches_usage, 0); if (argc == 0) { error(_("no remote specified")); @@ -1597,7 +1601,7 @@ static int set_branches(int argc, const char **argv) return set_remote_branches(argv[0], argv + 1, add_mode); } -static int get_url(int argc, const char **argv) +static int get_url(int argc, const char **argv, const char *prefix) { int i, push_mode = 0, all_mode = 0; const char *remotename = NULL; @@ -1611,7 +1615,8 @@ static int get_url(int argc, const char **argv) N_("return all URLs")), OPT_END() }; - argc = parse_options(argc, argv, NULL, options, builtin_remote_geturl_usage, 0); + argc = parse_options(argc, argv, prefix, options, + builtin_remote_geturl_usage, 0); if (argc != 1) usage_with_options(builtin_remote_geturl_usage, options); @@ -1650,7 +1655,7 @@ static int get_url(int argc, const char **argv) return 0; } -static int set_url(int argc, const char **argv) +static int set_url(int argc, const char **argv, const char *prefix) { int i, push_mode = 0, add_mode = 0, delete_mode = 0; int matches = 0, negative_matches = 0; @@ -1671,7 +1676,8 @@ static int set_url(int argc, const char **argv) N_("delete URLs")), OPT_END() }; - argc = parse_options(argc, argv, NULL, options, builtin_remote_seturl_usage, + argc = parse_options(argc, argv, prefix, options, + builtin_remote_seturl_usage, PARSE_OPT_KEEP_ARGV0); if (add_mode && delete_mode) @@ -1742,41 +1748,33 @@ out: int cmd_remote(int argc, const char **argv, const char *prefix) { + parse_opt_subcommand_fn *fn = NULL; struct option options[] = { OPT__VERBOSE(&verbose, N_("be verbose; must be placed before a subcommand")), + OPT_SUBCOMMAND("add", &fn, add), + OPT_SUBCOMMAND("rename", &fn, mv), + OPT_SUBCOMMAND_F("rm", &fn, rm, PARSE_OPT_NOCOMPLETE), + OPT_SUBCOMMAND("remove", &fn, rm), + OPT_SUBCOMMAND("set-head", &fn, set_head), + OPT_SUBCOMMAND("set-branches", &fn, set_branches), + OPT_SUBCOMMAND("get-url", &fn, 
get_url), + OPT_SUBCOMMAND("set-url", &fn, set_url), + OPT_SUBCOMMAND("show", &fn, show), + OPT_SUBCOMMAND("prune", &fn, prune), + OPT_SUBCOMMAND("update", &fn, update), OPT_END() }; - int result; argc = parse_options(argc, argv, prefix, options, builtin_remote_usage, - PARSE_OPT_STOP_AT_NON_OPTION); + PARSE_OPT_SUBCOMMAND_OPTIONAL); - if (argc < 1) - result = show_all(); - else if (!strcmp(argv[0], "add")) - result = add(argc, argv); - else if (!strcmp(argv[0], "rename")) - result = mv(argc, argv); - else if (!strcmp(argv[0], "rm") || !strcmp(argv[0], "remove")) - result = rm(argc, argv); - else if (!strcmp(argv[0], "set-head")) - result = set_head(argc, argv); - else if (!strcmp(argv[0], "set-branches")) - result = set_branches(argc, argv); - else if (!strcmp(argv[0], "get-url")) - result = get_url(argc, argv); - else if (!strcmp(argv[0], "set-url")) - result = set_url(argc, argv); - else if (!strcmp(argv[0], "show")) - result = show(argc, argv); - else if (!strcmp(argv[0], "prune")) - result = prune(argc, argv); - else if (!strcmp(argv[0], "update")) - result = update(argc, argv); - else { - error(_("Unknown subcommand: %s"), argv[0]); - usage_with_options(builtin_remote_usage, options); + if (fn) { + return !!fn(argc, argv, prefix); + } else { + if (argc) { + error(_("unknown subcommand: `%s'"), argv[0]); + usage_with_options(builtin_remote_usage, options); + } + return !!show_all(); } - - return result ? 1 : 0; } diff --git a/builtin/replace.c b/builtin/replace.c index 583702a098..a29e911d30 100644 --- a/builtin/replace.c +++ b/builtin/replace.c @@ -106,6 +106,7 @@ static int for_each_replace_name(const char **argv, each_replace_name_fn fn) size_t base_len; int had_error = 0; struct object_id oid; + const char *git_replace_ref_base = ref_namespace[NAMESPACE_REPLACE].ref; strbuf_addstr(&ref, git_replace_ref_base); base_len = ref.len; @@ -147,6 +148,8 @@ static int check_ref_valid(struct object_id *object, struct strbuf *ref, int force) { + const char *git_replace_ref_base = ref_namespace[NAMESPACE_REPLACE].ref; + strbuf_reset(ref); strbuf_addf(ref, "%s%s", git_replace_ref_base, oid_to_hex(object)); if (check_refname_format(ref->buf, 0)) diff --git a/builtin/rev-parse.c b/builtin/rev-parse.c index e5d70ee6d2..8f61050bde 100644 --- a/builtin/rev-parse.c +++ b/builtin/rev-parse.c @@ -481,6 +481,9 @@ static int cmd_parseopt(int argc, const char **argv, const char *prefix) if (!s) s = help; + if (s == sb.buf) + die(_("missing opt-spec before option flags")); + if (s - sb.buf == 1) /* short option only */ o->short_name = *sb.buf; else if (sb.buf[1] != ',') /* long option only */ diff --git a/builtin/revert.c b/builtin/revert.c index 2554f9099c..ee2a0807f0 100644 --- a/builtin/revert.c +++ b/builtin/revert.c @@ -141,7 +141,7 @@ static int run_sequencer(int argc, const char **argv, struct replay_opts *opts) argc = parse_options(argc, argv, NULL, options, usage_str, PARSE_OPT_KEEP_ARGV0 | - PARSE_OPT_KEEP_UNKNOWN); + PARSE_OPT_KEEP_UNKNOWN_OPT); prepare_repo_settings(the_repository); the_repository->settings.command_requires_full_index = 0; diff --git a/builtin/shortlog.c b/builtin/shortlog.c index 086dfee45a..7a1e1fe7c0 100644 --- a/builtin/shortlog.c +++ b/builtin/shortlog.c @@ -381,6 +381,7 @@ int cmd_shortlog(int argc, const char **argv, const char *prefix) break; case PARSE_OPT_HELP: case PARSE_OPT_ERROR: + case PARSE_OPT_SUBCOMMAND: exit(129); case PARSE_OPT_COMPLETE: exit(0); diff --git a/builtin/sparse-checkout.c b/builtin/sparse-checkout.c index f91e29b56a..287716db68 100644 --- 
a/builtin/sparse-checkout.c +++ b/builtin/sparse-checkout.c @@ -48,7 +48,7 @@ static char const * const builtin_sparse_checkout_list_usage[] = { NULL }; -static int sparse_checkout_list(int argc, const char **argv) +static int sparse_checkout_list(int argc, const char **argv, const char *prefix) { static struct option builtin_sparse_checkout_list_options[] = { OPT_END(), @@ -60,7 +60,7 @@ static int sparse_checkout_list(int argc, const char **argv) if (!core_apply_sparse_checkout) die(_("this worktree is not sparse")); - argc = parse_options(argc, argv, NULL, + argc = parse_options(argc, argv, prefix, builtin_sparse_checkout_list_options, builtin_sparse_checkout_list_usage, 0); @@ -431,7 +431,7 @@ static struct sparse_checkout_init_opts { int sparse_index; } init_opts; -static int sparse_checkout_init(int argc, const char **argv) +static int sparse_checkout_init(int argc, const char **argv, const char *prefix) { struct pattern_list pl; char *sparse_filename; @@ -452,7 +452,7 @@ static int sparse_checkout_init(int argc, const char **argv) init_opts.cone_mode = -1; init_opts.sparse_index = -1; - argc = parse_options(argc, argv, NULL, + argc = parse_options(argc, argv, prefix, builtin_sparse_checkout_init_options, builtin_sparse_checkout_init_usage, 0); @@ -767,7 +767,7 @@ static int sparse_checkout_add(int argc, const char **argv, const char *prefix) argc = parse_options(argc, argv, prefix, builtin_sparse_checkout_add_options, builtin_sparse_checkout_add_usage, - PARSE_OPT_KEEP_UNKNOWN); + PARSE_OPT_KEEP_UNKNOWN_OPT); sanitize_paths(argc, argv, prefix, add_opts.skip_checks); @@ -813,7 +813,7 @@ static int sparse_checkout_set(int argc, const char **argv, const char *prefix) argc = parse_options(argc, argv, prefix, builtin_sparse_checkout_set_options, builtin_sparse_checkout_set_usage, - PARSE_OPT_KEEP_UNKNOWN); + PARSE_OPT_KEEP_UNKNOWN_OPT); if (update_modes(&set_opts.cone_mode, &set_opts.sparse_index)) return 1; @@ -843,7 +843,8 @@ static struct sparse_checkout_reapply_opts { int sparse_index; } reapply_opts; -static int sparse_checkout_reapply(int argc, const char **argv) +static int sparse_checkout_reapply(int argc, const char **argv, + const char *prefix) { static struct option builtin_sparse_checkout_reapply_options[] = { OPT_BOOL(0, "cone", &reapply_opts.cone_mode, @@ -859,7 +860,7 @@ static int sparse_checkout_reapply(int argc, const char **argv) reapply_opts.cone_mode = -1; reapply_opts.sparse_index = -1; - argc = parse_options(argc, argv, NULL, + argc = parse_options(argc, argv, prefix, builtin_sparse_checkout_reapply_options, builtin_sparse_checkout_reapply_usage, 0); @@ -876,7 +877,8 @@ static char const * const builtin_sparse_checkout_disable_usage[] = { NULL }; -static int sparse_checkout_disable(int argc, const char **argv) +static int sparse_checkout_disable(int argc, const char **argv, + const char *prefix) { static struct option builtin_sparse_checkout_disable_options[] = { OPT_END(), @@ -895,7 +897,7 @@ static int sparse_checkout_disable(int argc, const char **argv) * forcibly return to a dense checkout regardless of initial state. 
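Two mechanical changes run through these sparse-checkout hunks and much of the rest of the patch: subcommand helpers now receive the prefix and pass it to parse_options() instead of NULL, and the flag formerly spelled PARSE_OPT_KEEP_UNKNOWN is now PARSE_OPT_KEEP_UNKNOWN_OPT, which makes it clearer that it keeps unknown dashed options rather than arbitrary arguments. A compressed sketch with placeholder names (sketch_set, sketch_set_usage):

    static const char * const sketch_set_usage[] = {
        N_("git example set [<options>] [<pattern>...]"),
        NULL
    };

    static int sketch_set(int argc, const char **argv, const char *prefix)
    {
        struct option options[] = {
            OPT_END(),
        };

        /*
         * Forwarding "prefix" lets options that take paths (OPT_FILENAME
         * and friends) resolve them against the directory the user ran
         * the command from; KEEP_UNKNOWN_OPT leaves unrecognized dashed
         * options in argv for a later parsing pass.
         */
        argc = parse_options(argc, argv, prefix, options, sketch_set_usage,
                             PARSE_OPT_KEEP_UNKNOWN_OPT);
        /* ... */
        return 0;
    }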
*/ - argc = parse_options(argc, argv, NULL, + argc = parse_options(argc, argv, prefix, builtin_sparse_checkout_disable_options, builtin_sparse_checkout_disable_usage, 0); @@ -922,39 +924,25 @@ static int sparse_checkout_disable(int argc, const char **argv) int cmd_sparse_checkout(int argc, const char **argv, const char *prefix) { - static struct option builtin_sparse_checkout_options[] = { + parse_opt_subcommand_fn *fn = NULL; + struct option builtin_sparse_checkout_options[] = { + OPT_SUBCOMMAND("list", &fn, sparse_checkout_list), + OPT_SUBCOMMAND("init", &fn, sparse_checkout_init), + OPT_SUBCOMMAND("set", &fn, sparse_checkout_set), + OPT_SUBCOMMAND("add", &fn, sparse_checkout_add), + OPT_SUBCOMMAND("reapply", &fn, sparse_checkout_reapply), + OPT_SUBCOMMAND("disable", &fn, sparse_checkout_disable), OPT_END(), }; - if (argc == 2 && !strcmp(argv[1], "-h")) - usage_with_options(builtin_sparse_checkout_usage, - builtin_sparse_checkout_options); - argc = parse_options(argc, argv, prefix, builtin_sparse_checkout_options, - builtin_sparse_checkout_usage, - PARSE_OPT_STOP_AT_NON_OPTION); + builtin_sparse_checkout_usage, 0); git_config(git_default_config, NULL); prepare_repo_settings(the_repository); the_repository->settings.command_requires_full_index = 0; - if (argc > 0) { - if (!strcmp(argv[0], "list")) - return sparse_checkout_list(argc, argv); - if (!strcmp(argv[0], "init")) - return sparse_checkout_init(argc, argv); - if (!strcmp(argv[0], "set")) - return sparse_checkout_set(argc, argv, prefix); - if (!strcmp(argv[0], "add")) - return sparse_checkout_add(argc, argv, prefix); - if (!strcmp(argv[0], "reapply")) - return sparse_checkout_reapply(argc, argv); - if (!strcmp(argv[0], "disable")) - return sparse_checkout_disable(argc, argv); - } - - usage_with_options(builtin_sparse_checkout_usage, - builtin_sparse_checkout_options); + return fn(argc, argv, prefix); } diff --git a/builtin/stash.c b/builtin/stash.c index 3492aff751..2274aae255 100644 --- a/builtin/stash.c +++ b/builtin/stash.c @@ -785,7 +785,7 @@ static int list_stash(int argc, const char **argv, const char *prefix) argc = parse_options(argc, argv, prefix, options, git_stash_list_usage, - PARSE_OPT_KEEP_UNKNOWN); + PARSE_OPT_KEEP_UNKNOWN_OPT); if (!ref_exists(ref_stash)) return 0; @@ -876,7 +876,7 @@ static int show_stash(int argc, const char **argv, const char *prefix) init_revisions(&rev, prefix); argc = parse_options(argc, argv, prefix, options, git_stash_show_usage, - PARSE_OPT_KEEP_ARGV0 | PARSE_OPT_KEEP_UNKNOWN | + PARSE_OPT_KEEP_ARGV0 | PARSE_OPT_KEEP_UNKNOWN_OPT | PARSE_OPT_KEEP_DASHDASH); strvec_push(&revision_args, argv[0]); @@ -982,7 +982,7 @@ static int store_stash(int argc, const char **argv, const char *prefix) argc = parse_options(argc, argv, prefix, options, git_stash_store_usage, - PARSE_OPT_KEEP_UNKNOWN); + PARSE_OPT_KEEP_UNKNOWN_OPT); if (argc != 1) { if (!quiet) @@ -1742,6 +1742,11 @@ static int push_stash(int argc, const char **argv, const char *prefix, include_untracked, only_staged); } +static int push_stash_unassumed(int argc, const char **argv, const char *prefix) +{ + return push_stash(argc, argv, prefix, 0); +} + static int save_stash(int argc, const char **argv, const char *prefix) { int keep_index = -1; @@ -1790,15 +1795,28 @@ int cmd_stash(int argc, const char **argv, const char *prefix) pid_t pid = getpid(); const char *index_file; struct strvec args = STRVEC_INIT; - + parse_opt_subcommand_fn *fn = NULL; struct option options[] = { + OPT_SUBCOMMAND("apply", &fn, apply_stash), + OPT_SUBCOMMAND("clear", 
&fn, clear_stash), + OPT_SUBCOMMAND("drop", &fn, drop_stash), + OPT_SUBCOMMAND("pop", &fn, pop_stash), + OPT_SUBCOMMAND("branch", &fn, branch_stash), + OPT_SUBCOMMAND("list", &fn, list_stash), + OPT_SUBCOMMAND("show", &fn, show_stash), + OPT_SUBCOMMAND("store", &fn, store_stash), + OPT_SUBCOMMAND("create", &fn, create_stash), + OPT_SUBCOMMAND("push", &fn, push_stash_unassumed), + OPT_SUBCOMMAND_F("save", &fn, save_stash, PARSE_OPT_NOCOMPLETE), OPT_END() }; git_config(git_stash_config, NULL); argc = parse_options(argc, argv, prefix, options, git_stash_usage, - PARSE_OPT_KEEP_UNKNOWN | PARSE_OPT_KEEP_DASHDASH); + PARSE_OPT_SUBCOMMAND_OPTIONAL | + PARSE_OPT_KEEP_UNKNOWN_OPT | + PARSE_OPT_KEEP_DASHDASH); prepare_repo_settings(the_repository); the_repository->settings.command_requires_full_index = 0; @@ -1807,33 +1825,10 @@ int cmd_stash(int argc, const char **argv, const char *prefix) strbuf_addf(&stash_index_path, "%s.stash.%" PRIuMAX, index_file, (uintmax_t)pid); - if (!argc) - return !!push_stash(0, NULL, prefix, 0); - else if (!strcmp(argv[0], "apply")) - return !!apply_stash(argc, argv, prefix); - else if (!strcmp(argv[0], "clear")) - return !!clear_stash(argc, argv, prefix); - else if (!strcmp(argv[0], "drop")) - return !!drop_stash(argc, argv, prefix); - else if (!strcmp(argv[0], "pop")) - return !!pop_stash(argc, argv, prefix); - else if (!strcmp(argv[0], "branch")) - return !!branch_stash(argc, argv, prefix); - else if (!strcmp(argv[0], "list")) - return !!list_stash(argc, argv, prefix); - else if (!strcmp(argv[0], "show")) - return !!show_stash(argc, argv, prefix); - else if (!strcmp(argv[0], "store")) - return !!store_stash(argc, argv, prefix); - else if (!strcmp(argv[0], "create")) - return !!create_stash(argc, argv, prefix); - else if (!strcmp(argv[0], "push")) - return !!push_stash(argc, argv, prefix, 0); - else if (!strcmp(argv[0], "save")) - return !!save_stash(argc, argv, prefix); - else if (*argv[0] != '-') - usage_msg_optf(_("unknown subcommand: %s"), - git_stash_usage, options, argv[0]); + if (fn) + return !!fn(argc, argv, prefix); + else if (!argc) + return !!push_stash_unassumed(0, NULL, prefix); /* Assume 'stash push' */ strvec_push(&args, "push"); diff --git a/builtin/submodule--helper.c b/builtin/submodule--helper.c index 88df6c5017..60f9f568c6 100644 --- a/builtin/submodule--helper.c +++ b/builtin/submodule--helper.c @@ -31,45 +31,57 @@ typedef void (*each_submodule_fn)(const struct cache_entry *list_item, void *cb_data); -static char *repo_get_default_remote(struct repository *repo) +static int repo_get_default_remote(struct repository *repo, char **default_remote) { - char *dest = NULL, *ret; + char *dest = NULL; struct strbuf sb = STRBUF_INIT; struct ref_store *store = get_main_ref_store(repo); const char *refname = refs_resolve_ref_unsafe(store, "HEAD", 0, NULL, NULL); if (!refname) - die(_("No such ref: %s"), "HEAD"); + return die_message(_("No such ref: %s"), "HEAD"); /* detached HEAD */ - if (!strcmp(refname, "HEAD")) - return xstrdup("origin"); + if (!strcmp(refname, "HEAD")) { + *default_remote = xstrdup("origin"); + return 0; + } if (!skip_prefix(refname, "refs/heads/", &refname)) - die(_("Expecting a full ref name, got %s"), refname); + return die_message(_("Expecting a full ref name, got %s"), + refname); strbuf_addf(&sb, "branch.%s.remote", refname); if (repo_config_get_string(repo, sb.buf, &dest)) - ret = xstrdup("origin"); + *default_remote = xstrdup("origin"); else - ret = dest; + *default_remote = dest; strbuf_release(&sb); - return ret; + return 0; } 
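cmd_stash() above, like cmd_notes(), cmd_remote() and cmd_reflog() earlier in this patch, uses the optional flavour of subcommand dispatch: with PARSE_OPT_SUBCOMMAND_OPTIONAL, fn stays NULL when no known subcommand was given, and the caller supplies a default (push for stash, list for notes, the plain log behaviour for reflog). A sketch of that shape, reusing the placeholder handler names from the dispatch sketch earlier in this section:

    int cmd_example(int argc, const char **argv, const char *prefix)
    {
        parse_opt_subcommand_fn *fn = NULL;
        struct option options[] = {
            OPT_SUBCOMMAND("frotz", &fn, sub_frotz),
            OPT_END(),
        };

        argc = parse_options(argc, argv, prefix, options, example_usage,
                             PARSE_OPT_SUBCOMMAND_OPTIONAL);
        if (!fn) {
            if (argc) {
                /* a word was given, but it is not a known subcommand */
                error(_("unknown subcommand: `%s'"), argv[0]);
                usage_with_options(example_usage, options);
            }
            fn = sub_frotz;   /* placeholder default, e.g. "push" for stash */
        }
        return !!fn(argc, argv, prefix);
    }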
-static char *get_default_remote_submodule(const char *module_path) +static int get_default_remote_submodule(const char *module_path, char **default_remote) { struct repository subrepo; - repo_submodule_init(&subrepo, the_repository, module_path, null_oid()); - return repo_get_default_remote(&subrepo); + if (repo_submodule_init(&subrepo, the_repository, module_path, + null_oid()) < 0) + return die_message(_("could not get a repository handle for submodule '%s'"), + module_path); + return repo_get_default_remote(&subrepo, default_remote); } static char *get_default_remote(void) { - return repo_get_default_remote(the_repository); + char *default_remote; + int code = repo_get_default_remote(the_repository, &default_remote); + + if (code) + exit(code); + + return default_remote; } static char *resolve_relative_url(const char *rel_url, const char *up_path, int quiet) @@ -96,28 +108,6 @@ static char *resolve_relative_url(const char *rel_url, const char *up_path, int return resolved_url; } -static int resolve_relative_url_test(int argc, const char **argv, const char *prefix) -{ - char *remoteurl, *res; - const char *up_path, *url; - - if (argc != 4) - die("resolve-relative-url-test only accepts three arguments: <up_path> <remoteurl> <url>"); - - up_path = argv[1]; - remoteurl = xstrdup(argv[2]); - url = argv[3]; - - if (!strcmp(up_path, "(null)")) - up_path = NULL; - - res = relative_url(remoteurl, url, up_path); - puts(res); - free(res); - free(remoteurl); - return 0; -} - /* the result should be freed by the caller. */ static char *get_submodule_displaypath(const char *path, const char *prefix) { @@ -189,6 +179,7 @@ static int module_list_compute(int argc, const char **argv, { int i, result = 0; char *ps_matched = NULL; + parse_pathspec(pathspec, 0, PATHSPEC_PREFER_FULL, prefix, argv); @@ -266,49 +257,11 @@ static char *get_up_path(const char *path) return strbuf_detach(&sb, NULL); } -static int module_list(int argc, const char **argv, const char *prefix) -{ - int i; - struct pathspec pathspec; - struct module_list list = MODULE_LIST_INIT; - - struct option module_list_options[] = { - OPT_STRING(0, "prefix", &prefix, - N_("path"), - N_("alternative anchor for relative paths")), - OPT_END() - }; - - const char *const git_submodule_helper_usage[] = { - N_("git submodule--helper list [--prefix=<path>] [<path>...]"), - NULL - }; - - argc = parse_options(argc, argv, prefix, module_list_options, - git_submodule_helper_usage, 0); - - if (module_list_compute(argc, argv, prefix, &pathspec, &list) < 0) - return 1; - - for (i = 0; i < list.nr; i++) { - const struct cache_entry *ce = list.entries[i]; - - if (ce_stage(ce)) - printf("%06o %s U\t", ce->ce_mode, - oid_to_hex(null_oid())); - else - printf("%06o %s %d\t", ce->ce_mode, - oid_to_hex(&ce->oid), ce_stage(ce)); - - fprintf(stdout, "%s\n", ce->name); - } - return 0; -} - static void for_each_listed_submodule(const struct module_list *list, each_submodule_fn fn, void *cb_data) { int i; + for (i = 0; i < list->nr; i++) fn(list->entries[i], cb_data); } @@ -328,7 +281,6 @@ static void runcommand_in_submodule_cb(const struct cache_entry *list_item, struct foreach_cb *info = cb_data; const char *path = list_item->name; const struct object_id *ce_oid = &list_item->oid; - const struct submodule *sub; struct child_process cp = CHILD_PROCESS_INIT; char *displaypath; @@ -429,14 +381,12 @@ static int module_foreach(int argc, const char **argv, const char *prefix) struct foreach_cb info = FOREACH_CB_INIT; struct pathspec pathspec; struct module_list list = 
MODULE_LIST_INIT; - struct option module_foreach_options[] = { OPT__QUIET(&info.quiet, N_("suppress output of entering each submodule command")), OPT_BOOL(0, "recursive", &info.recursive, N_("recurse into nested submodules")), OPT_END() }; - const char *const git_submodule_helper_usage[] = { N_("git submodule foreach [--quiet] [--recursive] [--] <command>"), NULL @@ -480,7 +430,8 @@ static void init_submodule(const char *path, const char *prefix, { const struct submodule *sub; struct strbuf sb = STRBUF_INIT; - char *upd = NULL, *url = NULL, *displaypath; + const char *upd; + char *url = NULL, *displaypath; displaypath = get_submodule_displaypath(path, prefix); @@ -519,6 +470,7 @@ static void init_submodule(const char *path, const char *prefix, if (starts_with_dot_dot_slash(url) || starts_with_dot_slash(url)) { char *oldurl = url; + url = resolve_relative_url(oldurl, NULL, 0); free(oldurl); } @@ -535,14 +487,15 @@ static void init_submodule(const char *path, const char *prefix, /* Copy "update" setting when it is not set yet */ strbuf_addf(&sb, "submodule.%s.update", sub->name); - if (git_config_get_string(sb.buf, &upd) && + if (git_config_get_string_tmp(sb.buf, &upd) && sub->update_strategy.type != SM_UPDATE_UNSPECIFIED) { if (sub->update_strategy.type == SM_UPDATE_COMMAND) { fprintf(stderr, _("warning: command update mode suggested for submodule '%s'\n"), sub->name); - upd = xstrdup("none"); - } else - upd = xstrdup(submodule_strategy_to_string(&sub->update_strategy)); + upd = "none"; + } else { + upd = submodule_update_type_to_string(sub->update_strategy.type); + } if (git_config_set_gently(sb.buf, upd)) die(_("Failed to register update mode for submodule path '%s'"), displaypath); @@ -550,12 +503,12 @@ static void init_submodule(const char *path, const char *prefix, strbuf_release(&sb); free(displaypath); free(url); - free(upd); } static void init_submodule_cb(const struct cache_entry *list_item, void *cb_data) { struct init_cb *info = cb_data; + init_submodule(list_item->name, info->prefix, info->flags); } @@ -565,12 +518,10 @@ static int module_init(int argc, const char **argv, const char *prefix) struct pathspec pathspec; struct module_list list = MODULE_LIST_INIT; int quiet = 0; - struct option module_init_options[] = { OPT__QUIET(&quiet, N_("suppress output for initializing a submodule")), OPT_END() }; - const char *const git_submodule_helper_usage[] = { N_("git submodule init [<options>] [<path>]"), NULL @@ -628,6 +579,7 @@ static int handle_submodule_head_ref(const char *refname UNUSED, void *cb_data) { struct object_id *output = cb_data; + if (oid) oidcpy(output, oid); @@ -734,6 +686,7 @@ static void status_submodule_cb(const struct cache_entry *list_item, void *cb_data) { struct status_cb *info = cb_data; + status_submodule(list_item->name, &list_item->oid, list_item->ce_flags, info->prefix, info->flags); } @@ -744,14 +697,12 @@ static int module_status(int argc, const char **argv, const char *prefix) struct pathspec pathspec; struct module_list list = MODULE_LIST_INIT; int quiet = 0; - struct option module_status_options[] = { OPT__QUIET(&quiet, N_("suppress submodule status output")), OPT_BIT(0, "cached", &info.flags, N_("use commit stored in the index instead of the one stored in the submodule HEAD"), OPT_CACHED), OPT_BIT(0, "recursive", &info.flags, N_("recurse into nested submodules"), OPT_RECURSIVE), OPT_END() }; - const char *const git_submodule_helper_usage[] = { N_("git submodule status [--quiet] [--cached] [--recursive] [<path>...]"), NULL @@ -772,24 +723,6 @@ static 
int module_status(int argc, const char **argv, const char *prefix) return 0; } -static int module_name(int argc, const char **argv, const char *prefix) -{ - const struct submodule *sub; - - if (argc != 2) - usage(_("git submodule--helper name <path>")); - - sub = submodule_from_path(the_repository, null_oid(), argv[1]); - - if (!sub) - die(_("no submodule mapping found in .gitmodules for path '%s'"), - argv[1]); - - printf("%s\n", sub->name); - - return 0; -} - struct module_cb { unsigned int mod_src; unsigned int mod_dst; @@ -842,7 +775,7 @@ static char *verify_submodule_committish(const char *sm_path, return strbuf_detach(&result, NULL); } -static void print_submodule_summary(struct summary_cb *info, char *errmsg, +static void print_submodule_summary(struct summary_cb *info, const char *errmsg, int total_commits, const char *displaypath, const char *src_abbrev, const char *dst_abbrev, struct module_cb *p) @@ -900,12 +833,13 @@ static void generate_submodule_summary(struct summary_cb *info, { char *displaypath, *src_abbrev = NULL, *dst_abbrev; int missing_src = 0, missing_dst = 0; - char *errmsg = NULL; + struct strbuf errmsg = STRBUF_INIT; int total_commits = -1; if (!info->cached && oideq(&p->oid_dst, null_oid())) { if (S_ISGITLINK(p->mod_dst)) { struct ref_store *refs = get_submodule_ref_store(p->sm_path); + if (refs) refs_head_ref(refs, handle_submodule_head_ref, &p->oid_dst); } else if (S_ISLNK(p->mod_dst) || S_ISREG(p->mod_dst)) { @@ -1000,23 +934,21 @@ static void generate_submodule_summary(struct summary_cb *info, * submodule, i.e., deleted or changed to blob */ if (S_ISGITLINK(p->mod_dst)) { - struct strbuf errmsg_str = STRBUF_INIT; if (missing_src && missing_dst) { - strbuf_addf(&errmsg_str, " Warn: %s doesn't contain commits %s and %s\n", + strbuf_addf(&errmsg, " Warn: %s doesn't contain commits %s and %s\n", displaypath, oid_to_hex(&p->oid_src), oid_to_hex(&p->oid_dst)); } else { - strbuf_addf(&errmsg_str, " Warn: %s doesn't contain commit %s\n", + strbuf_addf(&errmsg, " Warn: %s doesn't contain commit %s\n", displaypath, missing_src ? oid_to_hex(&p->oid_src) : oid_to_hex(&p->oid_dst)); } - errmsg = strbuf_detach(&errmsg_str, NULL); } } - print_submodule_summary(info, errmsg, total_commits, - displaypath, src_abbrev, + print_submodule_summary(info, errmsg.len ? 
errmsg.buf : NULL, + total_commits, displaypath, src_abbrev, dst_abbrev, p); free(displaypath); @@ -1165,7 +1097,6 @@ static int module_summary(int argc, const char **argv, const char *prefix) enum diff_cmd diff_cmd = DIFF_INDEX; struct object_id head_oid; int ret; - struct option module_summary_options[] = { OPT_BOOL(0, "cached", &cached, N_("use the commit stored in the index instead of the submodule HEAD")), @@ -1177,7 +1108,6 @@ static int module_summary(int argc, const char **argv, const char *prefix) N_("limit the summary size")), OPT_END() }; - const char *const git_submodule_helper_usage[] = { N_("git submodule summary [<options>] [<commit>] [--] [<path>]"), NULL @@ -1239,6 +1169,7 @@ static void sync_submodule(const char *path, const char *prefix, char *sub_origin_url, *super_config_url, *displaypath, *default_remote; struct strbuf sb = STRBUF_INIT; char *sub_config_path = NULL; + int code; if (!is_submodule_active(the_repository, path)) return; @@ -1249,6 +1180,7 @@ static void sync_submodule(const char *path, const char *prefix, if (starts_with_dot_dot_slash(sub->url) || starts_with_dot_slash(sub->url)) { char *up_path = get_up_path(path); + sub_origin_url = resolve_relative_url(sub->url, up_path, 1); super_config_url = resolve_relative_url(sub->url, NULL, 1); free(up_path); @@ -1277,10 +1209,9 @@ static void sync_submodule(const char *path, const char *prefix, goto cleanup; strbuf_reset(&sb); - default_remote = get_default_remote_submodule(path); - if (!default_remote) - die(_("failed to get the default remote for submodule '%s'"), - path); + code = get_default_remote_submodule(path, &default_remote); + if (code) + exit(code); remote_key = xstrfmt("remote.%s.url", default_remote); free(default_remote); @@ -1324,6 +1255,7 @@ cleanup: static void sync_submodule_cb(const struct cache_entry *list_item, void *cb_data) { struct sync_cb *info = cb_data; + sync_submodule(list_item->name, info->prefix, info->flags); } @@ -1334,14 +1266,12 @@ static int module_sync(int argc, const char **argv, const char *prefix) struct module_list list = MODULE_LIST_INIT; int quiet = 0; int recursive = 0; - struct option module_sync_options[] = { OPT__QUIET(&quiet, N_("suppress output of synchronizing submodule url")), OPT_BOOL(0, "recursive", &recursive, N_("recurse into nested submodules")), OPT_END() }; - const char *const git_submodule_helper_usage[] = { N_("git submodule sync [--quiet] [--recursive] [<path>]"), NULL @@ -1405,6 +1335,7 @@ static void deinit_submodule(const char *path, const char *prefix, if (!(flags & OPT_FORCE)) { struct child_process cp_rm = CHILD_PROCESS_INIT; + cp_rm.git_cmd = 1; strvec_pushl(&cp_rm.args, "rm", "-qn", path, NULL); @@ -1441,6 +1372,7 @@ static void deinit_submodule(const char *path, const char *prefix, /* remove the .git/config entries (unless the user already did it) */ if (!capture_command(&cp_config, &sb_config, 0) && sb_config.len) { char *sub_key = xstrfmt("submodule.%s", sub->name); + /* * remove the whole section so we have a clean state when * the user later decides to init this submodule again @@ -1473,14 +1405,12 @@ static int module_deinit(int argc, const char **argv, const char *prefix) int quiet = 0; int force = 0; int all = 0; - struct option module_deinit_options[] = { OPT__QUIET(&quiet, N_("suppress submodule status output")), OPT__FORCE(&force, N_("remove submodule working trees even if they contain local changes"), 0), OPT_BOOL(0, "all", &all, N_("unregister all submodules")), OPT_END() }; - const char *const git_submodule_helper_usage[] = { 
N_("git submodule deinit [--quiet] [-f | --force] [--all | [--] [<path>...]]"), NULL @@ -1519,7 +1449,6 @@ struct module_clone_data { const char *url; const char *depth; struct list_objects_filter_options *filter_options; - struct string_list reference; unsigned int quiet: 1; unsigned int progress: 1; unsigned int dissociate: 1; @@ -1527,7 +1456,6 @@ struct module_clone_data { int single_branch; }; #define MODULE_CLONE_DATA_INIT { \ - .reference = STRING_LIST_INIT_NODUP, \ .single_branch = -1, \ } @@ -1568,7 +1496,9 @@ static int add_possible_reference_from_superproject( struct strbuf err = STRBUF_INIT; strbuf_add(&sb, odb->path, len); - repo_init(&alternate, sb.buf, NULL); + if (repo_init(&alternate, sb.buf, NULL) < 0) + die(_("could not get a repository handle for gitdir '%s'"), + sb.buf); /* * We need to end the new path with '/' to mark it as a dir, @@ -1642,23 +1572,32 @@ static void prepare_possible_alternates(const char *sm_name, free(error_strategy); } -static int clone_submodule(struct module_clone_data *clone_data) +static char *clone_submodule_sm_gitdir(const char *name) { - char *p, *sm_gitdir; - char *sm_alternate = NULL, *error_strategy = NULL; struct strbuf sb = STRBUF_INIT; - struct child_process cp = CHILD_PROCESS_INIT; + char *sm_gitdir; - submodule_name_to_gitdir(&sb, the_repository, clone_data->name); + submodule_name_to_gitdir(&sb, the_repository, name); sm_gitdir = absolute_pathdup(sb.buf); - strbuf_reset(&sb); + strbuf_release(&sb); - if (!is_absolute_path(clone_data->path)) { - strbuf_addf(&sb, "%s/%s", get_git_work_tree(), clone_data->path); - clone_data->path = strbuf_detach(&sb, NULL); - } else { - clone_data->path = xstrdup(clone_data->path); - } + return sm_gitdir; +} + +static int clone_submodule(const struct module_clone_data *clone_data, + struct string_list *reference) +{ + char *p; + char *sm_gitdir = clone_submodule_sm_gitdir(clone_data->name); + char *sm_alternate = NULL, *error_strategy = NULL; + struct child_process cp = CHILD_PROCESS_INIT; + const char *clone_data_path; + + if (!is_absolute_path(clone_data->path)) + clone_data_path = xstrfmt("%s/%s", get_git_work_tree(), + clone_data->path); + else + clone_data_path = xstrdup(clone_data->path); if (validate_submodule_git_dir(sm_gitdir, clone_data->name) < 0) die(_("refusing to create/use '%s' in another submodule's " @@ -1668,7 +1607,7 @@ static int clone_submodule(struct module_clone_data *clone_data) if (safe_create_leading_directories_const(sm_gitdir) < 0) die(_("could not create directory '%s'"), sm_gitdir); - prepare_possible_alternates(clone_data->name, &clone_data->reference); + prepare_possible_alternates(clone_data->name, reference); strvec_push(&cp.args, "clone"); strvec_push(&cp.args, "--no-checkout"); @@ -1678,9 +1617,10 @@ static int clone_submodule(struct module_clone_data *clone_data) strvec_push(&cp.args, "--progress"); if (clone_data->depth && *(clone_data->depth)) strvec_pushl(&cp.args, "--depth", clone_data->depth, NULL); - if (clone_data->reference.nr) { + if (reference->nr) { struct string_list_item *item; - for_each_string_list_item(item, &clone_data->reference) + + for_each_string_list_item(item, reference) strvec_pushl(&cp.args, "--reference", item->string, NULL); } @@ -1699,7 +1639,7 @@ static int clone_submodule(struct module_clone_data *clone_data) strvec_push(&cp.args, "--"); strvec_push(&cp.args, clone_data->url); - strvec_push(&cp.args, clone_data->path); + strvec_push(&cp.args, clone_data_path); cp.git_cmd = 1; prepare_submodule_repo_env(&cp.env); @@ -1707,23 +1647,25 @@ 
static int clone_submodule(struct module_clone_data *clone_data) if(run_command(&cp)) die(_("clone of '%s' into submodule path '%s' failed"), - clone_data->url, clone_data->path); + clone_data->url, clone_data_path); } else { - if (clone_data->require_init && !access(clone_data->path, X_OK) && - !is_empty_dir(clone_data->path)) - die(_("directory not empty: '%s'"), clone_data->path); - if (safe_create_leading_directories_const(clone_data->path) < 0) - die(_("could not create directory '%s'"), clone_data->path); - strbuf_addf(&sb, "%s/index", sm_gitdir); - unlink_or_warn(sb.buf); - strbuf_reset(&sb); + char *path; + + if (clone_data->require_init && !access(clone_data_path, X_OK) && + !is_empty_dir(clone_data_path)) + die(_("directory not empty: '%s'"), clone_data_path); + if (safe_create_leading_directories_const(clone_data_path) < 0) + die(_("could not create directory '%s'"), clone_data_path); + path = xstrfmt("%s/index", sm_gitdir); + unlink_or_warn(path); + free(path); } - connect_work_tree_and_git_dir(clone_data->path, sm_gitdir, 0); + connect_work_tree_and_git_dir(clone_data_path, sm_gitdir, 0); - p = git_pathdup_submodule(clone_data->path, "config"); + p = git_pathdup_submodule(clone_data_path, "config"); if (!p) - die(_("could not get submodule directory for '%s'"), clone_data->path); + die(_("could not get submodule directory for '%s'"), clone_data_path); /* setup alternateLocation and alternateErrorStrategy in the cloned submodule if needed */ git_config_get_string("submodule.alternateLocation", &sm_alternate); @@ -1738,7 +1680,6 @@ static int clone_submodule(struct module_clone_data *clone_data) free(sm_alternate); free(error_strategy); - strbuf_release(&sb); free(sm_gitdir); free(p); return 0; @@ -1748,8 +1689,8 @@ static int module_clone(int argc, const char **argv, const char *prefix) { int dissociate = 0, quiet = 0, progress = 0, require_init = 0; struct module_clone_data clone_data = MODULE_CLONE_DATA_INIT; - struct list_objects_filter_options filter_options; - + struct list_objects_filter_options filter_options = { 0 }; + struct string_list reference = STRING_LIST_INIT_NODUP; struct option module_clone_options[] = { OPT_STRING(0, "prefix", &clone_data.prefix, N_("path"), @@ -1763,7 +1704,7 @@ static int module_clone(int argc, const char **argv, const char *prefix) OPT_STRING(0, "url", &clone_data.url, N_("string"), N_("url where to clone the submodule from")), - OPT_STRING_LIST(0, "reference", &clone_data.reference, + OPT_STRING_LIST(0, "reference", &reference, N_("repo"), N_("reference repository")), OPT_BOOL(0, "dissociate", &dissociate, @@ -1781,7 +1722,6 @@ static int module_clone(int argc, const char **argv, const char *prefix) OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options), OPT_END() }; - const char *const git_submodule_helper_usage[] = { N_("git submodule--helper clone [--prefix=<path>] [--quiet] " "[--reference <repository>] [--name <name>] [--depth <depth>] " @@ -1790,7 +1730,6 @@ static int module_clone(int argc, const char **argv, const char *prefix) NULL }; - memset(&filter_options, 0, sizeof(filter_options)); argc = parse_options(argc, argv, prefix, module_clone_options, git_submodule_helper_usage, 0); @@ -1804,29 +1743,32 @@ static int module_clone(int argc, const char **argv, const char *prefix) usage_with_options(git_submodule_helper_usage, module_clone_options); - clone_submodule(&clone_data); + clone_submodule(&clone_data, &reference); list_objects_filter_release(&filter_options); return 0; } -static void determine_submodule_update_strategy(struct 
repository *r, - int just_cloned, - const char *path, - enum submodule_update_type update, - struct submodule_update_strategy *out) +static int determine_submodule_update_strategy(struct repository *r, + int just_cloned, + const char *path, + enum submodule_update_type update, + struct submodule_update_strategy *out) { const struct submodule *sub = submodule_from_path(r, null_oid(), path); char *key; const char *val; + int ret; key = xstrfmt("submodule.%s.update", sub->name); if (update) { out->type = update; } else if (!repo_config_get_string_tmp(r, key, &val)) { - if (parse_submodule_update_strategy(val, out) < 0) - die(_("Invalid update mode '%s' configured for submodule path '%s'"), - val, path); + if (parse_submodule_update_strategy(val, out) < 0) { + ret = die_message(_("Invalid update mode '%s' configured for submodule path '%s'"), + val, path); + goto cleanup; + } } else if (sub->update_strategy.type != SM_UPDATE_UNSPECIFIED) { if (sub->update_strategy.type == SM_UPDATE_COMMAND) BUG("how did we read update = !command from .gitmodules?"); @@ -1841,7 +1783,10 @@ static void determine_submodule_update_strategy(struct repository *r, out->type == SM_UPDATE_NONE)) out->type = SM_UPDATE_CHECKOUT; + ret = 0; +cleanup: free(key); + return ret; } struct update_clone_data { @@ -1855,7 +1800,7 @@ struct submodule_update_clone { int current; /* configuration parameters which are passed on to the children */ - struct update_data *update_data; + const struct update_data *update_data; /* to be consumed by update_submodule() */ struct update_clone_data *update_clone; @@ -1940,7 +1885,7 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce, const char *update_string; enum submodule_update_type update_type; char *key; - struct update_data *ud = suc->update_data; + const struct update_data *ud = suc->update_data; char *displaypath = get_submodule_displaypath(ce->name, ud->prefix); struct strbuf sb = STRBUF_INIT; int needs_cloning = 0; @@ -2032,6 +1977,7 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce, strvec_pushl(&child->args, "--url", url, NULL); if (suc->update_data->references.nr) { struct string_list_item *item; + for_each_string_list_item(item, &suc->update_data->references) strvec_pushl(&child->args, "--reference", item->string, NULL); } @@ -2064,6 +2010,7 @@ static int update_clone_get_next_task(struct child_process *child, ce = suc->update_data->list.entries[suc->current]; if (prepare_to_clone_next_submodule(ce, child, suc, err)) { int *p = xmalloc(sizeof(*p)); + *p = suc->current; *idx_task_cb = p; suc->current++; @@ -2079,6 +2026,7 @@ static int update_clone_get_next_task(struct child_process *child, index = suc->current - suc->update_data->list.nr; if (index < suc->failed_clones_nr) { int *p; + ce = suc->failed_clones[index]; if (!prepare_to_clone_next_submodule(ce, child, suc, err)) { suc->current ++; @@ -2102,6 +2050,7 @@ static int update_clone_start_failure(struct strbuf *err, void *idx_task_cb) { struct submodule_update_clone *suc = suc_cb; + suc->quickstop = 1; return 1; } @@ -2113,9 +2062,9 @@ static int update_clone_task_finished(int result, { const struct cache_entry *ce; struct submodule_update_clone *suc = suc_cb; - int *idxP = idx_task_cb; int idx = *idxP; + free(idxP); if (!result) @@ -2148,12 +2097,13 @@ static int git_update_clone_config(const char *var, const char *value, void *cb) { int *max_jobs = cb; + if (!strcmp(var, "submodule.fetchjobs")) *max_jobs = parse_submodule_fetchjobs(var, value); return 0; } -static int 
is_tip_reachable(const char *path, struct object_id *oid) +static int is_tip_reachable(const char *path, const struct object_id *oid) { struct child_process cp = CHILD_PROCESS_INIT; struct strbuf rev = STRBUF_INIT; @@ -2172,7 +2122,8 @@ static int is_tip_reachable(const char *path, struct object_id *oid) return 1; } -static int fetch_in_submodule(const char *module_path, int depth, int quiet, struct object_id *oid) +static int fetch_in_submodule(const char *module_path, int depth, int quiet, + const struct object_id *oid) { struct child_process cp = CHILD_PROCESS_INIT; @@ -2188,6 +2139,7 @@ static int fetch_in_submodule(const char *module_path, int depth, int quiet, str if (oid) { char *hex = oid_to_hex(oid); char *remote = get_default_remote(); + strvec_pushl(&cp.args, remote, hex, NULL); free(remote); } @@ -2195,11 +2147,11 @@ static int fetch_in_submodule(const char *module_path, int depth, int quiet, str return run_command(&cp); } -static int run_update_command(struct update_data *ud, int subforce) +static int run_update_command(const struct update_data *ud, int subforce) { struct child_process cp = CHILD_PROCESS_INIT; char *oid = oid_to_hex(&ud->oid); - int must_die_on_failure = 0; + int ret; switch (ud->update_strategy.type) { case SM_UPDATE_CHECKOUT: @@ -2213,55 +2165,50 @@ static int run_update_command(struct update_data *ud, int subforce) strvec_push(&cp.args, "rebase"); if (ud->quiet) strvec_push(&cp.args, "--quiet"); - must_die_on_failure = 1; break; case SM_UPDATE_MERGE: cp.git_cmd = 1; strvec_push(&cp.args, "merge"); if (ud->quiet) strvec_push(&cp.args, "--quiet"); - must_die_on_failure = 1; break; case SM_UPDATE_COMMAND: cp.use_shell = 1; strvec_push(&cp.args, ud->update_strategy.command); - must_die_on_failure = 1; break; default: - BUG("unexpected update strategy type: %s", - submodule_strategy_to_string(&ud->update_strategy)); + BUG("unexpected update strategy type: %d", + ud->update_strategy.type); } strvec_push(&cp.args, oid); cp.dir = xstrdup(ud->sm_path); prepare_submodule_repo_env(&cp.env); - if (run_command(&cp)) { + if ((ret = run_command(&cp))) { switch (ud->update_strategy.type) { case SM_UPDATE_CHECKOUT: die_message(_("Unable to checkout '%s' in submodule path '%s'"), oid, ud->displaypath); + /* No "ret" assignment, use "git checkout"'s */ break; case SM_UPDATE_REBASE: - die_message(_("Unable to rebase '%s' in submodule path '%s'"), - oid, ud->displaypath); + ret = die_message(_("Unable to rebase '%s' in submodule path '%s'"), + oid, ud->displaypath); break; case SM_UPDATE_MERGE: - die_message(_("Unable to merge '%s' in submodule path '%s'"), - oid, ud->displaypath); + ret = die_message(_("Unable to merge '%s' in submodule path '%s'"), + oid, ud->displaypath); break; case SM_UPDATE_COMMAND: - die_message(_("Execution of '%s %s' failed in submodule path '%s'"), - ud->update_strategy.command, oid, ud->displaypath); + ret = die_message(_("Execution of '%s %s' failed in submodule path '%s'"), + ud->update_strategy.command, oid, ud->displaypath); break; default: - BUG("unexpected update strategy type: %s", - submodule_strategy_to_string(&ud->update_strategy)); + BUG("unexpected update strategy type: %d", + ud->update_strategy.type); } - if (must_die_on_failure) - exit(128); - /* the command failed, but update must continue */ - return 1; + return ret; } if (ud->quiet) @@ -2285,14 +2232,14 @@ static int run_update_command(struct update_data *ud, int subforce) ud->displaypath, ud->update_strategy.command, oid); break; default: - BUG("unexpected update strategy type: 
%s", - submodule_strategy_to_string(&ud->update_strategy)); + BUG("unexpected update strategy type: %d", + ud->update_strategy.type); } return 0; } -static int run_update_procedure(struct update_data *ud) +static int run_update_procedure(const struct update_data *ud) { int subforce = is_null_oid(&ud->suboid) || ud->force; @@ -2314,59 +2261,67 @@ static int run_update_procedure(struct update_data *ud) */ if (!is_tip_reachable(ud->sm_path, &ud->oid) && fetch_in_submodule(ud->sm_path, ud->depth, ud->quiet, &ud->oid)) - die(_("Fetched in submodule path '%s', but it did not " - "contain %s. Direct fetching of that commit failed."), - ud->displaypath, oid_to_hex(&ud->oid)); + return die_message(_("Fetched in submodule path '%s', but it did not " + "contain %s. Direct fetching of that commit failed."), + ud->displaypath, oid_to_hex(&ud->oid)); } return run_update_command(ud, subforce); } -static const char *remote_submodule_branch(const char *path) +static int remote_submodule_branch(const char *path, const char **branch) { const struct submodule *sub; - const char *branch = NULL; char *key; + *branch = NULL; sub = submodule_from_path(the_repository, null_oid(), path); if (!sub) - return NULL; + return die_message(_("could not initialize submodule at path '%s'"), + path); key = xstrfmt("submodule.%s.branch", sub->name); - if (repo_config_get_string_tmp(the_repository, key, &branch)) - branch = sub->branch; + if (repo_config_get_string_tmp(the_repository, key, branch)) + *branch = sub->branch; free(key); - if (!branch) - return "HEAD"; + if (!*branch) { + *branch = "HEAD"; + return 0; + } - if (!strcmp(branch, ".")) { + if (!strcmp(*branch, ".")) { const char *refname = resolve_ref_unsafe("HEAD", 0, NULL, NULL); if (!refname) - die(_("No such ref: %s"), "HEAD"); + return die_message(_("No such ref: %s"), "HEAD"); /* detached HEAD */ if (!strcmp(refname, "HEAD")) - die(_("Submodule (%s) branch configured to inherit " - "branch from superproject, but the superproject " - "is not on any branch"), sub->name); + return die_message(_("Submodule (%s) branch configured to inherit " + "branch from superproject, but the superproject " + "is not on any branch"), sub->name); if (!skip_prefix(refname, "refs/heads/", &refname)) - die(_("Expecting a full ref name, got %s"), refname); - return refname; + return die_message(_("Expecting a full ref name, got %s"), + refname); + + *branch = refname; + return 0; } - return branch; + /* Our "branch" is coming from repo_config_get_string_tmp() */ + return 0; } -static void ensure_core_worktree(const char *path) +static int ensure_core_worktree(const char *path) { const char *cw; struct repository subrepo; if (repo_submodule_init(&subrepo, the_repository, path, null_oid())) - die(_("could not get a repository handle for submodule '%s'"), path); + return die_message(_("could not get a repository handle for submodule '%s'"), + path); if (!repo_config_get_string_tmp(&subrepo, "core.worktree", &cw)) { char *cfg_file, *abs_path; @@ -2384,6 +2339,8 @@ static void ensure_core_worktree(const char *path) free(abs_path); strbuf_release(&sb); } + + return 0; } static const char *submodule_update_type_to_label(enum submodule_update_type type) @@ -2403,7 +2360,8 @@ static const char *submodule_update_type_to_label(enum submodule_update_type typ BUG("unreachable with type %d", type); } -static void update_data_to_args(struct update_data *update_data, struct strvec *args) +static void update_data_to_args(const struct update_data *update_data, + struct strvec *args) { enum 
submodule_update_type update_type = update_data->update_default; @@ -2437,6 +2395,7 @@ static void update_data_to_args(struct update_data *update_data, struct strvec * if (update_data->references.nr) { struct string_list_item *item; + for_each_string_list_item(item, &update_data->references) strvec_pushl(args, "--reference", item->string, NULL); } @@ -2456,48 +2415,66 @@ static void update_data_to_args(struct update_data *update_data, struct strvec * static int update_submodule(struct update_data *update_data) { - ensure_core_worktree(update_data->sm_path); + int ret; + + ret = ensure_core_worktree(update_data->sm_path); + if (ret) + return ret; update_data->displaypath = get_submodule_displaypath( update_data->sm_path, update_data->prefix); - determine_submodule_update_strategy(the_repository, update_data->just_cloned, - update_data->sm_path, update_data->update_default, - &update_data->update_strategy); + ret = determine_submodule_update_strategy(the_repository, + update_data->just_cloned, + update_data->sm_path, + update_data->update_default, + &update_data->update_strategy); + if (ret) + return ret; if (update_data->just_cloned) oidcpy(&update_data->suboid, null_oid()); else if (resolve_gitlink_ref(update_data->sm_path, "HEAD", &update_data->suboid)) - die(_("Unable to find current revision in submodule path '%s'"), - update_data->displaypath); + return die_message(_("Unable to find current revision in submodule path '%s'"), + update_data->displaypath); if (update_data->remote) { - char *remote_name = get_default_remote_submodule(update_data->sm_path); - const char *branch = remote_submodule_branch(update_data->sm_path); - char *remote_ref = xstrfmt("refs/remotes/%s/%s", remote_name, branch); + char *remote_name; + const char *branch; + char *remote_ref; + int code; + + code = get_default_remote_submodule(update_data->sm_path, &remote_name); + if (code) + return code; + code = remote_submodule_branch(update_data->sm_path, &branch); + if (code) + return code; + remote_ref = xstrfmt("refs/remotes/%s/%s", remote_name, branch); if (!update_data->nofetch) { if (fetch_in_submodule(update_data->sm_path, update_data->depth, 0, NULL)) - die(_("Unable to fetch in submodule path '%s'"), - update_data->sm_path); + return die_message(_("Unable to fetch in submodule path '%s'"), + update_data->sm_path); } if (resolve_gitlink_ref(update_data->sm_path, remote_ref, &update_data->oid)) - die(_("Unable to find %s revision in submodule path '%s'"), - remote_ref, update_data->sm_path); + return die_message(_("Unable to find %s revision in submodule path '%s'"), + remote_ref, update_data->sm_path); free(remote_ref); } - if (!oideq(&update_data->oid, &update_data->suboid) || update_data->force) - if (run_update_procedure(update_data)) - return 1; + if (!oideq(&update_data->oid, &update_data->suboid) || update_data->force) { + ret = run_update_procedure(update_data); + if (ret) + return ret; + } if (update_data->recursive) { struct child_process cp = CHILD_PROCESS_INIT; struct update_data next = *update_data; - int res; next.prefix = NULL; oidcpy(&next.oid, null_oid()); @@ -2508,16 +2485,11 @@ static int update_submodule(struct update_data *update_data) prepare_submodule_repo_env(&cp.env); update_data_to_args(&next, &cp.args); - /* die() if child process die()'d */ - res = run_command(&cp); - if (!res) - return 0; - die_message(_("Failed to recurse into submodule path '%s'"), - update_data->displaypath); - if (res == 128) - exit(res); - else if (res) - return 1; + ret = run_command(&cp); + if (ret) + 
die_message(_("Failed to recurse into submodule path '%s'"), + update_data->displaypath); + return ret; } return 0; @@ -2525,7 +2497,7 @@ static int update_submodule(struct update_data *update_data) static int update_submodules(struct update_data *update_data) { - int i, res = 0; + int i, ret = 0; struct submodule_update_clone suc = SUBMODULE_UPDATE_CLONE_INIT; suc.update_data = update_data; @@ -2543,33 +2515,37 @@ static int update_submodules(struct update_data *update_data) * - the listener can avoid doing any work if fetching failed. */ if (suc.quickstop) { - res = 1; + ret = 1; goto cleanup; } for (i = 0; i < suc.update_clone_nr; i++) { struct update_clone_data ucd = suc.update_clone[i]; + int code; oidcpy(&update_data->oid, &ucd.oid); update_data->just_cloned = ucd.just_cloned; update_data->sm_path = ucd.sub->path; - if (update_submodule(update_data)) - res = 1; + code = update_submodule(update_data); + if (!code) + continue; + ret = code; + if (ret == 128) + goto cleanup; } cleanup: string_list_clear(&update_data->references, 0); - return res; + return ret; } static int module_update(int argc, const char **argv, const char *prefix) { struct pathspec pathspec; struct update_data opt = UPDATE_DATA_INIT; - struct list_objects_filter_options filter_options; + struct list_objects_filter_options filter_options = { 0 }; int ret; - struct option module_update_options[] = { OPT__FORCE(&opt.force, N_("force checkout updates"), 0), OPT_BOOL(0, "init", &opt.init, @@ -2613,7 +2589,6 @@ static int module_update(int argc, const char **argv, const char *prefix) OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options), OPT_END() }; - const char *const git_submodule_helper_usage[] = { N_("git submodule [--quiet] update" " [--init [--filter=<filter-spec>]] [--remote]" @@ -2627,7 +2602,6 @@ static int module_update(int argc, const char **argv, const char *prefix) update_clone_config_from_gitmodules(&opt.max_jobs); git_config(git_update_clone_config, &opt.max_jobs); - memset(&filter_options, 0, sizeof(filter_options)); argc = parse_options(argc, argv, prefix, module_update_options, git_submodule_helper_usage, 0); @@ -2762,7 +2736,6 @@ static int absorb_git_dirs(int argc, const char **argv, const char *prefix) struct pathspec pathspec; struct module_list list = MODULE_LIST_INIT; unsigned flags = ABSORB_GITDIR_RECURSE_SUBMODULES; - struct option embed_gitdir_options[] = { OPT_STRING(0, "prefix", &prefix, N_("path"), @@ -2771,7 +2744,6 @@ static int absorb_git_dirs(int argc, const char **argv, const char *prefix) ABSORB_GITDIR_RECURSE_SUBMODULES), OPT_END() }; - const char *const git_submodule_helper_usage[] = { N_("git submodule absorbgitdirs [<options>] [<path>...]"), NULL @@ -2789,44 +2761,12 @@ static int absorb_git_dirs(int argc, const char **argv, const char *prefix) return 0; } -static int is_active(int argc, const char **argv, const char *prefix) -{ - if (argc != 2) - die("submodule--helper is-active takes exactly 1 argument"); - - return !is_submodule_active(the_repository, argv[1]); -} - -/* - * Exit non-zero if any of the submodule names given on the command line is - * invalid. If no names are given, filter stdin to print only valid names - * (which is primarily intended for testing). 
- */ -static int check_name(int argc, const char **argv, const char *prefix) -{ - if (argc > 1) { - while (*++argv) { - if (check_submodule_name(*argv) < 0) - return 1; - } - } else { - struct strbuf buf = STRBUF_INIT; - while (strbuf_getline(&buf, stdin) != EOF) { - if (!check_submodule_name(buf.buf)) - printf("%s\n", buf.buf); - } - strbuf_release(&buf); - } - return 0; -} - static int module_config(int argc, const char **argv, const char *prefix) { enum { CHECK_WRITEABLE = 1, DO_UNSET = 2 } command = 0; - struct option module_config_options[] = { OPT_CMDMODE(0, "check-writeable", &command, N_("check if it is safe to write to the .gitmodules file"), @@ -2872,7 +2812,6 @@ static int module_set_url(int argc, const char **argv, const char *prefix) const char *newurl; const char *path; char *config_name; - struct option options[] = { OPT__QUIET(&quiet, N_("suppress output for setting url of a submodule")), OPT_END() @@ -2903,13 +2842,13 @@ static int module_set_branch(int argc, const char **argv, const char *prefix) const char *opt_branch = NULL; const char *path; char *config_name; - - /* - * We accept the `quiet` option for uniformity across subcommands, - * though there is nothing to make less verbose in this subcommand. - */ struct option options[] = { + /* + * We accept the `quiet` option for uniformity across subcommands, + * though there is nothing to make less verbose in this subcommand. + */ OPT_NOOP_NOARG('q', "quiet"), + OPT_BOOL('d', "default", &opt_default, N_("set the default tracking branch to master")), OPT_STRING('b', "branch", &opt_branch, N_("branch"), @@ -2944,7 +2883,6 @@ static int module_create_branch(int argc, const char **argv, const char *prefix) { enum branch_track track; int quiet = 0, force = 0, reflog = 0, dry_run = 0; - struct option options[] = { OPT__QUIET(&quiet, N_("print only error messages")), OPT__FORCE(&force, N_("force creation"), 0), @@ -3007,8 +2945,10 @@ static void append_fetch_remotes(struct strbuf *msg, const char *git_dir_path) if (!capture_command(&cp_remote, &sb_remote_out, 0)) { char *next_line; char *line = sb_remote_out.buf; + while ((next_line = strchr(line, '\n')) != NULL) { size_t len = next_line - line; + if (strip_suffix_mem(line, &len, " (fetch)")) strbuf_addf(msg, " %.*s\n", (int)len, line); line = next_line + 1; @@ -3022,6 +2962,7 @@ static int add_submodule(const struct add_data *add_data) { char *submod_gitdir_path; struct module_clone_data clone_data = MODULE_CLONE_DATA_INIT; + struct string_list reference = STRING_LIST_INIT_NODUP; /* perhaps the path already exists and is already a git repo, else clone it */ if (is_directory(add_data->sm_path)) { @@ -3038,6 +2979,7 @@ static int add_submodule(const struct add_data *add_data) free(submod_gitdir_path); } else { struct child_process cp = CHILD_PROCESS_INIT; + submod_gitdir_path = xstrfmt(".git/modules/%s", add_data->sm_name); if (is_directory(submod_gitdir_path)) { @@ -3077,13 +3019,13 @@ static int add_submodule(const struct add_data *add_data) clone_data.quiet = add_data->quiet; clone_data.progress = add_data->progress; if (add_data->reference_path) - string_list_append(&clone_data.reference, + string_list_append(&reference, xstrdup(add_data->reference_path)); clone_data.dissociate = add_data->dissociate; if (add_data->depth >= 0) clone_data.depth = xstrfmt("%d", add_data->depth); - if (clone_submodule(&clone_data)) + if (clone_submodule(&clone_data, &reference)) return -1; prepare_submodule_repo_env(&cp.env); @@ -3169,7 +3111,7 @@ static void configure_added_submodule(struct 
add_data *add_data) * is_submodule_active(), since that function needs to find * out the value of "submodule.active" again anyway. */ - if (!git_config_get_string("submodule.active", &val) && val) { + if (!git_config_get_string("submodule.active", &val)) { /* * If the submodule being added isn't already covered by the * current configured pathspec, set the submodule's active flag @@ -3243,7 +3185,6 @@ static int module_add(int argc, const char **argv, const char *prefix) int force = 0, quiet = 0, progress = 0, dissociate = 0; struct add_data add_data = ADD_DATA_INIT; char *to_free = NULL; - struct option options[] = { OPT_STRING('b', "branch", &add_data.branch, N_("branch"), N_("branch of repository to add as submodule")), @@ -3260,7 +3201,6 @@ static int module_add(int argc, const char **argv, const char *prefix) OPT_INTEGER(0, "depth", &add_data.depth, N_("depth for shallow clones")), OPT_END() }; - const char *const usage[] = { N_("git submodule add [<options>] [--] <repository> [<path>]"), NULL @@ -3317,6 +3257,7 @@ static int module_add(int argc, const char **argv, const char *prefix) int exit_code = -1; struct strbuf sb = STRBUF_INIT; struct child_process cp = CHILD_PROCESS_INIT; + cp.git_cmd = 1; cp.no_stdout = 1; strvec_pushl(&cp.args, "add", "--dry-run", "--ignore-missing", @@ -3362,12 +3303,9 @@ struct cmd_struct { }; static struct cmd_struct commands[] = { - {"list", module_list, 0}, - {"name", module_name, 0}, {"clone", module_clone, SUPPORT_SUPER_PREFIX}, {"add", module_add, 0}, {"update", module_update, SUPPORT_SUPER_PREFIX}, - {"resolve-relative-url-test", resolve_relative_url_test, 0}, {"foreach", module_foreach, SUPPORT_SUPER_PREFIX}, {"init", module_init, 0}, {"status", module_status, SUPPORT_SUPER_PREFIX}, @@ -3376,8 +3314,6 @@ static struct cmd_struct commands[] = { {"summary", module_summary, 0}, {"push-check", push_check, 0}, {"absorbgitdirs", absorb_git_dirs, SUPPORT_SUPER_PREFIX}, - {"is-active", is_active, 0}, - {"check-name", check_name, 0}, {"config", module_config, 0}, {"set-url", module_set_url, 0}, {"set-branch", module_set_branch, 0}, diff --git a/builtin/worktree.c b/builtin/worktree.c index cd62eef240..c6710b2552 100644 --- a/builtin/worktree.c +++ b/builtin/worktree.c @@ -1112,31 +1112,24 @@ static int repair(int ac, const char **av, const char *prefix) int cmd_worktree(int ac, const char **av, const char *prefix) { + parse_opt_subcommand_fn *fn = NULL; struct option options[] = { + OPT_SUBCOMMAND("add", &fn, add), + OPT_SUBCOMMAND("prune", &fn, prune), + OPT_SUBCOMMAND("list", &fn, list), + OPT_SUBCOMMAND("lock", &fn, lock_worktree), + OPT_SUBCOMMAND("unlock", &fn, unlock_worktree), + OPT_SUBCOMMAND("move", &fn, move_worktree), + OPT_SUBCOMMAND("remove", &fn, remove_worktree), + OPT_SUBCOMMAND("repair", &fn, repair), OPT_END() }; git_config(git_worktree_config, NULL); - if (ac < 2) - usage_with_options(worktree_usage, options); if (!prefix) prefix = ""; - if (!strcmp(av[1], "add")) - return add(ac - 1, av + 1, prefix); - if (!strcmp(av[1], "prune")) - return prune(ac - 1, av + 1, prefix); - if (!strcmp(av[1], "list")) - return list(ac - 1, av + 1, prefix); - if (!strcmp(av[1], "lock")) - return lock_worktree(ac - 1, av + 1, prefix); - if (!strcmp(av[1], "unlock")) - return unlock_worktree(ac - 1, av + 1, prefix); - if (!strcmp(av[1], "move")) - return move_worktree(ac - 1, av + 1, prefix); - if (!strcmp(av[1], "remove")) - return remove_worktree(ac - 1, av + 1, prefix); - if (!strcmp(av[1], "repair")) - return repair(ac - 1, av + 1, prefix); - 
usage_with_options(worktree_usage, options); + + ac = parse_options(ac, av, prefix, options, worktree_usage, 0); + return fn(ac, av, prefix); } diff --git a/bundle-uri.c b/bundle-uri.c new file mode 100644 index 0000000000..4a8cc74ed0 --- /dev/null +++ b/bundle-uri.c @@ -0,0 +1,168 @@ +#include "cache.h" +#include "bundle-uri.h" +#include "bundle.h" +#include "object-store.h" +#include "refs.h" +#include "run-command.h" + +static int find_temp_filename(struct strbuf *name) +{ + int fd; + /* + * Find a temporary filename that is available. This is briefly + * racy, but unlikely to collide. + */ + fd = odb_mkstemp(name, "bundles/tmp_uri_XXXXXX"); + if (fd < 0) { + warning(_("failed to create temporary file")); + return -1; + } + + close(fd); + unlink(name->buf); + return 0; +} + +static int download_https_uri_to_file(const char *file, const char *uri) +{ + int result = 0; + struct child_process cp = CHILD_PROCESS_INIT; + FILE *child_in = NULL, *child_out = NULL; + struct strbuf line = STRBUF_INIT; + int found_get = 0; + + strvec_pushl(&cp.args, "git-remote-https", uri, NULL); + cp.in = -1; + cp.out = -1; + + if (start_command(&cp)) + return 1; + + child_in = fdopen(cp.in, "w"); + if (!child_in) { + result = 1; + goto cleanup; + } + + child_out = fdopen(cp.out, "r"); + if (!child_out) { + result = 1; + goto cleanup; + } + + fprintf(child_in, "capabilities\n"); + fflush(child_in); + + while (!strbuf_getline(&line, child_out)) { + if (!line.len) + break; + if (!strcmp(line.buf, "get")) + found_get = 1; + } + strbuf_release(&line); + + if (!found_get) { + result = error(_("insufficient capabilities")); + goto cleanup; + } + + fprintf(child_in, "get %s %s\n\n", uri, file); + +cleanup: + if (child_in) + fclose(child_in); + if (finish_command(&cp)) + return 1; + if (child_out) + fclose(child_out); + return result; +} + +static int copy_uri_to_file(const char *filename, const char *uri) +{ + const char *out; + + if (starts_with(uri, "https:") || + starts_with(uri, "http:")) + return download_https_uri_to_file(filename, uri); + + if (skip_prefix(uri, "file://", &out)) + uri = out; + + /* Copy as a file */ + return copy_file(filename, uri, 0); +} + +static int unbundle_from_file(struct repository *r, const char *file) +{ + int result = 0; + int bundle_fd; + struct bundle_header header = BUNDLE_HEADER_INIT; + struct string_list_item *refname; + struct strbuf bundle_ref = STRBUF_INIT; + size_t bundle_prefix_len; + + if ((bundle_fd = read_bundle_header(file, &header)) < 0) + return 1; + + if ((result = unbundle(r, &header, bundle_fd, NULL))) + return 1; + + /* + * Convert all refs/heads/ from the bundle into refs/bundles/ + * in the local repository. + */ + strbuf_addstr(&bundle_ref, "refs/bundles/"); + bundle_prefix_len = bundle_ref.len; + + for_each_string_list_item(refname, &header.references) { + struct object_id *oid = refname->util; + struct object_id old_oid; + const char *branch_name; + int has_old; + + if (!skip_prefix(refname->string, "refs/heads/", &branch_name)) + continue; + + strbuf_setlen(&bundle_ref, bundle_prefix_len); + strbuf_addstr(&bundle_ref, branch_name); + + has_old = !read_ref(bundle_ref.buf, &old_oid); + update_ref("fetched bundle", bundle_ref.buf, oid, + has_old ? 
&old_oid : NULL, + REF_SKIP_OID_VERIFICATION, + UPDATE_REFS_MSG_ON_ERR); + } + + bundle_header_release(&header); + return result; +} + +int fetch_bundle_uri(struct repository *r, const char *uri) +{ + int result = 0; + struct strbuf filename = STRBUF_INIT; + + if ((result = find_temp_filename(&filename))) + goto cleanup; + + if ((result = copy_uri_to_file(filename.buf, uri))) { + warning(_("failed to download bundle from URI '%s'"), uri); + goto cleanup; + } + + if ((result = !is_bundle(filename.buf, 0))) { + warning(_("file at URI '%s' is not a bundle"), uri); + goto cleanup; + } + + if ((result = unbundle_from_file(r, filename.buf))) { + warning(_("failed to unbundle bundle from URI '%s'"), uri); + goto cleanup; + } + +cleanup: + unlink(filename.buf); + strbuf_release(&filename); + return result; +} diff --git a/bundle-uri.h b/bundle-uri.h new file mode 100644 index 0000000000..8a152f1ef1 --- /dev/null +++ b/bundle-uri.h @@ -0,0 +1,14 @@ +#ifndef BUNDLE_URI_H +#define BUNDLE_URI_H + +struct repository; + +/** + * Fetch data from the given 'uri' and unbundle the bundle data found + * based on that information. + * + * Returns non-zero if no bundle information is found at the given 'uri'. + */ +int fetch_bundle_uri(struct repository *r, const char *uri); + +#endif diff --git a/cache-tree.c b/cache-tree.c index 56db0b5026..c97111cccf 100644 --- a/cache-tree.c +++ b/cache-tree.c @@ -857,9 +857,7 @@ int cache_tree_matches_traversal(struct cache_tree *root, return 0; } -static void verify_one_sparse(struct repository *r, - struct index_state *istate, - struct cache_tree *it, +static void verify_one_sparse(struct index_state *istate, struct strbuf *path, int pos) { @@ -910,7 +908,7 @@ static int verify_one(struct repository *r, return 1; if (pos >= 0) { - verify_one_sparse(r, istate, it, path, pos); + verify_one_sparse(istate, path, pos); return 0; } @@ -1016,7 +1016,6 @@ void reset_shared_repository(void); * commands that do not want replace references to be active. */ extern int read_replace_refs; -extern char *git_replace_ref_base; /* * These values are used to help identify parts of a repository to fsync. 
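The bundle-uri code added a little earlier in this patch (bundle-uri.c and bundle-uri.h) exposes a single entry point, fetch_bundle_uri(): it downloads the URI to a temporary file (through git-remote-https for http(s) URIs, a plain file copy otherwise), checks that the result really is a bundle, unbundles it, and mirrors the bundle's refs/heads/* under refs/bundles/*. A hedged sketch of a caller follows — the helper name and its fallback behaviour are invented for illustration and are not part of this patch:

#include "cache.h"
#include "bundle-uri.h"

/*
 * Hypothetical helper: try to seed the object store from a bundle
 * before a regular fetch. fetch_bundle_uri() returns non-zero when
 * the URI could not be downloaded or used as a bundle, so a caller
 * can simply warn and fall back to a normal fetch.
 */
void maybe_preload_from_bundle(struct repository *r, const char *uri)
{
	if (!uri || !*uri)
		return;
	if (fetch_bundle_uri(r, uri))
		warning(_("continuing without bundle from '%s'"), uri);
}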
diff --git a/commit-graph.c b/commit-graph.c index 97dd1001ec..06f7d9e0b6 100644 --- a/commit-graph.c +++ b/commit-graph.c @@ -901,7 +901,7 @@ struct commit *lookup_commit_in_graph(struct repository *repo, const struct obje struct commit *commit; uint32_t pos; - if (!repo->objects->commit_graph) + if (!prepare_commit_graph(repo)) return NULL; if (!search_commit_pos_in_graph(id, repo->objects->commit_graph, &pos)) return NULL; diff --git a/compat/disk.h b/compat/disk.h new file mode 100644 index 0000000000..50a32e3d8a --- /dev/null +++ b/compat/disk.h @@ -0,0 +1,56 @@ +#ifndef COMPAT_DISK_H +#define COMPAT_DISK_H + +#include "git-compat-util.h" + +static int get_disk_info(struct strbuf *out) +{ + struct strbuf buf = STRBUF_INIT; + int res = 0; + +#ifdef GIT_WINDOWS_NATIVE + char volume_name[MAX_PATH], fs_name[MAX_PATH]; + DWORD serial_number, component_length, flags; + ULARGE_INTEGER avail2caller, total, avail; + + strbuf_realpath(&buf, ".", 1); + if (!GetDiskFreeSpaceExA(buf.buf, &avail2caller, &total, &avail)) { + error(_("could not determine free disk size for '%s'"), + buf.buf); + res = -1; + goto cleanup; + } + + strbuf_setlen(&buf, offset_1st_component(buf.buf)); + if (!GetVolumeInformationA(buf.buf, volume_name, sizeof(volume_name), + &serial_number, &component_length, &flags, + fs_name, sizeof(fs_name))) { + error(_("could not get info for '%s'"), buf.buf); + res = -1; + goto cleanup; + } + strbuf_addf(out, "Available space on '%s': ", buf.buf); + strbuf_humanise_bytes(out, avail2caller.QuadPart); + strbuf_addch(out, '\n'); +#else + struct statvfs stat; + + strbuf_realpath(&buf, ".", 1); + if (statvfs(buf.buf, &stat) < 0) { + error_errno(_("could not determine free disk size for '%s'"), + buf.buf); + res = -1; + goto cleanup; + } + + strbuf_addf(out, "Available space on '%s': ", buf.buf); + strbuf_humanise_bytes(out, (off_t)stat.f_bsize * (off_t)stat.f_bavail); + strbuf_addf(out, " (mount flags 0x%lx)\n", stat.f_flag); +#endif + +cleanup: + strbuf_release(&buf); + return res; +} + +#endif /* COMPAT_DISK_H */ diff --git a/compat/fsmonitor/fsm-settings-win32.c b/compat/fsmonitor/fsm-settings-win32.c index 907655720b..e5ec5b0a9f 100644 --- a/compat/fsmonitor/fsm-settings-win32.c +++ b/compat/fsmonitor/fsm-settings-win32.c @@ -25,6 +25,59 @@ static enum fsmonitor_reason check_vfs4git(struct repository *r) } /* + * Check if monitoring remote working directories is allowed. + * + * By default, monitoring remote working directories is + * disabled. Users may override this behavior in enviroments where + * they have proper support. + */ +static int check_config_allowremote(struct repository *r) +{ + int allow; + + if (!repo_config_get_bool(r, "fsmonitor.allowremote", &allow)) + return allow; + + return -1; /* fsmonitor.allowremote not set */ +} + +/* + * Check remote working directory protocol. + * + * Error if client machine cannot get remote protocol information. 
+ */ +static int check_remote_protocol(wchar_t *wpath) +{ + HANDLE h; + FILE_REMOTE_PROTOCOL_INFO proto_info; + + h = CreateFileW(wpath, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, + FILE_FLAG_BACKUP_SEMANTICS, NULL); + + if (h == INVALID_HANDLE_VALUE) { + error(_("[GLE %ld] unable to open for read '%ls'"), + GetLastError(), wpath); + return -1; + } + + if (!GetFileInformationByHandleEx(h, FileRemoteProtocolInfo, + &proto_info, sizeof(proto_info))) { + error(_("[GLE %ld] unable to get protocol information for '%ls'"), + GetLastError(), wpath); + CloseHandle(h); + return -1; + } + + CloseHandle(h); + + trace_printf_key(&trace_fsmonitor, + "check_remote_protocol('%ls') remote protocol %#8.8lx", + wpath, proto_info.Protocol); + + return 0; +} + +/* * Remote working directories are problematic for FSMonitor. * * The underlying file system on the server machine and/or the remote @@ -76,6 +129,7 @@ static enum fsmonitor_reason check_vfs4git(struct repository *r) */ static enum fsmonitor_reason check_remote(struct repository *r) { + int ret; wchar_t wpath[MAX_PATH]; wchar_t wfullpath[MAX_PATH]; size_t wlen; @@ -115,6 +169,20 @@ static enum fsmonitor_reason check_remote(struct repository *r) trace_printf_key(&trace_fsmonitor, "check_remote('%s') true", r->worktree); + + ret = check_remote_protocol(wfullpath); + if (ret < 0) + return FSMONITOR_REASON_ERROR; + + switch (check_config_allowremote(r)) { + case 0: /* config overrides and disables */ + return FSMONITOR_REASON_REMOTE; + case 1: /* config overrides and enables */ + return FSMONITOR_REASON_OK; + default: + break; /* config has no opinion */ + } + return FSMONITOR_REASON_REMOTE; } diff --git a/compat/nonblock.c b/compat/nonblock.c new file mode 100644 index 0000000000..9694ebdb1d --- /dev/null +++ b/compat/nonblock.c @@ -0,0 +1,50 @@ +#include "git-compat-util.h" +#include "nonblock.h" + +#ifdef O_NONBLOCK + +int enable_pipe_nonblock(int fd) +{ + int flags = fcntl(fd, F_GETFL); + if (flags < 0) + return -1; + flags |= O_NONBLOCK; + return fcntl(fd, F_SETFL, flags); +} + +#elif defined(GIT_WINDOWS_NATIVE) + +#include "win32.h" + +int enable_pipe_nonblock(int fd) +{ + HANDLE h = (HANDLE)_get_osfhandle(fd); + DWORD mode; + DWORD type = GetFileType(h); + if (type == FILE_TYPE_UNKNOWN && GetLastError() != NO_ERROR) { + errno = EBADF; + return -1; + } + if (type != FILE_TYPE_PIPE) + BUG("unsupported file type: %lu", type); + if (!GetNamedPipeHandleState(h, &mode, NULL, NULL, NULL, NULL, 0)) { + errno = err_win_to_posix(GetLastError()); + return -1; + } + mode |= PIPE_NOWAIT; + if (!SetNamedPipeHandleState(h, &mode, NULL, NULL)) { + errno = err_win_to_posix(GetLastError()); + return -1; + } + return 0; +} + +#else + +int enable_pipe_nonblock(int fd) +{ + errno = ENOSYS; + return -1; +} + +#endif diff --git a/compat/nonblock.h b/compat/nonblock.h new file mode 100644 index 0000000000..af1a331301 --- /dev/null +++ b/compat/nonblock.h @@ -0,0 +1,9 @@ +#ifndef COMPAT_NONBLOCK_H +#define COMPAT_NONBLOCK_H + +/* + * Enable non-blocking I/O for the pipe specified by the passed-in descriptor. + */ +int enable_pipe_nonblock(int fd); + +#endif diff --git a/configure.ac b/configure.ac index 7dcd048204..38ff86678a 100644 --- a/configure.ac +++ b/configure.ac @@ -237,9 +237,6 @@ AC_MSG_NOTICE([CHECKS for site configuration]) # tests. These tests take up a significant amount of the total test time # but are not needed unless you plan to talk to SVN repos. 
# -# Define PPC_SHA1 environment variable when running make to make use of -# a bundled SHA1 routine optimized for PowerPC. -# # Define NO_OPENSSL environment variable if you do not have OpenSSL. # # Define OPENSSLDIR=/foo/bar if your openssl header and library files are in diff --git a/contrib/completion/git-prompt.sh b/contrib/completion/git-prompt.sh index 1435548e00..57972c2845 100644 --- a/contrib/completion/git-prompt.sh +++ b/contrib/completion/git-prompt.sh @@ -84,6 +84,10 @@ # single '?' character by setting GIT_PS1_COMPRESSSPARSESTATE, or omitted # by setting GIT_PS1_OMITSPARSESTATE. # +# If you would like to see a notification on the prompt when there are +# unresolved conflicts, set GIT_PS1_SHOWCONFLICTSTATE to "yes". The +# prompt will include "|CONFLICT". +# # If you would like to see more information about the identity of # commits checked out as a detached HEAD, set GIT_PS1_DESCRIBE_STYLE # to one of these values: @@ -508,6 +512,12 @@ __git_ps1 () r="$r $step/$total" fi + local conflict="" # state indicator for unresolved conflicts + if [[ "${GIT_PS1_SHOWCONFLICTSTATE}" == "yes" ]] && + [[ $(git ls-files --unmerged 2>/dev/null) ]]; then + conflict="|CONFLICT" + fi + local w="" local i="" local s="" @@ -572,7 +582,7 @@ __git_ps1 () fi local f="$h$w$i$s$u$p" - local gitstring="$c$b${f:+$z$f}${sparse}$r${upstream}" + local gitstring="$c$b${f:+$z$f}${sparse}$r${upstream}${conflict}" if [ $pcmode = yes ]; then if [ "${__git_printf_supports_v-}" != yes ]; then diff --git a/contrib/scalar/scalar.c b/contrib/scalar/scalar.c index 97e71fe19c..642d16124e 100644 --- a/contrib/scalar/scalar.c +++ b/contrib/scalar/scalar.c @@ -7,27 +7,13 @@ #include "parse-options.h" #include "config.h" #include "run-command.h" +#include "simple-ipc.h" +#include "fsmonitor-ipc.h" +#include "fsmonitor-settings.h" #include "refs.h" #include "dir.h" #include "packfile.h" #include "help.h" -#include "archive.h" -#include "object-store.h" - -/* - * Remove the deepest subdirectory in the provided path string. Path must not - * include a trailing path separator. Returns 1 if parent directory found, - * otherwise 0. - */ -static int strbuf_parent_directory(struct strbuf *buf) -{ - size_t len = buf->len; - size_t offset = offset_1st_component(buf->buf); - char *path_sep = find_last_dir_sep(buf->buf + offset); - strbuf_setlen(buf, path_sep ? 
path_sep - buf->buf : offset); - - return buf->len < len; -} static void setup_enlistment_directory(int argc, const char **argv, const char * const *usagestr, @@ -35,8 +21,8 @@ static void setup_enlistment_directory(int argc, const char **argv, struct strbuf *enlistment_root) { struct strbuf path = STRBUF_INIT; - char *root; - int enlistment_found = 0; + int enlistment_is_repo_parent = 0; + size_t len; if (startup_info->have_repository) BUG("gitdir already set up?!?"); @@ -49,51 +35,36 @@ static void setup_enlistment_directory(int argc, const char **argv, strbuf_add_absolute_path(&path, argv[0]); if (!is_directory(path.buf)) die(_("'%s' does not exist"), path.buf); + if (chdir(path.buf) < 0) + die_errno(_("could not switch to '%s'"), path.buf); } else if (strbuf_getcwd(&path) < 0) die(_("need a working directory")); strbuf_trim_trailing_dir_sep(&path); - do { - const size_t len = path.len; - - /* check if currently in enlistment root with src/ workdir */ - strbuf_addstr(&path, "/src"); - if (is_nonbare_repository_dir(&path)) { - if (enlistment_root) - strbuf_add(enlistment_root, path.buf, len); - enlistment_found = 1; - break; - } - - /* reset to original path */ - strbuf_setlen(&path, len); - - /* check if currently in workdir */ - if (is_nonbare_repository_dir(&path)) { - if (enlistment_root) { - /* - * If the worktree's directory's name is `src`, the enlistment is the - * parent directory, otherwise it is identical to the worktree. - */ - root = strip_path_suffix(path.buf, "src"); - strbuf_addstr(enlistment_root, root ? root : path.buf); - free(root); - } + /* check if currently in enlistment root with src/ workdir */ + len = path.len; + strbuf_addstr(&path, "/src"); + if (is_nonbare_repository_dir(&path)) { + enlistment_is_repo_parent = 1; + if (chdir(path.buf) < 0) + die_errno(_("could not switch to '%s'"), path.buf); + } + strbuf_setlen(&path, len); - enlistment_found = 1; - break; - } - } while (strbuf_parent_directory(&path)); + setup_git_directory(); - if (!enlistment_found) - die(_("could not find enlistment root")); + if (!the_repository->worktree) + die(_("Scalar enlistments require a worktree")); - if (chdir(path.buf) < 0) - die_errno(_("could not switch to '%s'"), path.buf); + if (enlistment_root) { + if (enlistment_is_repo_parent) + strbuf_addbuf(enlistment_root, &path); + else + strbuf_addstr(enlistment_root, the_repository->worktree); + } strbuf_release(&path); - setup_git_directory(); } static int run_git(const char *arg, ...) @@ -115,13 +86,39 @@ static int run_git(const char *arg, ...) 
return res; } +struct scalar_config { + const char *key; + const char *value; + int overwrite_on_reconfigure; +}; + +static int set_scalar_config(const struct scalar_config *config, int reconfigure) +{ + char *value = NULL; + int res; + + if ((reconfigure && config->overwrite_on_reconfigure) || + git_config_get_string(config->key, &value)) { + trace2_data_string("scalar", the_repository, config->key, "created"); + res = git_config_set_gently(config->key, config->value); + } else { + trace2_data_string("scalar", the_repository, config->key, "exists"); + res = 0; + } + + free(value); + return res; +} + +static int have_fsmonitor_support(void) +{ + return fsmonitor_ipc__is_supported() && + fsm_settings__get_reason(the_repository) == FSMONITOR_REASON_OK; +} + static int set_recommended_config(int reconfigure) { - struct { - const char *key; - const char *value; - int overwrite_on_reconfigure; - } config[] = { + struct scalar_config config[] = { /* Required */ { "am.keepCR", "true", 1 }, { "core.FSCache", "true", 1 }, @@ -175,17 +172,16 @@ static int set_recommended_config(int reconfigure) char *value; for (i = 0; config[i].key; i++) { - if ((reconfigure && config[i].overwrite_on_reconfigure) || - git_config_get_string(config[i].key, &value)) { - trace2_data_string("scalar", the_repository, config[i].key, "created"); - if (git_config_set_gently(config[i].key, - config[i].value) < 0) - return error(_("could not configure %s=%s"), - config[i].key, config[i].value); - } else { - trace2_data_string("scalar", the_repository, config[i].key, "exists"); - free(value); - } + if (set_scalar_config(config + i, reconfigure)) + return error(_("could not configure %s=%s"), + config[i].key, config[i].value); + } + + if (have_fsmonitor_support()) { + struct scalar_config fsmonitor = { "core.fsmonitor", "true" }; + if (set_scalar_config(&fsmonitor, reconfigure)) + return error(_("could not configure %s=%s"), + fsmonitor.key, fsmonitor.value); } /* @@ -236,123 +232,55 @@ static int add_or_remove_enlistment(int add) "scalar.repo", the_repository->worktree, NULL); } -static int register_dir(void) +static int start_fsmonitor_daemon(void) { - int res = add_or_remove_enlistment(1); - - if (!res) - res = set_recommended_config(0); + assert(have_fsmonitor_support()); - if (!res) - res = toggle_maintenance(1); + if (fsmonitor_ipc__get_state() != IPC_STATE__LISTENING) + return run_git("fsmonitor--daemon", "start", NULL); - return res; + return 0; } -static int unregister_dir(void) +static int stop_fsmonitor_daemon(void) { - int res = 0; + assert(have_fsmonitor_support()); - if (toggle_maintenance(0) < 0) - res = -1; + if (fsmonitor_ipc__get_state() == IPC_STATE__LISTENING) + return run_git("fsmonitor--daemon", "stop", NULL); - if (add_or_remove_enlistment(0) < 0) - res = -1; - - return res; + return 0; } -static int add_directory_to_archiver(struct strvec *archiver_args, - const char *path, int recurse) +static int register_dir(void) { - int at_root = !*path; - DIR *dir = opendir(at_root ? "." 
: path); - struct dirent *e; - struct strbuf buf = STRBUF_INIT; - size_t len; - int res = 0; + if (add_or_remove_enlistment(1)) + return error(_("could not add enlistment")); - if (!dir) - return error_errno(_("could not open directory '%s'"), path); - - if (!at_root) - strbuf_addf(&buf, "%s/", path); - len = buf.len; - strvec_pushf(archiver_args, "--prefix=%s", buf.buf); - - while (!res && (e = readdir(dir))) { - if (!strcmp(".", e->d_name) || !strcmp("..", e->d_name)) - continue; - - strbuf_setlen(&buf, len); - strbuf_addstr(&buf, e->d_name); - - if (e->d_type == DT_REG) - strvec_pushf(archiver_args, "--add-file=%s", buf.buf); - else if (e->d_type != DT_DIR) - warning(_("skipping '%s', which is neither file nor " - "directory"), buf.buf); - else if (recurse && - add_directory_to_archiver(archiver_args, - buf.buf, recurse) < 0) - res = -1; + if (set_recommended_config(0)) + return error(_("could not set recommended config")); + + if (toggle_maintenance(1)) + return error(_("could not turn on maintenance")); + + if (have_fsmonitor_support() && start_fsmonitor_daemon()) { + return error(_("could not start the FSMonitor daemon")); } - closedir(dir); - strbuf_release(&buf); - return res; + return 0; } -#ifndef WIN32 -#include <sys/statvfs.h> -#endif - -static int get_disk_info(struct strbuf *out) +static int unregister_dir(void) { -#ifdef WIN32 - struct strbuf buf = STRBUF_INIT; - char volume_name[MAX_PATH], fs_name[MAX_PATH]; - DWORD serial_number, component_length, flags; - ULARGE_INTEGER avail2caller, total, avail; - - strbuf_realpath(&buf, ".", 1); - if (!GetDiskFreeSpaceExA(buf.buf, &avail2caller, &total, &avail)) { - error(_("could not determine free disk size for '%s'"), - buf.buf); - strbuf_release(&buf); - return -1; - } + int res = 0; - strbuf_setlen(&buf, offset_1st_component(buf.buf)); - if (!GetVolumeInformationA(buf.buf, volume_name, sizeof(volume_name), - &serial_number, &component_length, &flags, - fs_name, sizeof(fs_name))) { - error(_("could not get info for '%s'"), buf.buf); - strbuf_release(&buf); - return -1; - } - strbuf_addf(out, "Available space on '%s': ", buf.buf); - strbuf_humanise_bytes(out, avail2caller.QuadPart); - strbuf_addch(out, '\n'); - strbuf_release(&buf); -#else - struct strbuf buf = STRBUF_INIT; - struct statvfs stat; + if (toggle_maintenance(0)) + res = error(_("could not turn off maintenance")); - strbuf_realpath(&buf, ".", 1); - if (statvfs(buf.buf, &stat) < 0) { - error_errno(_("could not determine free disk size for '%s'"), - buf.buf); - strbuf_release(&buf); - return -1; - } + if (add_or_remove_enlistment(0)) + res = error(_("could not remove enlistment")); - strbuf_addf(out, "Available space on '%s': ", buf.buf); - strbuf_humanise_bytes(out, st_mult(stat.f_bsize, stat.f_bavail)); - strbuf_addf(out, " (mount flags 0x%lx)\n", stat.f_flag); - strbuf_release(&buf); -#endif - return 0; + return res; } /* printf-style interface, expects `<key>=<value>` argument */ @@ -431,25 +359,35 @@ static int delete_enlistment(struct strbuf *enlistment) { #ifdef WIN32 struct strbuf parent = STRBUF_INIT; + size_t offset; + char *path_sep; #endif if (unregister_dir()) - die(_("failed to unregister repository")); + return error(_("failed to unregister repository")); #ifdef WIN32 /* * Change the current directory to one outside of the enlistment so * that we may delete everything underneath it. 
*/ - strbuf_addbuf(&parent, enlistment); - strbuf_parent_directory(&parent); - if (chdir(parent.buf) < 0) - die_errno(_("could not switch to '%s'"), parent.buf); + offset = offset_1st_component(enlistment->buf); + path_sep = find_last_dir_sep(enlistment->buf + offset); + strbuf_add(&parent, enlistment->buf, + path_sep ? path_sep - enlistment->buf : offset); + if (chdir(parent.buf) < 0) { + int res = error_errno(_("could not switch to '%s'"), parent.buf); + strbuf_release(&parent); + return res; + } strbuf_release(&parent); #endif + if (have_fsmonitor_support() && stop_fsmonitor_daemon()) + return error(_("failed to stop the FSMonitor daemon")); + if (remove_dir_recursively(enlistment, 0)) - die(_("failed to delete enlistment directory")); + return error(_("failed to delete enlistment directory")); return 0; } @@ -595,83 +533,6 @@ cleanup: return res; } -static void dir_file_stats_objects(const char *full_path, size_t full_path_len, - const char *file_name, void *data) -{ - struct strbuf *buf = data; - struct stat st; - - if (!stat(full_path, &st)) - strbuf_addf(buf, "%-70s %16" PRIuMAX "\n", file_name, - (uintmax_t)st.st_size); -} - -static int dir_file_stats(struct object_directory *object_dir, void *data) -{ - struct strbuf *buf = data; - - strbuf_addf(buf, "Contents of %s:\n", object_dir->path); - - for_each_file_in_pack_dir(object_dir->path, dir_file_stats_objects, - data); - - return 0; -} - -static int count_files(char *path) -{ - DIR *dir = opendir(path); - struct dirent *e; - int count = 0; - - if (!dir) - return 0; - - while ((e = readdir(dir)) != NULL) - if (!is_dot_or_dotdot(e->d_name) && e->d_type == DT_REG) - count++; - - closedir(dir); - return count; -} - -static void loose_objs_stats(struct strbuf *buf, const char *path) -{ - DIR *dir = opendir(path); - struct dirent *e; - int count; - int total = 0; - unsigned char c; - struct strbuf count_path = STRBUF_INIT; - size_t base_path_len; - - if (!dir) - return; - - strbuf_addstr(buf, "Object directory stats for "); - strbuf_add_absolute_path(buf, path); - strbuf_addstr(buf, ":\n"); - - strbuf_add_absolute_path(&count_path, path); - strbuf_addch(&count_path, '/'); - base_path_len = count_path.len; - - while ((e = readdir(dir)) != NULL) - if (!is_dot_or_dotdot(e->d_name) && - e->d_type == DT_DIR && strlen(e->d_name) == 2 && - !hex_to_bytes(&c, e->d_name, 1)) { - strbuf_setlen(&count_path, base_path_len); - strbuf_addstr(&count_path, e->d_name); - total += (count = count_files(count_path.buf)); - strbuf_addf(buf, "%s : %7d files\n", e->d_name, count); - } - - strbuf_addf(buf, "Total: %d loose objects", total); - - strbuf_release(&count_path); - closedir(dir); -} - static int cmd_diagnose(int argc, const char **argv) { struct option options[] = { @@ -681,106 +542,19 @@ static int cmd_diagnose(int argc, const char **argv) N_("scalar diagnose [<enlistment>]"), NULL }; - struct strbuf zip_path = STRBUF_INIT; - struct strvec archiver_args = STRVEC_INIT; - char **argv_copy = NULL; - int stdout_fd = -1, archiver_fd = -1; - time_t now = time(NULL); - struct tm tm; - struct strbuf buf = STRBUF_INIT; + struct strbuf diagnostics_root = STRBUF_INIT; int res = 0; argc = parse_options(argc, argv, NULL, options, usage, 0); - setup_enlistment_directory(argc, argv, usage, options, &zip_path); - - strbuf_addstr(&zip_path, "/.scalarDiagnostics/scalar_"); - strbuf_addftime(&zip_path, - "%Y%m%d_%H%M%S", localtime_r(&now, &tm), 0, 0); - strbuf_addstr(&zip_path, ".zip"); - switch (safe_create_leading_directories(zip_path.buf)) { - case SCLD_EXISTS: - 
case SCLD_OK: - break; - default: - error_errno(_("could not create directory for '%s'"), - zip_path.buf); - goto diagnose_cleanup; - } - stdout_fd = dup(1); - if (stdout_fd < 0) { - res = error_errno(_("could not duplicate stdout")); - goto diagnose_cleanup; - } - - archiver_fd = xopen(zip_path.buf, O_CREAT | O_WRONLY | O_TRUNC, 0666); - if (archiver_fd < 0 || dup2(archiver_fd, 1) < 0) { - res = error_errno(_("could not redirect output")); - goto diagnose_cleanup; - } - - init_zip_archiver(); - strvec_pushl(&archiver_args, "scalar-diagnose", "--format=zip", NULL); + setup_enlistment_directory(argc, argv, usage, options, &diagnostics_root); + strbuf_addstr(&diagnostics_root, "/.scalarDiagnostics"); - strbuf_reset(&buf); - strbuf_addstr(&buf, "Collecting diagnostic info\n\n"); - get_version_info(&buf, 1); - - strbuf_addf(&buf, "Enlistment root: %s\n", the_repository->worktree); - get_disk_info(&buf); - write_or_die(stdout_fd, buf.buf, buf.len); - strvec_pushf(&archiver_args, - "--add-virtual-file=diagnostics.log:%.*s", - (int)buf.len, buf.buf); - - strbuf_reset(&buf); - strbuf_addstr(&buf, "--add-virtual-file=packs-local.txt:"); - dir_file_stats(the_repository->objects->odb, &buf); - foreach_alt_odb(dir_file_stats, &buf); - strvec_push(&archiver_args, buf.buf); - - strbuf_reset(&buf); - strbuf_addstr(&buf, "--add-virtual-file=objects-local.txt:"); - loose_objs_stats(&buf, ".git/objects"); - strvec_push(&archiver_args, buf.buf); - - if ((res = add_directory_to_archiver(&archiver_args, ".git", 0)) || - (res = add_directory_to_archiver(&archiver_args, ".git/hooks", 0)) || - (res = add_directory_to_archiver(&archiver_args, ".git/info", 0)) || - (res = add_directory_to_archiver(&archiver_args, ".git/logs", 1)) || - (res = add_directory_to_archiver(&archiver_args, ".git/objects/info", 0))) - goto diagnose_cleanup; - - strvec_pushl(&archiver_args, "--prefix=", - oid_to_hex(the_hash_algo->empty_tree), "--", NULL); - - /* `write_archive()` modifies the `argv` passed to it. Let it. */ - argv_copy = xmemdupz(archiver_args.v, - sizeof(char *) * archiver_args.nr); - res = write_archive(archiver_args.nr, (const char **)argv_copy, NULL, - the_repository, NULL, 0); - if (res) { - error(_("failed to write archive")); - goto diagnose_cleanup; - } - - if (!res) - fprintf(stderr, "\n" - "Diagnostics complete.\n" - "All of the gathered info is captured in '%s'\n", - zip_path.buf); - -diagnose_cleanup: - if (archiver_fd >= 0) { - close(1); - dup2(stdout_fd, 1); - } - free(argv_copy); - strvec_clear(&archiver_args); - strbuf_release(&zip_path); - strbuf_release(&buf); + res = run_git("diagnose", "--mode=all", "-s", "%Y%m%d_%H%M%S", + "-o", diagnostics_root.buf, NULL); + strbuf_release(&diagnostics_root); return res; } diff --git a/contrib/scalar/t/t9099-scalar.sh b/contrib/scalar/t/t9099-scalar.sh index 10b1172a8a..dfb949f52e 100755 --- a/contrib/scalar/t/t9099-scalar.sh +++ b/contrib/scalar/t/t9099-scalar.sh @@ -17,6 +17,99 @@ test_expect_success 'scalar shows a usage' ' test_expect_code 129 scalar -h ' +test_expect_success 'scalar invoked on enlistment root' ' + test_when_finished rm -rf test src deeper && + + for enlistment_root in test src deeper/test + do + git init ${enlistment_root}/src && + + # Register + scalar register ${enlistment_root} && + scalar list >out && + grep "$(pwd)/${enlistment_root}/src\$" out && + + # Delete (including enlistment root) + scalar delete $enlistment_root && + test_path_is_missing $enlistment_root && + scalar list >out && + ! 
grep "^$(pwd)/${enlistment_root}/src\$" out || return 1 + done +' + +test_expect_success 'scalar invoked on enlistment src repo' ' + test_when_finished rm -rf test src deeper && + + for enlistment_root in test src deeper/test + do + git init ${enlistment_root}/src && + + # Register + scalar register ${enlistment_root}/src && + scalar list >out && + grep "$(pwd)/${enlistment_root}/src\$" out && + + # Delete (will not include enlistment root) + scalar delete ${enlistment_root}/src && + test_path_is_dir $enlistment_root && + scalar list >out && + ! grep "^$(pwd)/${enlistment_root}/src\$" out || return 1 + done +' + +test_expect_success 'scalar invoked when enlistment root and repo are the same' ' + test_when_finished rm -rf test src deeper && + + for enlistment_root in test src deeper/test + do + git init ${enlistment_root} && + + # Register + scalar register ${enlistment_root} && + scalar list >out && + grep "$(pwd)/${enlistment_root}\$" out && + + # Delete (will not include enlistment root) + scalar delete ${enlistment_root} && + test_path_is_missing $enlistment_root && + scalar list >out && + ! grep "^$(pwd)/${enlistment_root}\$" out && + + # Make sure we did not accidentally delete the trash dir + test_path_is_dir "$TRASH_DIRECTORY" || return 1 + done +' + +test_expect_success 'scalar repo search respects GIT_CEILING_DIRECTORIES' ' + test_when_finished rm -rf test && + + git init test/src && + mkdir -p test/src/deep && + GIT_CEILING_DIRECTORIES="$(pwd)/test/src" && + ! scalar register test/src/deep 2>err && + grep "not a git repository" err +' + +test_expect_success 'scalar enlistments need a worktree' ' + test_when_finished rm -rf bare test && + + git init --bare bare/src && + ! scalar register bare/src 2>err && + grep "Scalar enlistments require a worktree" err && + + git init test/src && + ! 
scalar register test/src/.git 2>err && + grep "Scalar enlistments require a worktree" err +' + +test_expect_success FSMONITOR_DAEMON 'scalar register starts fsmon daemon' ' + git init test/src && + test_must_fail git -C test/src fsmonitor--daemon status && + scalar register test/src && + git -C test/src fsmonitor--daemon status && + test_cmp_config -C test/src true core.fsmonitor +' + test_expect_success 'scalar unregister' ' git init vanish/src && scalar register vanish/src && @@ -109,14 +202,14 @@ test_expect_success UNZIP 'scalar diagnose' ' sed -n "s/.*$SQ\\(.*\\.zip\\)$SQ.*/\\1/p" <err >zip_path && zip_path=$(cat zip_path) && test -n "$zip_path" && - unzip -v "$zip_path" && + "$GIT_UNZIP" -v "$zip_path" && folder=${zip_path%.zip} && test_path_is_missing "$folder" && - unzip -p "$zip_path" diagnostics.log >out && + "$GIT_UNZIP" -p "$zip_path" diagnostics.log >out && test_file_not_empty out && - unzip -p "$zip_path" packs-local.txt >out && + "$GIT_UNZIP" -p "$zip_path" packs-local.txt >out && grep "$(pwd)/.git/objects" out && - unzip -p "$zip_path" objects-local.txt >out && + "$GIT_UNZIP" -p "$zip_path" objects-local.txt >out && grep "^Total: [1-9]" out ' diff --git a/diagnose.c b/diagnose.c new file mode 100644 index 0000000000..beb0a8741b --- /dev/null +++ b/diagnose.c @@ -0,0 +1,269 @@ +#include "cache.h" +#include "diagnose.h" +#include "compat/disk.h" +#include "archive.h" +#include "dir.h" +#include "help.h" +#include "strvec.h" +#include "object-store.h" +#include "packfile.h" + +struct archive_dir { + const char *path; + int recursive; +}; + +struct diagnose_option { + enum diagnose_mode mode; + const char *option_name; +}; + +static struct diagnose_option diagnose_options[] = { + { DIAGNOSE_STATS, "stats" }, + { DIAGNOSE_ALL, "all" }, +}; + +int option_parse_diagnose(const struct option *opt, const char *arg, int unset) +{ + int i; + enum diagnose_mode *diagnose = opt->value; + + if (!arg) { + *diagnose = unset ? 
DIAGNOSE_NONE : DIAGNOSE_STATS; + return 0; + } + + for (i = 0; i < ARRAY_SIZE(diagnose_options); i++) { + if (!strcmp(arg, diagnose_options[i].option_name)) { + *diagnose = diagnose_options[i].mode; + return 0; + } + } + + return error(_("invalid --%s value '%s'"), opt->long_name, arg); +} + +static void dir_file_stats_objects(const char *full_path, size_t full_path_len, + const char *file_name, void *data) +{ + struct strbuf *buf = data; + struct stat st; + + if (!stat(full_path, &st)) + strbuf_addf(buf, "%-70s %16" PRIuMAX "\n", file_name, + (uintmax_t)st.st_size); +} + +static int dir_file_stats(struct object_directory *object_dir, void *data) +{ + struct strbuf *buf = data; + + strbuf_addf(buf, "Contents of %s:\n", object_dir->path); + + for_each_file_in_pack_dir(object_dir->path, dir_file_stats_objects, + data); + + return 0; +} + +static int count_files(char *path) +{ + DIR *dir = opendir(path); + struct dirent *e; + int count = 0; + + if (!dir) + return 0; + + while ((e = readdir(dir)) != NULL) + if (!is_dot_or_dotdot(e->d_name) && e->d_type == DT_REG) + count++; + + closedir(dir); + return count; +} + +static void loose_objs_stats(struct strbuf *buf, const char *path) +{ + DIR *dir = opendir(path); + struct dirent *e; + int count; + int total = 0; + unsigned char c; + struct strbuf count_path = STRBUF_INIT; + size_t base_path_len; + + if (!dir) + return; + + strbuf_addstr(buf, "Object directory stats for "); + strbuf_add_absolute_path(buf, path); + strbuf_addstr(buf, ":\n"); + + strbuf_add_absolute_path(&count_path, path); + strbuf_addch(&count_path, '/'); + base_path_len = count_path.len; + + while ((e = readdir(dir)) != NULL) + if (!is_dot_or_dotdot(e->d_name) && + e->d_type == DT_DIR && strlen(e->d_name) == 2 && + !hex_to_bytes(&c, e->d_name, 1)) { + strbuf_setlen(&count_path, base_path_len); + strbuf_addstr(&count_path, e->d_name); + total += (count = count_files(count_path.buf)); + strbuf_addf(buf, "%s : %7d files\n", e->d_name, count); + } + + strbuf_addf(buf, "Total: %d loose objects", total); + + strbuf_release(&count_path); + closedir(dir); +} + +static int add_directory_to_archiver(struct strvec *archiver_args, + const char *path, int recurse) +{ + int at_root = !*path; + DIR *dir; + struct dirent *e; + struct strbuf buf = STRBUF_INIT; + size_t len; + int res = 0; + + dir = opendir(at_root ? "." 
: path); + if (!dir) { + if (errno == ENOENT) { + warning(_("could not archive missing directory '%s'"), path); + return 0; + } + return error_errno(_("could not open directory '%s'"), path); + } + + if (!at_root) + strbuf_addf(&buf, "%s/", path); + len = buf.len; + strvec_pushf(archiver_args, "--prefix=%s", buf.buf); + + while (!res && (e = readdir(dir))) { + if (!strcmp(".", e->d_name) || !strcmp("..", e->d_name)) + continue; + + strbuf_setlen(&buf, len); + strbuf_addstr(&buf, e->d_name); + + if (e->d_type == DT_REG) + strvec_pushf(archiver_args, "--add-file=%s", buf.buf); + else if (e->d_type != DT_DIR) + warning(_("skipping '%s', which is neither file nor " + "directory"), buf.buf); + else if (recurse && + add_directory_to_archiver(archiver_args, + buf.buf, recurse) < 0) + res = -1; + } + + closedir(dir); + strbuf_release(&buf); + return res; +} + +int create_diagnostics_archive(struct strbuf *zip_path, enum diagnose_mode mode) +{ + struct strvec archiver_args = STRVEC_INIT; + char **argv_copy = NULL; + int stdout_fd = -1, archiver_fd = -1; + struct strbuf buf = STRBUF_INIT; + int res, i; + struct archive_dir archive_dirs[] = { + { ".git", 0 }, + { ".git/hooks", 0 }, + { ".git/info", 0 }, + { ".git/logs", 1 }, + { ".git/objects/info", 0 } + }; + + if (mode == DIAGNOSE_NONE) { + res = 0; + goto diagnose_cleanup; + } + + stdout_fd = dup(STDOUT_FILENO); + if (stdout_fd < 0) { + res = error_errno(_("could not duplicate stdout")); + goto diagnose_cleanup; + } + + archiver_fd = xopen(zip_path->buf, O_CREAT | O_WRONLY | O_TRUNC, 0666); + if (dup2(archiver_fd, STDOUT_FILENO) < 0) { + res = error_errno(_("could not redirect output")); + goto diagnose_cleanup; + } + + init_zip_archiver(); + strvec_pushl(&archiver_args, "git-diagnose", "--format=zip", NULL); + + strbuf_reset(&buf); + strbuf_addstr(&buf, "Collecting diagnostic info\n\n"); + get_version_info(&buf, 1); + + strbuf_addf(&buf, "Repository root: %s\n", the_repository->worktree); + get_disk_info(&buf); + write_or_die(stdout_fd, buf.buf, buf.len); + strvec_pushf(&archiver_args, + "--add-virtual-file=diagnostics.log:%.*s", + (int)buf.len, buf.buf); + + strbuf_reset(&buf); + strbuf_addstr(&buf, "--add-virtual-file=packs-local.txt:"); + dir_file_stats(the_repository->objects->odb, &buf); + foreach_alt_odb(dir_file_stats, &buf); + strvec_push(&archiver_args, buf.buf); + + strbuf_reset(&buf); + strbuf_addstr(&buf, "--add-virtual-file=objects-local.txt:"); + loose_objs_stats(&buf, ".git/objects"); + strvec_push(&archiver_args, buf.buf); + + /* Only include this if explicitly requested */ + if (mode == DIAGNOSE_ALL) { + for (i = 0; i < ARRAY_SIZE(archive_dirs); i++) { + if (add_directory_to_archiver(&archiver_args, + archive_dirs[i].path, + archive_dirs[i].recursive)) { + res = error_errno(_("could not add directory '%s' to archiver"), + archive_dirs[i].path); + goto diagnose_cleanup; + } + } + } + + strvec_pushl(&archiver_args, "--prefix=", + oid_to_hex(the_hash_algo->empty_tree), "--", NULL); + + /* `write_archive()` modifies the `argv` passed to it. Let it. 
*/ + argv_copy = xmemdupz(archiver_args.v, + sizeof(char *) * archiver_args.nr); + res = write_archive(archiver_args.nr, (const char **)argv_copy, NULL, + the_repository, NULL, 0); + if (res) { + error(_("failed to write archive")); + goto diagnose_cleanup; + } + + fprintf(stderr, "\n" + "Diagnostics complete.\n" + "All of the gathered info is captured in '%s'\n", + zip_path->buf); + +diagnose_cleanup: + if (archiver_fd >= 0) { + dup2(stdout_fd, STDOUT_FILENO); + close(stdout_fd); + close(archiver_fd); + } + free(argv_copy); + strvec_clear(&archiver_args); + strbuf_release(&buf); + + return res; +} diff --git a/diagnose.h b/diagnose.h new file mode 100644 index 0000000000..7a4951a786 --- /dev/null +++ b/diagnose.h @@ -0,0 +1,17 @@ +#ifndef DIAGNOSE_H +#define DIAGNOSE_H + +#include "strbuf.h" +#include "parse-options.h" + +enum diagnose_mode { + DIAGNOSE_NONE, + DIAGNOSE_STATS, + DIAGNOSE_ALL +}; + +int option_parse_diagnose(const struct option *opt, const char *arg, int unset); + +int create_diagnostics_archive(struct strbuf *zip_path, enum diagnose_mode mode); + +#endif /* DIAGNOSE_H */ diff --git a/diff-no-index.c b/diff-no-index.c index 9a8b09346b..18edbdf4b5 100644 --- a/diff-no-index.c +++ b/diff-no-index.c @@ -243,7 +243,9 @@ int diff_no_index(struct rev_info *revs, int argc, const char **argv) { int i, no_index; + int ret = 1; const char *paths[2]; + char *to_free[ARRAY_SIZE(paths)] = { 0 }; struct strbuf replacement = STRBUF_INIT; const char *prefix = revs->prefix; struct option no_index_options[] = { @@ -265,7 +267,7 @@ int diff_no_index(struct rev_info *revs, } FREE_AND_NULL(options); for (i = 0; i < 2; i++) { - const char *p = argv[argc - 2 + i]; + const char *p = argv[i]; if (!strcmp(p, "-")) /* * stdin should be spelled as "-"; if you have @@ -273,7 +275,7 @@ int diff_no_index(struct rev_info *revs, */ p = file_from_standard_input; else if (prefix) - p = prefix_filename(prefix, p); + p = to_free[i] = prefix_filename(prefix, p); paths[i] = p; } @@ -295,16 +297,20 @@ int diff_no_index(struct rev_info *revs, revs->diffopt.flags.exit_with_status = 1; if (queue_diff(&revs->diffopt, paths[0], paths[1])) - return 1; + goto out; diff_set_mnemonic_prefix(&revs->diffopt, "1/", "2/"); diffcore_std(&revs->diffopt); diff_flush(&revs->diffopt); - strbuf_release(&replacement); - /* * The return code for --no-index imitates diff(1): * 0 = no changes, 1 = changes, else error */ - return diff_result_code(&revs->diffopt, 0); + ret = diff_result_code(&revs->diffopt, 0); + +out: + for (i = 0; i < ARRAY_SIZE(to_free); i++) + free(to_free[i]); + strbuf_release(&replacement); + return ret; } @@ -5662,7 +5662,7 @@ int diff_opt_parse(struct diff_options *options, ac = parse_options(ac, av, prefix, options->parseopts, NULL, PARSE_OPT_KEEP_DASHDASH | - PARSE_OPT_KEEP_UNKNOWN | + PARSE_OPT_KEEP_UNKNOWN_OPT | PARSE_OPT_NO_INTERNAL_HELP | PARSE_OPT_ONE_SHOT | PARSE_OPT_STOP_AT_NON_OPTION); @@ -1244,8 +1244,7 @@ int match_basename(const char *basename, int basenamelen, int match_pathname(const char *pathname, int pathlen, const char *base, int baselen, - const char *pattern, int prefix, int patternlen, - unsigned flags) + const char *pattern, int prefix, int patternlen) { const char *name; int namelen; @@ -1347,8 +1346,7 @@ static struct path_pattern *last_matching_pattern_from_list(const char *pathname if (match_pathname(pathname, pathlen, pattern->base, pattern->baselen ? 
pattern->baselen - 1 : 0, - exclude, prefix, pattern->patternlen, - pattern->flags)) { + exclude, prefix, pattern->patternlen)) { res = pattern; break; } @@ -414,7 +414,7 @@ int match_basename(const char *, int, const char *, int, int, unsigned); int match_pathname(const char *, int, const char *, int, - const char *, int, int, unsigned); + const char *, int, int); struct path_pattern *last_matching_pattern(struct dir_struct *dir, struct index_state *istate, diff --git a/environment.c b/environment.c index c94480f92f..18d042b467 100644 --- a/environment.c +++ b/environment.c @@ -56,7 +56,6 @@ const char *askpass_program; const char *excludes_file; enum auto_crlf auto_crlf = AUTO_CRLF_FALSE; int read_replace_refs = 1; -char *git_replace_ref_base; enum eol core_eol = EOL_UNSET; int global_conv_flags_eol = CONV_EOL_RNDTRP_WARN; char *check_roundtrip_encoding = "SHIFT-JIS"; @@ -162,6 +161,7 @@ const char *getenv_safe(struct strvec *argv, const char *name) void setup_git_env(const char *git_dir) { + char *git_replace_ref_base; const char *shallow_file; const char *replace_ref_base; struct set_gitdir_args args = { NULL }; @@ -182,9 +182,10 @@ void setup_git_env(const char *git_dir) if (getenv(NO_REPLACE_OBJECTS_ENVIRONMENT)) read_replace_refs = 0; replace_ref_base = getenv(GIT_REPLACE_REF_BASE_ENVIRONMENT); - free(git_replace_ref_base); git_replace_ref_base = xstrdup(replace_ref_base ? replace_ref_base : "refs/replace/"); + update_ref_namespace(NAMESPACE_REPLACE, git_replace_ref_base); + free(git_namespace); git_namespace = expand_namespace(getenv(GIT_NAMESPACE_ENVIRONMENT)); shallow_file = getenv(GIT_SHALLOW_FILE_ENVIRONMENT); diff --git a/fetch-pack.c b/fetch-pack.c index 633718099b..998fc2fa1e 100644 --- a/fetch-pack.c +++ b/fetch-pack.c @@ -325,6 +325,7 @@ static int find_common(struct fetch_negotiator *negotiator, { int fetching; int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval; + int negotiation_round = 0, haves = 0; const struct object_id *oid; unsigned in_vain = 0; int got_continue = 0; @@ -463,9 +464,19 @@ static int find_common(struct fetch_negotiator *negotiator, packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid)); print_verbose(args, "have %s", oid_to_hex(oid)); in_vain++; + haves++; if (flush_at <= ++count) { int ack; + negotiation_round++; + trace2_region_enter_printf("negotiation_v0_v1", "round", + the_repository, "%d", + negotiation_round); + trace2_data_intmax("negotiation_v0_v1", the_repository, + "haves_added", haves); + trace2_data_intmax("negotiation_v0_v1", the_repository, + "in_vain", in_vain); + haves = 0; packet_buf_flush(&req_buf); send_request(args, fd[1], &req_buf); strbuf_setlen(&req_buf, state_len); @@ -487,6 +498,9 @@ static int find_common(struct fetch_negotiator *negotiator, ack, oid_to_hex(result_oid)); switch (ack) { case ACK: + trace2_region_leave_printf("negotiation_v0_v1", "round", + the_repository, "%d", + negotiation_round); flushes = 0; multi_ack = 0; retval = 0; @@ -512,6 +526,7 @@ static int find_common(struct fetch_negotiator *negotiator, const char *hex = oid_to_hex(result_oid); packet_buf_write(&req_buf, "have %s\n", hex); state_len = req_buf.len; + haves++; /* * Reset in_vain because an ack * for this commit has not been @@ -530,6 +545,9 @@ static int find_common(struct fetch_negotiator *negotiator, } } while (ack); flushes--; + trace2_region_leave_printf("negotiation_v0_v1", "round", + the_repository, "%d", + negotiation_round); if (got_continue && MAX_IN_VAIN < in_vain) { print_verbose(args, _("giving up")); break; /* give up */ 
@@ -540,6 +558,8 @@ static int find_common(struct fetch_negotiator *negotiator, } done: trace2_region_leave("fetch-pack", "negotiation_v0_v1", the_repository); + trace2_data_intmax("negotiation_v0_v1", the_repository, "total_rounds", + negotiation_round); if (!got_ready || !no_done) { packet_buf_write(&req_buf, "done\n"); send_request(args, fd[1], &req_buf); @@ -1385,6 +1405,8 @@ static int send_fetch_request(struct fetch_negotiator *negotiator, int fd_out, haves_added = add_haves(negotiator, &req_buf, haves_to_send); *in_vain += haves_added; + trace2_data_intmax("negotiation_v2", the_repository, "haves_added", haves_added); + trace2_data_intmax("negotiation_v2", the_repository, "in_vain", *in_vain); if (!haves_added || (seen_ack && *in_vain >= MAX_IN_VAIN)) { /* Send Done */ packet_buf_write(&req_buf, "done\n"); @@ -1627,6 +1649,7 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args, struct oidset common = OIDSET_INIT; struct packet_reader reader; int in_vain = 0, negotiation_started = 0; + int negotiation_round = 0; int haves_to_send = INITIAL_FLUSH; struct fetch_negotiator negotiator_alloc; struct fetch_negotiator *negotiator; @@ -1683,12 +1706,20 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args, "negotiation_v2", the_repository); } + negotiation_round++; + trace2_region_enter_printf("negotiation_v2", "round", + the_repository, "%d", + negotiation_round); if (send_fetch_request(negotiator, fd[1], args, ref, &common, &haves_to_send, &in_vain, reader.use_sideband, - seen_ack)) + seen_ack)) { + trace2_region_leave_printf("negotiation_v2", "round", + the_repository, "%d", + negotiation_round); state = FETCH_GET_PACK; + } else state = FETCH_PROCESS_ACKS; break; @@ -1701,6 +1732,9 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args, seen_ack = 1; oidset_insert(&common, &common_oid); } + trace2_region_leave_printf("negotiation_v2", "round", + the_repository, "%d", + negotiation_round); if (received_ready) { /* * Don't check for response delimiter; get_pack() will @@ -1716,6 +1750,8 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args, trace2_region_leave("fetch-pack", "negotiation_v2", the_repository); + trace2_data_intmax("negotiation_v2", the_repository, + "total_rounds", negotiation_round); /* Check for shallow-info section */ if (process_section_header(&reader, "shallow-info", 1)) receive_shallow_info(args, &reader, shallows, si); @@ -2095,6 +2131,7 @@ void negotiate_using_fetch(const struct oid_array *negotiation_tips, int in_vain = 0; int seen_ack = 0; int last_iteration = 0; + int negotiation_round = 0; timestamp_t min_generation = GENERATION_NUMBER_INFINITY; fetch_negotiator_init(the_repository, &negotiator); @@ -2108,11 +2145,17 @@ void negotiate_using_fetch(const struct oid_array *negotiation_tips, add_to_object_array, &nt_object_array); + trace2_region_enter("fetch-pack", "negotiate_using_fetch", the_repository); while (!last_iteration) { int haves_added; struct object_id common_oid; int received_ready = 0; + negotiation_round++; + + trace2_region_enter_printf("negotiate_using_fetch", "round", + the_repository, "%d", + negotiation_round); strbuf_reset(&req_buf); write_fetch_command_and_capabilities(&req_buf, server_options); @@ -2123,6 +2166,11 @@ void negotiate_using_fetch(const struct oid_array *negotiation_tips, if (!haves_added || (seen_ack && in_vain >= MAX_IN_VAIN)) last_iteration = 1; + trace2_data_intmax("negotiate_using_fetch", the_repository, + "haves_added", haves_added); + 
trace2_data_intmax("negotiate_using_fetch", the_repository, + "in_vain", in_vain); + /* Send request */ packet_buf_flush(&req_buf); if (write_in_full(fd[1], req_buf.buf, req_buf.len) < 0) @@ -2155,7 +2203,13 @@ void negotiate_using_fetch(const struct oid_array *negotiation_tips, REACH_SCRATCH, 0, min_generation)) last_iteration = 1; + trace2_region_leave_printf("negotiation", "round", + the_repository, "%d", + negotiation_round); } + trace2_region_enter("fetch-pack", "negotiate_using_fetch", the_repository); + trace2_data_intmax("negotiate_using_fetch", the_repository, + "total_rounds", negotiation_round); clear_common_flag(acked_commits); strbuf_release(&req_buf); } diff --git a/git-compat-util.h b/git-compat-util.h index 71a004be40..b90b64718e 100644 --- a/git-compat-util.h +++ b/git-compat-util.h @@ -268,6 +268,7 @@ static inline int is_xplatform_dir_sep(int c) #include <sys/resource.h> #include <sys/socket.h> #include <sys/ioctl.h> +#include <sys/statvfs.h> #include <termios.h> #ifndef NO_SYS_SELECT_H #include <sys/select.h> @@ -578,8 +579,11 @@ static inline int git_has_dir_sep(const char *path) /* The sentinel attribute is valid from gcc version 4.0 */ #if defined(__GNUC__) && (__GNUC__ >= 4) #define LAST_ARG_MUST_BE_NULL __attribute__((sentinel)) +/* warn_unused_result exists as of gcc 3.4.0, but be lazy and check 4.0 */ +#define RESULT_MUST_BE_USED __attribute__ ((warn_unused_result)) #else #define LAST_ARG_MUST_BE_NULL +#define RESULT_MUST_BE_USED #endif #define MAYBE_UNUSED __attribute__((__unused__)) @@ -1008,6 +1012,28 @@ static inline unsigned long cast_size_t_to_ulong(size_t a) return (unsigned long)a; } +/* + * Limit size of IO chunks, because huge chunks only cause pain. OS X + * 64-bit is buggy, returning EINVAL if len >= INT_MAX; and even in + * the absence of bugs, large chunks can result in bad latencies when + * you decide to kill the process. + * + * We pick 8 MiB as our default, but if the platform defines SSIZE_MAX + * that is smaller than that, clip it to SSIZE_MAX, as a call to + * read(2) or write(2) larger than that is allowed to fail. As the last + * resort, we allow a port to pass via CFLAGS e.g. "-DMAX_IO_SIZE=value" + * to override this, if the definition of SSIZE_MAX given by the platform + * is broken. 
+ */ +#ifndef MAX_IO_SIZE +# define MAX_IO_SIZE_DEFAULT (8*1024*1024) +# if defined(SSIZE_MAX) && (SSIZE_MAX < MAX_IO_SIZE_DEFAULT) +# define MAX_IO_SIZE SSIZE_MAX +# else +# define MAX_IO_SIZE MAX_IO_SIZE_DEFAULT +# endif +#endif + #ifdef HAVE_ALLOCA_H # include <alloca.h> # define xalloca(size) (alloca(size)) @@ -489,14 +489,14 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv) static struct cmd_struct commands[] = { { "add", cmd_add, RUN_SETUP | NEED_WORK_TREE }, { "am", cmd_am, RUN_SETUP | NEED_WORK_TREE }, - { "annotate", cmd_annotate, RUN_SETUP | NO_PARSEOPT }, + { "annotate", cmd_annotate, RUN_SETUP }, { "apply", cmd_apply, RUN_SETUP_GENTLY }, { "archive", cmd_archive, RUN_SETUP_GENTLY }, { "bisect--helper", cmd_bisect__helper, RUN_SETUP }, { "blame", cmd_blame, RUN_SETUP }, { "branch", cmd_branch, RUN_SETUP | DELAY_PAGER_CONFIG }, { "bugreport", cmd_bugreport, RUN_SETUP_GENTLY }, - { "bundle", cmd_bundle, RUN_SETUP_GENTLY | NO_PARSEOPT }, + { "bundle", cmd_bundle, RUN_SETUP_GENTLY }, { "cat-file", cmd_cat_file, RUN_SETUP }, { "check-attr", cmd_check_attr, RUN_SETUP }, { "check-ignore", cmd_check_ignore, RUN_SETUP | NEED_WORK_TREE }, @@ -514,7 +514,7 @@ static struct cmd_struct commands[] = { { "column", cmd_column, RUN_SETUP_GENTLY }, { "commit", cmd_commit, RUN_SETUP | NEED_WORK_TREE }, { "commit-graph", cmd_commit_graph, RUN_SETUP }, - { "commit-tree", cmd_commit_tree, RUN_SETUP | NO_PARSEOPT }, + { "commit-tree", cmd_commit_tree, RUN_SETUP }, { "config", cmd_config, RUN_SETUP_GENTLY | DELAY_PAGER_CONFIG }, { "count-objects", cmd_count_objects, RUN_SETUP }, { "credential", cmd_credential, RUN_SETUP_GENTLY | NO_PARSEOPT }, @@ -522,6 +522,7 @@ static struct cmd_struct commands[] = { { "credential-cache--daemon", cmd_credential_cache_daemon }, { "credential-store", cmd_credential_store }, { "describe", cmd_describe, RUN_SETUP }, + { "diagnose", cmd_diagnose, RUN_SETUP_GENTLY }, { "diff", cmd_diff, NO_PARSEOPT }, { "diff-files", cmd_diff_files, RUN_SETUP | NEED_WORK_TREE | NO_PARSEOPT }, { "diff-index", cmd_diff_index, RUN_SETUP | NO_PARSEOPT }, @@ -553,9 +554,9 @@ static struct cmd_struct commands[] = { { "ls-files", cmd_ls_files, RUN_SETUP }, { "ls-remote", cmd_ls_remote, RUN_SETUP_GENTLY }, { "ls-tree", cmd_ls_tree, RUN_SETUP }, - { "mailinfo", cmd_mailinfo, RUN_SETUP_GENTLY | NO_PARSEOPT }, + { "mailinfo", cmd_mailinfo, RUN_SETUP_GENTLY }, { "mailsplit", cmd_mailsplit, NO_PARSEOPT }, - { "maintenance", cmd_maintenance, RUN_SETUP | NO_PARSEOPT }, + { "maintenance", cmd_maintenance, RUN_SETUP }, { "merge", cmd_merge, RUN_SETUP | NEED_WORK_TREE }, { "merge-base", cmd_merge_base, RUN_SETUP }, { "merge-file", cmd_merge_file, RUN_SETUP_GENTLY }, @@ -566,7 +567,7 @@ static struct cmd_struct commands[] = { { "merge-recursive-theirs", cmd_merge_recursive, RUN_SETUP | NEED_WORK_TREE | NO_PARSEOPT }, { "merge-subtree", cmd_merge_recursive, RUN_SETUP | NEED_WORK_TREE | NO_PARSEOPT }, { "merge-tree", cmd_merge_tree, RUN_SETUP }, - { "mktag", cmd_mktag, RUN_SETUP | NO_PARSEOPT }, + { "mktag", cmd_mktag, RUN_SETUP }, { "mktree", cmd_mktree, RUN_SETUP }, { "multi-pack-index", cmd_multi_pack_index, RUN_SETUP }, { "mv", cmd_mv, RUN_SETUP | NEED_WORK_TREE }, @@ -627,7 +628,7 @@ static struct cmd_struct commands[] = { { "verify-tag", cmd_verify_tag, RUN_SETUP }, { "version", cmd_version }, { "whatchanged", cmd_whatchanged, RUN_SETUP }, - { "worktree", cmd_worktree, RUN_SETUP | NO_PARSEOPT }, + { "worktree", cmd_worktree, RUN_SETUP }, { "write-tree", cmd_write_tree, 
RUN_SETUP }, }; @@ -4,9 +4,7 @@ #include "git-compat-util.h" #include "repository.h" -#if defined(SHA1_PPC) -#include "ppc/sha1.h" -#elif defined(SHA1_APPLE) +#if defined(SHA1_APPLE) #include <CommonCrypto/CommonDigest.h> #elif defined(SHA1_OPENSSL) #include <openssl/sha.h> @@ -32,7 +30,7 @@ * platform's underlying implementation of SHA-1; could be OpenSSL, * blk_SHA, Apple CommonCrypto, etc... Note that the relevant * SHA-1 header may have already defined platform_SHA_CTX for our - * own implementations like block-sha1 and ppc-sha1, so we list + * own implementations like block-sha1, so we list * the default for OpenSSL compatible SHA-1 implementations here. */ #define platform_SHA_CTX SHA_CTX diff --git a/log-tree.c b/log-tree.c index ccdbf08feb..1dd5fcbf7b 100644 --- a/log-tree.c +++ b/log-tree.c @@ -138,10 +138,12 @@ static int add_ref_decoration(const char *refname, const struct object_id *oid, int flags UNUSED, void *cb_data) { + int i; struct object *obj; enum object_type objtype; enum decoration_type deco_type = DECORATION_NONE; struct decoration_filter *filter = (struct decoration_filter *)cb_data; + const char *git_replace_ref_base = ref_namespace[NAMESPACE_REPLACE].ref; if (filter && !ref_filter_match(refname, filter)) return 0; @@ -166,16 +168,21 @@ static int add_ref_decoration(const char *refname, const struct object_id *oid, return 0; obj = lookup_object_by_type(the_repository, oid, objtype); - if (starts_with(refname, "refs/heads/")) - deco_type = DECORATION_REF_LOCAL; - else if (starts_with(refname, "refs/remotes/")) - deco_type = DECORATION_REF_REMOTE; - else if (starts_with(refname, "refs/tags/")) - deco_type = DECORATION_REF_TAG; - else if (!strcmp(refname, "refs/stash")) - deco_type = DECORATION_REF_STASH; - else if (!strcmp(refname, "HEAD")) - deco_type = DECORATION_REF_HEAD; + for (i = 0; i < ARRAY_SIZE(ref_namespace); i++) { + struct ref_namespace_info *info = &ref_namespace[i]; + + if (!info->decoration) + continue; + if (info->exact) { + if (!strcmp(refname, info->ref)) { + deco_type = info->decoration; + break; + } + } else if (starts_with(refname, info->ref)) { + deco_type = info->decoration; + break; + } + } add_name_decoration(deco_type, refname, obj); while (obj->type == OBJ_TAG) { @@ -957,8 +964,7 @@ static void cleanup_additional_headers(struct diff_options *o) static int do_remerge_diff(struct rev_info *opt, struct commit_list *parents, - struct object_id *oid, - struct commit *commit) + struct object_id *oid) { struct merge_options o; struct commit_list *bases; @@ -1053,7 +1059,7 @@ static int log_tree_diff(struct rev_info *opt, struct commit *commit, struct log "for octopus merges.\n"); return 1; } - return do_remerge_diff(opt, parents, oid, commit); + return do_remerge_diff(opt, parents, oid); } if (opt->combine_merges) return do_diff_combined(opt, commit); diff --git a/merge-ort.c b/merge-ort.c index 7d105be275..99dcee2db8 100644 --- a/merge-ort.c +++ b/merge-ort.c @@ -387,8 +387,24 @@ struct merge_options_internal { /* call_depth: recursion level counter for merging merge bases */ int call_depth; + + /* field that holds submodule conflict information */ + struct string_list conflicted_submodules; +}; + +struct conflicted_submodule_item { + char *abbrev; + int flag; }; +static void conflicted_submodule_item_free(void *util, const char *str) +{ + struct conflicted_submodule_item *item = util; + + free(item->abbrev); + free(item); +} + struct version_info { struct object_id oid; unsigned short mode; @@ -491,7 +507,6 @@ enum conflict_and_info_types { 
CONFLICT_FILE_DIRECTORY, CONFLICT_DISTINCT_MODES, CONFLICT_MODIFY_DELETE, - CONFLICT_PRESENT_DESPITE_SKIPPED, /* Regular rename */ CONFLICT_RENAME_RENAME, /* same file renamed differently */ @@ -517,6 +532,7 @@ enum conflict_and_info_types { CONFLICT_SUBMODULE_NOT_INITIALIZED, CONFLICT_SUBMODULE_HISTORY_NOT_AVAILABLE, CONFLICT_SUBMODULE_MAY_HAVE_REWINDS, + CONFLICT_SUBMODULE_NULL_MERGE_BASE, /* Keep this entry _last_ in the list */ NB_CONFLICT_TYPES, @@ -536,8 +552,6 @@ static const char *type_short_descriptions[] = { [CONFLICT_FILE_DIRECTORY] = "CONFLICT (file/directory)", [CONFLICT_DISTINCT_MODES] = "CONFLICT (distinct modes)", [CONFLICT_MODIFY_DELETE] = "CONFLICT (modify/delete)", - [CONFLICT_PRESENT_DESPITE_SKIPPED] = - "CONFLICT (upgrade your version of git)", /*** Regular rename ***/ [CONFLICT_RENAME_RENAME] = "CONFLICT (rename/rename)", @@ -570,6 +584,8 @@ static const char *type_short_descriptions[] = { "CONFLICT (submodule history not available)", [CONFLICT_SUBMODULE_MAY_HAVE_REWINDS] = "CONFLICT (submodule may have rewinds)", + [CONFLICT_SUBMODULE_NULL_MERGE_BASE] = + "CONFLICT (submodule lacks merge base)" }; struct logical_conflict_info { @@ -686,6 +702,9 @@ static void clear_or_reinit_internal_opts(struct merge_options_internal *opti, mem_pool_discard(&opti->pool, 0); + string_list_clear_func(&opti->conflicted_submodules, + conflicted_submodule_item_free); + /* Clean out callback_data as well. */ FREE_AND_NULL(renames->callback_data); renames->callback_data_nr = renames->callback_data_alloc = 0; @@ -748,8 +767,7 @@ static void path_msg(struct merge_options *opt, /* Sanity checks */ assert(omittable_hint == !starts_with(type_short_descriptions[type], "CONFLICT") || - type == CONFLICT_DIR_RENAME_SUGGESTED || - type == CONFLICT_PRESENT_DESPITE_SKIPPED); + type == CONFLICT_DIR_RENAME_SUGGESTED); if (opt->record_conflict_msgs_as_headers && omittable_hint) return; /* Do not record mere hints in headers */ if (opt->priv->call_depth && opt->verbosity < 5) @@ -1744,24 +1762,32 @@ static int merge_submodule(struct merge_options *opt, int i; int search = !opt->priv->call_depth; + int sub_not_initialized = 1; + int sub_flag = CONFLICT_SUBMODULE_FAILED_TO_MERGE; /* store fallback answer in result in case we fail */ oidcpy(result, opt->priv->call_depth ? 
o : a); /* we can not handle deletion conflicts */ - if (is_null_oid(o)) - return 0; - if (is_null_oid(a)) - return 0; - if (is_null_oid(b)) - return 0; + if (is_null_oid(a) || is_null_oid(b)) + BUG("submodule deleted on one side; this should be handled outside of merge_submodule()"); - if (repo_submodule_init(&subrepo, opt->repo, path, null_oid())) { + if ((sub_not_initialized = repo_submodule_init(&subrepo, + opt->repo, path, null_oid()))) { path_msg(opt, CONFLICT_SUBMODULE_NOT_INITIALIZED, 0, path, NULL, NULL, NULL, _("Failed to merge submodule %s (not checked out)"), path); - return 0; + sub_flag = CONFLICT_SUBMODULE_NOT_INITIALIZED; + goto cleanup; + } + + if (is_null_oid(o)) { + path_msg(opt, CONFLICT_SUBMODULE_NULL_MERGE_BASE, 0, + path, NULL, NULL, NULL, + _("Failed to merge submodule %s (no merge base)"), + path); + goto cleanup; } if (!(commit_o = lookup_commit_reference(&subrepo, o)) || @@ -1771,6 +1797,7 @@ static int merge_submodule(struct merge_options *opt, path, NULL, NULL, NULL, _("Failed to merge submodule %s (commits not present)"), path); + sub_flag = CONFLICT_SUBMODULE_HISTORY_NOT_AVAILABLE; goto cleanup; } @@ -1849,7 +1876,23 @@ static int merge_submodule(struct merge_options *opt, object_array_clear(&merges); cleanup: - repo_clear(&subrepo); + if (!opt->priv->call_depth && !ret) { + struct string_list *csub = &opt->priv->conflicted_submodules; + struct conflicted_submodule_item *util; + const char *abbrev; + + util = xmalloc(sizeof(*util)); + util->flag = sub_flag; + util->abbrev = NULL; + if (!sub_not_initialized) { + abbrev = repo_find_unique_abbrev(&subrepo, b, DEFAULT_ABBREV); + util->abbrev = xstrdup(abbrev); + } + string_list_append(csub, path)->util = util; + } + + if (!sub_not_initialized) + repo_clear(&subrepo); return ret; } @@ -4377,22 +4420,8 @@ static int record_conflicted_index_entries(struct merge_options *opt) * the CE_SKIP_WORKTREE bit and manually write those * files to the working disk here. */ - if (ce_skip_worktree(ce)) { - struct stat st; - - if (!lstat(path, &st)) { - char *new_name = unique_path(opt, - path, - "cruft"); - - path_msg(opt, CONFLICT_PRESENT_DESPITE_SKIPPED, 1, - path, NULL, NULL, NULL, - _("Note: %s not up to date and in way of checking out conflicted version; old copy renamed to %s"), - path, new_name); - errs |= rename(path, new_name); - } + if (ce_skip_worktree(ce)) errs |= checkout_entry(ce, &state, NULL, NULL); - } /* * Mark this cache entry for removal and instead add @@ -4434,6 +4463,63 @@ static int record_conflicted_index_entries(struct merge_options *opt) return errs; } +static void print_submodule_conflict_suggestion(struct string_list *csub) { + struct string_list_item *item; + struct strbuf msg = STRBUF_INIT; + struct strbuf tmp = STRBUF_INIT; + struct strbuf subs = STRBUF_INIT; + + if (!csub->nr) + return; + + strbuf_add_separated_string_list(&subs, " ", csub); + for_each_string_list_item(item, csub) { + struct conflicted_submodule_item *util = item->util; + + /* + * NEEDSWORK: The steps to resolve these errors deserve a more + * detailed explanation than what is currently printed below. + */ + if (util->flag == CONFLICT_SUBMODULE_NOT_INITIALIZED || + util->flag == CONFLICT_SUBMODULE_HISTORY_NOT_AVAILABLE) + continue; + + /* + * TRANSLATORS: This is a line of advice to resolve a merge + * conflict in a submodule. The first argument is the submodule + * name, and the second argument is the abbreviated id of the + * commit that needs to be merged. 
For example: + * - go to submodule (mysubmodule), and either merge commit abc1234" + */ + strbuf_addf(&tmp, _(" - go to submodule (%s), and either merge commit %s\n" + " or update to an existing commit which has merged those changes\n"), + item->string, util->abbrev); + } + + /* + * TRANSLATORS: This is a detailed message for resolving submodule + * conflicts. The first argument is string containing one step per + * submodule. The second is a space-separated list of submodule names. + */ + strbuf_addf(&msg, + _("Recursive merging with submodules currently only supports trivial cases.\n" + "Please manually handle the merging of each conflicted submodule.\n" + "This can be accomplished with the following steps:\n" + "%s" + " - come back to superproject and run:\n\n" + " git add %s\n\n" + " to record the above merge or update\n" + " - resolve any other conflicts in the superproject\n" + " - commit the resulting index in the superproject\n"), + tmp.buf, subs.buf); + + printf("%s", msg.buf); + + strbuf_release(&subs); + strbuf_release(&tmp); + strbuf_release(&msg); +} + void merge_display_update_messages(struct merge_options *opt, int detailed, struct merge_result *result) @@ -4483,6 +4569,8 @@ void merge_display_update_messages(struct merge_options *opt, } string_list_clear(&olist, 0); + print_submodule_conflict_suggestion(&opti->conflicted_submodules); + /* Also include needed rename limit adjustment now */ diff_warn_rename_limit("merge.renamelimit", opti->renames.needed_limit, 0); @@ -4684,6 +4772,7 @@ static void merge_start(struct merge_options *opt, struct merge_result *result) trace2_region_enter("merge", "allocate/init", opt->repo); if (opt->priv) { clear_or_reinit_internal_opts(opt->priv, 1); + string_list_init_nodup(&opt->priv->conflicted_submodules); trace2_region_leave("merge", "allocate/init", opt->repo); return; } @@ -577,6 +577,78 @@ static void fill_pack_entry(uint32_t pack_int_id, entry->preferred = !!preferred; } +struct midx_fanout { + struct pack_midx_entry *entries; + uint32_t nr; + uint32_t alloc; +}; + +static void midx_fanout_grow(struct midx_fanout *fanout, uint32_t nr) +{ + ALLOC_GROW(fanout->entries, nr, fanout->alloc); +} + +static void midx_fanout_sort(struct midx_fanout *fanout) +{ + QSORT(fanout->entries, fanout->nr, midx_oid_compare); +} + +static void midx_fanout_add_midx_fanout(struct midx_fanout *fanout, + struct multi_pack_index *m, + uint32_t cur_fanout, + int preferred_pack) +{ + uint32_t start = 0, end; + uint32_t cur_object; + + if (cur_fanout) + start = ntohl(m->chunk_oid_fanout[cur_fanout - 1]); + end = ntohl(m->chunk_oid_fanout[cur_fanout]); + + for (cur_object = start; cur_object < end; cur_object++) { + if ((preferred_pack > -1) && + (preferred_pack == nth_midxed_pack_int_id(m, cur_object))) { + /* + * Objects from preferred packs are added + * separately. 
+ */ + continue; + } + + midx_fanout_grow(fanout, fanout->nr + 1); + nth_midxed_pack_midx_entry(m, + &fanout->entries[fanout->nr], + cur_object); + fanout->entries[fanout->nr].preferred = 0; + fanout->nr++; + } +} + +static void midx_fanout_add_pack_fanout(struct midx_fanout *fanout, + struct pack_info *info, + uint32_t cur_pack, + int preferred, + uint32_t cur_fanout) +{ + struct packed_git *pack = info[cur_pack].p; + uint32_t start = 0, end; + uint32_t cur_object; + + if (cur_fanout) + start = get_pack_fanout(pack, cur_fanout - 1); + end = get_pack_fanout(pack, cur_fanout); + + for (cur_object = start; cur_object < end; cur_object++) { + midx_fanout_grow(fanout, fanout->nr + 1); + fill_pack_entry(cur_pack, + info[cur_pack].p, + cur_object, + &fanout->entries[fanout->nr], + preferred); + fanout->nr++; + } +} + /* * It is possible to artificially get into a state where there are many * duplicate copies of objects. That can create high memory pressure if @@ -595,8 +667,8 @@ static struct pack_midx_entry *get_sorted_entries(struct multi_pack_index *m, int preferred_pack) { uint32_t cur_fanout, cur_pack, cur_object; - uint32_t alloc_fanout, alloc_objects, total_objects = 0; - struct pack_midx_entry *entries_by_fanout = NULL; + uint32_t alloc_objects, total_objects = 0; + struct midx_fanout fanout = { 0 }; struct pack_midx_entry *deduplicated_entries = NULL; uint32_t start_pack = m ? m->num_packs : 0; @@ -608,74 +680,51 @@ static struct pack_midx_entry *get_sorted_entries(struct multi_pack_index *m, * slices to be evenly distributed, with some noise. Hence, * allocate slightly more than one 256th. */ - alloc_objects = alloc_fanout = total_objects > 3200 ? total_objects / 200 : 16; + alloc_objects = fanout.alloc = total_objects > 3200 ? total_objects / 200 : 16; - ALLOC_ARRAY(entries_by_fanout, alloc_fanout); + ALLOC_ARRAY(fanout.entries, fanout.alloc); ALLOC_ARRAY(deduplicated_entries, alloc_objects); *nr_objects = 0; for (cur_fanout = 0; cur_fanout < 256; cur_fanout++) { - uint32_t nr_fanout = 0; - - if (m) { - uint32_t start = 0, end; - - if (cur_fanout) - start = ntohl(m->chunk_oid_fanout[cur_fanout - 1]); - end = ntohl(m->chunk_oid_fanout[cur_fanout]); - - for (cur_object = start; cur_object < end; cur_object++) { - ALLOC_GROW(entries_by_fanout, nr_fanout + 1, alloc_fanout); - nth_midxed_pack_midx_entry(m, - &entries_by_fanout[nr_fanout], - cur_object); - if (nth_midxed_pack_int_id(m, cur_object) == preferred_pack) - entries_by_fanout[nr_fanout].preferred = 1; - else - entries_by_fanout[nr_fanout].preferred = 0; - nr_fanout++; - } - } + fanout.nr = 0; + + if (m) + midx_fanout_add_midx_fanout(&fanout, m, cur_fanout, + preferred_pack); for (cur_pack = start_pack; cur_pack < nr_packs; cur_pack++) { - uint32_t start = 0, end; int preferred = cur_pack == preferred_pack; - - if (cur_fanout) - start = get_pack_fanout(info[cur_pack].p, cur_fanout - 1); - end = get_pack_fanout(info[cur_pack].p, cur_fanout); - - for (cur_object = start; cur_object < end; cur_object++) { - ALLOC_GROW(entries_by_fanout, nr_fanout + 1, alloc_fanout); - fill_pack_entry(cur_pack, - info[cur_pack].p, - cur_object, - &entries_by_fanout[nr_fanout], - preferred); - nr_fanout++; - } + midx_fanout_add_pack_fanout(&fanout, + info, cur_pack, + preferred, cur_fanout); } - QSORT(entries_by_fanout, nr_fanout, midx_oid_compare); + if (-1 < preferred_pack && preferred_pack < start_pack) + midx_fanout_add_pack_fanout(&fanout, info, + preferred_pack, 1, + cur_fanout); + + midx_fanout_sort(&fanout); /* * The batch is now sorted by OID 
and then mtime (descending). * Take only the first duplicate. */ - for (cur_object = 0; cur_object < nr_fanout; cur_object++) { - if (cur_object && oideq(&entries_by_fanout[cur_object - 1].oid, - &entries_by_fanout[cur_object].oid)) + for (cur_object = 0; cur_object < fanout.nr; cur_object++) { + if (cur_object && oideq(&fanout.entries[cur_object - 1].oid, + &fanout.entries[cur_object].oid)) continue; ALLOC_GROW(deduplicated_entries, *nr_objects + 1, alloc_objects); memcpy(&deduplicated_entries[*nr_objects], - &entries_by_fanout[cur_object], + &fanout.entries[cur_object], sizeof(struct pack_midx_entry)); (*nr_objects)++; } } - free(entries_by_fanout); + free(fanout.entries); return deduplicated_entries; } @@ -1070,6 +1119,9 @@ static int write_midx_bitmap(const char *midx_name, if (flags & MIDX_WRITE_BITMAP_HASH_CACHE) options |= BITMAP_OPT_HASH_CACHE; + if (flags & MIDX_WRITE_BITMAP_LOOKUP_TABLE) + options |= BITMAP_OPT_LOOKUP_TABLE; + /* * Build the MIDX-order index based on pdata.objects (which is already * in MIDX order; c.f., 'midx_pack_order_cmp()' for the definition of @@ -47,6 +47,7 @@ struct multi_pack_index { #define MIDX_WRITE_REV_INDEX (1 << 1) #define MIDX_WRITE_BITMAP (1 << 2) #define MIDX_WRITE_BITMAP_HASH_CACHE (1 << 3) +#define MIDX_WRITE_BITMAP_LOOKUP_TABLE (1 << 4) const unsigned char *get_midx_checksum(struct multi_pack_index *m); void get_midx_filename(struct strbuf *out, const char *object_dir); @@ -1006,6 +1006,7 @@ void init_notes(struct notes_tree *t, const char *notes_ref, if (!notes_ref) notes_ref = default_notes_ref(); + update_ref_namespace(NAMESPACE_NOTES, xstrdup(notes_ref)); if (!combine_notes) combine_notes = combine_notes_concatenate; @@ -263,8 +263,11 @@ struct object *parse_object_or_die(const struct object_id *oid, die(_("unable to parse object: %s"), name ? 
name : oid_to_hex(oid)); } -struct object *parse_object(struct repository *r, const struct object_id *oid) +struct object *parse_object_with_flags(struct repository *r, + const struct object_id *oid, + enum parse_object_flags flags) { + int skip_hash = !!(flags & PARSE_OBJECT_SKIP_HASH_CHECK); unsigned long size; enum object_type type; int eaten; @@ -276,10 +279,16 @@ struct object *parse_object(struct repository *r, const struct object_id *oid) if (obj && obj->parsed) return obj; + if (skip_hash) { + struct commit *commit = lookup_commit_in_graph(r, repl); + if (commit) + return &commit->object; + } + if ((obj && obj->type == OBJ_BLOB && repo_has_object_file(r, oid)) || (!obj && repo_has_object_file(r, oid) && oid_object_info(r, oid, NULL) == OBJ_BLOB)) { - if (stream_object_signature(r, repl) < 0) { + if (!skip_hash && stream_object_signature(r, repl) < 0) { error(_("hash mismatch %s"), oid_to_hex(oid)); return NULL; } @@ -289,7 +298,8 @@ struct object *parse_object(struct repository *r, const struct object_id *oid) buffer = repo_read_object_file(r, oid, &type, &size); if (buffer) { - if (check_object_signature(r, repl, buffer, size, type) < 0) { + if (!skip_hash && + check_object_signature(r, repl, buffer, size, type) < 0) { free(buffer); error(_("hash mismatch %s"), oid_to_hex(repl)); return NULL; @@ -304,6 +314,11 @@ struct object *parse_object(struct repository *r, const struct object_id *oid) return NULL; } +struct object *parse_object(struct repository *r, const struct object_id *oid) +{ + return parse_object_with_flags(r, oid, 0); +} + struct object_list *object_list_insert(struct object *item, struct object_list **list_p) { @@ -59,7 +59,7 @@ struct object_array { /* * object flag allocation: - * revision.h: 0---------10 15 23------26 + * revision.h: 0---------10 15 23------27 * fetch-pack.c: 01 67 * negotiator/default.c: 2--5 * walker.c: 0-2 @@ -128,7 +128,13 @@ void *object_as_type(struct object *obj, enum object_type type, int quiet); * * Returns NULL if the object is missing or corrupt. */ +enum parse_object_flags { + PARSE_OBJECT_SKIP_HASH_CHECK = 1 << 0, +}; struct object *parse_object(struct repository *r, const struct object_id *oid); +struct object *parse_object_with_flags(struct repository *r, + const struct object_id *oid, + enum parse_object_flags flags); /* * Like parse_object, but will die() instead of returning NULL. 
If the diff --git a/pack-bitmap-write.c b/pack-bitmap-write.c index 4fcfaed428..a213f5eddc 100644 --- a/pack-bitmap-write.c +++ b/pack-bitmap-write.c @@ -649,21 +649,18 @@ static const struct object_id *oid_access(size_t pos, const void *table) } static void write_selected_commits_v1(struct hashfile *f, - struct pack_idx_entry **index, - uint32_t index_nr) + uint32_t *commit_positions, + off_t *offsets) { int i; for (i = 0; i < writer.selected_nr; ++i) { struct bitmapped_commit *stored = &writer.selected[i]; - int commit_pos = - oid_pos(&stored->commit->object.oid, index, index_nr, oid_access); + if (offsets) + offsets[i] = hashfile_total(f); - if (commit_pos < 0) - BUG("trying to write commit not in index"); - - hashwrite_be32(f, commit_pos); + hashwrite_be32(f, commit_positions[i]); hashwrite_u8(f, stored->xor_offset); hashwrite_u8(f, stored->flags); @@ -671,6 +668,79 @@ static void write_selected_commits_v1(struct hashfile *f, } } +static int table_cmp(const void *_va, const void *_vb, void *_data) +{ + uint32_t *commit_positions = _data; + uint32_t a = commit_positions[*(uint32_t *)_va]; + uint32_t b = commit_positions[*(uint32_t *)_vb]; + + if (a > b) + return 1; + else if (a < b) + return -1; + + return 0; +} + +static void write_lookup_table(struct hashfile *f, + uint32_t *commit_positions, + off_t *offsets) +{ + uint32_t i; + uint32_t *table, *table_inv; + + ALLOC_ARRAY(table, writer.selected_nr); + ALLOC_ARRAY(table_inv, writer.selected_nr); + + for (i = 0; i < writer.selected_nr; i++) + table[i] = i; + + /* + * At the end of this sort table[j] = i means that the i'th + * bitmap corresponds to j'th bitmapped commit (among the selected + * commits) in lex order of OIDs. + */ + QSORT_S(table, writer.selected_nr, table_cmp, commit_positions); + + /* table_inv helps us discover that relationship (i'th bitmap + * to j'th commit by j = table_inv[i]) + */ + for (i = 0; i < writer.selected_nr; i++) + table_inv[table[i]] = i; + + trace2_region_enter("pack-bitmap-write", "writing_lookup_table", the_repository); + for (i = 0; i < writer.selected_nr; i++) { + struct bitmapped_commit *selected = &writer.selected[table[i]]; + uint32_t xor_offset = selected->xor_offset; + uint32_t xor_row; + + if (xor_offset) { + /* + * xor_index stores the index (in the bitmap entries) + * of the corresponding xor bitmap. But we need to convert + * this index into lookup table's index. So, table_inv[xor_index] + * gives us the index position w.r.t. the lookup table. + * + * If "k = table[i] - xor_offset" then the xor base is the k'th + * bitmap. `table_inv[k]` gives us the position of that bitmap + * in the lookup table. 
+ */ + uint32_t xor_index = table[i] - xor_offset; + xor_row = table_inv[xor_index]; + } else { + xor_row = 0xffffffff; + } + + hashwrite_be32(f, commit_positions[table[i]]); + hashwrite_be64(f, (uint64_t)offsets[table[i]]); + hashwrite_be32(f, xor_row); + } + trace2_region_leave("pack-bitmap-write", "writing_lookup_table", the_repository); + + free(table); + free(table_inv); +} + static void write_hash_cache(struct hashfile *f, struct pack_idx_entry **index, uint32_t index_nr) @@ -697,6 +767,9 @@ void bitmap_writer_finish(struct pack_idx_entry **index, static uint16_t flags = BITMAP_OPT_FULL_DAG; struct strbuf tmp_file = STRBUF_INIT; struct hashfile *f; + uint32_t *commit_positions = NULL; + off_t *offsets = NULL; + uint32_t i; struct bitmap_disk_header header; @@ -715,7 +788,26 @@ void bitmap_writer_finish(struct pack_idx_entry **index, dump_bitmap(f, writer.trees); dump_bitmap(f, writer.blobs); dump_bitmap(f, writer.tags); - write_selected_commits_v1(f, index, index_nr); + + if (options & BITMAP_OPT_LOOKUP_TABLE) + CALLOC_ARRAY(offsets, index_nr); + + ALLOC_ARRAY(commit_positions, writer.selected_nr); + + for (i = 0; i < writer.selected_nr; i++) { + struct bitmapped_commit *stored = &writer.selected[i]; + int commit_pos = oid_pos(&stored->commit->object.oid, index, index_nr, oid_access); + + if (commit_pos < 0) + BUG(_("trying to write commit not in index")); + + commit_positions[i] = commit_pos; + } + + write_selected_commits_v1(f, commit_positions, offsets); + + if (options & BITMAP_OPT_LOOKUP_TABLE) + write_lookup_table(f, commit_positions, offsets); if (options & BITMAP_OPT_HASH_CACHE) write_hash_cache(f, index, index_nr); @@ -730,4 +822,6 @@ void bitmap_writer_finish(struct pack_idx_entry **index, die_errno("unable to rename temporary bitmap file to '%s'", filename); strbuf_release(&tmp_file); + free(commit_positions); + free(offsets); } diff --git a/pack-bitmap.c b/pack-bitmap.c index ef580be9e3..9a208abc1f 100644 --- a/pack-bitmap.c +++ b/pack-bitmap.c @@ -84,6 +84,12 @@ struct bitmap_index { const unsigned char *checksum; /* + * If not NULL, this point into the commit table extension + * (within the memory mapped region `map`). + */ + unsigned char *table_lookup; + + /* * Extended index. * * When trying to perform bitmap operations with objects that are not @@ -186,6 +192,16 @@ static int load_bitmap_header(struct bitmap_index *index) index->hashes = (void *)(index_end - cache_size); index_end -= cache_size; } + + if (flags & BITMAP_OPT_LOOKUP_TABLE) { + size_t table_size = st_mult(ntohl(header->entry_count), + BITMAP_LOOKUP_TABLE_TRIPLET_WIDTH); + if (table_size > index_end - index->map - header_size) + return error(_("corrupted bitmap index file (too short to fit lookup table)")); + if (git_env_bool("GIT_TEST_READ_COMMIT_TABLE", 1)) + index->table_lookup = (void *)(index_end - table_size); + index_end -= table_size; + } } index->entry_count = ntohl(header->entry_count); @@ -212,9 +228,11 @@ static struct stored_bitmap *store_bitmap(struct bitmap_index *index, hash_pos = kh_put_oid_map(index->bitmaps, stored->oid, &ret); - /* a 0 return code means the insertion succeeded with no changes, - * because the SHA1 already existed on the map. this is bad, there - * shouldn't be duplicated commits in the index */ + /* + * A 0 return code means the insertion succeeded with no changes, + * because the SHA1 already existed on the map. This is bad, there + * shouldn't be duplicated commits in the index. 
+ */ if (ret == 0) { error(_("duplicate entry in bitmap index: '%s'"), oid_to_hex(oid)); return NULL; @@ -482,7 +500,7 @@ static int load_bitmap(struct bitmap_index *bitmap_git) !(bitmap_git->tags = read_bitmap_1(bitmap_git))) goto failed; - if (load_bitmap_entries_v1(bitmap_git) < 0) + if (!bitmap_git->table_lookup && load_bitmap_entries_v1(bitmap_git) < 0) goto failed; return 0; @@ -570,13 +588,256 @@ struct include_data { struct bitmap *seen; }; +struct bitmap_lookup_table_triplet { + uint32_t commit_pos; + uint64_t offset; + uint32_t xor_row; +}; + +struct bitmap_lookup_table_xor_item { + struct object_id oid; + uint64_t offset; +}; + +/* + * Given a `triplet` struct pointer and pointer `p`, this + * function reads the triplet beginning at `p` into the struct. + * Note that this function assumes that there is enough memory + * left for filling the `triplet` struct from `p`. + */ +static int bitmap_lookup_table_get_triplet_by_pointer(struct bitmap_lookup_table_triplet *triplet, + const unsigned char *p) +{ + if (!triplet) + return -1; + + triplet->commit_pos = get_be32(p); + p += sizeof(uint32_t); + triplet->offset = get_be64(p); + p += sizeof(uint64_t); + triplet->xor_row = get_be32(p); + return 0; +} + +/* + * This function gets the raw triplet from `row`'th row in the + * lookup table and fills that data to the `triplet`. + */ +static int bitmap_lookup_table_get_triplet(struct bitmap_index *bitmap_git, + uint32_t pos, + struct bitmap_lookup_table_triplet *triplet) +{ + unsigned char *p = NULL; + if (pos >= bitmap_git->entry_count) + return error(_("corrupt bitmap lookup table: triplet position out of index")); + + p = bitmap_git->table_lookup + st_mult(pos, BITMAP_LOOKUP_TABLE_TRIPLET_WIDTH); + + return bitmap_lookup_table_get_triplet_by_pointer(triplet, p); +} + +/* + * Searches for a matching triplet. `commit_pos` is a pointer + * to the wanted commit position value. `table_entry` points to + * a triplet in lookup table. The first 4 bytes of each + * triplet (pointed by `table_entry`) are compared with `*commit_pos`. + */ +static int triplet_cmp(const void *commit_pos, const void *table_entry) +{ + + uint32_t a = *(uint32_t *)commit_pos; + uint32_t b = get_be32(table_entry); + if (a > b) + return 1; + else if (a < b) + return -1; + + return 0; +} + +static uint32_t bitmap_bsearch_pos(struct bitmap_index *bitmap_git, + struct object_id *oid, + uint32_t *result) +{ + int found; + + if (bitmap_is_midx(bitmap_git)) + found = bsearch_midx(oid, bitmap_git->midx, result); + else + found = bsearch_pack(oid, bitmap_git->pack, result); + + return found; +} + +/* + * `bsearch_triplet_by_pos` function searches for the raw triplet + * having commit position same as `commit_pos` and fills `triplet` + * object from the raw triplet. Returns 1 on success and 0 on + * failure. 
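A standalone sketch of the search idea behind triplet_cmp() and bitmap_bsearch_triplet_by_pos(): each row is a fixed 16-byte, big-endian triplet and the table is sorted by the leading commit position, so plain bsearch() can compare the key against the first four bytes of every entry. The names below are illustrative and use only libc.

#include <stdint.h>
#include <stdlib.h>

#define TRIPLET_WIDTH 16 /* 4-byte commit_pos + 8-byte offset + 4-byte xor_row */

static uint32_t read_be32(const unsigned char *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

/* Compare a wanted commit position against the leading field of a triplet. */
static int cmp_triplet(const void *key, const void *entry)
{
	uint32_t want = *(const uint32_t *)key;
	uint32_t have = read_be32(entry);

	if (want > have)
		return 1;
	if (want < have)
		return -1;
	return 0;
}

static const unsigned char *find_triplet(const unsigned char *table,
					 size_t nr_entries, uint32_t commit_pos)
{
	return bsearch(&commit_pos, table, nr_entries,
		       TRIPLET_WIDTH, cmp_triplet);
}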
+ */ +static int bitmap_bsearch_triplet_by_pos(uint32_t commit_pos, + struct bitmap_index *bitmap_git, + struct bitmap_lookup_table_triplet *triplet) +{ + unsigned char *p = bsearch(&commit_pos, bitmap_git->table_lookup, bitmap_git->entry_count, + BITMAP_LOOKUP_TABLE_TRIPLET_WIDTH, triplet_cmp); + + if (!p) + return -1; + + return bitmap_lookup_table_get_triplet_by_pointer(triplet, p); +} + +static struct stored_bitmap *lazy_bitmap_for_commit(struct bitmap_index *bitmap_git, + struct commit *commit) +{ + uint32_t commit_pos, xor_row; + uint64_t offset; + int flags; + struct bitmap_lookup_table_triplet triplet; + struct object_id *oid = &commit->object.oid; + struct ewah_bitmap *bitmap; + struct stored_bitmap *xor_bitmap = NULL; + const int bitmap_header_size = 6; + static struct bitmap_lookup_table_xor_item *xor_items = NULL; + static size_t xor_items_nr = 0, xor_items_alloc = 0; + static int is_corrupt = 0; + int xor_flags; + khiter_t hash_pos; + struct bitmap_lookup_table_xor_item *xor_item; + + if (is_corrupt) + return NULL; + + if (!bitmap_bsearch_pos(bitmap_git, oid, &commit_pos)) + return NULL; + + if (bitmap_bsearch_triplet_by_pos(commit_pos, bitmap_git, &triplet) < 0) + return NULL; + + xor_items_nr = 0; + offset = triplet.offset; + xor_row = triplet.xor_row; + + while (xor_row != 0xffffffff) { + ALLOC_GROW(xor_items, xor_items_nr + 1, xor_items_alloc); + + if (xor_items_nr + 1 >= bitmap_git->entry_count) { + error(_("corrupt bitmap lookup table: xor chain exceed entry count")); + goto corrupt; + } + + if (bitmap_lookup_table_get_triplet(bitmap_git, xor_row, &triplet) < 0) + goto corrupt; + + xor_item = &xor_items[xor_items_nr]; + xor_item->offset = triplet.offset; + + if (nth_bitmap_object_oid(bitmap_git, &xor_item->oid, triplet.commit_pos) < 0) { + error(_("corrupt bitmap lookup table: commit index %u out of range"), + triplet.commit_pos); + goto corrupt; + } + + hash_pos = kh_get_oid_map(bitmap_git->bitmaps, xor_item->oid); + + /* + * If desired bitmap is already stored, we don't need + * to iterate further. Because we know that bitmaps + * that are needed to be parsed to parse this bitmap + * has already been stored. So, assign this stored bitmap + * to the xor_bitmap. + */ + if (hash_pos < kh_end(bitmap_git->bitmaps) && + (xor_bitmap = kh_value(bitmap_git->bitmaps, hash_pos))) + break; + xor_items_nr++; + xor_row = triplet.xor_row; + } + + while (xor_items_nr) { + xor_item = &xor_items[xor_items_nr - 1]; + bitmap_git->map_pos = xor_item->offset; + if (bitmap_git->map_size - bitmap_git->map_pos < bitmap_header_size) { + error(_("corrupt ewah bitmap: truncated header for bitmap of commit \"%s\""), + oid_to_hex(&xor_item->oid)); + goto corrupt; + } + + bitmap_git->map_pos += sizeof(uint32_t) + sizeof(uint8_t); + xor_flags = read_u8(bitmap_git->map, &bitmap_git->map_pos); + bitmap = read_bitmap_1(bitmap_git); + + if (!bitmap) + goto corrupt; + + xor_bitmap = store_bitmap(bitmap_git, bitmap, &xor_item->oid, xor_bitmap, xor_flags); + xor_items_nr--; + } + + bitmap_git->map_pos = offset; + if (bitmap_git->map_size - bitmap_git->map_pos < bitmap_header_size) { + error(_("corrupt ewah bitmap: truncated header for bitmap of commit \"%s\""), + oid_to_hex(oid)); + goto corrupt; + } + + /* + * Don't bother reading the commit's index position or its xor + * offset: + * + * - The commit's index position is irrelevant to us, since + * load_bitmap_entries_v1 only uses it to learn the object + * id which is used to compute the hashmap's key. 
We already + * have an object id, so no need to look it up again. + * + * - The xor_offset is unusable for us, since it specifies how + * many entries previous to ours we should look at. This + * makes sense when reading the bitmaps sequentially (as in + * load_bitmap_entries_v1()), since we can keep track of + * each bitmap as we read them. + * + * But it can't work for us, since the bitmap's don't have a + * fixed size. So we learn the position of the xor'd bitmap + * from the commit table (and resolve it to a bitmap in the + * above if-statement). + * + * Instead, we can skip ahead and immediately read the flags and + * ewah bitmap. + */ + bitmap_git->map_pos += sizeof(uint32_t) + sizeof(uint8_t); + flags = read_u8(bitmap_git->map, &bitmap_git->map_pos); + bitmap = read_bitmap_1(bitmap_git); + + if (!bitmap) + goto corrupt; + + return store_bitmap(bitmap_git, bitmap, oid, xor_bitmap, flags); + +corrupt: + free(xor_items); + is_corrupt = 1; + return NULL; +} + struct ewah_bitmap *bitmap_for_commit(struct bitmap_index *bitmap_git, struct commit *commit) { khiter_t hash_pos = kh_get_oid_map(bitmap_git->bitmaps, commit->object.oid); - if (hash_pos >= kh_end(bitmap_git->bitmaps)) - return NULL; + if (hash_pos >= kh_end(bitmap_git->bitmaps)) { + struct stored_bitmap *bitmap = NULL; + if (!bitmap_git->table_lookup) + return NULL; + + trace2_region_enter("pack-bitmap", "reading_lookup_table", the_repository); + /* NEEDSWORK: cache misses aren't recorded */ + bitmap = lazy_bitmap_for_commit(bitmap_git, commit); + trace2_region_leave("pack-bitmap", "reading_lookup_table", the_repository); + if (!bitmap) + return NULL; + return lookup_stored_bitmap(bitmap); + } return lookup_stored_bitmap(kh_value(bitmap_git->bitmaps, hash_pos)); } @@ -1712,8 +1973,10 @@ void test_bitmap_walk(struct rev_info *revs) if (revs->pending.nr != 1) die(_("you must specify exactly one commit to test")); - fprintf_ln(stderr, "Bitmap v%d test (%d entries loaded)", - bitmap_git->version, bitmap_git->entry_count); + fprintf_ln(stderr, "Bitmap v%d test (%d entries%s)", + bitmap_git->version, + bitmap_git->entry_count, + bitmap_git->table_lookup ? "" : " loaded"); root = revs->pending.objects[0].item; bm = bitmap_for_commit(bitmap_git, (struct commit *)root); @@ -1766,13 +2029,22 @@ void test_bitmap_walk(struct rev_info *revs) int test_bitmap_commits(struct repository *r) { - struct bitmap_index *bitmap_git = prepare_bitmap_git(r); struct object_id oid; MAYBE_UNUSED void *value; + struct bitmap_index *bitmap_git = prepare_bitmap_git(r); if (!bitmap_git) die(_("failed to load bitmap indexes")); + /* + * As this function is only used to print bitmap selected + * commits, we don't have to read the commit table. + */ + if (bitmap_git->table_lookup) { + if (load_bitmap_entries_v1(bitmap_git) < 0) + die(_("failed to load bitmap indexes")); + } + kh_foreach(bitmap_git->bitmaps, oid, value, { printf_ln("%s", oid_to_hex(&oid)); }); diff --git a/pack-bitmap.h b/pack-bitmap.h index f3a57ca065..f0180b5276 100644 --- a/pack-bitmap.h +++ b/pack-bitmap.h @@ -23,9 +23,19 @@ struct bitmap_disk_header { #define NEEDS_BITMAP (1u<<22) +/* + * The width in bytes of a single triplet in the lookup table + * extension: + * (commit_pos, offset, xor_row) + * + * whose fields ar 32-, 64-, 32- bits wide, respectively. 
+ */ +#define BITMAP_LOOKUP_TABLE_TRIPLET_WIDTH (16) + enum pack_bitmap_opts { - BITMAP_OPT_FULL_DAG = 1, - BITMAP_OPT_HASH_CACHE = 4, + BITMAP_OPT_FULL_DAG = 0x1, + BITMAP_OPT_HASH_CACHE = 0x4, + BITMAP_OPT_LOOKUP_TABLE = 0x10, }; enum pack_bitmap_flags { diff --git a/packfile.c b/packfile.c index bee8583119..c0d7dd93f4 100644 --- a/packfile.c +++ b/packfile.c @@ -2217,7 +2217,17 @@ static int add_promisor_object(const struct object_id *oid, void *set_) { struct oidset *set = set_; - struct object *obj = parse_object(the_repository, oid); + struct object *obj; + int we_parsed_object; + + obj = lookup_object(the_repository, oid); + if (obj && obj->parsed) { + we_parsed_object = 0; + } else { + we_parsed_object = 1; + obj = parse_object(the_repository, oid); + } + if (!obj) return 1; @@ -2239,7 +2249,8 @@ static int add_promisor_object(const struct object_id *oid, return 0; while (tree_entry_gently(&desc, &entry)) oidset_insert(set, &entry.oid); - free_tree_buffer(tree); + if (we_parsed_object) + free_tree_buffer(tree); } else if (obj->type == OBJ_COMMIT) { struct commit *commit = (struct commit *) obj; struct commit_list *parents = commit->parents; diff --git a/parse-options.c b/parse-options.c index edf55d3ef5..a1ec932f0f 100644 --- a/parse-options.c +++ b/parse-options.c @@ -324,6 +324,8 @@ static enum parse_opt_result parse_long_opt( const char *rest, *long_name = options->long_name; enum opt_parsed flags = OPT_LONG, opt_flags = OPT_LONG; + if (options->type == OPTION_SUBCOMMAND) + continue; if (!long_name) continue; @@ -332,7 +334,7 @@ again: rest = NULL; if (!rest) { /* abbreviated? */ - if (!(p->flags & PARSE_OPT_KEEP_UNKNOWN) && + if (!(p->flags & PARSE_OPT_KEEP_UNKNOWN_OPT) && !strncmp(long_name, arg, arg_end - arg)) { is_abbreviated: if (abbrev_option && @@ -419,6 +421,19 @@ static enum parse_opt_result parse_nodash_opt(struct parse_opt_ctx_t *p, return PARSE_OPT_ERROR; } +static enum parse_opt_result parse_subcommand(const char *arg, + const struct option *options) +{ + for (; options->type != OPTION_END; options++) + if (options->type == OPTION_SUBCOMMAND && + !strcmp(options->long_name, arg)) { + *(parse_opt_subcommand_fn **)options->value = options->subcommand_fn; + return PARSE_OPT_SUBCOMMAND; + } + + return PARSE_OPT_UNKNOWN; +} + static void check_typos(const char *arg, const struct option *options) { if (strlen(arg) < 3) @@ -442,6 +457,7 @@ static void check_typos(const char *arg, const struct option *options) static void parse_options_check(const struct option *opts) { char short_opts[128]; + void *subcommand_value = NULL; memset(short_opts, '\0', sizeof(short_opts)); for (; opts->type != OPTION_END; opts++) { @@ -489,6 +505,14 @@ static void parse_options_check(const struct option *opts) "Are you using parse_options_step() directly?\n" "That case is not supported yet."); break; + case OPTION_SUBCOMMAND: + if (!opts->value || !opts->subcommand_fn) + optbug(opts, "OPTION_SUBCOMMAND needs a value and a subcommand function"); + if (!subcommand_value) + subcommand_value = opts->value; + else if (subcommand_value != opts->value) + optbug(opts, "all OPTION_SUBCOMMANDs need the same value"); + break; default: ; /* ok. 
(usually accepts an argument) */ } @@ -499,6 +523,14 @@ static void parse_options_check(const struct option *opts) BUG_if_bug("invalid 'struct option'"); } +static int has_subcommands(const struct option *options) +{ + for (; options->type != OPTION_END; options++) + if (options->type == OPTION_SUBCOMMAND) + return 1; + return 0; +} + static void parse_options_start_1(struct parse_opt_ctx_t *ctx, int argc, const char **argv, const char *prefix, const struct option *options, @@ -515,7 +547,20 @@ static void parse_options_start_1(struct parse_opt_ctx_t *ctx, ctx->prefix = prefix; ctx->cpidx = ((flags & PARSE_OPT_KEEP_ARGV0) != 0); ctx->flags = flags; - if ((flags & PARSE_OPT_KEEP_UNKNOWN) && + ctx->has_subcommands = has_subcommands(options); + if (!ctx->has_subcommands && (flags & PARSE_OPT_SUBCOMMAND_OPTIONAL)) + BUG("Using PARSE_OPT_SUBCOMMAND_OPTIONAL without subcommands"); + if (ctx->has_subcommands) { + if (flags & PARSE_OPT_STOP_AT_NON_OPTION) + BUG("subcommands are incompatible with PARSE_OPT_STOP_AT_NON_OPTION"); + if (!(flags & PARSE_OPT_SUBCOMMAND_OPTIONAL)) { + if (flags & PARSE_OPT_KEEP_UNKNOWN_OPT) + BUG("subcommands are incompatible with PARSE_OPT_KEEP_UNKNOWN_OPT unless in combination with PARSE_OPT_SUBCOMMAND_OPTIONAL"); + if (flags & PARSE_OPT_KEEP_DASHDASH) + BUG("subcommands are incompatible with PARSE_OPT_KEEP_DASHDASH unless in combination with PARSE_OPT_SUBCOMMAND_OPTIONAL"); + } + } + if ((flags & PARSE_OPT_KEEP_UNKNOWN_OPT) && (flags & PARSE_OPT_STOP_AT_NON_OPTION) && !(flags & PARSE_OPT_ONE_SHOT)) BUG("STOP_AT_NON_OPTION and KEEP_UNKNOWN don't go together"); @@ -589,6 +634,7 @@ static int show_gitcomp(const struct option *opts, int show_all) int nr_noopts = 0; for (; opts->type != OPTION_END; opts++) { + const char *prefix = "--"; const char *suffix = ""; if (!opts->long_name) @@ -598,6 +644,9 @@ static int show_gitcomp(const struct option *opts, int show_all) continue; switch (opts->type) { + case OPTION_SUBCOMMAND: + prefix = ""; + break; case OPTION_GROUP: continue; case OPTION_STRING: @@ -620,7 +669,8 @@ static int show_gitcomp(const struct option *opts, int show_all) suffix = "="; if (starts_with(opts->long_name, "no-")) nr_noopts++; - printf(" --%s%s", opts->long_name, suffix); + printf("%s%s%s%s", opts == original_opts ? "" : " ", + prefix, opts->long_name, suffix); } show_negated_gitcomp(original_opts, show_all, -1); show_negated_gitcomp(original_opts, show_all, nr_noopts); @@ -743,10 +793,38 @@ enum parse_opt_result parse_options_step(struct parse_opt_ctx_t *ctx, if (*arg != '-' || !arg[1]) { if (parse_nodash_opt(ctx, arg, options) == 0) continue; - if (ctx->flags & PARSE_OPT_STOP_AT_NON_OPTION) - return PARSE_OPT_NON_OPTION; - ctx->out[ctx->cpidx++] = ctx->argv[0]; - continue; + if (!ctx->has_subcommands) { + if (ctx->flags & PARSE_OPT_STOP_AT_NON_OPTION) + return PARSE_OPT_NON_OPTION; + ctx->out[ctx->cpidx++] = ctx->argv[0]; + continue; + } + switch (parse_subcommand(arg, options)) { + case PARSE_OPT_SUBCOMMAND: + return PARSE_OPT_SUBCOMMAND; + case PARSE_OPT_UNKNOWN: + if (ctx->flags & PARSE_OPT_SUBCOMMAND_OPTIONAL) + /* + * arg is neither a short or long + * option nor a subcommand. Since + * this command has a default + * operation mode, we have to treat + * this arg and all remaining args + * as args meant to that default + * operation mode. + * So we are done parsing. 
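To show the intended calling convention, here is a usage sketch for a hypothetical "git foo" builtin (the foo names are invented); it follows the OPT_SUBCOMMAND() and parse_opt_subcommand_fn declarations added to parse-options.h further below:

static int cmd_foo_list(int argc, const char **argv, const char *prefix)
{
	/* ... "list" mode ... */
	return 0;
}

static int cmd_foo_clear(int argc, const char **argv, const char *prefix)
{
	/* ... "clear" mode ... */
	return 0;
}

int cmd_foo(int argc, const char **argv, const char *prefix)
{
	static const char * const builtin_foo_usage[] = {
		"git foo list",
		"git foo clear",
		NULL
	};
	parse_opt_subcommand_fn *fn = NULL;
	struct option options[] = {
		OPT_SUBCOMMAND("list", &fn, cmd_foo_list),
		OPT_SUBCOMMAND("clear", &fn, cmd_foo_clear),
		OPT_END()
	};

	/*
	 * parse_options() stores the chosen subcommand function in 'fn';
	 * without PARSE_OPT_SUBCOMMAND_OPTIONAL it errors out when no
	 * subcommand is given, so 'fn' is non-NULL here.
	 */
	argc = parse_options(argc, argv, prefix, options, builtin_foo_usage, 0);
	return fn(argc, argv, prefix);
}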
+ */ + return PARSE_OPT_DONE; + error(_("unknown subcommand: `%s'"), arg); + usage_with_options(usagestr, options); + case PARSE_OPT_COMPLETE: + case PARSE_OPT_HELP: + case PARSE_OPT_ERROR: + case PARSE_OPT_DONE: + case PARSE_OPT_NON_OPTION: + /* Impossible. */ + BUG("parse_subcommand() cannot return these"); + } } /* lone -h asks for help */ @@ -774,6 +852,7 @@ enum parse_opt_result parse_options_step(struct parse_opt_ctx_t *ctx, goto show_usage; goto unknown; case PARSE_OPT_NON_OPTION: + case PARSE_OPT_SUBCOMMAND: case PARSE_OPT_HELP: case PARSE_OPT_COMPLETE: BUG("parse_short_opt() cannot return these"); @@ -799,6 +878,7 @@ enum parse_opt_result parse_options_step(struct parse_opt_ctx_t *ctx, *(char *)ctx->argv[0] = '-'; goto unknown; case PARSE_OPT_NON_OPTION: + case PARSE_OPT_SUBCOMMAND: case PARSE_OPT_COMPLETE: case PARSE_OPT_HELP: BUG("parse_short_opt() cannot return these"); @@ -830,6 +910,7 @@ enum parse_opt_result parse_options_step(struct parse_opt_ctx_t *ctx, case PARSE_OPT_HELP: goto show_usage; case PARSE_OPT_NON_OPTION: + case PARSE_OPT_SUBCOMMAND: case PARSE_OPT_COMPLETE: BUG("parse_long_opt() cannot return these"); case PARSE_OPT_DONE: @@ -839,7 +920,19 @@ enum parse_opt_result parse_options_step(struct parse_opt_ctx_t *ctx, unknown: if (ctx->flags & PARSE_OPT_ONE_SHOT) break; - if (!(ctx->flags & PARSE_OPT_KEEP_UNKNOWN)) + if (ctx->has_subcommands && + (ctx->flags & PARSE_OPT_SUBCOMMAND_OPTIONAL) && + (ctx->flags & PARSE_OPT_KEEP_UNKNOWN_OPT)) { + /* + * Found an unknown option given to a command with + * subcommands that has a default operation mode: + * we treat this option and all remaining args as + * arguments meant to that default operation mode. + * So we are done parsing. + */ + return PARSE_OPT_DONE; + } + if (!(ctx->flags & PARSE_OPT_KEEP_UNKNOWN_OPT)) return PARSE_OPT_UNKNOWN; ctx->out[ctx->cpidx++] = ctx->argv[0]; ctx->opt = NULL; @@ -884,7 +977,14 @@ int parse_options(int argc, const char **argv, case PARSE_OPT_COMPLETE: exit(0); case PARSE_OPT_NON_OPTION: + case PARSE_OPT_SUBCOMMAND: + break; case PARSE_OPT_DONE: + if (ctx.has_subcommands && + !(flags & PARSE_OPT_SUBCOMMAND_OPTIONAL)) { + error(_("need a subcommand")); + usage_with_options(usagestr, options); + } break; case PARSE_OPT_UNKNOWN: if (ctx.argv[0][1] == '-') { @@ -1009,6 +1109,8 @@ static enum parse_opt_result usage_with_options_internal(struct parse_opt_ctx_t size_t pos; int pad; + if (opts->type == OPTION_SUBCOMMAND) + continue; if (opts->type == OPTION_GROUP) { fputc('\n', outfile); need_newline = 0; diff --git a/parse-options.h b/parse-options.h index 685fccac13..b6ef86e0d1 100644 --- a/parse-options.h +++ b/parse-options.h @@ -11,6 +11,7 @@ enum parse_opt_type { OPTION_GROUP, OPTION_NUMBER, OPTION_ALIAS, + OPTION_SUBCOMMAND, /* options with no arguments */ OPTION_BIT, OPTION_NEGBIT, @@ -30,10 +31,11 @@ enum parse_opt_flags { PARSE_OPT_KEEP_DASHDASH = 1 << 0, PARSE_OPT_STOP_AT_NON_OPTION = 1 << 1, PARSE_OPT_KEEP_ARGV0 = 1 << 2, - PARSE_OPT_KEEP_UNKNOWN = 1 << 3, + PARSE_OPT_KEEP_UNKNOWN_OPT = 1 << 3, PARSE_OPT_NO_INTERNAL_HELP = 1 << 4, PARSE_OPT_ONE_SHOT = 1 << 5, PARSE_OPT_SHELL_EVAL = 1 << 6, + PARSE_OPT_SUBCOMMAND_OPTIONAL = 1 << 7, }; enum parse_opt_option_flags { @@ -56,6 +58,7 @@ enum parse_opt_result { PARSE_OPT_ERROR = -1, /* must be the same as error() */ PARSE_OPT_DONE = 0, /* fixed so that "return 0" works */ PARSE_OPT_NON_OPTION, + PARSE_OPT_SUBCOMMAND, PARSE_OPT_UNKNOWN }; @@ -67,6 +70,9 @@ typedef enum parse_opt_result parse_opt_ll_cb(struct parse_opt_ctx_t *ctx, const struct 
option *opt, const char *arg, int unset); +typedef int parse_opt_subcommand_fn(int argc, const char **argv, + const char *prefix); + /* * `type`:: * holds the type of the option, you must have an OPTION_END last in your @@ -76,7 +82,8 @@ typedef enum parse_opt_result parse_opt_ll_cb(struct parse_opt_ctx_t *ctx, * the character to use as a short option name, '\0' if none. * * `long_name`:: - * the long option name, without the leading dashes, NULL if none. + * the long option (without the leading dashes) or subcommand name, + * NULL if none. * * `value`:: * stores pointers to the values to be filled. @@ -93,7 +100,7 @@ typedef enum parse_opt_result parse_opt_ll_cb(struct parse_opt_ctx_t *ctx, * * `help`:: * the short help associated to what the option does. - * Must never be NULL (except for OPTION_END). + * Must never be NULL (except for OPTION_END and OPTION_SUBCOMMAND). * OPTION_GROUP uses this pointer to store the group header. * Should be wrapped by N_() for translation. * @@ -109,7 +116,8 @@ typedef enum parse_opt_result parse_opt_ll_cb(struct parse_opt_ctx_t *ctx, * is last on the command line. If the option is * not last it will require an argument. * Should not be used with PARSE_OPT_OPTARG. - * PARSE_OPT_NODASH: this option doesn't start with a dash. + * PARSE_OPT_NODASH: this option doesn't start with a dash; can only be a + * short option and can't accept arguments. * PARSE_OPT_LITERAL_ARGHELP: says that argh shouldn't be enclosed in brackets * (i.e. '<argh>') in the help message. * Useful for options with multiple parameters. @@ -130,6 +138,9 @@ typedef enum parse_opt_result parse_opt_ll_cb(struct parse_opt_ctx_t *ctx, * `ll_callback`:: * pointer to the callback to use for OPTION_LOWLEVEL_CALLBACK * + * `subcommand_fn`:: + * pointer to a function to use for OPTION_SUBCOMMAND. + * It will be put in value when the subcommand is given on the command line. */ struct option { enum parse_opt_type type; @@ -144,6 +155,7 @@ struct option { intptr_t defval; parse_opt_ll_cb *ll_callback; intptr_t extra; + parse_opt_subcommand_fn *subcommand_fn; }; #define OPT_BIT_F(s, l, v, h, b, f) { OPTION_BIT, (s), (l), (v), NULL, (h), \ @@ -205,6 +217,14 @@ struct option { #define OPT_ALIAS(s, l, source_long_name) \ { OPTION_ALIAS, (s), (l), (source_long_name) } +#define OPT_SUBCOMMAND_F(l, v, fn, f) { \ + .type = OPTION_SUBCOMMAND, \ + .long_name = (l), \ + .value = (v), \ + .flags = (f), \ + .subcommand_fn = (fn) } +#define OPT_SUBCOMMAND(l, v, fn) OPT_SUBCOMMAND_F((l), (v), (fn), 0) + /* * parse_options() will filter out the processed options and leave the * non-option arguments in argv[]. argv0 is assumed program name and @@ -294,6 +314,7 @@ struct parse_opt_ctx_t { int argc, cpidx, total; const char *opt; enum parse_opt_flags flags; + unsigned has_subcommands; const char *prefix; const char **alias_groups; /* must be in groups of 3 elements! 
*/ struct option *updated_options; diff --git a/pkt-line.c b/pkt-line.c index 8e43c2def4..ce4e73b683 100644 --- a/pkt-line.c +++ b/pkt-line.c @@ -309,7 +309,8 @@ int write_packetized_from_fd_no_flush(int fd_in, int fd_out) return err; } -int write_packetized_from_buf_no_flush(const char *src_in, size_t len, int fd_out) +int write_packetized_from_buf_no_flush_count(const char *src_in, size_t len, + int fd_out, int *packet_counter) { int err = 0; size_t bytes_written = 0; @@ -324,6 +325,8 @@ int write_packetized_from_buf_no_flush(const char *src_in, size_t len, int fd_ou break; err = packet_write_gently(fd_out, src_in + bytes_written, bytes_to_write); bytes_written += bytes_to_write; + if (packet_counter) + (*packet_counter)++; } return err; } diff --git a/pkt-line.h b/pkt-line.h index 1f623de60a..79c538b99e 100644 --- a/pkt-line.h +++ b/pkt-line.h @@ -32,7 +32,13 @@ void packet_buf_write(struct strbuf *buf, const char *fmt, ...) __attribute__((f int packet_flush_gently(int fd); int packet_write_fmt_gently(int fd, const char *fmt, ...) __attribute__((format (printf, 2, 3))); int write_packetized_from_fd_no_flush(int fd_in, int fd_out); -int write_packetized_from_buf_no_flush(const char *src_in, size_t len, int fd_out); +int write_packetized_from_buf_no_flush_count(const char *src_in, size_t len, + int fd_out, int *packet_counter); +static inline int write_packetized_from_buf_no_flush(const char *src_in, + size_t len, int fd_out) +{ + return write_packetized_from_buf_no_flush_count(src_in, len, fd_out, NULL); +} /* * Stdio versions of packet_write functions. When mixing these with fd diff --git a/ppc/sha1.c b/ppc/sha1.c deleted file mode 100644 index 1b705cee1f..0000000000 --- a/ppc/sha1.c +++ /dev/null @@ -1,72 +0,0 @@ -/* - * SHA-1 implementation. - * - * Copyright (C) 2005 Paul Mackerras <paulus@samba.org> - * - * This version assumes we are running on a big-endian machine. - * It calls an external sha1_core() to process blocks of 64 bytes. - */ -#include <stdio.h> -#include <string.h> -#include "sha1.h" - -void ppc_sha1_core(uint32_t *hash, const unsigned char *p, - unsigned int nblocks); - -int ppc_SHA1_Init(ppc_SHA_CTX *c) -{ - c->hash[0] = 0x67452301; - c->hash[1] = 0xEFCDAB89; - c->hash[2] = 0x98BADCFE; - c->hash[3] = 0x10325476; - c->hash[4] = 0xC3D2E1F0; - c->len = 0; - c->cnt = 0; - return 0; -} - -int ppc_SHA1_Update(ppc_SHA_CTX *c, const void *ptr, unsigned long n) -{ - unsigned long nb; - const unsigned char *p = ptr; - - c->len += (uint64_t) n << 3; - while (n != 0) { - if (c->cnt || n < 64) { - nb = 64 - c->cnt; - if (nb > n) - nb = n; - memcpy(&c->buf.b[c->cnt], p, nb); - if ((c->cnt += nb) == 64) { - ppc_sha1_core(c->hash, c->buf.b, 1); - c->cnt = 0; - } - } else { - nb = n >> 6; - ppc_sha1_core(c->hash, p, nb); - nb <<= 6; - } - n -= nb; - p += nb; - } - return 0; -} - -int ppc_SHA1_Final(unsigned char *hash, ppc_SHA_CTX *c) -{ - unsigned int cnt = c->cnt; - - c->buf.b[cnt++] = 0x80; - if (cnt > 56) { - if (cnt < 64) - memset(&c->buf.b[cnt], 0, 64 - cnt); - ppc_sha1_core(c->hash, c->buf.b, 1); - cnt = 0; - } - if (cnt < 56) - memset(&c->buf.b[cnt], 0, 56 - cnt); - c->buf.l[7] = c->len; - ppc_sha1_core(c->hash, c->buf.b, 1); - memcpy(hash, c->hash, 20); - return 0; -} diff --git a/ppc/sha1.h b/ppc/sha1.h deleted file mode 100644 index 9b24b32615..0000000000 --- a/ppc/sha1.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * SHA-1 implementation. 
- * - * Copyright (C) 2005 Paul Mackerras <paulus@samba.org> - */ -#include <stdint.h> - -typedef struct { - uint32_t hash[5]; - uint32_t cnt; - uint64_t len; - union { - unsigned char b[64]; - uint64_t l[8]; - } buf; -} ppc_SHA_CTX; - -int ppc_SHA1_Init(ppc_SHA_CTX *c); -int ppc_SHA1_Update(ppc_SHA_CTX *c, const void *p, unsigned long n); -int ppc_SHA1_Final(unsigned char *hash, ppc_SHA_CTX *c); - -#define platform_SHA_CTX ppc_SHA_CTX -#define platform_SHA1_Init ppc_SHA1_Init -#define platform_SHA1_Update ppc_SHA1_Update -#define platform_SHA1_Final ppc_SHA1_Final diff --git a/ppc/sha1ppc.S b/ppc/sha1ppc.S deleted file mode 100644 index 1711eef6e7..0000000000 --- a/ppc/sha1ppc.S +++ /dev/null @@ -1,224 +0,0 @@ -/* - * SHA-1 implementation for PowerPC. - * - * Copyright (C) 2005 Paul Mackerras <paulus@samba.org> - */ - -/* - * PowerPC calling convention: - * %r0 - volatile temp - * %r1 - stack pointer. - * %r2 - reserved - * %r3-%r12 - Incoming arguments & return values; volatile. - * %r13-%r31 - Callee-save registers - * %lr - Return address, volatile - * %ctr - volatile - * - * Register usage in this routine: - * %r0 - temp - * %r3 - argument (pointer to 5 words of SHA state) - * %r4 - argument (pointer to data to hash) - * %r5 - Constant K in SHA round (initially number of blocks to hash) - * %r6-%r10 - Working copies of SHA variables A..E (actually E..A order) - * %r11-%r26 - Data being hashed W[]. - * %r27-%r31 - Previous copies of A..E, for final add back. - * %ctr - loop count - */ - - -/* - * We roll the registers for A, B, C, D, E around on each - * iteration; E on iteration t is D on iteration t+1, and so on. - * We use registers 6 - 10 for this. (Registers 27 - 31 hold - * the previous values.) - */ -#define RA(t) (((t)+4)%5+6) -#define RB(t) (((t)+3)%5+6) -#define RC(t) (((t)+2)%5+6) -#define RD(t) (((t)+1)%5+6) -#define RE(t) (((t)+0)%5+6) - -/* We use registers 11 - 26 for the W values */ -#define W(t) ((t)%16+11) - -/* Register 5 is used for the constant k */ - -/* - * The basic SHA-1 round function is: - * E += ROTL(A,5) + F(B,C,D) + W[i] + K; B = ROTL(B,30) - * Then the variables are renamed: (A,B,C,D,E) = (E,A,B,C,D). - * - * Every 20 rounds, the function F() and the constant K changes: - * - 20 rounds of f0(b,c,d) = "bit wise b ? c : d" = (^b & d) + (b & c) - * - 20 rounds of f1(b,c,d) = b^c^d = (b^d)^c - * - 20 rounds of f2(b,c,d) = majority(b,c,d) = (b&d) + ((b^d)&c) - * - 20 more rounds of f1(b,c,d) - * - * These are all scheduled for near-optimal performance on a G4. - * The G4 is a 3-issue out-of-order machine with 3 ALUs, but it can only - * *consider* starting the oldest 3 instructions per cycle. So to get - * maximum performance out of it, you have to treat it as an in-order - * machine. Which means interleaving the computation round t with the - * computation of W[t+4]. - * - * The first 16 rounds use W values loaded directly from memory, while the - * remaining 64 use values computed from those first 16. We preload - * 4 values before starting, so there are three kinds of rounds: - * - The first 12 (all f0) also load the W values from memory. - * - The next 64 compute W(i+4) in parallel. 8*f0, 20*f1, 20*f2, 16*f1. - * - The last 4 (all f1) do not do anything with W. - * - * Therefore, we have 6 different round functions: - * STEPD0_LOAD(t,s) - Perform round t and load W(s). s < 16 - * STEPD0_UPDATE(t,s) - Perform round t and compute W(s). s >= 16. - * STEPD1_UPDATE(t,s) - * STEPD2_UPDATE(t,s) - * STEPD1(t) - Perform round t with no load or update. 
- * - * The G5 is more fully out-of-order, and can find the parallelism - * by itself. The big limit is that it has a 2-cycle ALU latency, so - * even though it's 2-way, the code has to be scheduled as if it's - * 4-way, which can be a limit. To help it, we try to schedule the - * read of RA(t) as late as possible so it doesn't stall waiting for - * the previous round's RE(t-1), and we try to rotate RB(t) as early - * as possible while reading RC(t) (= RB(t-1)) as late as possible. - */ - -/* the initial loads. */ -#define LOADW(s) \ - lwz W(s),(s)*4(%r4) - -/* - * Perform a step with F0, and load W(s). Uses W(s) as a temporary - * before loading it. - * This is actually 10 instructions, which is an awkward fit. - * It can execute grouped as listed, or delayed one instruction. - * (If delayed two instructions, there is a stall before the start of the - * second line.) Thus, two iterations take 7 cycles, 3.5 cycles per round. - */ -#define STEPD0_LOAD(t,s) \ -add RE(t),RE(t),W(t); andc %r0,RD(t),RB(t); and W(s),RC(t),RB(t); \ -add RE(t),RE(t),%r0; rotlwi %r0,RA(t),5; rotlwi RB(t),RB(t),30; \ -add RE(t),RE(t),W(s); add %r0,%r0,%r5; lwz W(s),(s)*4(%r4); \ -add RE(t),RE(t),%r0 - -/* - * This is likewise awkward, 13 instructions. However, it can also - * execute starting with 2 out of 3 possible moduli, so it does 2 rounds - * in 9 cycles, 4.5 cycles/round. - */ -#define STEPD0_UPDATE(t,s,loadk...) \ -add RE(t),RE(t),W(t); andc %r0,RD(t),RB(t); xor W(s),W((s)-16),W((s)-3); \ -add RE(t),RE(t),%r0; and %r0,RC(t),RB(t); xor W(s),W(s),W((s)-8); \ -add RE(t),RE(t),%r0; rotlwi %r0,RA(t),5; xor W(s),W(s),W((s)-14); \ -add RE(t),RE(t),%r5; loadk; rotlwi RB(t),RB(t),30; rotlwi W(s),W(s),1; \ -add RE(t),RE(t),%r0 - -/* Nicely optimal. Conveniently, also the most common. */ -#define STEPD1_UPDATE(t,s,loadk...) \ -add RE(t),RE(t),W(t); xor %r0,RD(t),RB(t); xor W(s),W((s)-16),W((s)-3); \ -add RE(t),RE(t),%r5; loadk; xor %r0,%r0,RC(t); xor W(s),W(s),W((s)-8); \ -add RE(t),RE(t),%r0; rotlwi %r0,RA(t),5; xor W(s),W(s),W((s)-14); \ -add RE(t),RE(t),%r0; rotlwi RB(t),RB(t),30; rotlwi W(s),W(s),1 - -/* - * The naked version, no UPDATE, for the last 4 rounds. 3 cycles per. - * We could use W(s) as a temp register, but we don't need it. - */ -#define STEPD1(t) \ - add RE(t),RE(t),W(t); xor %r0,RD(t),RB(t); \ -rotlwi RB(t),RB(t),30; add RE(t),RE(t),%r5; xor %r0,%r0,RC(t); \ -add RE(t),RE(t),%r0; rotlwi %r0,RA(t),5; /* spare slot */ \ -add RE(t),RE(t),%r0 - -/* - * 14 instructions, 5 cycles per. The majority function is a bit - * awkward to compute. This can execute with a 1-instruction delay, - * but it causes a 2-instruction delay, which triggers a stall. - */ -#define STEPD2_UPDATE(t,s,loadk...) \ -add RE(t),RE(t),W(t); and %r0,RD(t),RB(t); xor W(s),W((s)-16),W((s)-3); \ -add RE(t),RE(t),%r0; xor %r0,RD(t),RB(t); xor W(s),W(s),W((s)-8); \ -add RE(t),RE(t),%r5; loadk; and %r0,%r0,RC(t); xor W(s),W(s),W((s)-14); \ -add RE(t),RE(t),%r0; rotlwi %r0,RA(t),5; rotlwi W(s),W(s),1; \ -add RE(t),RE(t),%r0; rotlwi RB(t),RB(t),30 - -#define STEP0_LOAD4(t,s) \ - STEPD0_LOAD(t,s); \ - STEPD0_LOAD((t+1),(s)+1); \ - STEPD0_LOAD((t)+2,(s)+2); \ - STEPD0_LOAD((t)+3,(s)+3) - -#define STEPUP4(fn, t, s, loadk...) \ - STEP##fn##_UPDATE(t,s,); \ - STEP##fn##_UPDATE((t)+1,(s)+1,); \ - STEP##fn##_UPDATE((t)+2,(s)+2,); \ - STEP##fn##_UPDATE((t)+3,(s)+3,loadk) - -#define STEPUP20(fn, t, s, loadk...) 
\ - STEPUP4(fn, t, s,); \ - STEPUP4(fn, (t)+4, (s)+4,); \ - STEPUP4(fn, (t)+8, (s)+8,); \ - STEPUP4(fn, (t)+12, (s)+12,); \ - STEPUP4(fn, (t)+16, (s)+16, loadk) - - .globl ppc_sha1_core -ppc_sha1_core: - stwu %r1,-80(%r1) - stmw %r13,4(%r1) - - /* Load up A - E */ - lmw %r27,0(%r3) - - mtctr %r5 - -1: - LOADW(0) - lis %r5,0x5a82 - mr RE(0),%r31 - LOADW(1) - mr RD(0),%r30 - mr RC(0),%r29 - LOADW(2) - ori %r5,%r5,0x7999 /* K0-19 */ - mr RB(0),%r28 - LOADW(3) - mr RA(0),%r27 - - STEP0_LOAD4(0, 4) - STEP0_LOAD4(4, 8) - STEP0_LOAD4(8, 12) - STEPUP4(D0, 12, 16,) - STEPUP4(D0, 16, 20, lis %r5,0x6ed9) - - ori %r5,%r5,0xeba1 /* K20-39 */ - STEPUP20(D1, 20, 24, lis %r5,0x8f1b) - - ori %r5,%r5,0xbcdc /* K40-59 */ - STEPUP20(D2, 40, 44, lis %r5,0xca62) - - ori %r5,%r5,0xc1d6 /* K60-79 */ - STEPUP4(D1, 60, 64,) - STEPUP4(D1, 64, 68,) - STEPUP4(D1, 68, 72,) - STEPUP4(D1, 72, 76,) - addi %r4,%r4,64 - STEPD1(76) - STEPD1(77) - STEPD1(78) - STEPD1(79) - - /* Add results to original values */ - add %r31,%r31,RE(0) - add %r30,%r30,RD(0) - add %r29,%r29,RC(0) - add %r28,%r28,RB(0) - add %r27,%r27,RA(0) - - bdnz 1b - - /* Save final hash, restore registers, and return */ - stmw %r27,0(%r3) - lmw %r13,4(%r1) - addi %r1,%r1,80 - blr diff --git a/preload-index.c b/preload-index.c index e5529a5863..100f7a374d 100644 --- a/preload-index.c +++ b/preload-index.c @@ -151,6 +151,12 @@ void preload_index(struct index_state *index, } stop_progress(&pd.progress); + if (pathspec) { + /* earlier we made deep copies for each thread to work with */ + for (i = 0; i < threads; i++) + clear_pathspec(&data[i].pathspec); + } + trace_performance_leave("preload index"); trace2_data_intmax("index", NULL, "preload/sum_lstat", t2_sum_lstat); @@ -478,6 +478,16 @@ end: } } +static int use_in_body_from(const struct pretty_print_context *pp, + const struct ident_split *ident) +{ + if (pp->rev && pp->rev->force_in_body_from) + return 1; + if (ident_cmp(pp->from_ident, ident)) + return 1; + return 0; +} + void pp_user_info(struct pretty_print_context *pp, const char *what, struct strbuf *sb, const char *line, const char *encoding) @@ -504,7 +514,7 @@ void pp_user_info(struct pretty_print_context *pp, map_user(pp->mailmap, &mailbuf, &maillen, &namebuf, &namelen); if (cmit_fmt_is_mail(pp->fmt)) { - if (pp->from_ident && ident_cmp(pp->from_ident, &ident)) { + if (pp->from_ident && use_in_body_from(pp, &ident)) { struct strbuf buf = STRBUF_INIT; strbuf_addstr(&buf, "From: "); diff --git a/promisor-remote.c b/promisor-remote.c index 5b33f88bca..68f46f5ec7 100644 --- a/promisor-remote.c +++ b/promisor-remote.c @@ -146,7 +146,7 @@ static void promisor_remote_init(struct repository *r) if (r->promisor_remote_config) return; config = r->promisor_remote_config = - xcalloc(sizeof(*r->promisor_remote_config), 1); + xcalloc(1, sizeof(*r->promisor_remote_config)); config->promisors_tail = &config->promisors; repo_config(r, promisor_remote_config, config); diff --git a/range-diff.c b/range-diff.c index 462710ffe4..8b7d81adc1 100644 --- a/range-diff.c +++ b/range-diff.c @@ -57,9 +57,9 @@ static int read_patches(const char *range, struct string_list *list, "--pretty=medium", "--notes", NULL); + strvec_push(&cp.args, range); if (other_arg) strvec_pushv(&cp.args, other_arg->v); - strvec_push(&cp.args, range); cp.out = -1; cp.no_stdin = 1; cp.git_cmd = 1; @@ -20,6 +20,7 @@ #include "repository.h" #include "sigchain.h" #include "date.h" +#include "commit.h" /* * List of all available backends @@ -56,6 +57,88 @@ static unsigned char refname_disposition[256] = { 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 4, 4 }; +struct ref_namespace_info ref_namespace[] = { + [NAMESPACE_HEAD] = { + .ref = "HEAD", + .decoration = DECORATION_REF_HEAD, + .exact = 1, + }, + [NAMESPACE_BRANCHES] = { + .ref = "refs/heads/", + .decoration = DECORATION_REF_LOCAL, + }, + [NAMESPACE_TAGS] = { + .ref = "refs/tags/", + .decoration = DECORATION_REF_TAG, + }, + [NAMESPACE_REMOTE_REFS] = { + /* + * The default refspec for new remotes copies refs from + * refs/heads/ on the remote into refs/remotes/<remote>/. + * As such, "refs/remotes/" has special handling. + */ + .ref = "refs/remotes/", + .decoration = DECORATION_REF_REMOTE, + }, + [NAMESPACE_STASH] = { + /* + * The single ref "refs/stash" stores the latest stash. + * Older stashes can be found in the reflog. + */ + .ref = "refs/stash", + .exact = 1, + .decoration = DECORATION_REF_STASH, + }, + [NAMESPACE_REPLACE] = { + /* + * This namespace allows Git to act as if one object ID + * points to the content of another. Unlike the other + * ref namespaces, this one can be changed by the + * GIT_REPLACE_REF_BASE environment variable. This + * .namespace value will be overwritten in setup_git_env(). + */ + .ref = "refs/replace/", + .decoration = DECORATION_GRAFTED, + }, + [NAMESPACE_NOTES] = { + /* + * The refs/notes/commit ref points to the tip of a + * parallel commit history that adds metadata to commits + * in the normal history. This ref can be overwritten + * by the core.notesRef config variable or the + * GIT_NOTES_REFS environment variable. + */ + .ref = "refs/notes/commit", + .exact = 1, + }, + [NAMESPACE_PREFETCH] = { + /* + * Prefetch refs are written by the background 'fetch' + * maintenance task. It allows faster foreground fetches + * by advertising these previously-downloaded tips without + * updating refs/remotes/ without user intervention. + */ + .ref = "refs/prefetch/", + }, + [NAMESPACE_REWRITTEN] = { + /* + * Rewritten refs are used by the 'label' command in the + * sequencer. These are particularly useful during an + * interactive rebase that uses the 'merge' command. + */ + .ref = "refs/rewritten/", + }, +}; + +void update_ref_namespace(enum ref_namespace namespace, char *ref) +{ + struct ref_namespace_info *info = &ref_namespace[namespace]; + if (info->ref_updated) + free(info->ref); + info->ref = ref; + info->ref_updated = 1; +} + /* * Try to read one refname component from the front of refname. * Return the length of the component found, or -1 if the component is @@ -456,11 +539,16 @@ void normalize_glob_ref(struct string_list_item *item, const char *prefix, if (*pattern == '/') BUG("pattern must not start with '/'"); - if (prefix) { + if (prefix) strbuf_addstr(&normalized_pattern, prefix); - } - else if (!starts_with(pattern, "refs/")) + else if (!starts_with(pattern, "refs/") && + strcmp(pattern, "HEAD")) strbuf_addstr(&normalized_pattern, "refs/"); + /* + * NEEDSWORK: Special case other symrefs such as REBASE_HEAD, + * MERGE_HEAD, etc. 
+ */ + strbuf_addstr(&normalized_pattern, pattern); strbuf_strip_suffix(&normalized_pattern, "/"); @@ -1529,6 +1617,7 @@ int refs_for_each_fullref_in(struct ref_store *refs, const char *prefix, int for_each_replace_ref(struct repository *r, each_repo_ref_fn fn, void *cb_data) { + const char *git_replace_ref_base = ref_namespace[NAMESPACE_REPLACE].ref; return do_for_each_repo_ref(r, git_replace_ref_base, fn, strlen(git_replace_ref_base), DO_FOR_EACH_INCLUDE_BROKEN, cb_data); @@ -2,6 +2,7 @@ #define REFS_H #include "cache.h" +#include "commit.h" struct object_id; struct ref_store; @@ -930,4 +931,49 @@ struct ref_store *get_main_ref_store(struct repository *r); struct ref_store *get_submodule_ref_store(const char *submodule); struct ref_store *get_worktree_ref_store(const struct worktree *wt); +/* + * Some of the names specified by refs have special meaning to Git. + * Organize these namespaces in a comon 'ref_namespace' array for + * reference from multiple places in the codebase. + */ + +struct ref_namespace_info { + char *ref; + enum decoration_type decoration; + + /* + * If 'exact' is true, then we must match the 'ref' exactly. + * Otherwise, use a prefix match. + * + * 'ref_updated' is for internal use. It represents whether the + * 'ref' value was replaced from its original literal version. + */ + unsigned exact:1, + ref_updated:1; +}; + +enum ref_namespace { + NAMESPACE_HEAD, + NAMESPACE_BRANCHES, + NAMESPACE_TAGS, + NAMESPACE_REMOTE_REFS, + NAMESPACE_STASH, + NAMESPACE_REPLACE, + NAMESPACE_NOTES, + NAMESPACE_PREFETCH, + NAMESPACE_REWRITTEN, + + /* Must be last */ + NAMESPACE__COUNT +}; + +/* See refs.c for the contents of this array. */ +extern struct ref_namespace_info ref_namespace[NAMESPACE__COUNT]; + +/* + * Some ref namespaces can be modified by config values or environment + * variables. Modify a namespace as specified by its ref_namespace key. 
+ */ +void update_ref_namespace(enum ref_namespace namespace, char *ref); + #endif /* REFS_H */ diff --git a/reftable/reader.c b/reftable/reader.c index 54b4025105..b4db23ce18 100644 --- a/reftable/reader.c +++ b/reftable/reader.c @@ -443,7 +443,7 @@ static int reader_start(struct reftable_reader *r, struct table_iter *ti, return reader_table_iter_at(r, ti, off, typ); } -static int reader_seek_linear(struct reftable_reader *r, struct table_iter *ti, +static int reader_seek_linear(struct table_iter *ti, struct reftable_record *want) { struct reftable_record rec = @@ -510,7 +510,7 @@ static int reader_seek_indexed(struct reftable_reader *r, if (err < 0) goto done; - err = reader_seek_linear(r, &index_iter, &want_index); + err = reader_seek_linear(&index_iter, &want_index); while (1) { err = table_iter_next(&index_iter, &index_result); table_iter_block_done(&index_iter); @@ -570,7 +570,7 @@ static int reader_seek_internal(struct reftable_reader *r, err = reader_start(r, &ti, reftable_record_type(rec), 0); if (err < 0) return err; - err = reader_seek_linear(r, &ti, rec); + err = reader_seek_linear(&ti, rec); if (err < 0) return err; else { diff --git a/remote-curl.c b/remote-curl.c index b8758757ec..72dfb8fb86 100644 --- a/remote-curl.c +++ b/remote-curl.c @@ -1286,6 +1286,29 @@ static void parse_fetch(struct strbuf *buf) strbuf_reset(buf); } +static void parse_get(const char *arg) +{ + struct strbuf url = STRBUF_INIT; + struct strbuf path = STRBUF_INIT; + const char *space; + + space = strchr(arg, ' '); + + if (!space) + die(_("protocol error: expected '<url> <path>', missing space")); + + strbuf_add(&url, arg, space - arg); + strbuf_addstr(&path, space + 1); + + if (http_get_file(url.buf, path.buf, NULL)) + die(_("failed to download file at URL '%s'"), url.buf); + + strbuf_release(&url); + strbuf_release(&path); + printf("\n"); + fflush(stdout); +} + static int push_dav(int nr_spec, const char **specs) { struct child_process child = CHILD_PROCESS_INIT; @@ -1564,9 +1587,14 @@ int cmd_main(int argc, const char **argv) printf("unsupported\n"); fflush(stdout); + } else if (skip_prefix(buf.buf, "get ", &arg)) { + parse_get(arg); + fflush(stdout); + } else if (!strcmp(buf.buf, "capabilities")) { printf("stateless-connect\n"); printf("fetch\n"); + printf("get\n"); printf("option\n"); printf("push\n"); printf("check-connectivity\n"); diff --git a/repository.h b/repository.h index 797f471cce..24316ac944 100644 --- a/repository.h +++ b/repository.h @@ -1,6 +1,7 @@ #ifndef REPOSITORY_H #define REPOSITORY_H +#include "git-compat-util.h" #include "path.h" struct config_set; @@ -186,6 +187,7 @@ void repo_set_gitdir(struct repository *repo, const char *root, void repo_set_worktree(struct repository *repo, const char *path); void repo_set_hash_algo(struct repository *repo, int algo); void initialize_the_repository(void); +RESULT_MUST_BE_USED int repo_init(struct repository *r, const char *gitdir, const char *worktree); /* @@ -197,6 +199,7 @@ int repo_init(struct repository *r, const char *gitdir, const char *worktree); * Return 0 upon success and a non-zero value upon failure. 
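As a brief illustration of what the RESULT_MUST_BE_USED annotation enforces (the paths below are made up, and the annotation is assumed to expand to a warn_unused_result-style attribute), callers now have to check the return value:

	struct repository subrepo;

	if (repo_init(&subrepo, "/tmp/demo/.git", "/tmp/demo"))
		die(_("failed to initialize repository"));
	/* ... use 'subrepo' ... */
	repo_clear(&subrepo);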
*/ struct object_id; +RESULT_MUST_BE_USED int repo_submodule_init(struct repository *subrepo, struct repository *superproject, const char *path, diff --git a/revision.c b/revision.c index 5451a698ec..d5f4463cb6 100644 --- a/revision.c +++ b/revision.c @@ -373,18 +373,10 @@ static struct object *get_reference(struct rev_info *revs, const char *name, unsigned int flags) { struct object *object; - struct commit *commit; - /* - * If the repository has commit graphs, we try to opportunistically - * look up the object ID in those graphs. Like this, we can avoid - * parsing commit data from disk. - */ - commit = lookup_commit_in_graph(revs->repo, oid); - if (commit) - object = &commit->object; - else - object = parse_object(revs->repo, oid); + object = parse_object_with_flags(revs->repo, oid, + revs->verify_objects ? 0 : + PARSE_OBJECT_SKIP_HASH_CHECK); if (!object) { if (revs->ignore_missing) @@ -1105,7 +1097,7 @@ static int process_parents(struct rev_info *revs, struct commit *commit, struct commit_list **list, struct prio_queue *queue) { struct commit_list *parent = commit->parents; - unsigned left_flag; + unsigned pass_flags; if (commit->object.flags & ADDED) return 0; @@ -1160,7 +1152,7 @@ static int process_parents(struct rev_info *revs, struct commit *commit, if (revs->no_walk) return 0; - left_flag = (commit->object.flags & SYMMETRIC_LEFT); + pass_flags = (commit->object.flags & (SYMMETRIC_LEFT | ANCESTRY_PATH)); for (parent = commit->parents; parent; parent = parent->next) { struct commit *p = parent->item; @@ -1181,7 +1173,7 @@ static int process_parents(struct rev_info *revs, struct commit *commit, if (!*slot) *slot = *revision_sources_at(revs->sources, commit); } - p->object.flags |= left_flag; + p->object.flags |= pass_flags; if (!(p->object.flags & SEEN)) { p->object.flags |= (SEEN | NOT_USER_GIVEN); if (list) @@ -1304,13 +1296,24 @@ static int still_interesting(struct commit_list *src, timestamp_t date, int slop } /* - * "rev-list --ancestry-path A..B" computes commits that are ancestors - * of B but not ancestors of A but further limits the result to those - * that are descendants of A. This takes the list of bottom commits and - * the result of "A..B" without --ancestry-path, and limits the latter - * further to the ones that can reach one of the commits in "bottom". + * "rev-list --ancestry-path=C_0 [--ancestry-path=C_1 ...] A..B" + * computes commits that are ancestors of B but not ancestors of A but + * further limits the result to those that have any of C in their + * ancestry path (i.e. are either ancestors of any of C, descendants + * of any of C, or are any of C). If --ancestry-path is specified with + * no commit, we use all bottom commits for C. + * + * Before this function is called, ancestors of C will have already + * been marked with ANCESTRY_PATH previously. + * + * This takes the list of bottom commits and the result of "A..B" + * without --ancestry-path, and limits the latter further to the ones + * that have any of C in their ancestry path. Since the ancestors of C + * have already been marked (a prerequisite of this function), we just + * need to mark the descendants, then exclude any commit that does not + * have any of these marks. 
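Concretely, "git rev-list --ancestry-path=M A..B" keeps only those commits in A..B that are M itself, ancestors of M, or descendants of M, while a bare "--ancestry-path" keeps the old behaviour of using the bottom commits (A here) as the C set.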
*/ -static void limit_to_ancestry(struct commit_list *bottom, struct commit_list *list) +static void limit_to_ancestry(struct commit_list *bottoms, struct commit_list *list) { struct commit_list *p; struct commit_list *rlist = NULL; @@ -1323,7 +1326,7 @@ static void limit_to_ancestry(struct commit_list *bottom, struct commit_list *li for (p = list; p; p = p->next) commit_list_insert(p->item, &rlist); - for (p = bottom; p; p = p->next) + for (p = bottoms; p; p = p->next) p->item->object.flags |= TMP_MARK; /* @@ -1356,38 +1359,39 @@ static void limit_to_ancestry(struct commit_list *bottom, struct commit_list *li */ /* - * The ones that are not marked with TMP_MARK are uninteresting + * The ones that are not marked with either TMP_MARK or + * ANCESTRY_PATH are uninteresting */ for (p = list; p; p = p->next) { struct commit *c = p->item; - if (c->object.flags & TMP_MARK) + if (c->object.flags & (TMP_MARK | ANCESTRY_PATH)) continue; c->object.flags |= UNINTERESTING; } - /* We are done with the TMP_MARK */ + /* We are done with TMP_MARK and ANCESTRY_PATH */ for (p = list; p; p = p->next) - p->item->object.flags &= ~TMP_MARK; - for (p = bottom; p; p = p->next) - p->item->object.flags &= ~TMP_MARK; + p->item->object.flags &= ~(TMP_MARK | ANCESTRY_PATH); + for (p = bottoms; p; p = p->next) + p->item->object.flags &= ~(TMP_MARK | ANCESTRY_PATH); free_commit_list(rlist); } /* - * Before walking the history, keep the set of "negative" refs the - * caller has asked to exclude. + * Before walking the history, add the set of "negative" refs the + * caller has asked to exclude to the bottom list. * * This is used to compute "rev-list --ancestry-path A..B", as we need * to filter the result of "A..B" further to the ones that can actually * reach A. */ -static struct commit_list *collect_bottom_commits(struct commit_list *list) +static void collect_bottom_commits(struct commit_list *list, + struct commit_list **bottom) { - struct commit_list *elem, *bottom = NULL; + struct commit_list *elem; for (elem = list; elem; elem = elem->next) if (elem->item->object.flags & BOTTOM) - commit_list_insert(elem->item, &bottom); - return bottom; + commit_list_insert(elem->item, bottom); } /* Assumes either left_only or right_only is set */ @@ -1414,12 +1418,12 @@ static int limit_list(struct rev_info *revs) struct commit_list *original_list = revs->commits; struct commit_list *newlist = NULL; struct commit_list **p = &newlist; - struct commit_list *bottom = NULL; struct commit *interesting_cache = NULL; - if (revs->ancestry_path) { - bottom = collect_bottom_commits(original_list); - if (!bottom) + if (revs->ancestry_path_implicit_bottoms) { + collect_bottom_commits(original_list, + &revs->ancestry_path_bottoms); + if (!revs->ancestry_path_bottoms) die("--ancestry-path given but there are no bottom commits"); } @@ -1464,9 +1468,8 @@ static int limit_list(struct rev_info *revs) if (revs->left_only || revs->right_only) limit_left_right(newlist, revs); - if (bottom) - limit_to_ancestry(bottom, newlist); - free_commit_list(bottom); + if (revs->ancestry_path) + limit_to_ancestry(revs->ancestry_path_bottoms, newlist); /* * Check if any commits have become TREESAME by some of their parents @@ -2217,7 +2220,7 @@ static int handle_revision_opt(struct rev_info *revs, int argc, const char **arg const struct setup_revision_opt* opt) { const char *arg = argv[0]; - const char *optarg; + const char *optarg = NULL; int argcount; const unsigned hexsz = the_hash_algo->hexsz; @@ -2288,6 +2291,23 @@ static int handle_revision_opt(struct 
rev_info *revs, int argc, const char **arg revs->ancestry_path = 1; revs->simplify_history = 0; revs->limited = 1; + revs->ancestry_path_implicit_bottoms = 1; + } else if (skip_prefix(arg, "--ancestry-path=", &optarg)) { + struct commit *c; + struct object_id oid; + const char *msg = _("could not get commit for ancestry-path argument %s"); + + revs->ancestry_path = 1; + revs->simplify_history = 0; + revs->limited = 1; + + if (repo_get_oid_committish(revs->repo, optarg, &oid)) + return error(msg, optarg); + get_reference(revs, optarg, &oid, ANCESTRY_PATH); + c = lookup_commit_reference(revs->repo, &oid); + if (!c) + return error(msg, optarg); + commit_list_insert(c, &revs->ancestry_path_bottoms); } else if (!strcmp(arg, "-g") || !strcmp(arg, "--walk-reflogs")) { init_reflog_walk(&revs->reflog_info); } else if (!strcmp(arg, "--default")) { @@ -2402,6 +2422,7 @@ static int handle_revision_opt(struct rev_info *revs, int argc, const char **arg revs->tree_objects = 1; revs->blob_objects = 1; revs->verify_objects = 1; + disable_commit_graph(revs->repo); } else if (!strcmp(arg, "--unpacked")) { revs->unpacked = 1; } else if (starts_with(arg, "--unpacked=")) { @@ -2997,6 +3018,7 @@ static void release_revisions_topo_walk_info(struct topo_walk_info *info); void release_revisions(struct rev_info *revs) { free_commit_list(revs->commits); + free_commit_list(revs->ancestry_path_bottoms); object_array_clear(&revs->pending); object_array_clear(&revs->boundary_commits); release_revisions_cmdline(&revs->cmdline); diff --git a/revision.h b/revision.h index bb91e7ed91..afe1b77985 100644 --- a/revision.h +++ b/revision.h @@ -48,6 +48,7 @@ */ #define NOT_USER_GIVEN (1u<<25) #define TRACK_LINEAR (1u<<26) +#define ANCESTRY_PATH (1u<<27) #define ALL_REV_FLAGS (((1u<<11)-1) | NOT_USER_GIVEN | TRACK_LINEAR | PULL_MERGE) #define DECORATE_SHORT_REFS 1 @@ -164,6 +165,13 @@ struct rev_info { cherry_mark:1, bisect:1, ancestry_path:1, + + /* True if --ancestry-path was specified without an + * argument. The bottom revisions are implicitly + * the arguments in this case. + */ + ancestry_path_implicit_bottoms:1, + first_parent_only:1, exclude_first_parent_only:1, line_level_traverse:1, @@ -221,6 +229,7 @@ struct rev_info { missing_newline:1, date_mode_explicit:1, preserve_subject:1, + force_in_body_from:1, encode_email_headers:1, include_header:1; unsigned int disable_stdin:1; @@ -306,6 +315,7 @@ struct rev_info { struct saved_parents *saved_parents_slab; struct commit_list *previous_parents; + struct commit_list *ancestry_path_bottoms; const char *break_bar; struct revision_sources *sources; diff --git a/run-command.c b/run-command.c index 14f17830f5..5ec3a46dcc 100644 --- a/run-command.c +++ b/run-command.c @@ -10,6 +10,7 @@ #include "config.h" #include "packfile.h" #include "hook.h" +#include "compat/nonblock.h" void child_process_init(struct child_process *child) { @@ -1364,12 +1365,25 @@ static int pump_io_round(struct io_pump *slots, int nr, struct pollfd *pfd) continue; if (io->type == POLLOUT) { - ssize_t len = xwrite(io->fd, - io->u.out.buf, io->u.out.len); + ssize_t len; + + /* + * Don't use xwrite() here. It loops forever on EAGAIN, + * and we're in our own poll() loop here. + * + * Note that we lose xwrite()'s handling of MAX_IO_SIZE + * and EINTR, so we have to implement those ourselves. + */ + len = write(io->fd, io->u.out.buf, + io->u.out.len <= MAX_IO_SIZE ? 
+ io->u.out.len : MAX_IO_SIZE); if (len < 0) { - io->error = errno; - close(io->fd); - io->fd = -1; + if (errno != EINTR && errno != EAGAIN && + errno != ENOSPC) { + io->error = errno; + close(io->fd); + io->fd = -1; + } } else { io->u.out.buf += len; io->u.out.len -= len; @@ -1438,6 +1452,15 @@ int pipe_command(struct child_process *cmd, return -1; if (in) { + if (enable_pipe_nonblock(cmd->in) < 0) { + error_errno("unable to make pipe non-blocking"); + close(cmd->in); + if (out) + close(cmd->out); + if (err) + close(cmd->err); + return -1; + } io[nr].fd = cmd->in; io[nr].type = POLLOUT; io[nr].u.out.buf = in; diff --git a/sequencer.c b/sequencer.c index 3ac82f9ebe..d26ede83c4 100644 --- a/sequencer.c +++ b/sequencer.c @@ -537,7 +537,7 @@ static struct tree *empty_tree(struct repository *r) static int error_dirty_index(struct repository *repo, struct replay_opts *opts) { if (repo_read_index_unmerged(repo)) - return error_resolve_conflict(_(action_name(opts))); + return error_resolve_conflict(action_name(opts)); error(_("your local changes would be overwritten by %s."), _(action_name(opts))); @@ -575,7 +575,7 @@ static int fast_forward_to(struct repository *r, if (checkout_fast_forward(r, from, to, 1)) return -1; /* the callee should have complained already */ - strbuf_addf(&sb, _("%s: fast-forward"), _(action_name(opts))); + strbuf_addf(&sb, "%s: fast-forward", action_name(opts)); transaction = ref_transaction_begin(&err); if (!transaction || @@ -2422,7 +2422,7 @@ static int read_and_refresh_cache(struct repository *r, if (repo_read_index(r) < 0) { rollback_lock_file(&index_lock); return error(_("git %s: failed to read the index"), - _(action_name(opts))); + action_name(opts)); } refresh_index(r->index, REFRESH_QUIET|REFRESH_UNMERGED, NULL, NULL, NULL); @@ -2430,7 +2430,7 @@ static int read_and_refresh_cache(struct repository *r, if (write_locked_index(r->index, &index_lock, COMMIT_LOCK | SKIP_IF_UNCHANGED)) { return error(_("git %s: failed to refresh the index"), - _(action_name(opts))); + action_name(opts)); } } @@ -3753,7 +3753,7 @@ static int do_reset(struct repository *r, init_checkout_metadata(&unpack_tree_opts.meta, name, &oid, NULL); if (repo_read_index_unmerged(r)) { - ret = error_resolve_conflict(_(action_name(opts))); + ret = error_resolve_conflict(action_name(opts)); goto cleanup; } diff --git a/submodule.c b/submodule.c index 7455b618ea..bf7a2c7918 100644 --- a/submodule.c +++ b/submodule.c @@ -416,10 +416,9 @@ int parse_submodule_update_strategy(const char *value, return 0; } -const char *submodule_strategy_to_string(const struct submodule_update_strategy *s) +const char *submodule_update_type_to_string(enum submodule_update_type type) { - struct strbuf sb = STRBUF_INIT; - switch (s->type) { + switch (type) { case SM_UPDATE_CHECKOUT: return "checkout"; case SM_UPDATE_MERGE: @@ -429,12 +428,11 @@ const char *submodule_strategy_to_string(const struct submodule_update_strategy case SM_UPDATE_NONE: return "none"; case SM_UPDATE_UNSPECIFIED: - return NULL; case SM_UPDATE_COMMAND: - strbuf_addf(&sb, "!%s", s->command); - return strbuf_detach(&sb, NULL); + BUG("init_submodule() should handle type %d", type); + default: + BUG("unexpected update strategy type: %d", type); } - return NULL; } void handle_ignore_submodules_arg(struct diff_options *diffopt, diff --git a/submodule.h b/submodule.h index bfaa9da186..6a9fec6de1 100644 --- a/submodule.h +++ b/submodule.h @@ -72,7 +72,7 @@ void die_path_inside_submodule(struct index_state *istate, enum submodule_update_type 
parse_submodule_update_type(const char *value); int parse_submodule_update_strategy(const char *value, struct submodule_update_strategy *dst); -const char *submodule_strategy_to_string(const struct submodule_update_strategy *s); +const char *submodule_update_type_to_string(enum submodule_update_type type); void handle_ignore_submodules_arg(struct diff_options *, const char *); void show_submodule_diff_summary(struct diff_options *o, const char *path, struct object_id *one, struct object_id *two, diff --git a/t/helper/test-crontab.c b/t/helper/test-crontab.c index e7c0137a47..e6c1b1e22b 100644 --- a/t/helper/test-crontab.c +++ b/t/helper/test-crontab.c @@ -2,33 +2,34 @@ #include "cache.h" /* - * Usage: test-tool cron <file> [-l] + * Usage: test-tool crontab <file> -l|<input> * * If -l is specified, then write the contents of <file> to stdout. - * Otherwise, write from stdin into <file>. + * Otherwise, copy the contents of <input> into <file>. */ int cmd__crontab(int argc, const char **argv) { int a; FILE *from, *to; - if (argc == 3 && !strcmp(argv[2], "-l")) { + if (argc != 3) + usage("test-tool crontab <file> -l|<input>"); + + if (!strcmp(argv[2], "-l")) { from = fopen(argv[1], "r"); if (!from) return 0; to = stdout; - } else if (argc == 2) { - from = stdin; - to = fopen(argv[1], "w"); - } else - return error("unknown arguments"); + } else { + from = xfopen(argv[2], "r"); + to = xfopen(argv[1], "w"); + } while ((a = fgetc(from)) != EOF) fputc(a, to); - if (argc == 3) - fclose(from); - else + fclose(from); + if (to != stdout) fclose(to); return 0; diff --git a/t/helper/test-mergesort.c b/t/helper/test-mergesort.c index 202e54a7ff..335e5bb3a9 100644 --- a/t/helper/test-mergesort.c +++ b/t/helper/test-mergesort.c @@ -22,21 +22,35 @@ static int compare_strings(const struct line *x, const struct line *y) static int sort_stdin(void) { - struct line *line, *p = NULL, *lines = NULL; + struct line *lines; + struct line **tail = &lines; struct strbuf sb = STRBUF_INIT; - - while (!strbuf_getline(&sb, stdin)) { - line = xmalloc(sizeof(struct line)); - line->text = strbuf_detach(&sb, NULL); - if (p) { - line->next = p->next; - p->next = line; - } else { - line->next = NULL; - lines = line; - } - p = line; + struct mem_pool lines_pool; + char *p; + + strbuf_read(&sb, 0, 0); + + /* + * Split by newline, but don't create an item + * for the empty string after the last separator. 
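[Aside, not part of the patch: a quick check of the splitting rule described above; the exact "test-tool mergesort sort" invocation is an assumption based on how the test suite drives this helper, not something taken from this diff.]

    # "b\na\n" yields exactly two items -- the trailing newline does not
    # produce a third, empty item -- so the sorted output is "a" then "b"
    printf 'b\na\n' | test-tool mergesort sort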
+ */ + if (sb.len && sb.buf[sb.len - 1] == '\n') + strbuf_setlen(&sb, sb.len - 1); + + mem_pool_init(&lines_pool, 0); + p = sb.buf; + for (;;) { + char *eol = strchr(p, '\n'); + struct line *line = mem_pool_alloc(&lines_pool, sizeof(*line)); + line->text = p; + *tail = line; + tail = &line->next; + if (!eol) + break; + *eol = '\0'; + p = eol + 1; } + *tail = NULL; sort_lines(&lines, compare_strings); diff --git a/t/helper/test-parse-options.c b/t/helper/test-parse-options.c index 48d3cf6692..506835521a 100644 --- a/t/helper/test-parse-options.c +++ b/t/helper/test-parse-options.c @@ -192,3 +192,131 @@ int cmd__parse_options(int argc, const char **argv) return ret; } + +static void print_args(int argc, const char **argv) +{ + int i; + for (i = 0; i < argc; i++) + printf("arg %02d: %s\n", i, argv[i]); +} + +static int parse_options_flags__cmd(int argc, const char **argv, + enum parse_opt_flags test_flags) +{ + const char *usage[] = { + "<...> cmd [options]", + NULL + }; + int opt = 0; + const struct option options[] = { + OPT_INTEGER('o', "opt", &opt, "an integer option"), + OPT_END() + }; + + argc = parse_options(argc, argv, NULL, options, usage, test_flags); + + printf("opt: %d\n", opt); + print_args(argc, argv); + + return 0; +} + +static enum parse_opt_flags test_flags = 0; +static const struct option test_flag_options[] = { + OPT_GROUP("flag-options:"), + OPT_BIT(0, "keep-dashdash", &test_flags, + "pass PARSE_OPT_KEEP_DASHDASH to parse_options()", + PARSE_OPT_KEEP_DASHDASH), + OPT_BIT(0, "stop-at-non-option", &test_flags, + "pass PARSE_OPT_STOP_AT_NON_OPTION to parse_options()", + PARSE_OPT_STOP_AT_NON_OPTION), + OPT_BIT(0, "keep-argv0", &test_flags, + "pass PARSE_OPT_KEEP_ARGV0 to parse_options()", + PARSE_OPT_KEEP_ARGV0), + OPT_BIT(0, "keep-unknown-opt", &test_flags, + "pass PARSE_OPT_KEEP_UNKNOWN_OPT to parse_options()", + PARSE_OPT_KEEP_UNKNOWN_OPT), + OPT_BIT(0, "no-internal-help", &test_flags, + "pass PARSE_OPT_NO_INTERNAL_HELP to parse_options()", + PARSE_OPT_NO_INTERNAL_HELP), + OPT_BIT(0, "subcommand-optional", &test_flags, + "pass PARSE_OPT_SUBCOMMAND_OPTIONAL to parse_options()", + PARSE_OPT_SUBCOMMAND_OPTIONAL), + OPT_END() +}; + +int cmd__parse_options_flags(int argc, const char **argv) +{ + const char *usage[] = { + "test-tool parse-options-flags [flag-options] cmd [options]", + NULL + }; + + argc = parse_options(argc, argv, NULL, test_flag_options, usage, + PARSE_OPT_STOP_AT_NON_OPTION); + + if (!argc || strcmp(argv[0], "cmd")) { + error("'cmd' is mandatory"); + usage_with_options(usage, test_flag_options); + } + + return parse_options_flags__cmd(argc, argv, test_flags); +} + +static int subcmd_one(int argc, const char **argv, const char *prefix) +{ + printf("fn: subcmd_one\n"); + print_args(argc, argv); + return 0; +} + +static int subcmd_two(int argc, const char **argv, const char *prefix) +{ + printf("fn: subcmd_two\n"); + print_args(argc, argv); + return 0; +} + +static int parse_subcommand__cmd(int argc, const char **argv, + enum parse_opt_flags test_flags) +{ + const char *usage[] = { + "<...> cmd subcmd-one", + "<...> cmd subcmd-two", + NULL + }; + parse_opt_subcommand_fn *fn = NULL; + int opt = 0; + struct option options[] = { + OPT_SUBCOMMAND("subcmd-one", &fn, subcmd_one), + OPT_SUBCOMMAND("subcmd-two", &fn, subcmd_two), + OPT_INTEGER('o', "opt", &opt, "an integer option"), + OPT_END() + }; + + if (test_flags & PARSE_OPT_SUBCOMMAND_OPTIONAL) + fn = subcmd_one; + argc = parse_options(argc, argv, NULL, options, usage, test_flags); + + printf("opt: %d\n", opt); + + 
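	/*
	 * Editor's note, not part of the patch: fn is always set by the time
	 * we reach this return -- PARSE_OPT_SUBCOMMAND_OPTIONAL presets it to
	 * subcmd_one above, and without that flag parse_options() exits with
	 * "error: need a subcommand" (exercised by the t0040 tests further
	 * down).
	 */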
return fn(argc, argv, NULL); +} + +int cmd__parse_subcommand(int argc, const char **argv) +{ + const char *usage[] = { + "test-tool parse-subcommand [flag-options] cmd <subcommand>", + NULL + }; + + argc = parse_options(argc, argv, NULL, test_flag_options, usage, + PARSE_OPT_STOP_AT_NON_OPTION); + + if (!argc || strcmp(argv[0], "cmd")) { + error("'cmd' is mandatory"); + usage_with_options(usage, test_flag_options); + } + + return parse_subcommand__cmd(argc, argv, test_flags); +} diff --git a/t/helper/test-rot13-filter.c b/t/helper/test-rot13-filter.c new file mode 100644 index 0000000000..f8d564c622 --- /dev/null +++ b/t/helper/test-rot13-filter.c @@ -0,0 +1,382 @@ +/* + * Example implementation for the Git filter protocol version 2 + * See Documentation/gitattributes.txt, section "Filter Protocol" + * + * Usage: test-tool rot13-filter [--always-delay] --log=<path> <capabilities> + * + * Log path defines a debug log file that the script writes to. The + * subsequent arguments define a list of supported protocol capabilities + * ("clean", "smudge", etc). + * + * When --always-delay is given all pathnames with the "can-delay" flag + * that don't appear on the list bellow are delayed with a count of 1 + * (see more below). + * + * This implementation supports special test cases: + * (1) If data with the pathname "clean-write-fail.r" is processed with + * a "clean" operation then the write operation will die. + * (2) If data with the pathname "smudge-write-fail.r" is processed with + * a "smudge" operation then the write operation will die. + * (3) If data with the pathname "error.r" is processed with any + * operation then the filter signals that it cannot or does not want + * to process the file. + * (4) If data with the pathname "abort.r" is processed with any + * operation then the filter signals that it cannot or does not want + * to process the file and any file after that is processed with the + * same command. + * (5) If data with a pathname that is a key in the delay hash is + * requested (e.g. "test-delay10.a") then the filter responds with + * a "delay" status and sets the "requested" field in the delay hash. + * The filter will signal the availability of this object after + * "count" (field in delay hash) "list_available_blobs" commands. + * (6) If data with the pathname "missing-delay.a" is processed that the + * filter will drop the path from the "list_available_blobs" response. + * (7) If data with the pathname "invalid-delay.a" is processed that the + * filter will add the path "unfiltered" which was not delayed before + * to the "list_available_blobs" response. + */ + +#include "test-tool.h" +#include "pkt-line.h" +#include "string-list.h" +#include "strmap.h" +#include "parse-options.h" + +static FILE *logfile; +static int always_delay, has_clean_cap, has_smudge_cap; +static struct strmap delay = STRMAP_INIT; + +static inline const char *str_or_null(const char *str) +{ + return str ? str : "(null)"; +} + +static char *rot13(char *str) +{ + char *c; + for (c = str; *c; c++) + if (isalpha(*c)) + *c += tolower(*c) < 'n' ? 13 : -13; + return str; +} + +static char *get_value(char *buf, const char *key) +{ + const char *orig_buf = buf; + if (!buf || + !skip_prefix((const char *)buf, key, (const char **)&buf) || + !skip_prefix((const char *)buf, "=", (const char **)&buf) || + !*buf) + die("expected key '%s', got '%s'", key, str_or_null(orig_buf)); + return buf; +} + +/* + * Read a text packet, expecting that it is in the form "key=value" for + * the given key. 
An EOF does not trigger any error and is reported + * back to the caller with NULL. Die if the "key" part of "key=value" does + * not match the given key, or the value part is empty. + */ +static char *packet_key_val_read(const char *key) +{ + char *buf; + if (packet_read_line_gently(0, NULL, &buf) < 0) + return NULL; + return xstrdup(get_value(buf, key)); +} + +static inline void assert_remote_capability(struct strset *caps, const char *cap) +{ + if (!strset_contains(caps, cap)) + die("required '%s' capability not available from remote", cap); +} + +static void read_capabilities(struct strset *remote_caps) +{ + for (;;) { + char *buf = packet_read_line(0, NULL); + if (!buf) + break; + strset_add(remote_caps, get_value(buf, "capability")); + } + + assert_remote_capability(remote_caps, "clean"); + assert_remote_capability(remote_caps, "smudge"); + assert_remote_capability(remote_caps, "delay"); +} + +static void check_and_write_capabilities(struct strset *remote_caps, + const char **caps, int nr_caps) +{ + int i; + for (i = 0; i < nr_caps; i++) { + if (!strset_contains(remote_caps, caps[i])) + die("our capability '%s' is not available from remote", + caps[i]); + packet_write_fmt(1, "capability=%s\n", caps[i]); + } + packet_flush(1); +} + +struct delay_entry { + int requested, count; + char *output; +}; + +static void free_delay_entries(void) +{ + struct hashmap_iter iter; + struct strmap_entry *ent; + + strmap_for_each_entry(&delay, &iter, ent) { + struct delay_entry *delay_entry = ent->value; + free(delay_entry->output); + free(delay_entry); + } + strmap_clear(&delay, 0); +} + +static void add_delay_entry(char *pathname, int count, int requested) +{ + struct delay_entry *entry = xcalloc(1, sizeof(*entry)); + entry->count = count; + entry->requested = requested; + if (strmap_put(&delay, pathname, entry)) + BUG("adding the same path twice to delay hash?"); +} + +static void reply_list_available_blobs_cmd(void) +{ + struct hashmap_iter iter; + struct strmap_entry *ent; + struct string_list_item *str_item; + struct string_list paths = STRING_LIST_INIT_NODUP; + + /* flush */ + if (packet_read_line(0, NULL)) + die("bad list_available_blobs end"); + + strmap_for_each_entry(&delay, &iter, ent) { + struct delay_entry *delay_entry = ent->value; + if (!delay_entry->requested) + continue; + delay_entry->count--; + if (!strcmp(ent->key, "invalid-delay.a")) { + /* Send Git a pathname that was not delayed earlier */ + packet_write_fmt(1, "pathname=unfiltered"); + } + if (!strcmp(ent->key, "missing-delay.a")) { + /* Do not signal Git that this file is available */ + } else if (!delay_entry->count) { + string_list_append(&paths, ent->key); + packet_write_fmt(1, "pathname=%s", ent->key); + } + } + + /* Print paths in sorted order. 
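 * (Editor's note, not part of the patch: sorting here keeps the log
 * output deterministic, since strmap iteration order is not specified.)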
*/ + string_list_sort(&paths); + for_each_string_list_item(str_item, &paths) + fprintf(logfile, " %s", str_item->string); + string_list_clear(&paths, 0); + + packet_flush(1); + + fprintf(logfile, " [OK]\n"); + packet_write_fmt(1, "status=success"); + packet_flush(1); +} + +static void command_loop(void) +{ + for (;;) { + char *buf, *output; + char *pathname; + struct delay_entry *entry; + struct strbuf input = STRBUF_INIT; + char *command = packet_key_val_read("command"); + + if (!command) { + fprintf(logfile, "STOP\n"); + break; + } + fprintf(logfile, "IN: %s", command); + + if (!strcmp(command, "list_available_blobs")) { + reply_list_available_blobs_cmd(); + free(command); + continue; + } + + pathname = packet_key_val_read("pathname"); + if (!pathname) + die("unexpected EOF while expecting pathname"); + fprintf(logfile, " %s", pathname); + + /* Read until flush */ + while ((buf = packet_read_line(0, NULL))) { + if (!strcmp(buf, "can-delay=1")) { + entry = strmap_get(&delay, pathname); + if (entry && !entry->requested) + entry->requested = 1; + else if (!entry && always_delay) + add_delay_entry(pathname, 1, 1); + } else if (starts_with(buf, "ref=") || + starts_with(buf, "treeish=") || + starts_with(buf, "blob=")) { + fprintf(logfile, " %s", buf); + } else { + /* + * In general, filters need to be graceful about + * new metadata, since it's documented that we + * can pass any key-value pairs, but for tests, + * let's be a little stricter. + */ + die("Unknown message '%s'", buf); + } + } + + read_packetized_to_strbuf(0, &input, 0); + fprintf(logfile, " %"PRIuMAX" [OK] -- ", (uintmax_t)input.len); + + entry = strmap_get(&delay, pathname); + if (entry && entry->output) { + output = entry->output; + } else if (!strcmp(pathname, "error.r") || !strcmp(pathname, "abort.r")) { + output = ""; + } else if (!strcmp(command, "clean") && has_clean_cap) { + output = rot13(input.buf); + } else if (!strcmp(command, "smudge") && has_smudge_cap) { + output = rot13(input.buf); + } else { + die("bad command '%s'", command); + } + + if (!strcmp(pathname, "error.r")) { + fprintf(logfile, "[ERROR]\n"); + packet_write_fmt(1, "status=error"); + packet_flush(1); + } else if (!strcmp(pathname, "abort.r")) { + fprintf(logfile, "[ABORT]\n"); + packet_write_fmt(1, "status=abort"); + packet_flush(1); + } else if (!strcmp(command, "smudge") && + (entry = strmap_get(&delay, pathname)) && + entry->requested == 1) { + fprintf(logfile, "[DELAYED]\n"); + packet_write_fmt(1, "status=delayed"); + packet_flush(1); + entry->requested = 2; + if (entry->output != output) { + free(entry->output); + entry->output = xstrdup(output); + } + } else { + int i, nr_packets = 0; + size_t output_len; + const char *p; + packet_write_fmt(1, "status=success"); + packet_flush(1); + + if (skip_prefix(pathname, command, &p) && + !strcmp(p, "-write-fail.r")) { + fprintf(logfile, "[WRITE FAIL]\n"); + die("%s write error", command); + } + + output_len = strlen(output); + fprintf(logfile, "OUT: %"PRIuMAX" ", (uintmax_t)output_len); + + if (write_packetized_from_buf_no_flush_count(output, + output_len, 1, &nr_packets)) + die("failed to write buffer to stdout"); + packet_flush(1); + + for (i = 0; i < nr_packets; i++) + fprintf(logfile, "."); + fprintf(logfile, " [OK]\n"); + + packet_flush(1); + } + free(pathname); + strbuf_release(&input); + free(command); + } +} + +static void packet_initialize(void) +{ + char *pkt_buf = packet_read_line(0, NULL); + + if (!pkt_buf || strcmp(pkt_buf, "git-filter-client")) + die("bad initialize: '%s'", 
str_or_null(pkt_buf)); + + pkt_buf = packet_read_line(0, NULL); + if (!pkt_buf || strcmp(pkt_buf, "version=2")) + die("bad version: '%s'", str_or_null(pkt_buf)); + + pkt_buf = packet_read_line(0, NULL); + if (pkt_buf) + die("bad version end: '%s'", pkt_buf); + + packet_write_fmt(1, "git-filter-server"); + packet_write_fmt(1, "version=2"); + packet_flush(1); +} + +static const char *rot13_usage[] = { + "test-tool rot13-filter [--always-delay] --log=<path> <capabilities>", + NULL +}; + +int cmd__rot13_filter(int argc, const char **argv) +{ + int i, nr_caps; + struct strset remote_caps = STRSET_INIT; + const char *log_path = NULL; + + struct option options[] = { + OPT_BOOL(0, "always-delay", &always_delay, + "delay all paths with the can-delay flag"), + OPT_STRING(0, "log", &log_path, "path", + "path to the debug log file"), + OPT_END() + }; + nr_caps = parse_options(argc, argv, NULL, options, rot13_usage, + PARSE_OPT_STOP_AT_NON_OPTION); + + if (!log_path || !nr_caps) + usage_with_options(rot13_usage, options); + + logfile = fopen(log_path, "a"); + if (!logfile) + die_errno("failed to open log file"); + + for (i = 0; i < nr_caps; i++) { + if (!strcmp(argv[i], "smudge")) + has_smudge_cap = 1; + if (!strcmp(argv[i], "clean")) + has_clean_cap = 1; + } + + add_delay_entry("test-delay10.a", 1, 0); + add_delay_entry("test-delay11.a", 1, 0); + add_delay_entry("test-delay20.a", 2, 0); + add_delay_entry("test-delay10.b", 1, 0); + add_delay_entry("missing-delay.a", 1, 0); + add_delay_entry("invalid-delay.a", 1, 0); + + fprintf(logfile, "START\n"); + packet_initialize(); + + read_capabilities(&remote_caps); + check_and_write_capabilities(&remote_caps, argv, nr_caps); + fprintf(logfile, "init handshake complete\n"); + strset_clear(&remote_caps); + + command_loop(); + + if (fclose(logfile)) + die_errno("error closing logfile"); + free_delay_entries(); + return 0; +} diff --git a/t/helper/test-serve-v2.c b/t/helper/test-serve-v2.c index 28e905afc3..824e5c0a95 100644 --- a/t/helper/test-serve-v2.c +++ b/t/helper/test-serve-v2.c @@ -24,7 +24,7 @@ int cmd__serve_v2(int argc, const char **argv) /* ignore all unknown cmdline switches for now */ argc = parse_options(argc, argv, prefix, options, serve_usage, PARSE_OPT_KEEP_DASHDASH | - PARSE_OPT_KEEP_UNKNOWN); + PARSE_OPT_KEEP_UNKNOWN_OPT); if (advertise_capabilities) protocol_v2_advertise_capabilities(); diff --git a/t/helper/test-submodule-config.c b/t/helper/test-submodule-config.c index e2692746df..22a41c4092 100644 --- a/t/helper/test-submodule-config.c +++ b/t/helper/test-submodule-config.c @@ -15,14 +15,11 @@ int cmd__submodule_config(int argc, const char **argv) { const char **arg = argv; int my_argc = argc; - int output_url = 0; int lookup_name = 0; arg++; my_argc--; while (arg[0] && starts_with(arg[0], "--")) { - if (!strcmp(arg[0], "--url")) - output_url = 1; if (!strcmp(arg[0], "--name")) lookup_name = 1; arg++; @@ -57,12 +54,8 @@ int cmd__submodule_config(int argc, const char **argv) if (!submodule) die_usage(argc, argv, "Submodule not found."); - if (output_url) - printf("Submodule url: '%s' for path '%s'\n", - submodule->url, submodule->path); - else - printf("Submodule name: '%s' for path '%s'\n", - submodule->name, submodule->path); + printf("Submodule name: '%s' for path '%s'\n", submodule->name, + submodule->path); arg += 2; } diff --git a/t/helper/test-submodule.c b/t/helper/test-submodule.c new file mode 100644 index 0000000000..e0e0c53d38 --- /dev/null +++ b/t/helper/test-submodule.c @@ -0,0 +1,146 @@ +#include "test-tool.h" +#include 
"test-tool-utils.h" +#include "cache.h" +#include "parse-options.h" +#include "remote.h" +#include "submodule-config.h" +#include "submodule.h" + +#define TEST_TOOL_CHECK_NAME_USAGE \ + "test-tool submodule check-name <name>" +static const char *submodule_check_name_usage[] = { + TEST_TOOL_CHECK_NAME_USAGE, + NULL +}; + +#define TEST_TOOL_IS_ACTIVE_USAGE \ + "test-tool submodule is-active <name>" +static const char *submodule_is_active_usage[] = { + TEST_TOOL_IS_ACTIVE_USAGE, + NULL +}; + +#define TEST_TOOL_RESOLVE_RELATIVE_URL_USAGE \ + "test-tool submodule resolve-relative-url <up_path> <remoteurl> <url>" +static const char *submodule_resolve_relative_url_usage[] = { + TEST_TOOL_RESOLVE_RELATIVE_URL_USAGE, + NULL, +}; + +static const char *submodule_usage[] = { + TEST_TOOL_CHECK_NAME_USAGE, + TEST_TOOL_IS_ACTIVE_USAGE, + TEST_TOOL_RESOLVE_RELATIVE_URL_USAGE, + NULL +}; + +/* + * Exit non-zero if any of the submodule names given on the command line is + * invalid. If no names are given, filter stdin to print only valid names + * (which is primarily intended for testing). + */ +static int check_name(int argc, const char **argv) +{ + if (argc > 1) { + while (*++argv) { + if (check_submodule_name(*argv) < 0) + return 1; + } + } else { + struct strbuf buf = STRBUF_INIT; + while (strbuf_getline(&buf, stdin) != EOF) { + if (!check_submodule_name(buf.buf)) + printf("%s\n", buf.buf); + } + strbuf_release(&buf); + } + return 0; +} + +static int cmd__submodule_check_name(int argc, const char **argv) +{ + struct option options[] = { + OPT_END() + }; + argc = parse_options(argc, argv, "test-tools", options, + submodule_check_name_usage, 0); + if (argc) + usage_with_options(submodule_check_name_usage, options); + + return check_name(argc, argv); +} + +static int cmd__submodule_is_active(int argc, const char **argv) +{ + struct option options[] = { + OPT_END() + }; + argc = parse_options(argc, argv, "test-tools", options, + submodule_is_active_usage, 0); + if (argc != 1) + usage_with_options(submodule_is_active_usage, options); + + setup_git_directory(); + + return !is_submodule_active(the_repository, argv[0]); +} + +static int resolve_relative_url(int argc, const char **argv) +{ + char *remoteurl, *res; + const char *up_path, *url; + + up_path = argv[0]; + remoteurl = xstrdup(argv[1]); + url = argv[2]; + + if (!strcmp(up_path, "(null)")) + up_path = NULL; + + res = relative_url(remoteurl, url, up_path); + puts(res); + free(res); + free(remoteurl); + return 0; +} + +static int cmd__submodule_resolve_relative_url(int argc, const char **argv) +{ + struct option options[] = { + OPT_END() + }; + argc = parse_options(argc, argv, "test-tools", options, + submodule_resolve_relative_url_usage, 0); + if (argc != 3) + usage_with_options(submodule_resolve_relative_url_usage, options); + + return resolve_relative_url(argc, argv); +} + +static struct test_cmd cmds[] = { + { "check-name", cmd__submodule_check_name }, + { "is-active", cmd__submodule_is_active }, + { "resolve-relative-url", cmd__submodule_resolve_relative_url}, +}; + +int cmd__submodule(int argc, const char **argv) +{ + struct option options[] = { + OPT_END() + }; + size_t i; + + argc = parse_options(argc, argv, "test-tools", options, submodule_usage, + PARSE_OPT_STOP_AT_NON_OPTION); + if (argc < 1) + usage_with_options(submodule_usage, options); + + for (i = 0; i < ARRAY_SIZE(cmds); i++) + if (!strcmp(cmds[i].name, argv[0])) + return cmds[i].fn(argc, argv); + + usage_msg_optf("unknown subcommand '%s'", submodule_usage, options, + argv[0]); + + return 
0; +} diff --git a/t/helper/test-tool-utils.h b/t/helper/test-tool-utils.h new file mode 100644 index 0000000000..6a0e5e0074 --- /dev/null +++ b/t/helper/test-tool-utils.h @@ -0,0 +1,9 @@ +#ifndef TEST_TOOL_UTILS_H +#define TEST_TOOL_UTILS_H + +struct test_cmd { + const char *name; + int (*fn)(int argc, const char **argv); +}; + +#endif diff --git a/t/helper/test-tool.c b/t/helper/test-tool.c index 318fdbab0c..d1d013bcd9 100644 --- a/t/helper/test-tool.c +++ b/t/helper/test-tool.c @@ -1,5 +1,6 @@ #include "git-compat-util.h" #include "test-tool.h" +#include "test-tool-utils.h" #include "trace2.h" #include "parse-options.h" @@ -8,11 +9,6 @@ static const char * const test_tool_usage[] = { NULL }; -struct test_cmd { - const char *name; - int (*fn)(int argc, const char **argv); -}; - static struct test_cmd cmds[] = { { "advise", cmd__advise_if_enabled }, { "bitmap", cmd__bitmap }, @@ -51,7 +47,9 @@ static struct test_cmd cmds[] = { { "online-cpus", cmd__online_cpus }, { "pack-mtimes", cmd__pack_mtimes }, { "parse-options", cmd__parse_options }, + { "parse-options-flags", cmd__parse_options_flags }, { "parse-pathspec-file", cmd__parse_pathspec_file }, + { "parse-subcommand", cmd__parse_subcommand }, { "partial-clone", cmd__partial_clone }, { "path-utils", cmd__path_utils }, { "pcre2-config", cmd__pcre2_config }, @@ -65,6 +63,7 @@ static struct test_cmd cmds[] = { { "read-midx", cmd__read_midx }, { "ref-store", cmd__ref_store }, { "reftable", cmd__reftable }, + { "rot13-filter", cmd__rot13_filter }, { "dump-reftable", cmd__dump_reftable }, { "regex", cmd__regex }, { "repository", cmd__repository }, @@ -78,6 +77,7 @@ static struct test_cmd cmds[] = { { "simple-ipc", cmd__simple_ipc }, { "strcmp-offset", cmd__strcmp_offset }, { "string-list", cmd__string_list }, + { "submodule", cmd__submodule }, { "submodule-config", cmd__submodule_config }, { "submodule-nested-repo-config", cmd__submodule_nested_repo_config }, { "subprocess", cmd__subprocess }, diff --git a/t/helper/test-tool.h b/t/helper/test-tool.h index bb79927163..6b46b6444b 100644 --- a/t/helper/test-tool.h +++ b/t/helper/test-tool.h @@ -41,7 +41,9 @@ int cmd__oidtree(int argc, const char **argv); int cmd__online_cpus(int argc, const char **argv); int cmd__pack_mtimes(int argc, const char **argv); int cmd__parse_options(int argc, const char **argv); +int cmd__parse_options_flags(int argc, const char **argv); int cmd__parse_pathspec_file(int argc, const char** argv); +int cmd__parse_subcommand(int argc, const char **argv); int cmd__partial_clone(int argc, const char **argv); int cmd__path_utils(int argc, const char **argv); int cmd__pcre2_config(int argc, const char **argv); @@ -54,6 +56,7 @@ int cmd__read_cache(int argc, const char **argv); int cmd__read_graph(int argc, const char **argv); int cmd__read_midx(int argc, const char **argv); int cmd__ref_store(int argc, const char **argv); +int cmd__rot13_filter(int argc, const char **argv); int cmd__reftable(int argc, const char **argv); int cmd__regex(int argc, const char **argv); int cmd__repository(int argc, const char **argv); @@ -68,6 +71,7 @@ int cmd__sigchain(int argc, const char **argv); int cmd__simple_ipc(int argc, const char **argv); int cmd__strcmp_offset(int argc, const char **argv); int cmd__string_list(int argc, const char **argv); +int cmd__submodule(int argc, const char **argv); int cmd__submodule_config(int argc, const char **argv); int cmd__submodule_nested_repo_config(int argc, const char **argv); int cmd__subprocess(int argc, const char **argv); diff --git 
a/t/lib-bitmap.sh b/t/lib-bitmap.sh index a95537e759..f595937094 100644 --- a/t/lib-bitmap.sh +++ b/t/lib-bitmap.sh @@ -440,7 +440,7 @@ midx_bitmap_partial_tests () { test_commit packed && git repack && test_commit loose && - git multi-pack-index write --bitmap 2>err && + git multi-pack-index write --bitmap && test_path_is_file $midx && test_path_is_file $midx-$(midx_checksum $objdir).bitmap ' diff --git a/t/perf/lib-bitmap.sh b/t/perf/lib-bitmap.sh index 63d3bc7cec..55a8feb1dc 100644 --- a/t/perf/lib-bitmap.sh +++ b/t/perf/lib-bitmap.sh @@ -67,3 +67,34 @@ test_partial_bitmap () { --filter=tree:0 >/dev/null ' } + +test_pack_bitmap () { + test_perf "repack to disk" ' + git repack -ad + ' + + test_full_bitmap + + test_expect_success "create partial bitmap state" ' + # pick a commit to represent the repo tip in the past + cutoff=$(git rev-list HEAD~100 -1) && + orig_tip=$(git rev-parse HEAD) && + + # now kill off all of the refs and pretend we had + # just the one tip + rm -rf .git/logs .git/refs/* .git/packed-refs && + git update-ref HEAD $cutoff && + + # and then repack, which will leave us with a nice + # big bitmap pack of the "old" history, and all of + # the new history will be loose, as if it had been pushed + # up incrementally and exploded via unpack-objects + git repack -Ad && + + # and now restore our original tip, as if the pushes + # had happened + git update-ref HEAD $orig_tip + ' + + test_partial_bitmap +} diff --git a/t/perf/p0004-lazy-init-name-hash.sh b/t/perf/p0004-lazy-init-name-hash.sh index 1afc08fe7f..85be14e4dd 100755 --- a/t/perf/p0004-lazy-init-name-hash.sh +++ b/t/perf/p0004-lazy-init-name-hash.sh @@ -49,7 +49,7 @@ test_perf "single-threaded, $desc" " test-tool lazy-init-name-hash --single --count=$count " -test_perf REPO_BIG_ENOUGH_FOR_MULTI "multi-threaded, $desc" " +test_perf "multi-threaded, $desc" --prereq REPO_BIG_ENOUGH_FOR_MULTI " test-tool lazy-init-name-hash --multi --count=$count " diff --git a/t/perf/p0006-read-tree-checkout.sh b/t/perf/p0006-read-tree-checkout.sh index 900b385c4b..c481c012d2 100755 --- a/t/perf/p0006-read-tree-checkout.sh +++ b/t/perf/p0006-read-tree-checkout.sh @@ -46,7 +46,7 @@ test_expect_success "setup repo" ' ' test_perf "read-tree br_base br_ballast ($nr_files)" ' - git read-tree -m br_base br_ballast -n + git read-tree -n -m br_base br_ballast ' test_perf "switch between br_base br_ballast ($nr_files)" ' diff --git a/t/perf/p5310-pack-bitmaps.sh b/t/perf/p5310-pack-bitmaps.sh index 7ad4f237bc..b1399f1007 100755 --- a/t/perf/p5310-pack-bitmaps.sh +++ b/t/perf/p5310-pack-bitmaps.sh @@ -4,51 +4,37 @@ test_description='Tests pack performance using bitmaps' . ./perf-lib.sh . "${TEST_DIRECTORY}/perf/lib-bitmap.sh" -test_perf_large_repo - -# note that we do everything through config, -# since we want to be able to compare bitmap-aware -# git versus non-bitmap git -# -# We intentionally use the deprecated pack.writebitmaps -# config so that we can test against older versions of git. -test_expect_success 'setup bitmap config' ' - git config pack.writebitmaps true -' - -# we need to create the tag up front such that it is covered by the repack and -# thus by generated bitmaps. 
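[Aside, not part of the patch: the parameterized perf helpers in this region boil down to toggling one config knob before repacking; a minimal sketch of the setup they exercise, using only commands and config names that appear in these hunks:]

    git config pack.writebitmaps true &&
    git config pack.writeBitmapLookupTable true &&   # or false
    git repack -adb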
-test_expect_success 'create tags' ' - git tag --message="tag pointing to HEAD" perf-tag HEAD -' - -test_perf 'repack to disk' ' - git repack -ad -' - -test_full_bitmap - -test_expect_success 'create partial bitmap state' ' - # pick a commit to represent the repo tip in the past - cutoff=$(git rev-list HEAD~100 -1) && - orig_tip=$(git rev-parse HEAD) && - - # now kill off all of the refs and pretend we had - # just the one tip - rm -rf .git/logs .git/refs/* .git/packed-refs && - git update-ref HEAD $cutoff && - - # and then repack, which will leave us with a nice - # big bitmap pack of the "old" history, and all of - # the new history will be loose, as if it had been pushed - # up incrementally and exploded via unpack-objects - git repack -Ad && - - # and now restore our original tip, as if the pushes - # had happened - git update-ref HEAD $orig_tip -' - -test_partial_bitmap +test_lookup_pack_bitmap () { + test_expect_success 'start the test from scratch' ' + rm -rf * .git + ' + + test_perf_large_repo + + # note that we do everything through config, + # since we want to be able to compare bitmap-aware + # git versus non-bitmap git + # + # We intentionally use the deprecated pack.writebitmaps + # config so that we can test against older versions of git. + test_expect_success 'setup bitmap config' ' + git config pack.writebitmaps true + ' + + # we need to create the tag up front such that it is covered by the repack and + # thus by generated bitmaps. + test_expect_success 'create tags' ' + git tag --message="tag pointing to HEAD" perf-tag HEAD + ' + + test_perf "enable lookup table: $1" ' + git config pack.writeBitmapLookupTable '"$1"' + ' + + test_pack_bitmap +} + +test_lookup_pack_bitmap false +test_lookup_pack_bitmap true test_done diff --git a/t/perf/p5311-pack-bitmaps-fetch.sh b/t/perf/p5311-pack-bitmaps-fetch.sh index 47c3fd7581..426fab87e3 100755 --- a/t/perf/p5311-pack-bitmaps-fetch.sh +++ b/t/perf/p5311-pack-bitmaps-fetch.sh @@ -3,42 +3,52 @@ test_description='performance of fetches from bitmapped packs' . ./perf-lib.sh -test_perf_default_repo - -test_expect_success 'create bitmapped server repo' ' - git config pack.writebitmaps true && - git repack -ad -' - -# simulate a fetch from a repository that last fetched N days ago, for -# various values of N. We do so by following the first-parent chain, -# and assume the first entry in the chain that is N days older than the current -# HEAD is where the HEAD would have been then. -for days in 1 2 4 8 16 32 64 128; do - title=$(printf '%10s' "($days days)") - test_expect_success "setup revs from $days days ago" ' - now=$(git log -1 --format=%ct HEAD) && - then=$(($now - ($days * 86400))) && - tip=$(git rev-list -1 --first-parent --until=$then HEAD) && - { - echo HEAD && - echo ^$tip - } >revs +test_fetch_bitmaps () { + test_expect_success 'setup test directory' ' + rm -fr * .git ' - test_perf "server $title" ' - git pack-objects --stdout --revs \ - --thin --delta-base-offset \ - <revs >tmp.pack - ' + test_perf_default_repo - test_size "size $title" ' - wc -c <tmp.pack + test_expect_success 'create bitmapped server repo' ' + git config pack.writebitmaps true && + git config pack.writeBitmapLookupTable '"$1"' && + git repack -ad ' - test_perf "client $title" ' - git index-pack --stdin --fix-thin <tmp.pack - ' -done + # simulate a fetch from a repository that last fetched N days ago, for + # various values of N. 
We do so by following the first-parent chain, + # and assume the first entry in the chain that is N days older than the current + # HEAD is where the HEAD would have been then. + for days in 1 2 4 8 16 32 64 128; do + title=$(printf '%10s' "($days days)") + test_expect_success "setup revs from $days days ago" ' + now=$(git log -1 --format=%ct HEAD) && + then=$(($now - ($days * 86400))) && + tip=$(git rev-list -1 --first-parent --until=$then HEAD) && + { + echo HEAD && + echo ^$tip + } >revs + ' + + test_perf "server $title (lookup=$1)" ' + git pack-objects --stdout --revs \ + --thin --delta-base-offset \ + <revs >tmp.pack + ' + + test_size "size $title" ' + wc -c <tmp.pack + ' + + test_perf "client $title (lookup=$1)" ' + git index-pack --stdin --fix-thin <tmp.pack + ' + done +} + +test_fetch_bitmaps true +test_fetch_bitmaps false test_done diff --git a/t/perf/p5312-pack-bitmaps-revs.sh b/t/perf/p5312-pack-bitmaps-revs.sh new file mode 100755 index 0000000000..0684b690af --- /dev/null +++ b/t/perf/p5312-pack-bitmaps-revs.sh @@ -0,0 +1,35 @@ +#!/bin/sh + +test_description='Tests pack performance using bitmaps (rev index enabled)' +. ./perf-lib.sh +. "${TEST_DIRECTORY}/perf/lib-bitmap.sh" + +test_lookup_pack_bitmap () { + test_expect_success 'start the test from scratch' ' + rm -rf * .git + ' + + test_perf_large_repo + + test_expect_success 'setup bitmap config' ' + git config pack.writebitmaps true && + git config pack.writeReverseIndex true + ' + + # we need to create the tag up front such that it is covered by the repack and + # thus by generated bitmaps. + test_expect_success 'create tags' ' + git tag --message="tag pointing to HEAD" perf-tag HEAD + ' + + test_perf "enable lookup table: $1" ' + git config pack.writeBitmapLookupTable '"$1"' + ' + + test_pack_bitmap +} + +test_lookup_pack_bitmap false +test_lookup_pack_bitmap true + +test_done diff --git a/t/perf/p5326-multi-pack-bitmaps.sh b/t/perf/p5326-multi-pack-bitmaps.sh index f2fa228f16..d082e6cacb 100755 --- a/t/perf/p5326-multi-pack-bitmaps.sh +++ b/t/perf/p5326-multi-pack-bitmaps.sh @@ -4,49 +4,64 @@ test_description='Tests performance using midx bitmaps' . ./perf-lib.sh . "${TEST_DIRECTORY}/perf/lib-bitmap.sh" -test_perf_large_repo - -# we need to create the tag up front such that it is covered by the repack and -# thus by generated bitmaps. 
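[Aside, not part of the patch: the multi-pack variant (p5326, this hunk) drives the same lookup-table knob but builds the bitmap from the multi-pack index rather than a single pack; the relevant steps from its hunks are:]

    git repack -adb &&
    git multi-pack-index write --bitmap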
-test_expect_success 'create tags' ' - git tag --message="tag pointing to HEAD" perf-tag HEAD -' - -test_expect_success 'start with bitmapped pack' ' - git repack -adb -' - -test_perf 'setup multi-pack index' ' - git multi-pack-index write --bitmap -' - -test_expect_success 'drop pack bitmap' ' - rm -f .git/objects/pack/pack-*.bitmap -' - -test_full_bitmap - -test_expect_success 'create partial bitmap state' ' - # pick a commit to represent the repo tip in the past - cutoff=$(git rev-list HEAD~100 -1) && - orig_tip=$(git rev-parse HEAD) && - - # now pretend we have just one tip - rm -rf .git/logs .git/refs/* .git/packed-refs && - git update-ref HEAD $cutoff && - - # and then repack, which will leave us with a nice - # big bitmap pack of the "old" history, and all of - # the new history will be loose, as if it had been pushed - # up incrementally and exploded via unpack-objects - git repack -Ad && - git multi-pack-index write --bitmap && - - # and now restore our original tip, as if the pushes - # had happened - git update-ref HEAD $orig_tip -' - -test_partial_bitmap +test_bitmap () { + local enabled="$1" + + test_expect_success "remove existing repo (lookup=$enabled)" ' + rm -fr * .git + ' + + test_perf_large_repo + + # we need to create the tag up front such that it is covered by the repack and + # thus by generated bitmaps. + test_expect_success 'create tags' ' + git tag --message="tag pointing to HEAD" perf-tag HEAD + ' + + test_expect_success "use lookup table: $enabled" ' + git config pack.writeBitmapLookupTable '"$enabled"' + ' + + test_expect_success "start with bitmapped pack (lookup=$enabled)" ' + git repack -adb + ' + + test_perf "setup multi-pack index (lookup=$enabled)" ' + git multi-pack-index write --bitmap + ' + + test_expect_success "drop pack bitmap (lookup=$enabled)" ' + rm -f .git/objects/pack/pack-*.bitmap + ' + + test_full_bitmap + + test_expect_success "create partial bitmap state (lookup=$enabled)" ' + # pick a commit to represent the repo tip in the past + cutoff=$(git rev-list HEAD~100 -1) && + orig_tip=$(git rev-parse HEAD) && + + # now pretend we have just one tip + rm -rf .git/logs .git/refs/* .git/packed-refs && + git update-ref HEAD $cutoff && + + # and then repack, which will leave us with a nice + # big bitmap pack of the "old" history, and all of + # the new history will be loose, as if it had been pushed + # up incrementally and exploded via unpack-objects + git repack -Ad && + git multi-pack-index write --bitmap && + + # and now restore our original tip, as if the pushes + # had happened + git update-ref HEAD $orig_tip + ' + + test_partial_bitmap +} + +test_bitmap false +test_bitmap true test_done diff --git a/t/perf/p7527-builtin-fsmonitor.sh b/t/perf/p7527-builtin-fsmonitor.sh index 9338b9ea00..c3f9a4caa4 100755 --- a/t/perf/p7527-builtin-fsmonitor.sh +++ b/t/perf/p7527-builtin-fsmonitor.sh @@ -249,7 +249,7 @@ test_expect_success "Cleanup temp and matrix branches" " do for fsm_val in $fsm_values do - cleanup $uc_val $fsm_val + cleanup $uc_val $fsm_val || return 1 done done " diff --git a/t/t0021-conversion.sh b/t/t0021-conversion.sh index 1c840348bd..abecd75e4e 100755 --- a/t/t0021-conversion.sh +++ b/t/t0021-conversion.sh @@ -17,9 +17,6 @@ tr \ 'nopqrstuvwxyzabcdefghijklmNOPQRSTUVWXYZABCDEFGHIJKLM' EOF -write_script rot13-filter.pl "$PERL_PATH" \ - <"$TEST_DIRECTORY"/t0021/rot13-filter.pl - generate_random_characters () { LEN=$1 NAME=$2 @@ -365,8 +362,8 @@ test_expect_success 'diff does not reuse worktree files that need cleaning' ' test_line_count = 0 
count ' -test_expect_success PERL 'required process filter should filter data' ' - test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" && +test_expect_success 'required process filter should filter data' ' + test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean smudge" && test_config_global filter.protocol.required true && rm -rf repo && mkdir repo && @@ -450,8 +447,8 @@ test_expect_success PERL 'required process filter should filter data' ' ) ' -test_expect_success PERL 'required process filter should filter data for various subcommands' ' - test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" && +test_expect_success 'required process filter should filter data for various subcommands' ' + test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean smudge" && test_config_global filter.protocol.required true && ( cd repo && @@ -561,9 +558,9 @@ test_expect_success PERL 'required process filter should filter data for various ) ' -test_expect_success PERL 'required process filter takes precedence' ' +test_expect_success 'required process filter takes precedence' ' test_config_global filter.protocol.clean false && - test_config_global filter.protocol.process "rot13-filter.pl debug.log clean" && + test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean" && test_config_global filter.protocol.required true && rm -rf repo && mkdir repo && @@ -587,8 +584,8 @@ test_expect_success PERL 'required process filter takes precedence' ' ) ' -test_expect_success PERL 'required process filter should be used only for "clean" operation only' ' - test_config_global filter.protocol.process "rot13-filter.pl debug.log clean" && +test_expect_success 'required process filter should be used only for "clean" operation only' ' + test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean" && rm -rf repo && mkdir repo && ( @@ -622,8 +619,8 @@ test_expect_success PERL 'required process filter should be used only for "clean ) ' -test_expect_success PERL 'required process filter should process multiple packets' ' - test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" && +test_expect_success 'required process filter should process multiple packets' ' + test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean smudge" && test_config_global filter.protocol.required true && rm -rf repo && @@ -687,8 +684,8 @@ test_expect_success PERL 'required process filter should process multiple packet ) ' -test_expect_success PERL 'required process filter with clean error should fail' ' - test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" && +test_expect_success 'required process filter with clean error should fail' ' + test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean smudge" && test_config_global filter.protocol.required true && rm -rf repo && mkdir repo && @@ -706,8 +703,8 @@ test_expect_success PERL 'required process filter with clean error should fail' ) ' -test_expect_success PERL 'process filter should restart after unexpected write failure' ' - test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" && +test_expect_success 'process filter should restart after unexpected write failure' ' + test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean smudge" && rm 
-rf repo && mkdir repo && ( @@ -735,7 +732,7 @@ test_expect_success PERL 'process filter should restart after unexpected write f rm -f debug.log && git checkout --quiet --no-progress . 2>git-stderr.log && - grep "smudge write error at" git-stderr.log && + grep "smudge write error" git-stderr.log && test_i18ngrep "error: external filter" git-stderr.log && cat >expected.log <<-EOF && @@ -761,8 +758,8 @@ test_expect_success PERL 'process filter should restart after unexpected write f ) ' -test_expect_success PERL 'process filter should not be restarted if it signals an error' ' - test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" && +test_expect_success 'process filter should not be restarted if it signals an error' ' + test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean smudge" && rm -rf repo && mkdir repo && ( @@ -804,8 +801,8 @@ test_expect_success PERL 'process filter should not be restarted if it signals a ) ' -test_expect_success PERL 'process filter abort stops processing of all further files' ' - test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" && +test_expect_success 'process filter abort stops processing of all further files' ' + test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean smudge" && rm -rf repo && mkdir repo && ( @@ -861,10 +858,10 @@ test_expect_success PERL 'invalid process filter must fail (and not hang!)' ' ) ' -test_expect_success PERL 'delayed checkout in process filter' ' - test_config_global filter.a.process "rot13-filter.pl a.log clean smudge delay" && +test_expect_success 'delayed checkout in process filter' ' + test_config_global filter.a.process "test-tool rot13-filter --log=a.log clean smudge delay" && test_config_global filter.a.required true && - test_config_global filter.b.process "rot13-filter.pl b.log clean smudge delay" && + test_config_global filter.b.process "test-tool rot13-filter --log=b.log clean smudge delay" && test_config_global filter.b.required true && rm -rf repo && @@ -940,8 +937,8 @@ test_expect_success PERL 'delayed checkout in process filter' ' ) ' -test_expect_success PERL 'missing file in delayed checkout' ' - test_config_global filter.bug.process "rot13-filter.pl bug.log clean smudge delay" && +test_expect_success 'missing file in delayed checkout' ' + test_config_global filter.bug.process "test-tool rot13-filter --log=bug.log clean smudge delay" && test_config_global filter.bug.required true && rm -rf repo && @@ -960,8 +957,8 @@ test_expect_success PERL 'missing file in delayed checkout' ' grep "error: .missing-delay\.a. 
was not filtered properly" git-stderr.log ' -test_expect_success PERL 'invalid file in delayed checkout' ' - test_config_global filter.bug.process "rot13-filter.pl bug.log clean smudge delay" && +test_expect_success 'invalid file in delayed checkout' ' + test_config_global filter.bug.process "test-tool rot13-filter --log=bug.log clean smudge delay" && test_config_global filter.bug.required true && rm -rf repo && @@ -990,10 +987,10 @@ do mode_prereq='UTF8_NFD_TO_NFC' ;; esac - test_expect_success PERL,SYMLINKS,$mode_prereq \ + test_expect_success SYMLINKS,$mode_prereq \ "delayed checkout with $mode-collision don't write to the wrong place" ' test_config_global filter.delay.process \ - "\"$TEST_ROOT/rot13-filter.pl\" --always-delay delayed.log clean smudge delay" && + "test-tool rot13-filter --always-delay --log=delayed.log clean smudge delay" && test_config_global filter.delay.required true && git init $mode-collision && @@ -1026,12 +1023,12 @@ do ' done -test_expect_success PERL,SYMLINKS,CASE_INSENSITIVE_FS \ +test_expect_success SYMLINKS,CASE_INSENSITIVE_FS \ "delayed checkout with submodule collision don't write to the wrong place" ' git init collision-with-submodule && ( cd collision-with-submodule && - git config filter.delay.process "\"$TEST_ROOT/rot13-filter.pl\" --always-delay delayed.log clean smudge delay" && + git config filter.delay.process "test-tool rot13-filter --always-delay --log=delayed.log clean smudge delay" && git config filter.delay.required true && # We need Git to treat the submodule "a" and the @@ -1062,11 +1059,11 @@ test_expect_success PERL,SYMLINKS,CASE_INSENSITIVE_FS \ ) ' -test_expect_success PERL 'setup for progress tests' ' +test_expect_success 'setup for progress tests' ' git init progress && ( cd progress && - git config filter.delay.process "rot13-filter.pl delay-progress.log clean smudge delay" && + git config filter.delay.process "test-tool rot13-filter --log=delay-progress.log clean smudge delay" && git config filter.delay.required true && echo "*.a filter=delay" >.gitattributes && @@ -1132,12 +1129,12 @@ do ' done -test_expect_success PERL 'delayed checkout correctly reports the number of updated entries' ' +test_expect_success 'delayed checkout correctly reports the number of updated entries' ' rm -rf repo && git init repo && ( cd repo && - git config filter.delay.process "../rot13-filter.pl delayed.log clean smudge delay" && + git config filter.delay.process "test-tool rot13-filter --log=delayed.log clean smudge delay" && git config filter.delay.required true && echo "*.a filter=delay" >.gitattributes && diff --git a/t/t0021/rot13-filter.pl b/t/t0021/rot13-filter.pl deleted file mode 100644 index 7bb93768f3..0000000000 --- a/t/t0021/rot13-filter.pl +++ /dev/null @@ -1,247 +0,0 @@ -# -# Example implementation for the Git filter protocol version 2 -# See Documentation/gitattributes.txt, section "Filter Protocol" -# -# Usage: rot13-filter.pl [--always-delay] <log path> <capabilities> -# -# Log path defines a debug log file that the script writes to. The -# subsequent arguments define a list of supported protocol capabilities -# ("clean", "smudge", etc). -# -# When --always-delay is given all pathnames with the "can-delay" flag -# that don't appear on the list bellow are delayed with a count of 1 -# (see more below). -# -# This implementation supports special test cases: -# (1) If data with the pathname "clean-write-fail.r" is processed with -# a "clean" operation then the write operation will die. 
-# (2) If data with the pathname "smudge-write-fail.r" is processed with -# a "smudge" operation then the write operation will die. -# (3) If data with the pathname "error.r" is processed with any -# operation then the filter signals that it cannot or does not want -# to process the file. -# (4) If data with the pathname "abort.r" is processed with any -# operation then the filter signals that it cannot or does not want -# to process the file and any file after that is processed with the -# same command. -# (5) If data with a pathname that is a key in the DELAY hash is -# requested (e.g. "test-delay10.a") then the filter responds with -# a "delay" status and sets the "requested" field in the DELAY hash. -# The filter will signal the availability of this object after -# "count" (field in DELAY hash) "list_available_blobs" commands. -# (6) If data with the pathname "missing-delay.a" is processed that the -# filter will drop the path from the "list_available_blobs" response. -# (7) If data with the pathname "invalid-delay.a" is processed that the -# filter will add the path "unfiltered" which was not delayed before -# to the "list_available_blobs" response. -# - -use 5.008; -sub gitperllib { - # Git assumes that all path lists are Unix-y colon-separated ones. But - # when the Git for Windows executes the test suite, its MSYS2 Bash - # calls git.exe, and colon-separated path lists are converted into - # Windows-y semicolon-separated lists of *Windows* paths (which - # naturally contain a colon after the drive letter, so splitting by - # colons simply does not cut it). - # - # Detect semicolon-separated path list and handle them appropriately. - - if ($ENV{GITPERLLIB} =~ /;/) { - return split(/;/, $ENV{GITPERLLIB}); - } - return split(/:/, $ENV{GITPERLLIB}); -} -use lib (gitperllib()); -use strict; -use warnings; -use IO::File; -use Git::Packet; - -my $MAX_PACKET_CONTENT_SIZE = 65516; - -my $always_delay = 0; -if ( $ARGV[0] eq '--always-delay' ) { - $always_delay = 1; - shift @ARGV; -} - -my $log_file = shift @ARGV; -my @capabilities = @ARGV; - -open my $debug, ">>", $log_file or die "cannot open log file: $!"; - -my %DELAY = ( - 'test-delay10.a' => { "requested" => 0, "count" => 1 }, - 'test-delay11.a' => { "requested" => 0, "count" => 1 }, - 'test-delay20.a' => { "requested" => 0, "count" => 2 }, - 'test-delay10.b' => { "requested" => 0, "count" => 1 }, - 'missing-delay.a' => { "requested" => 0, "count" => 1 }, - 'invalid-delay.a' => { "requested" => 0, "count" => 1 }, -); - -sub rot13 { - my $str = shift; - $str =~ y/A-Za-z/N-ZA-Mn-za-m/; - return $str; -} - -print $debug "START\n"; -$debug->flush(); - -packet_initialize("git-filter", 2); - -my %remote_caps = packet_read_and_check_capabilities("clean", "smudge", "delay"); -packet_check_and_write_capabilities(\%remote_caps, @capabilities); - -print $debug "init handshake complete\n"; -$debug->flush(); - -while (1) { - my ( $res, $command ) = packet_key_val_read("command"); - if ( $res == -1 ) { - print $debug "STOP\n"; - exit(); - } - print $debug "IN: $command"; - $debug->flush(); - - if ( $command eq "list_available_blobs" ) { - # Flush - packet_compare_lists([1, ""], packet_bin_read()) || - die "bad list_available_blobs end"; - - foreach my $pathname ( sort keys %DELAY ) { - if ( $DELAY{$pathname}{"requested"} >= 1 ) { - $DELAY{$pathname}{"count"} = $DELAY{$pathname}{"count"} - 1; - if ( $pathname eq "invalid-delay.a" ) { - # Send Git a pathname that was not delayed earlier - packet_txt_write("pathname=unfiltered"); - } - if ( $pathname eq 
"missing-delay.a" ) { - # Do not signal Git that this file is available - } elsif ( $DELAY{$pathname}{"count"} == 0 ) { - print $debug " $pathname"; - packet_txt_write("pathname=$pathname"); - } - } - } - - packet_flush(); - - print $debug " [OK]\n"; - $debug->flush(); - packet_txt_write("status=success"); - packet_flush(); - } else { - my ( $res, $pathname ) = packet_key_val_read("pathname"); - if ( $res == -1 ) { - die "unexpected EOF while expecting pathname"; - } - print $debug " $pathname"; - $debug->flush(); - - # Read until flush - my ( $done, $buffer ) = packet_txt_read(); - while ( $buffer ne '' ) { - if ( $buffer eq "can-delay=1" ) { - if ( exists $DELAY{$pathname} and $DELAY{$pathname}{"requested"} == 0 ) { - $DELAY{$pathname}{"requested"} = 1; - } elsif ( !exists $DELAY{$pathname} and $always_delay ) { - $DELAY{$pathname} = { "requested" => 1, "count" => 1 }; - } - } elsif ($buffer =~ /^(ref|treeish|blob)=/) { - print $debug " $buffer"; - } else { - # In general, filters need to be graceful about - # new metadata, since it's documented that we - # can pass any key-value pairs, but for tests, - # let's be a little stricter. - die "Unknown message '$buffer'"; - } - - ( $done, $buffer ) = packet_txt_read(); - } - if ( $done == -1 ) { - die "unexpected EOF after pathname '$pathname'"; - } - - my $input = ""; - { - binmode(STDIN); - my $buffer; - my $done = 0; - while ( !$done ) { - ( $done, $buffer ) = packet_bin_read(); - $input .= $buffer; - } - if ( $done == -1 ) { - die "unexpected EOF while reading input for '$pathname'"; - } - print $debug " " . length($input) . " [OK] -- "; - $debug->flush(); - } - - my $output; - if ( exists $DELAY{$pathname} and exists $DELAY{$pathname}{"output"} ) { - $output = $DELAY{$pathname}{"output"} - } elsif ( $pathname eq "error.r" or $pathname eq "abort.r" ) { - $output = ""; - } elsif ( $command eq "clean" and grep( /^clean$/, @capabilities ) ) { - $output = rot13($input); - } elsif ( $command eq "smudge" and grep( /^smudge$/, @capabilities ) ) { - $output = rot13($input); - } else { - die "bad command '$command'"; - } - - if ( $pathname eq "error.r" ) { - print $debug "[ERROR]\n"; - $debug->flush(); - packet_txt_write("status=error"); - packet_flush(); - } elsif ( $pathname eq "abort.r" ) { - print $debug "[ABORT]\n"; - $debug->flush(); - packet_txt_write("status=abort"); - packet_flush(); - } elsif ( $command eq "smudge" and - exists $DELAY{$pathname} and - $DELAY{$pathname}{"requested"} == 1 ) { - print $debug "[DELAYED]\n"; - $debug->flush(); - packet_txt_write("status=delayed"); - packet_flush(); - $DELAY{$pathname}{"requested"} = 2; - $DELAY{$pathname}{"output"} = $output; - } else { - packet_txt_write("status=success"); - packet_flush(); - - if ( $pathname eq "${command}-write-fail.r" ) { - print $debug "[WRITE FAIL]\n"; - $debug->flush(); - die "${command} write error"; - } - - print $debug "OUT: " . length($output) . 
" "; - $debug->flush(); - - while ( length($output) > 0 ) { - my $packet = substr( $output, 0, $MAX_PACKET_CONTENT_SIZE ); - packet_bin_write($packet); - # dots represent the number of packets - print $debug "."; - if ( length($output) > $MAX_PACKET_CONTENT_SIZE ) { - $output = substr( $output, $MAX_PACKET_CONTENT_SIZE ); - } else { - $output = ""; - } - } - packet_flush(); - print $debug " [OK]\n"; - $debug->flush(); - packet_flush(); - } - } -} diff --git a/t/t0040-parse-options.sh b/t/t0040-parse-options.sh index ed2fb620a9..5cc62306e3 100755 --- a/t/t0040-parse-options.sh +++ b/t/t0040-parse-options.sh @@ -456,4 +456,257 @@ test_expect_success '--end-of-options treats remainder as args' ' --end-of-options --verbose ' +test_expect_success 'KEEP_DASHDASH works' ' + test-tool parse-options-flags --keep-dashdash cmd --opt=1 -- --opt=2 --unknown >actual && + cat >expect <<-\EOF && + opt: 1 + arg 00: -- + arg 01: --opt=2 + arg 02: --unknown + EOF + test_cmp expect actual +' + +test_expect_success 'KEEP_ARGV0 works' ' + test-tool parse-options-flags --keep-argv0 cmd arg0 --opt=3 >actual && + cat >expect <<-\EOF && + opt: 3 + arg 00: cmd + arg 01: arg0 + EOF + test_cmp expect actual +' + +test_expect_success 'STOP_AT_NON_OPTION works' ' + test-tool parse-options-flags --stop-at-non-option cmd --opt=4 arg0 --opt=5 --unknown >actual && + cat >expect <<-\EOF && + opt: 4 + arg 00: arg0 + arg 01: --opt=5 + arg 02: --unknown + EOF + test_cmp expect actual +' + +test_expect_success 'KEEP_UNKNOWN_OPT works' ' + test-tool parse-options-flags --keep-unknown-opt cmd --unknown=1 --opt=6 -u2 >actual && + cat >expect <<-\EOF && + opt: 6 + arg 00: --unknown=1 + arg 01: -u2 + EOF + test_cmp expect actual +' + +test_expect_success 'NO_INTERNAL_HELP works for -h' ' + test_expect_code 129 test-tool parse-options-flags --no-internal-help cmd -h 2>err && + grep "^error: unknown switch \`h$SQ" err && + grep "^usage: " err +' + +for help_opt in help help-all +do + test_expect_success "NO_INTERNAL_HELP works for --$help_opt" " + test_expect_code 129 test-tool parse-options-flags --no-internal-help cmd --$help_opt 2>err && + grep '^error: unknown option \`'$help_opt\' err && + grep '^usage: ' err + " +done + +test_expect_success 'KEEP_UNKNOWN_OPT | NO_INTERNAL_HELP works' ' + test-tool parse-options-flags --keep-unknown-opt --no-internal-help cmd -h --help --help-all >actual && + cat >expect <<-\EOF && + opt: 0 + arg 00: -h + arg 01: --help + arg 02: --help-all + EOF + test_cmp expect actual +' + +test_expect_success 'subcommand - no subcommand shows error and usage' ' + test_expect_code 129 test-tool parse-subcommand cmd 2>err && + grep "^error: need a subcommand" err && + grep ^usage: err +' + +test_expect_success 'subcommand - subcommand after -- shows error and usage' ' + test_expect_code 129 test-tool parse-subcommand cmd -- subcmd-one 2>err && + grep "^error: need a subcommand" err && + grep ^usage: err +' + +test_expect_success 'subcommand - subcommand after --end-of-options shows error and usage' ' + test_expect_code 129 test-tool parse-subcommand cmd --end-of-options subcmd-one 2>err && + grep "^error: need a subcommand" err && + grep ^usage: err +' + +test_expect_success 'subcommand - unknown subcommand shows error and usage' ' + test_expect_code 129 test-tool parse-subcommand cmd nope 2>err && + grep "^error: unknown subcommand: \`nope$SQ" err && + grep ^usage: err +' + +test_expect_success 'subcommand - subcommands cannot be abbreviated' ' + test_expect_code 129 test-tool parse-subcommand cmd subcmd-o 
2>err && + grep "^error: unknown subcommand: \`subcmd-o$SQ$" err && + grep ^usage: err +' + +test_expect_success 'subcommand - no negated subcommands' ' + test_expect_code 129 test-tool parse-subcommand cmd no-subcmd-one 2>err && + grep "^error: unknown subcommand: \`no-subcmd-one$SQ" err && + grep ^usage: err +' + +test_expect_success 'subcommand - simple' ' + test-tool parse-subcommand cmd subcmd-two >actual && + cat >expect <<-\EOF && + opt: 0 + fn: subcmd_two + arg 00: subcmd-two + EOF + test_cmp expect actual +' + +test_expect_success 'subcommand - stop parsing at the first subcommand' ' + test-tool parse-subcommand cmd --opt=1 subcmd-two subcmd-one --opt=2 >actual && + cat >expect <<-\EOF && + opt: 1 + fn: subcmd_two + arg 00: subcmd-two + arg 01: subcmd-one + arg 02: --opt=2 + EOF + test_cmp expect actual +' + +test_expect_success 'subcommand - KEEP_ARGV0' ' + test-tool parse-subcommand --keep-argv0 cmd subcmd-two >actual && + cat >expect <<-\EOF && + opt: 0 + fn: subcmd_two + arg 00: cmd + arg 01: subcmd-two + EOF + test_cmp expect actual +' + +test_expect_success 'subcommand - SUBCOMMAND_OPTIONAL + subcommand not given' ' + test-tool parse-subcommand --subcommand-optional cmd >actual && + cat >expect <<-\EOF && + opt: 0 + fn: subcmd_one + EOF + test_cmp expect actual +' + +test_expect_success 'subcommand - SUBCOMMAND_OPTIONAL + given subcommand' ' + test-tool parse-subcommand --subcommand-optional cmd subcmd-two branch file >actual && + cat >expect <<-\EOF && + opt: 0 + fn: subcmd_two + arg 00: subcmd-two + arg 01: branch + arg 02: file + EOF + test_cmp expect actual +' + +test_expect_success 'subcommand - SUBCOMMAND_OPTIONAL + subcommand not given + unknown dashless args' ' + test-tool parse-subcommand --subcommand-optional cmd branch file >actual && + cat >expect <<-\EOF && + opt: 0 + fn: subcmd_one + arg 00: branch + arg 01: file + EOF + test_cmp expect actual +' + +test_expect_success 'subcommand - SUBCOMMAND_OPTIONAL + subcommand not given + unknown option' ' + test_expect_code 129 test-tool parse-subcommand --subcommand-optional cmd --subcommand-opt 2>err && + grep "^error: unknown option" err && + grep ^usage: err +' + +test_expect_success 'subcommand - SUBCOMMAND_OPTIONAL | KEEP_UNKNOWN_OPT + subcommand not given + unknown option' ' + test-tool parse-subcommand --subcommand-optional --keep-unknown-opt cmd --subcommand-opt >actual && + cat >expect <<-\EOF && + opt: 0 + fn: subcmd_one + arg 00: --subcommand-opt + EOF + test_cmp expect actual +' + +test_expect_success 'subcommand - SUBCOMMAND_OPTIONAL | KEEP_UNKNOWN_OPT + subcommand ignored after unknown option' ' + test-tool parse-subcommand --subcommand-optional --keep-unknown-opt cmd --subcommand-opt subcmd-two >actual && + cat >expect <<-\EOF && + opt: 0 + fn: subcmd_one + arg 00: --subcommand-opt + arg 01: subcmd-two + EOF + test_cmp expect actual +' + +test_expect_success 'subcommand - SUBCOMMAND_OPTIONAL | KEEP_UNKNOWN_OPT + command and subcommand options cannot be mixed' ' + test-tool parse-subcommand --subcommand-optional --keep-unknown-opt cmd --subcommand-opt branch --opt=1 >actual && + cat >expect <<-\EOF && + opt: 0 + fn: subcmd_one + arg 00: --subcommand-opt + arg 01: branch + arg 02: --opt=1 + EOF + test_cmp expect actual +' + +test_expect_success 'subcommand - SUBCOMMAND_OPTIONAL | KEEP_UNKNOWN_OPT | KEEP_ARGV0' ' + test-tool parse-subcommand --subcommand-optional --keep-unknown-opt --keep-argv0 cmd --subcommand-opt branch >actual && + cat >expect <<-\EOF && + opt: 0 + fn: subcmd_one + arg 00: cmd + arg 01: 
--subcommand-opt + arg 02: branch + EOF + test_cmp expect actual +' + +test_expect_success 'subcommand - SUBCOMMAND_OPTIONAL | KEEP_UNKNOWN_OPT | KEEP_DASHDASH' ' + test-tool parse-subcommand --subcommand-optional --keep-unknown-opt --keep-dashdash cmd -- --subcommand-opt file >actual && + cat >expect <<-\EOF && + opt: 0 + fn: subcmd_one + arg 00: -- + arg 01: --subcommand-opt + arg 02: file + EOF + test_cmp expect actual +' + +test_expect_success 'subcommand - completion helper' ' + test-tool parse-subcommand cmd --git-completion-helper >actual && + echo "subcmd-one subcmd-two --opt= --no-opt" >expect && + test_cmp expect actual +' + +test_expect_success 'subcommands are incompatible with STOP_AT_NON_OPTION' ' + test_must_fail test-tool parse-subcommand --stop-at-non-option cmd subcmd-one 2>err && + grep ^BUG err +' + +test_expect_success 'subcommands are incompatible with KEEP_UNKNOWN_OPT unless in combination with SUBCOMMAND_OPTIONAL' ' + test_must_fail test-tool parse-subcommand --keep-unknown-opt cmd subcmd-two 2>err && + grep ^BUG err +' + +test_expect_success 'subcommands are incompatible with KEEP_DASHDASH unless in combination with SUBCOMMAND_OPTIONAL' ' + test_must_fail test-tool parse-subcommand --keep-dashdash cmd subcmd-two 2>err && + grep ^BUG err +' + test_done diff --git a/t/t0060-path-utils.sh b/t/t0060-path-utils.sh index 1f2007e62b..68e29c904a 100755 --- a/t/t0060-path-utils.sh +++ b/t/t0060-path-utils.sh @@ -22,7 +22,7 @@ relative_path() { test_submodule_relative_url() { test_expect_success "test_submodule_relative_url: $1 $2 $3 => $4" " - actual=\$(git submodule--helper resolve-relative-url-test '$1' '$2' '$3') && + actual=\$(test-tool submodule resolve-relative-url '$1' '$2' '$3') && test \"\$actual\" = '$4' " } diff --git a/t/t0091-bugreport.sh b/t/t0091-bugreport.sh index 08f5fe9cae..b6d2f591ac 100755 --- a/t/t0091-bugreport.sh +++ b/t/t0091-bugreport.sh @@ -78,4 +78,52 @@ test_expect_success 'indicates populated hooks' ' test_cmp expect actual ' +test_expect_success UNZIP '--diagnose creates diagnostics zip archive' ' + test_when_finished rm -rf report && + + git bugreport --diagnose -o report -s test >out && + + zip_path=report/git-diagnostics-test.zip && + grep "Available space" out && + test_path_is_file "$zip_path" && + + # Check zipped archive content + "$GIT_UNZIP" -p "$zip_path" diagnostics.log >out && + test_file_not_empty out && + + "$GIT_UNZIP" -p "$zip_path" packs-local.txt >out && + grep ".git/objects" out && + + "$GIT_UNZIP" -p "$zip_path" objects-local.txt >out && + grep "^Total: [0-9][0-9]*" out && + + # Should not include .git directory contents by default + ! "$GIT_UNZIP" -l "$zip_path" | grep ".git/" +' + +test_expect_success UNZIP '--diagnose=stats excludes .git dir contents' ' + test_when_finished rm -rf report && + + git bugreport --diagnose=stats -o report -s test >out && + + # Includes pack quantity/size info + "$GIT_UNZIP" -p "$zip_path" packs-local.txt >out && + grep ".git/objects" out && + + # Does not include .git directory contents + ! 
"$GIT_UNZIP" -l "$zip_path" | grep ".git/" +' + +test_expect_success UNZIP '--diagnose=all includes .git dir contents' ' + test_when_finished rm -rf report && + + git bugreport --diagnose=all -o report -s test >out && + + # Includes .git directory contents + "$GIT_UNZIP" -l "$zip_path" | grep ".git/" && + + "$GIT_UNZIP" -p "$zip_path" .git/HEAD >out && + test_file_not_empty out +' + test_done diff --git a/t/t0092-diagnose.sh b/t/t0092-diagnose.sh new file mode 100755 index 0000000000..fca9b58489 --- /dev/null +++ b/t/t0092-diagnose.sh @@ -0,0 +1,60 @@ +#!/bin/sh + +test_description='git diagnose' + +TEST_PASSES_SANITIZE_LEAK=true +. ./test-lib.sh + +test_expect_success UNZIP 'creates diagnostics zip archive' ' + test_when_finished rm -rf report && + + git diagnose -o report -s test >out && + grep "Available space" out && + + zip_path=report/git-diagnostics-test.zip && + test_path_is_file "$zip_path" && + + # Check zipped archive content + "$GIT_UNZIP" -p "$zip_path" diagnostics.log >out && + test_file_not_empty out && + + "$GIT_UNZIP" -p "$zip_path" packs-local.txt >out && + grep ".git/objects" out && + + "$GIT_UNZIP" -p "$zip_path" objects-local.txt >out && + grep "^Total: [0-9][0-9]*" out && + + # Should not include .git directory contents by default + ! "$GIT_UNZIP" -l "$zip_path" | grep ".git/" +' + +test_expect_success UNZIP '--mode=stats excludes .git dir contents' ' + test_when_finished rm -rf report && + + git diagnose -o report -s test --mode=stats >out && + + # Includes pack quantity/size info + "$GIT_UNZIP" -p "$zip_path" packs-local.txt >out && + grep ".git/objects" out && + + # Does not include .git directory contents + ! "$GIT_UNZIP" -l "$zip_path" | grep ".git/" +' + +test_expect_success UNZIP '--mode=all includes .git dir contents' ' + test_when_finished rm -rf report && + + git diagnose -o report -s test --mode=all >out && + + # Includes pack quantity/size info + "$GIT_UNZIP" -p "$zip_path" packs-local.txt >out && + grep ".git/objects" out && + + # Includes .git directory contents + "$GIT_UNZIP" -l "$zip_path" | grep ".git/" && + + "$GIT_UNZIP" -p "$zip_path" .git/HEAD >out && + test_file_not_empty out +' + +test_done diff --git a/t/t1060-object-corruption.sh b/t/t1060-object-corruption.sh index 5b8e47e346..35261afc9d 100755 --- a/t/t1060-object-corruption.sh +++ b/t/t1060-object-corruption.sh @@ -139,4 +139,11 @@ test_expect_success 'internal tree objects are not "missing"' ' ) ' +test_expect_success 'partial clone of corrupted repository' ' + test_config -C misnamed uploadpack.allowFilter true && + git clone --no-local --no-checkout --filter=blob:none \ + misnamed corrupt-partial && \ + test_must_fail git -C corrupt-partial checkout --force +' + test_done diff --git a/t/t1092-sparse-checkout-compatibility.sh b/t/t1092-sparse-checkout-compatibility.sh index a6a14c8a21..b9350c075c 100755 --- a/t/t1092-sparse-checkout-compatibility.sh +++ b/t/t1092-sparse-checkout-compatibility.sh @@ -380,6 +380,15 @@ test_expect_success 'checkout with modified sparse directory' ' test_all_match git checkout base ' +test_expect_success 'checkout orphan then non-orphan' ' + init_repos && + + test_all_match git checkout --orphan test-orphan && + test_all_match git status --porcelain=v2 && + test_all_match git checkout base && + test_all_match git status --porcelain=v2 +' + test_expect_success 'add outside sparse cone' ' init_repos && @@ -556,7 +565,7 @@ test_expect_success 'blame with pathspec inside sparse definition' ' deep/deeper1/a \ deep/deeper1/deepest/a do - test_all_match git blame 
$file + test_all_match git blame $file || return 1 done ' @@ -567,7 +576,7 @@ test_expect_success 'blame with pathspec outside sparse definition' ' init_repos && test_sparse_match git sparse-checkout set && - for file in a \ + for file in \ deep/a \ deep/deeper1/a \ deep/deeper1/deepest/a @@ -579,7 +588,7 @@ test_expect_success 'blame with pathspec outside sparse definition' ' # We compare sparse-checkout-err and sparse-index-err in # `test_sparse_match`. Given we know they are the same, we # only check the content of sparse-index-err here. - test_cmp expect sparse-index-err + test_cmp expect sparse-index-err || return 1 done ' @@ -1571,7 +1580,7 @@ test_expect_success 'sparse index is not expanded: blame' ' deep/deeper1/a \ deep/deeper1/deepest/a do - ensure_not_expanded blame $file + ensure_not_expanded blame $file || return 1 done ' @@ -1907,7 +1916,7 @@ test_expect_success 'rm pathspec outside sparse definition' ' test_sparse_match test_must_fail git rm $file && test_sparse_match test_must_fail git rm --cached $file && test_sparse_match git rm --sparse $file && - test_sparse_match git status --porcelain=v2 + test_sparse_match git status --porcelain=v2 || return 1 done && cat >folder1-full <<-EOF && diff --git a/t/t1450-fsck.sh b/t/t1450-fsck.sh index 53c2aa10b7..ace4556788 100755 --- a/t/t1450-fsck.sh +++ b/t/t1450-fsck.sh @@ -507,6 +507,54 @@ test_expect_success 'rev-list --verify-objects with bad sha1' ' test_i18ngrep -q "error: hash mismatch $(dirname $new)$(test_oid ff_2)" out ' +# An actual bit corruption is more likely than swapped commits, but +# this provides an easy way to have commits which don't match their purported +# hashes, but which aren't so broken we can't read them at all. +test_expect_success 'rev-list --verify-objects notices swapped commits' ' + git init swapped-commits && + ( + cd swapped-commits && + test_commit one && + test_commit two && + one_oid=$(git rev-parse HEAD) && + two_oid=$(git rev-parse HEAD^) && + one=.git/objects/$(test_oid_to_path $one_oid) && + two=.git/objects/$(test_oid_to_path $two_oid) && + mv $one tmp && + mv $two $one && + mv tmp $two && + test_must_fail git rev-list --verify-objects HEAD + ) +' + +test_expect_success 'set up repository with commit-graph' ' + git init corrupt-graph && + ( + cd corrupt-graph && + test_commit one && + test_commit two && + git commit-graph write --reachable + ) +' + +corrupt_graph_obj () { + oid=$(git -C corrupt-graph rev-parse "$1") && + obj=corrupt-graph/.git/objects/$(test_oid_to_path $oid) && + test_when_finished 'mv backup $obj' && + mv $obj backup && + echo garbage >$obj +} + +test_expect_success 'rev-list --verify-objects with commit graph (tip)' ' + corrupt_graph_obj HEAD && + test_must_fail git -C corrupt-graph rev-list --verify-objects HEAD +' + +test_expect_success 'rev-list --verify-objects with commit graph (parent)' ' + corrupt_graph_obj HEAD^ && + test_must_fail git -C corrupt-graph rev-list --verify-objects HEAD +' + test_expect_success 'force fsck to ignore double author' ' git cat-file commit HEAD >basis && sed "s/^author .*/&,&/" <basis | tr , \\n >multiple-authors && diff --git a/t/t1502-rev-parse-parseopt.sh b/t/t1502-rev-parse-parseopt.sh index 284fe18e72..de1d48f3ba 100755 --- a/t/t1502-rev-parse-parseopt.sh +++ b/t/t1502-rev-parse-parseopt.sh @@ -306,6 +306,13 @@ test_expect_success 'test --parseopt help output: "wrapped" options normal "or:" test_cmp expect actual ' +test_expect_success 'test --parseopt invalid opt-spec' ' + test_write_lines x -- "=, x" >spec && + echo "fatal: missing 
opt-spec before option flags" >expect && + test_must_fail git rev-parse --parseopt -- >out <spec 2>err && + test_cmp expect err +' + test_expect_success 'test --parseopt help output: multi-line blurb after empty line' ' sed -e "s/^|//" >spec <<-\EOF && |cmd [--some-option] diff --git a/t/t2080-parallel-checkout-basics.sh b/t/t2080-parallel-checkout-basics.sh index c683e60007..00ce3033d3 100755 --- a/t/t2080-parallel-checkout-basics.sh +++ b/t/t2080-parallel-checkout-basics.sh @@ -230,12 +230,9 @@ test_expect_success SYMLINKS 'parallel checkout checks for symlinks in leading d # check the final report including sequential, parallel, and delayed entries # all at the same time. So we must have finer control of the parallel checkout # variables. -test_expect_success PERL '"git checkout ." report should not include failed entries' ' - write_script rot13-filter.pl "$PERL_PATH" \ - <"$TEST_DIRECTORY"/t0021/rot13-filter.pl && - +test_expect_success '"git checkout ." report should not include failed entries' ' test_config_global filter.delay.process \ - "\"$(pwd)/rot13-filter.pl\" --always-delay delayed.log clean smudge delay" && + "test-tool rot13-filter --always-delay --log=delayed.log clean smudge delay" && test_config_global filter.delay.required true && test_config_global filter.cat.clean cat && test_config_global filter.cat.smudge cat && diff --git a/t/t2082-parallel-checkout-attributes.sh b/t/t2082-parallel-checkout-attributes.sh index 2525457961..f3511cd43a 100755 --- a/t/t2082-parallel-checkout-attributes.sh +++ b/t/t2082-parallel-checkout-attributes.sh @@ -138,12 +138,9 @@ test_expect_success 'parallel-checkout and external filter' ' # The delayed queue is independent from the parallel queue, and they should be # able to work together in the same checkout process. # -test_expect_success PERL 'parallel-checkout and delayed checkout' ' - write_script rot13-filter.pl "$PERL_PATH" \ - <"$TEST_DIRECTORY"/t0021/rot13-filter.pl && - +test_expect_success 'parallel-checkout and delayed checkout' ' test_config_global filter.delay.process \ - "\"$(pwd)/rot13-filter.pl\" --always-delay \"$(pwd)/delayed.log\" clean smudge delay" && + "test-tool rot13-filter --always-delay --log=\"$(pwd)/delayed.log\" clean smudge delay" && test_config_global filter.delay.required true && echo "abcd" >original && diff --git a/t/t2407-worktree-heads.sh b/t/t2407-worktree-heads.sh index 50815acd3e..019a40df2c 100755 --- a/t/t2407-worktree-heads.sh +++ b/t/t2407-worktree-heads.sh @@ -41,10 +41,10 @@ test_expect_success 'setup' ' test_expect_success 'refuse to overwrite: checked out in worktree' ' for i in 1 2 3 4 do - test_must_fail git branch -f wt-$i HEAD 2>err + test_must_fail git branch -f wt-$i HEAD 2>err && grep "cannot force update the branch" err && - test_must_fail git branch -D wt-$i 2>err + test_must_fail git branch -D wt-$i 2>err && grep "Cannot delete branch" err || return 1 done ' diff --git a/t/t3206-range-diff.sh b/t/t3206-range-diff.sh index d12e4e4cc6..459beaf7d9 100755 --- a/t/t3206-range-diff.sh +++ b/t/t3206-range-diff.sh @@ -162,7 +162,7 @@ test_expect_success 'A^! and A^-<n> (unmodified)' ' ' test_expect_success 'A^{/..} is not mistaken for a range' ' - test_must_fail git range-diff topic^.. topic^{/..} 2>error && + test_must_fail git range-diff topic^.. 
topic^{/..} -- 2>error && test_i18ngrep "not a commit range" error ' @@ -772,6 +772,17 @@ test_expect_success '--left-only/--right-only' ' test_cmp expect actual ' +test_expect_success 'ranges with pathspecs' ' + git range-diff topic...mode-only-change -- other-file >actual && + test_line_count = 2 actual && + topic_oid=$(git rev-parse --short topic) && + mode_change_oid=$(git rev-parse --short mode-only-change^) && + file_change_oid=$(git rev-parse --short mode-only-change) && + grep "$mode_change_oid" actual && + ! grep "$file_change_oid" actual && + ! grep "$topic_oid" actual +' + test_expect_success 'submodule changes are shown irrespective of diff.submodule' ' git init sub-repo && test_commit -C sub-repo sub-first && diff --git a/t/t3301-notes.sh b/t/t3301-notes.sh index d742be8840..3288aaec7d 100755 --- a/t/t3301-notes.sh +++ b/t/t3301-notes.sh @@ -505,6 +505,11 @@ test_expect_success 'list notes with "git notes"' ' test_cmp expect actual ' +test_expect_success '"git notes" without subcommand does not take arguments' ' + test_expect_code 129 git notes HEAD^^ 2>err && + grep "^error: unknown subcommand" err +' + test_expect_success 'list specific note with "git notes list <object>"' ' git rev-parse refs/notes/commits:$commit_3 >expect && git notes list HEAD^^ >actual && diff --git a/t/t3701-add-interactive.sh b/t/t3701-add-interactive.sh index b354fb39de..5841f280fb 100755 --- a/t/t3701-add-interactive.sh +++ b/t/t3701-add-interactive.sh @@ -7,9 +7,9 @@ export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME . ./test-lib.sh . "$TEST_DIRECTORY"/lib-terminal.sh -if ! test_have_prereq PERL +if test_have_prereq !ADD_I_USE_BUILTIN,!PERL then - skip_all='skipping add -i tests, perl not available' + skip_all='skipping add -i (scripted) tests, perl not available' test_done fi @@ -761,9 +761,33 @@ test_expect_success 'detect bogus diffFilter output' ' git reset --hard && echo content >test && - test_config interactive.diffFilter "sed 1d" && + test_config interactive.diffFilter "sed 6d" && printf y >y && - force_color test_must_fail git add -p <y + force_color test_must_fail git add -p <y >output 2>&1 && + grep "mismatched output" output +' + +test_expect_success 'handle iffy colored hunk headers' ' + git reset --hard && + + echo content >test && + printf n >n && + force_color git -c interactive.diffFilter="sed s/.*@@.*/XX/" \ + add -p >output 2>&1 <n && + grep "^XX$" output +' + +test_expect_success 'handle very large filtered diff' ' + git reset --hard && + # The specific number here is not important, but it must + # be large enough that the output of "git diff --color" + # fills up the pipe buffer. 10,000 results in ~200k of + # colored output. + test_seq 10000 >test && + test_config interactive.diffFilter cat && + printf y >y && + force_color git add -p >output 2>&1 <y && + git diff-files --exit-code -- test ' test_expect_success 'diff.algorithm is passed to `git diff-files`' ' @@ -931,6 +955,18 @@ test_expect_success 'status ignores dirty submodules (except HEAD)' ' ! 
grep dirty-otherwise output ' +test_expect_success 'handle submodules' ' + echo 123 >>for-submodules/dirty-otherwise/initial.t && + + force_color git -C for-submodules add -p dirty-otherwise >output 2>&1 && + grep "No changes" output && + + force_color git -C for-submodules add -p dirty-head >output 2>&1 <y && + git -C for-submodules ls-files --stage dirty-head >actual && + rev="$(git -C for-submodules/dirty-head rev-parse HEAD)" && + grep "$rev" actual +' + test_expect_success 'set up pathological context' ' git reset --hard && test_write_lines a a a a a a a a a a a >a && diff --git a/t/t3903-stash.sh b/t/t3903-stash.sh index 2a4c3fd61c..376cc8f4ab 100755 --- a/t/t3903-stash.sh +++ b/t/t3903-stash.sh @@ -25,7 +25,7 @@ test_expect_success 'usage on main command -h emits a summary of subcommands' ' grep -F "or: git stash show" usage ' -test_expect_failure 'usage for subcommands should emit subcommand usage' ' +test_expect_success 'usage for subcommands should emit subcommand usage' ' test_expect_code 129 git stash push -h >usage && grep -F "usage: git stash [push" usage ' diff --git a/t/t4013-diff-various.sh b/t/t4013-diff-various.sh index 056e922164..dfcf3a0aaa 100755 --- a/t/t4013-diff-various.sh +++ b/t/t4013-diff-various.sh @@ -352,6 +352,8 @@ log -GF -p --pickaxe-all master log -IA -IB -I1 -I2 -p master log --decorate --all log --decorate=full --all +log --decorate --clear-decorations --all +log --decorate=full --clear-decorations --all rev-list --parents HEAD rev-list --children HEAD diff --git a/t/t4013/diff.log_--decorate=full_--all b/t/t4013/diff.log_--decorate=full_--all index 3f9b872ece..6b0b334a5d 100644 --- a/t/t4013/diff.log_--decorate=full_--all +++ b/t/t4013/diff.log_--decorate=full_--all @@ -20,7 +20,7 @@ Date: Mon Jun 26 00:06:00 2006 +0000 Rearranged lines in dir/sub -commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0 (refs/notes/commits) +commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0 Author: A U Thor <author@example.com> Date: Mon Jun 26 00:06:00 2006 +0000 diff --git a/t/t4013/diff.log_--decorate=full_--clear-decorations_--all b/t/t4013/diff.log_--decorate=full_--clear-decorations_--all new file mode 100644 index 0000000000..1c030a6554 --- /dev/null +++ b/t/t4013/diff.log_--decorate=full_--clear-decorations_--all @@ -0,0 +1,61 @@ +$ git log --decorate=full --clear-decorations --all +commit b7e0bc69303b488b47deca799a7d723971dfa6cd (refs/heads/mode) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + update mode + +commit a6f364368ca320bc5a92e18912e16fa6b3dff598 (refs/heads/note) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + update mode (file2) + +Notes: + note + +commit cd4e72fd96faed3f0ba949dc42967430374e2290 (refs/heads/rearrange) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + Rearranged lines in dir/sub + +commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0 (refs/notes/commits) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + Notes added by 'git notes add' + +commit 59d314ad6f356dd08601a4cd5e530381da3e3c64 (HEAD -> refs/heads/master) +Merge: 9a6d494 c7a2ab9 +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:04:00 2006 +0000 + + Merge branch 'side' + +commit c7a2ab9e8eac7b117442a607d5a9b3950ae34d5a (refs/heads/side) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:03:00 2006 +0000 + + Side + +commit 9a6d4949b6b76956d9d5e26f2791ec2ceff5fdc0 +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:02:00 2006 +0000 + + Third + 
+commit 1bde4ae5f36c8d9abe3a0fce0c6aab3c4a12fe44 +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:01:00 2006 +0000 + + Second + + This is the second commit. + +commit 444ac553ac7612cc88969031b02b3767fb8a353a (refs/heads/initial) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:00:00 2006 +0000 + + Initial +$ diff --git a/t/t4013/diff.log_--decorate=full_--decorate-all_--all b/t/t4013/diff.log_--decorate=full_--decorate-all_--all new file mode 100644 index 0000000000..d6e7928784 --- /dev/null +++ b/t/t4013/diff.log_--decorate=full_--decorate-all_--all @@ -0,0 +1,61 @@ +$ git log --decorate=full --decorate-all --all +commit b7e0bc69303b488b47deca799a7d723971dfa6cd (refs/heads/mode) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + update mode + +commit a6f364368ca320bc5a92e18912e16fa6b3dff598 (refs/heads/note) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + update mode (file2) + +Notes: + note + +commit cd4e72fd96faed3f0ba949dc42967430374e2290 (refs/heads/rearrange) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + Rearranged lines in dir/sub + +commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0 (refs/notes/commits) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + Notes added by 'git notes add' + +commit 59d314ad6f356dd08601a4cd5e530381da3e3c64 (HEAD -> refs/heads/master) +Merge: 9a6d494 c7a2ab9 +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:04:00 2006 +0000 + + Merge branch 'side' + +commit c7a2ab9e8eac7b117442a607d5a9b3950ae34d5a (refs/heads/side) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:03:00 2006 +0000 + + Side + +commit 9a6d4949b6b76956d9d5e26f2791ec2ceff5fdc0 +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:02:00 2006 +0000 + + Third + +commit 1bde4ae5f36c8d9abe3a0fce0c6aab3c4a12fe44 +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:01:00 2006 +0000 + + Second + + This is the second commit. 
+ +commit 444ac553ac7612cc88969031b02b3767fb8a353a (refs/heads/initial) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:00:00 2006 +0000 + + Initial +$ diff --git a/t/t4013/diff.log_--decorate_--all b/t/t4013/diff.log_--decorate_--all index f5e20e1e14..c7df1f5814 100644 --- a/t/t4013/diff.log_--decorate_--all +++ b/t/t4013/diff.log_--decorate_--all @@ -20,7 +20,7 @@ Date: Mon Jun 26 00:06:00 2006 +0000 Rearranged lines in dir/sub -commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0 (refs/notes/commits) +commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0 Author: A U Thor <author@example.com> Date: Mon Jun 26 00:06:00 2006 +0000 diff --git a/t/t4013/diff.log_--decorate_--clear-decorations_--all b/t/t4013/diff.log_--decorate_--clear-decorations_--all new file mode 100644 index 0000000000..88be82cce3 --- /dev/null +++ b/t/t4013/diff.log_--decorate_--clear-decorations_--all @@ -0,0 +1,61 @@ +$ git log --decorate --clear-decorations --all +commit b7e0bc69303b488b47deca799a7d723971dfa6cd (mode) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + update mode + +commit a6f364368ca320bc5a92e18912e16fa6b3dff598 (note) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + update mode (file2) + +Notes: + note + +commit cd4e72fd96faed3f0ba949dc42967430374e2290 (rearrange) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + Rearranged lines in dir/sub + +commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0 (refs/notes/commits) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + Notes added by 'git notes add' + +commit 59d314ad6f356dd08601a4cd5e530381da3e3c64 (HEAD -> master) +Merge: 9a6d494 c7a2ab9 +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:04:00 2006 +0000 + + Merge branch 'side' + +commit c7a2ab9e8eac7b117442a607d5a9b3950ae34d5a (side) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:03:00 2006 +0000 + + Side + +commit 9a6d4949b6b76956d9d5e26f2791ec2ceff5fdc0 +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:02:00 2006 +0000 + + Third + +commit 1bde4ae5f36c8d9abe3a0fce0c6aab3c4a12fe44 +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:01:00 2006 +0000 + + Second + + This is the second commit. 
+ +commit 444ac553ac7612cc88969031b02b3767fb8a353a (initial) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:00:00 2006 +0000 + + Initial +$ diff --git a/t/t4013/diff.log_--decorate_--decorate-all_--all b/t/t4013/diff.log_--decorate_--decorate-all_--all new file mode 100644 index 0000000000..5d22618bb6 --- /dev/null +++ b/t/t4013/diff.log_--decorate_--decorate-all_--all @@ -0,0 +1,61 @@ +$ git log --decorate --decorate-all --all +commit b7e0bc69303b488b47deca799a7d723971dfa6cd (mode) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + update mode + +commit a6f364368ca320bc5a92e18912e16fa6b3dff598 (note) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + update mode (file2) + +Notes: + note + +commit cd4e72fd96faed3f0ba949dc42967430374e2290 (rearrange) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + Rearranged lines in dir/sub + +commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0 (refs/notes/commits) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + Notes added by 'git notes add' + +commit 59d314ad6f356dd08601a4cd5e530381da3e3c64 (HEAD -> master) +Merge: 9a6d494 c7a2ab9 +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:04:00 2006 +0000 + + Merge branch 'side' + +commit c7a2ab9e8eac7b117442a607d5a9b3950ae34d5a (side) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:03:00 2006 +0000 + + Side + +commit 9a6d4949b6b76956d9d5e26f2791ec2ceff5fdc0 +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:02:00 2006 +0000 + + Third + +commit 1bde4ae5f36c8d9abe3a0fce0c6aab3c4a12fe44 +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:01:00 2006 +0000 + + Second + + This is the second commit. + +commit 444ac553ac7612cc88969031b02b3767fb8a353a (initial) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:00:00 2006 +0000 + + Initial +$ diff --git a/t/t4014-format-patch.sh b/t/t4014-format-patch.sh index fbec8ad2ef..ad5c029279 100755 --- a/t/t4014-format-patch.sh +++ b/t/t4014-format-patch.sh @@ -1400,6 +1400,43 @@ test_expect_success '--from omits redundant in-body header' ' test_cmp expect patch.head ' +test_expect_success 'with --force-in-body-from, redundant in-body from is kept' ' + git format-patch --force-in-body-from \ + -1 --stdout --from="A U Thor <author@example.com>" >patch && + cat >expect <<-\EOF && + From: A U Thor <author@example.com> + + From: A U Thor <author@example.com> + + EOF + sed -ne "/^From:/p; /^$/p; /^---$/q" patch >patch.head && + test_cmp expect patch.head +' + +test_expect_success 'format.forceInBodyFrom, equivalent to --force-in-body-from' ' + git -c format.forceInBodyFrom=yes format-patch \ + -1 --stdout --from="A U Thor <author@example.com>" >patch && + cat >expect <<-\EOF && + From: A U Thor <author@example.com> + + From: A U Thor <author@example.com> + + EOF + sed -ne "/^From:/p; /^$/p; /^---$/q" patch >patch.head && + test_cmp expect patch.head +' + +test_expect_success 'format.forceInBodyFrom, equivalent to --force-in-body-from' ' + git -c format.forceInBodyFrom=yes format-patch --no-force-in-body-from \ + -1 --stdout --from="A U Thor <author@example.com>" >patch && + cat >expect <<-\EOF && + From: A U Thor <author@example.com> + + EOF + sed -ne "/^From:/p; /^$/p; /^---$/q" patch >patch.head && + test_cmp expect patch.head +' + test_expect_success 'in-body headers trigger content encoding' ' test_env GIT_AUTHOR_NAME="éxötìc" test_commit exotic && test_when_finished "git reset --hard HEAD^" && diff 
--git a/t/t4202-log.sh b/t/t4202-log.sh index f0aaa1fa02..cc15cb4ff6 100755 --- a/t/t4202-log.sh +++ b/t/t4202-log.sh @@ -704,9 +704,12 @@ test_expect_success 'set up more tangled history' ' git checkout -b tangle HEAD~6 && test_commit tangle-a tangle-a a && git merge main~3 && + git update-ref refs/prefetch/merge HEAD && git merge side~1 && + git update-ref refs/rewritten/merge HEAD && git checkout main && git merge tangle && + git update-ref refs/hidden/tangle HEAD && git checkout -b reach && test_commit reach && git checkout main && @@ -974,9 +977,9 @@ test_expect_success 'decorate-refs-exclude and simplify-by-decoration' ' Merge-tag-reach (HEAD -> main) reach (tag: reach, reach) seventh (tag: seventh) - Merge-branch-tangle - Merge-branch-side-early-part-into-tangle (tangle) - tangle-a (tag: tangle-a) + Merge-branch-tangle (refs/hidden/tangle) + Merge-branch-side-early-part-into-tangle (refs/rewritten/merge, tangle) + Merge-branch-main-early-part-into-tangle (refs/prefetch/merge) EOF git log -n6 --decorate=short --pretty="tformat:%f%d" \ --decorate-refs-exclude="*octopus*" \ @@ -1025,6 +1028,115 @@ test_expect_success 'decorate-refs and simplify-by-decoration without output' ' test_cmp expect actual ' +test_expect_success 'decorate-refs-exclude HEAD' ' + git log --decorate=full --oneline \ + --decorate-refs-exclude="HEAD" >actual && + ! grep HEAD actual +' + +test_expect_success 'decorate-refs focus from default' ' + git log --decorate=full --oneline \ + --decorate-refs="refs/heads" >actual && + ! grep HEAD actual +' + +test_expect_success '--clear-decorations overrides defaults' ' + cat >expect.default <<-\EOF && + Merge-tag-reach (HEAD -> refs/heads/main) + Merge-tags-octopus-a-and-octopus-b + seventh (tag: refs/tags/seventh) + octopus-b (tag: refs/tags/octopus-b, refs/heads/octopus-b) + octopus-a (tag: refs/tags/octopus-a, refs/heads/octopus-a) + reach (tag: refs/tags/reach, refs/heads/reach) + Merge-branch-tangle + Merge-branch-side-early-part-into-tangle (refs/heads/tangle) + Merge-branch-main-early-part-into-tangle + tangle-a (tag: refs/tags/tangle-a) + Merge-branch-side + side-2 (tag: refs/tags/side-2, refs/heads/side) + side-1 (tag: refs/tags/side-1) + Second + sixth + fifth + fourth + third + second + initial + EOF + git log --decorate=full --pretty="tformat:%f%d" >actual && + test_cmp expect.default actual && + + cat >expect.all <<-\EOF && + Merge-tag-reach (HEAD -> refs/heads/main) + Merge-tags-octopus-a-and-octopus-b + seventh (tag: refs/tags/seventh) + octopus-b (tag: refs/tags/octopus-b, refs/heads/octopus-b) + octopus-a (tag: refs/tags/octopus-a, refs/heads/octopus-a) + reach (tag: refs/tags/reach, refs/heads/reach) + Merge-branch-tangle (refs/hidden/tangle) + Merge-branch-side-early-part-into-tangle (refs/rewritten/merge, refs/heads/tangle) + Merge-branch-main-early-part-into-tangle (refs/prefetch/merge) + tangle-a (tag: refs/tags/tangle-a) + Merge-branch-side + side-2 (tag: refs/tags/side-2, refs/heads/side) + side-1 (tag: refs/tags/side-1) + Second + sixth + fifth + fourth + third + second + initial + EOF + git log --decorate=full --pretty="tformat:%f%d" \ + --clear-decorations >actual && + test_cmp expect.all actual && + git -c log.initialDecorationSet=all log \ + --decorate=full --pretty="tformat:%f%d" >actual && + test_cmp expect.all actual +' + +test_expect_success '--clear-decorations clears previous exclusions' ' + cat >expect.all <<-\EOF && + Merge-tag-reach (HEAD -> refs/heads/main) + reach (tag: refs/tags/reach, refs/heads/reach) + 
Merge-tags-octopus-a-and-octopus-b + octopus-b (tag: refs/tags/octopus-b, refs/heads/octopus-b) + octopus-a (tag: refs/tags/octopus-a, refs/heads/octopus-a) + seventh (tag: refs/tags/seventh) + Merge-branch-tangle (refs/hidden/tangle) + Merge-branch-side-early-part-into-tangle (refs/rewritten/merge, refs/heads/tangle) + Merge-branch-main-early-part-into-tangle (refs/prefetch/merge) + tangle-a (tag: refs/tags/tangle-a) + side-2 (tag: refs/tags/side-2, refs/heads/side) + side-1 (tag: refs/tags/side-1) + initial + EOF + + git log --decorate=full --pretty="tformat:%f%d" \ + --simplify-by-decoration \ + --decorate-refs-exclude="heads/octopus*" \ + --decorate-refs="heads" \ + --clear-decorations >actual && + test_cmp expect.all actual && + + cat >expect.filtered <<-\EOF && + Merge-tags-octopus-a-and-octopus-b + octopus-b (refs/heads/octopus-b) + octopus-a (refs/heads/octopus-a) + initial + EOF + + git log --decorate=full --pretty="tformat:%f%d" \ + --simplify-by-decoration \ + --decorate-refs-exclude="heads/octopus" \ + --decorate-refs="heads" \ + --clear-decorations \ + --decorate-refs-exclude="tags/" \ + --decorate-refs="heads/octopus*" >actual && + test_cmp expect.filtered actual +' + test_expect_success 'log.decorate config parsing' ' git log --oneline --decorate=full >expect.full && git log --oneline --decorate=short >expect.short && @@ -2192,6 +2304,20 @@ test_expect_success 'log --decorate includes all levels of tag annotated tags' ' test_cmp expect actual ' +test_expect_success 'log --decorate does not include things outside filter' ' + reflist="refs/prefetch refs/rebase-merge refs/bundle" && + + for ref in $reflist + do + git update-ref $ref/fake HEAD || return 1 + done && + + git log --decorate=full --oneline >actual && + + # None of the refs are visible: + ! grep /fake actual +' + test_expect_success 'log --end-of-options' ' git update-ref refs/heads/--source HEAD && git log --end-of-options --source >actual && diff --git a/t/t4207-log-decoration-colors.sh b/t/t4207-log-decoration-colors.sh index 36ac6aff1e..ded33a82e2 100755 --- a/t/t4207-log-decoration-colors.sh +++ b/t/t4207-log-decoration-colors.sh @@ -3,7 +3,7 @@ # Copyright (c) 2010 Nazri Ramliy # -test_description='Test for "git log --decorate" colors' +test_description='test "git log --decorate" colors' GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME @@ -17,6 +17,7 @@ test_expect_success setup ' git config color.decorate.remoteBranch red && git config color.decorate.tag "reverse bold yellow" && git config color.decorate.stash magenta && + git config color.decorate.grafted black && git config color.decorate.HEAD cyan && c_reset="<RESET>" && @@ -27,6 +28,7 @@ test_expect_success setup ' c_tag="<BOLD;REVERSE;YELLOW>" && c_stash="<MAGENTA>" && c_HEAD="<CYAN>" && + c_grafted="<BLACK>" && test_commit A && git clone . 
other && @@ -42,25 +44,79 @@ test_expect_success setup ' git stash save Changes to A.t ' -cat >expected <<EOF -${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_HEAD}HEAD ->\ - ${c_reset}${c_branch}main${c_reset}${c_commit},\ - ${c_reset}${c_tag}tag: v1.0${c_reset}${c_commit},\ - ${c_reset}${c_tag}tag: B${c_reset}${c_commit})${c_reset} B -${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: A1${c_reset}${c_commit},\ - ${c_reset}${c_remoteBranch}other/main${c_reset}${c_commit})${c_reset} A1 -${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_stash}refs/stash${c_reset}${c_commit})${c_reset}\ - On main: Changes to A.t -${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: A${c_reset}${c_commit})${c_reset} A -EOF +cmp_filtered_decorations () { + sed "s/$OID_REGEX/COMMIT_ID/" actual | test_decode_color >filtered && + test_cmp expect filtered +} # We want log to show all, but the second parent to refs/stash is irrelevant # to this test since it does not contain any decoration, hence --first-parent -test_expect_success 'Commit Decorations Colored Correctly' ' - git log --first-parent --abbrev=10 --all --decorate --oneline --color=always | - sed "s/[0-9a-f]\{10,10\}/COMMIT_ID/" | - test_decode_color >out && - test_cmp expected out +test_expect_success 'commit decorations colored correctly' ' + cat >expect <<-EOF && + ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_HEAD}HEAD -> \ +${c_reset}${c_branch}main${c_reset}${c_commit}, \ +${c_reset}${c_tag}tag: v1.0${c_reset}${c_commit}, \ +${c_reset}${c_tag}tag: B${c_reset}${c_commit})${c_reset} B +${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: A1${c_reset}${c_commit}, \ +${c_reset}${c_remoteBranch}other/main${c_reset}${c_commit})${c_reset} A1 + ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_stash}refs/stash${c_reset}${c_commit})${c_reset} \ +On main: Changes to A.t + ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: A${c_reset}${c_commit})${c_reset} A + EOF + + git log --first-parent --no-abbrev --decorate --oneline --color=always --all >actual && + cmp_filtered_decorations +' + +test_expect_success 'test coloring with replace-objects' ' + test_when_finished rm -rf .git/refs/replace* && + test_commit C && + test_commit D && + + git replace HEAD~1 HEAD~2 && + + cat >expect <<-EOF && + ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_HEAD}HEAD -> \ +${c_reset}${c_branch}main${c_reset}${c_commit}, \ +${c_reset}${c_tag}tag: D${c_reset}${c_commit})${c_reset} D + ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: C${c_reset}${c_commit}, \ +${c_reset}${c_grafted}replaced${c_reset}${c_commit})${c_reset} B + ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: A${c_reset}${c_commit})${c_reset} A +EOF + + git log --first-parent --no-abbrev --decorate --oneline --color=always HEAD >actual && + cmp_filtered_decorations && + git replace -d HEAD~1 && + + GIT_REPLACE_REF_BASE=refs/replace2/ git replace HEAD~1 HEAD~2 && + GIT_REPLACE_REF_BASE=refs/replace2/ git log --first-parent \ + --no-abbrev --decorate --oneline --color=always HEAD >actual && + cmp_filtered_decorations +' + +test_expect_success 'test coloring with grafted commit' ' + test_when_finished rm -rf .git/refs/replace* && + + git replace --graft HEAD HEAD~2 && + + cat >expect <<-EOF && + ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_HEAD}HEAD -> \ +${c_reset}${c_branch}main${c_reset}${c_commit}, \ +${c_reset}${c_tag}tag: D${c_reset}${c_commit}, \ 
+${c_reset}${c_grafted}replaced${c_reset}${c_commit})${c_reset} D + ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: v1.0${c_reset}${c_commit}, \ +${c_reset}${c_tag}tag: B${c_reset}${c_commit})${c_reset} B + ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: A${c_reset}${c_commit})${c_reset} A + EOF + + git log --first-parent --no-abbrev --decorate --oneline --color=always HEAD >actual && + cmp_filtered_decorations && + git replace -d HEAD && + + GIT_REPLACE_REF_BASE=refs/replace2/ git replace --graft HEAD HEAD~2 && + GIT_REPLACE_REF_BASE=refs/replace2/ git log --first-parent \ + --no-abbrev --decorate --oneline --color=always HEAD >actual && + cmp_filtered_decorations ' test_done diff --git a/t/t4301-merge-tree-write-tree.sh b/t/t4301-merge-tree-write-tree.sh index a243e3c517..28ca5c38bb 100755 --- a/t/t4301-merge-tree-write-tree.sh +++ b/t/t4301-merge-tree-write-tree.sh @@ -2,7 +2,6 @@ test_description='git merge-tree --write-tree' -TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh # This test is ort-specific @@ -138,6 +137,579 @@ test_expect_success 'test conflict notices and such' ' test_cmp expect actual ' +# directory rename + content conflict +# Commit O: foo, olddir/{a,b,c} +# Commit A: modify foo, newdir/{a,b,c} +# Commit B: modify foo differently & rename foo -> olddir/bar +# Expected: CONFLICT(content) for for newdir/bar (not olddir/bar or foo) + +test_expect_success 'directory rename + content conflict' ' + # Setup + git init dir-rename-and-content && + ( + cd dir-rename-and-content && + test_write_lines 1 2 3 4 5 >foo && + mkdir olddir && + for i in a b c; do echo $i >olddir/$i || exit 1; done && + git add foo olddir && + git commit -m "original" && + + git branch O && + git branch A && + git branch B && + + git checkout A && + test_write_lines 1 2 3 4 5 6 >foo && + git add foo && + git mv olddir newdir && + git commit -m "Modify foo, rename olddir to newdir" && + + git checkout B && + test_write_lines 1 2 3 4 5 six >foo && + git add foo && + git mv foo olddir/bar && + git commit -m "Modify foo & rename foo -> olddir/bar" + ) && + # Testing + ( + cd dir-rename-and-content && + + test_expect_code 1 \ + git merge-tree -z A^0 B^0 >out && + echo >>out && + anonymize_hash out >actual && + q_to_tab <<-\EOF | lf_to_nul >expect && + HASH + 100644 HASH 1Qnewdir/bar + 100644 HASH 2Qnewdir/bar + 100644 HASH 3Qnewdir/bar + EOF + + q_to_nul <<-EOF >>expect && + Q2Qnewdir/barQolddir/barQCONFLICT (directory rename suggested)QCONFLICT (file location): foo renamed to olddir/bar in B^0, inside a directory that was renamed in A^0, suggesting it should perhaps be moved to newdir/bar. 
+ Q1Qnewdir/barQAuto-mergingQAuto-merging newdir/bar + Q1Qnewdir/barQCONFLICT (contents)QCONFLICT (content): Merge conflict in newdir/bar + Q + EOF + test_cmp expect actual + ) +' + +# rename/delete + modify/delete handling +# Commit O: foo +# Commit A: modify foo + rename to bar +# Commit B: delete foo +# Expected: CONFLICT(rename/delete) + CONFLICT(modify/delete) + +test_expect_success 'rename/delete handling' ' + # Setup + git init rename-delete && + ( + cd rename-delete && + test_write_lines 1 2 3 4 5 >foo && + git add foo && + git commit -m "original" && + + git branch O && + git branch A && + git branch B && + + git checkout A && + test_write_lines 1 2 3 4 5 6 >foo && + git add foo && + git mv foo bar && + git commit -m "Modify foo, rename to bar" && + + git checkout B && + git rm foo && + git commit -m "remove foo" + ) && + # Testing + ( + cd rename-delete && + + test_expect_code 1 \ + git merge-tree -z A^0 B^0 >out && + echo >>out && + anonymize_hash out >actual && + q_to_tab <<-\EOF | lf_to_nul >expect && + HASH + 100644 HASH 1Qbar + 100644 HASH 2Qbar + EOF + + q_to_nul <<-EOF >>expect && + Q2QbarQfooQCONFLICT (rename/delete)QCONFLICT (rename/delete): foo renamed to bar in A^0, but deleted in B^0. + Q1QbarQCONFLICT (modify/delete)QCONFLICT (modify/delete): bar deleted in B^0 and modified in A^0. Version A^0 of bar left in tree. + Q + EOF + test_cmp expect actual + ) +' + +# rename/add handling +# Commit O: foo +# Commit A: modify foo, add different bar +# Commit B: modify & rename foo->bar +# Expected: CONFLICT(add/add) [via rename collide] for bar + +test_expect_success 'rename/add handling' ' + # Setup + git init rename-add && + ( + cd rename-add && + test_write_lines original 1 2 3 4 5 >foo && + git add foo && + git commit -m "original" && + + git branch O && + git branch A && + git branch B && + + git checkout A && + test_write_lines 1 2 3 4 5 >foo && + echo "different file" >bar && + git add foo bar && + git commit -m "Modify foo, add bar" && + + git checkout B && + test_write_lines original 1 2 3 4 5 6 >foo && + git add foo && + git mv foo bar && + git commit -m "rename foo to bar" + ) && + # Testing + ( + cd rename-add && + + test_expect_code 1 \ + git merge-tree -z A^0 B^0 >out && + echo >>out && + + # + # First, check that the bar that appears at stage 3 does not + # correspond to an individual blob anywhere in history + # + hash=$(cat out | tr "\0" "\n" | head -n 3 | grep 3.bar | cut -f 2 -d " ") && + git rev-list --objects --all >all_blobs && + ! 
grep $hash all_blobs && + + # + # Second, check anonymized hash output against expectation + # + anonymize_hash out >actual && + q_to_tab <<-\EOF | lf_to_nul >expect && + HASH + 100644 HASH 2Qbar + 100644 HASH 3Qbar + EOF + + q_to_nul <<-EOF >>expect && + Q1QbarQAuto-mergingQAuto-merging bar + Q1QbarQCONFLICT (contents)QCONFLICT (add/add): Merge conflict in bar + Q1QfooQAuto-mergingQAuto-merging foo + Q + EOF + test_cmp expect actual + ) +' + +# rename/add, where add is a mode conflict +# Commit O: foo +# Commit A: modify foo, add symlink bar +# Commit B: modify & rename foo->bar +# Expected: CONFLICT(distinct modes) for bar + +test_expect_success SYMLINKS 'rename/add, where add is a mode conflict' ' + # Setup + git init rename-add-symlink && + ( + cd rename-add-symlink && + test_write_lines original 1 2 3 4 5 >foo && + git add foo && + git commit -m "original" && + + git branch O && + git branch A && + git branch B && + + git checkout A && + test_write_lines 1 2 3 4 5 >foo && + ln -s foo bar && + git add foo bar && + git commit -m "Modify foo, add symlink bar" && + + git checkout B && + test_write_lines original 1 2 3 4 5 6 >foo && + git add foo && + git mv foo bar && + git commit -m "rename foo to bar" + ) && + # Testing + ( + cd rename-add-symlink && + + test_expect_code 1 \ + git merge-tree -z A^0 B^0 >out && + echo >>out && + + # + # First, check that the bar that appears at stage 3 does not + # correspond to an individual blob anywhere in history + # + hash=$(cat out | tr "\0" "\n" | head -n 3 | grep 3.bar | cut -f 2 -d " ") && + git rev-list --objects --all >all_blobs && + ! grep $hash all_blobs && + + # + # Second, check anonymized hash output against expectation + # + anonymize_hash out >actual && + q_to_tab <<-\EOF | lf_to_nul >expect && + HASH + 120000 HASH 2Qbar + 100644 HASH 3Qbar~B^0 + EOF + + q_to_nul <<-EOF >>expect && + Q2QbarQbar~B^0QCONFLICT (distinct modes)QCONFLICT (distinct types): bar had different types on each side; renamed one of them so each can be recorded somewhere. + Q1QfooQAuto-mergingQAuto-merging foo + Q + EOF + test_cmp expect actual + ) +' + +# rename/rename(1to2) + content conflict handling +# Commit O: foo +# Commit A: modify foo & rename to bar +# Commit B: modify foo & rename to baz +# Expected: CONFLICT(rename/rename) + +test_expect_success 'rename/rename + content conflict' ' + # Setup + git init rr-plus-content && + ( + cd rr-plus-content && + test_write_lines 1 2 3 4 5 >foo && + git add foo && + git commit -m "original" && + + git branch O && + git branch A && + git branch B && + + git checkout A && + test_write_lines 1 2 3 4 5 six >foo && + git add foo && + git mv foo bar && + git commit -m "Modify foo + rename to bar" && + + git checkout B && + test_write_lines 1 2 3 4 5 6 >foo && + git add foo && + git mv foo baz && + git commit -m "Modify foo + rename to baz" + ) && + # Testing + ( + cd rr-plus-content && + + test_expect_code 1 \ + git merge-tree -z A^0 B^0 >out && + echo >>out && + anonymize_hash out >actual && + q_to_tab <<-\EOF | lf_to_nul >expect && + HASH + 100644 HASH 2Qbar + 100644 HASH 3Qbaz + 100644 HASH 1Qfoo + EOF + + q_to_nul <<-EOF >>expect && + Q1QfooQAuto-mergingQAuto-merging foo + Q3QfooQbarQbazQCONFLICT (rename/rename)QCONFLICT (rename/rename): foo renamed to bar in A^0 and to baz in B^0. 
+ Q + EOF + test_cmp expect actual + ) +' + +# rename/add/delete +# Commit O: foo +# Commit A: rm foo, add different bar +# Commit B: rename foo->bar +# Expected: CONFLICT (rename/delete), CONFLICT(add/add) [via rename collide] +# for bar + +test_expect_success 'rename/add/delete conflict' ' + # Setup + git init rad && + ( + cd rad && + echo "original file" >foo && + git add foo && + git commit -m "original" && + + git branch O && + git branch A && + git branch B && + + git checkout A && + git rm foo && + echo "different file" >bar && + git add bar && + git commit -m "Remove foo, add bar" && + + git checkout B && + git mv foo bar && + git commit -m "rename foo to bar" + ) && + # Testing + ( + cd rad && + + test_expect_code 1 \ + git merge-tree -z B^0 A^0 >out && + echo >>out && + anonymize_hash out >actual && + + q_to_tab <<-\EOF | lf_to_nul >expect && + HASH + 100644 HASH 2Qbar + 100644 HASH 3Qbar + + EOF + + q_to_nul <<-EOF >>expect && + 2QbarQfooQCONFLICT (rename/delete)QCONFLICT (rename/delete): foo renamed to bar in B^0, but deleted in A^0. + Q1QbarQAuto-mergingQAuto-merging bar + Q1QbarQCONFLICT (contents)QCONFLICT (add/add): Merge conflict in bar + Q + EOF + test_cmp expect actual + ) +' + +# rename/rename(2to1)/delete/delete +# Commit O: foo, bar +# Commit A: rename foo->baz, rm bar +# Commit B: rename bar->baz, rm foo +# Expected: 2x CONFLICT (rename/delete), CONFLICT (add/add) via colliding +# renames for baz + +test_expect_success 'rename/rename(2to1)/delete/delete conflict' ' + # Setup + git init rrdd && + ( + cd rrdd && + echo foo >foo && + echo bar >bar && + git add foo bar && + git commit -m O && + + git branch O && + git branch A && + git branch B && + + git checkout A && + git mv foo baz && + git rm bar && + git commit -m "Rename foo, remove bar" && + + git checkout B && + git mv bar baz && + git rm foo && + git commit -m "Rename bar, remove foo" + ) && + # Testing + ( + cd rrdd && + + test_expect_code 1 \ + git merge-tree -z A^0 B^0 >out && + echo >>out && + anonymize_hash out >actual && + + q_to_tab <<-\EOF | lf_to_nul >expect && + HASH + 100644 HASH 2Qbaz + 100644 HASH 3Qbaz + + EOF + + q_to_nul <<-EOF >>expect && + 2QbazQbarQCONFLICT (rename/delete)QCONFLICT (rename/delete): bar renamed to baz in B^0, but deleted in A^0. + Q2QbazQfooQCONFLICT (rename/delete)QCONFLICT (rename/delete): foo renamed to baz in A^0, but deleted in B^0. + Q1QbazQAuto-mergingQAuto-merging baz + Q1QbazQCONFLICT (contents)QCONFLICT (add/add): Merge conflict in baz + Q + EOF + test_cmp expect actual + ) +' + +# mod6: chains of rename/rename(1to2) + add/add via colliding renames +# Commit O: one, three, five +# Commit A: one->two, three->four, five->six +# Commit B: one->six, three->two, five->four +# Expected: three CONFLICT(rename/rename) messages + three CONFLICT(add/add) +# messages; each path in two of the multi-way merged contents +# found in two, four, six + +test_expect_success 'mod6: chains of rename/rename(1to2) and add/add via colliding renames' ' + # Setup + git init mod6 && + ( + cd mod6 && + test_seq 11 19 >one && + test_seq 31 39 >three && + test_seq 51 59 >five && + git add . 
&& + test_tick && + git commit -m "O" && + + git branch O && + git branch A && + git branch B && + + git checkout A && + test_seq 10 19 >one && + echo 40 >>three && + git add one three && + git mv one two && + git mv three four && + git mv five six && + test_tick && + git commit -m "A" && + + git checkout B && + echo 20 >>one && + echo forty >>three && + echo 60 >>five && + git add one three five && + git mv one six && + git mv three two && + git mv five four && + test_tick && + git commit -m "B" + ) && + # Testing + ( + cd mod6 && + + test_expect_code 1 \ + git merge-tree -z A^0 B^0 >out && + echo >>out && + + # + # First, check that some of the hashes that appear as stage + # conflict entries do not appear as individual blobs anywhere + # in history. + # + hash1=$(cat out | tr "\0" "\n" | head | grep 2.four | cut -f 2 -d " ") && + hash2=$(cat out | tr "\0" "\n" | head | grep 3.two | cut -f 2 -d " ") && + git rev-list --objects --all >all_blobs && + ! grep $hash1 all_blobs && + ! grep $hash2 all_blobs && + + # + # Now compare anonymized hash output with expectation + # + anonymize_hash out >actual && + q_to_tab <<-\EOF | lf_to_nul >expect && + HASH + 100644 HASH 1Qfive + 100644 HASH 2Qfour + 100644 HASH 3Qfour + 100644 HASH 1Qone + 100644 HASH 2Qsix + 100644 HASH 3Qsix + 100644 HASH 1Qthree + 100644 HASH 2Qtwo + 100644 HASH 3Qtwo + + EOF + + q_to_nul <<-EOF >>expect && + 3QfiveQsixQfourQCONFLICT (rename/rename)QCONFLICT (rename/rename): five renamed to six in A^0 and to four in B^0. + Q1QfourQAuto-mergingQAuto-merging four + Q1QfourQCONFLICT (contents)QCONFLICT (add/add): Merge conflict in four + Q1QoneQAuto-mergingQAuto-merging one + Q3QoneQtwoQsixQCONFLICT (rename/rename)QCONFLICT (rename/rename): one renamed to two in A^0 and to six in B^0. + Q1QsixQAuto-mergingQAuto-merging six + Q1QsixQCONFLICT (contents)QCONFLICT (add/add): Merge conflict in six + Q1QthreeQAuto-mergingQAuto-merging three + Q3QthreeQfourQtwoQCONFLICT (rename/rename)QCONFLICT (rename/rename): three renamed to four in A^0 and to two in B^0. 
+	Q1QtwoQAuto-mergingQAuto-merging two
+	Q1QtwoQCONFLICT (contents)QCONFLICT (add/add): Merge conflict in two
+	Q
+	EOF
+	test_cmp expect actual
+	)
+'
+
+# directory rename + rename/delete + modify/delete + directory/file conflict
+# Commit O: foo, olddir/{a,b,c}
+# Commit A: delete foo, rename olddir/ -> newdir/, add newdir/bar/file
+# Commit B: modify foo & rename foo -> olddir/bar
+# Expected: CONFLICT(content) for newdir/bar (not olddir/bar or foo)
+
+test_expect_success 'directory rename + rename/delete + modify/delete + directory/file conflict' '
+	# Setup
+	git init 4-stacked-conflict &&
+	(
+		cd 4-stacked-conflict &&
+		test_write_lines 1 2 3 4 5 >foo &&
+		mkdir olddir &&
+		for i in a b c; do echo $i >olddir/$i || exit 1; done &&
+		git add foo olddir &&
+		git commit -m "original" &&
+
+		git branch O &&
+		git branch A &&
+		git branch B &&
+
+		git checkout A &&
+		git rm foo &&
+		git mv olddir newdir &&
+		mkdir newdir/bar &&
+		>newdir/bar/file &&
+		git add newdir/bar/file &&
+		git commit -m "rm foo, olddir/ -> newdir/, + newdir/bar/file" &&
+
+		git checkout B &&
+		test_write_lines 1 2 3 4 5 6 >foo &&
+		git add foo &&
+		git mv foo olddir/bar &&
+		git commit -m "Modify foo & rename foo -> olddir/bar"
+	) &&
+	# Testing
+	(
+		cd 4-stacked-conflict &&
+
+		test_expect_code 1 \
+			git merge-tree -z A^0 B^0 >out &&
+		echo >>out &&
+		anonymize_hash out >actual &&
+
+		q_to_tab <<-\EOF | lf_to_nul >expect &&
+		HASH
+		100644 HASH 1Qnewdir/bar~B^0
+		100644 HASH 3Qnewdir/bar~B^0
+		EOF
+
+		q_to_nul <<-EOF >>expect &&
+		Q2Qnewdir/barQolddir/barQCONFLICT (directory rename suggested)QCONFLICT (file location): foo renamed to olddir/bar in B^0, inside a directory that was renamed in A^0, suggesting it should perhaps be moved to newdir/bar.
+		Q2Qnewdir/barQfooQCONFLICT (rename/delete)QCONFLICT (rename/delete): foo renamed to newdir/bar in B^0, but deleted in A^0.
+		Q2Qnewdir/bar~B^0Qnewdir/barQCONFLICT (file/directory)QCONFLICT (file/directory): directory in the way of newdir/bar from B^0; moving it to newdir/bar~B^0 instead.
+		Q1Qnewdir/bar~B^0QCONFLICT (modify/delete)QCONFLICT (modify/delete): newdir/bar~B^0 deleted in A^0 and modified in B^0. Version B^0 of newdir/bar~B^0 left in tree.
+ Q + EOF + test_cmp expect actual + ) +' + for opt in $(git merge-tree --git-completion-helper-all) do if test $opt = "--trivial-merge" || test $opt = "--write-tree" @@ -188,8 +760,8 @@ test_expect_success 'NUL terminated conflicted file "lines"' ' git commit -m "Renamed numbers" && test_expect_code 1 git merge-tree --write-tree -z tweak1 side2 >out && + echo >>out && anonymize_hash out >actual && - printf "\\n" >>actual && # Expected results: # "greeting" should merge with conflicts diff --git a/t/t5310-pack-bitmaps.sh b/t/t5310-pack-bitmaps.sh index f775fc1ce6..7e50f8e765 100755 --- a/t/t5310-pack-bitmaps.sh +++ b/t/t5310-pack-bitmaps.sh @@ -26,22 +26,415 @@ has_any () { grep -Ff "$1" "$2" } -setup_bitmap_history - -test_expect_success 'setup writing bitmaps during repack' ' - git config repack.writeBitmaps true -' - -test_expect_success 'full repack creates bitmaps' ' - GIT_TRACE2_EVENT="$(pwd)/trace" \ +test_bitmap_cases () { + writeLookupTable=false + for i in "$@" + do + case "$i" in + "pack.writeBitmapLookupTable") writeLookupTable=true;; + esac + done + + test_expect_success 'setup test repository' ' + rm -fr * .git && + git init && + git config pack.writeBitmapLookupTable '"$writeLookupTable"' + ' + setup_bitmap_history + + test_expect_success 'setup writing bitmaps during repack' ' + git config repack.writeBitmaps true + ' + + test_expect_success 'full repack creates bitmaps' ' + GIT_TRACE2_EVENT="$(pwd)/trace" \ + git repack -ad && + ls .git/objects/pack/ | grep bitmap >output && + test_line_count = 1 output && + grep "\"key\":\"num_selected_commits\",\"value\":\"106\"" trace && + grep "\"key\":\"num_maximal_commits\",\"value\":\"107\"" trace + ' + + basic_bitmap_tests + + test_expect_success 'pack-objects respects --local (non-local loose)' ' + git init --bare alt.git && + echo $(pwd)/alt.git/objects >.git/objects/info/alternates && + echo content1 >file1 && + # non-local loose object which is not present in bitmapped pack + altblob=$(GIT_DIR=alt.git git hash-object -w file1) && + # non-local loose object which is also present in bitmapped pack + git cat-file blob $blob | GIT_DIR=alt.git git hash-object -w --stdin && + git add file1 && + test_tick && + git commit -m commit_file1 && + echo HEAD | git pack-objects --local --stdout --revs >1.pack && + git index-pack 1.pack && + list_packed_objects 1.idx >1.objects && + printf "%s\n" "$altblob" "$blob" >nonlocal-loose && + ! has_any nonlocal-loose 1.objects + ' + + test_expect_success 'pack-objects respects --honor-pack-keep (local non-bitmapped pack)' ' + echo content2 >file2 && + blob2=$(git hash-object -w file2) && + git add file2 && + test_tick && + git commit -m commit_file2 && + printf "%s\n" "$blob2" "$bitmaptip" >keepobjects && + pack2=$(git pack-objects pack2 <keepobjects) && + mv pack2-$pack2.* .git/objects/pack/ && + >.git/objects/pack/pack2-$pack2.keep && + rm $(objpath $blob2) && + echo HEAD | git pack-objects --honor-pack-keep --stdout --revs >2a.pack && + git index-pack 2a.pack && + list_packed_objects 2a.idx >2a.objects && + ! has_any keepobjects 2a.objects + ' + + test_expect_success 'pack-objects respects --local (non-local pack)' ' + mv .git/objects/pack/pack2-$pack2.* alt.git/objects/pack/ && + echo HEAD | git pack-objects --local --stdout --revs >2b.pack && + git index-pack 2b.pack && + list_packed_objects 2b.idx >2b.objects && + ! 
has_any keepobjects 2b.objects + ' + + test_expect_success 'pack-objects respects --honor-pack-keep (local bitmapped pack)' ' + ls .git/objects/pack/ | grep bitmap >output && + test_line_count = 1 output && + packbitmap=$(basename $(cat output) .bitmap) && + list_packed_objects .git/objects/pack/$packbitmap.idx >packbitmap.objects && + test_when_finished "rm -f .git/objects/pack/$packbitmap.keep" && + >.git/objects/pack/$packbitmap.keep && + echo HEAD | git pack-objects --honor-pack-keep --stdout --revs >3a.pack && + git index-pack 3a.pack && + list_packed_objects 3a.idx >3a.objects && + ! has_any packbitmap.objects 3a.objects + ' + + test_expect_success 'pack-objects respects --local (non-local bitmapped pack)' ' + mv .git/objects/pack/$packbitmap.* alt.git/objects/pack/ && + rm -f .git/objects/pack/multi-pack-index && + test_when_finished "mv alt.git/objects/pack/$packbitmap.* .git/objects/pack/" && + echo HEAD | git pack-objects --local --stdout --revs >3b.pack && + git index-pack 3b.pack && + list_packed_objects 3b.idx >3b.objects && + ! has_any packbitmap.objects 3b.objects + ' + + test_expect_success 'pack-objects to file can use bitmap' ' + # make sure we still have 1 bitmap index from previous tests + ls .git/objects/pack/ | grep bitmap >output && + test_line_count = 1 output && + # verify equivalent packs are generated with/without using bitmap index + packasha1=$(git pack-objects --no-use-bitmap-index --all packa </dev/null) && + packbsha1=$(git pack-objects --use-bitmap-index --all packb </dev/null) && + list_packed_objects packa-$packasha1.idx >packa.objects && + list_packed_objects packb-$packbsha1.idx >packb.objects && + test_cmp packa.objects packb.objects + ' + + test_expect_success 'full repack, reusing previous bitmaps' ' git repack -ad && - ls .git/objects/pack/ | grep bitmap >output && - test_line_count = 1 output && - grep "\"key\":\"num_selected_commits\",\"value\":\"106\"" trace && - grep "\"key\":\"num_maximal_commits\",\"value\":\"107\"" trace -' + ls .git/objects/pack/ | grep bitmap >output && + test_line_count = 1 output + ' + + test_expect_success 'fetch (full bitmap)' ' + git --git-dir=clone.git fetch origin second:second && + git rev-parse HEAD >expect && + git --git-dir=clone.git rev-parse HEAD >actual && + test_cmp expect actual + ' + + test_expect_success 'create objects for missing-HAVE tests' ' + blob=$(echo "missing have" | git hash-object -w --stdin) && + tree=$(printf "100644 blob $blob\tfile\n" | git mktree) && + parent=$(echo parent | git commit-tree $tree) && + commit=$(echo commit | git commit-tree $tree -p $parent) && + cat >revs <<-EOF + HEAD + ^HEAD^ + ^$commit + EOF + ' + + test_expect_success 'pack-objects respects --incremental' ' + cat >revs2 <<-EOF && + HEAD + $commit + EOF + git pack-objects --incremental --stdout --revs <revs2 >4.pack && + git index-pack 4.pack && + list_packed_objects 4.idx >4.objects && + test_line_count = 4 4.objects && + git rev-list --objects $commit >revlist && + cut -d" " -f1 revlist |sort >objects && + test_cmp 4.objects objects + ' + + test_expect_success 'pack with missing blob' ' + rm $(objpath $blob) && + git pack-objects --stdout --revs <revs >/dev/null + ' + + test_expect_success 'pack with missing tree' ' + rm $(objpath $tree) && + git pack-objects --stdout --revs <revs >/dev/null + ' + + test_expect_success 'pack with missing parent' ' + rm $(objpath $parent) && + git pack-objects --stdout --revs <revs >/dev/null + ' + + test_expect_success JGIT,SHA1 'we can read jgit bitmaps' ' + git clone --bare . 
compat-jgit.git && + ( + cd compat-jgit.git && + rm -f objects/pack/*.bitmap && + jgit gc && + git rev-list --test-bitmap HEAD + ) + ' + + test_expect_success JGIT,SHA1 'jgit can read our bitmaps' ' + git clone --bare . compat-us.git && + ( + cd compat-us.git && + git config pack.writeBitmapLookupTable '"$writeLookupTable"' && + git repack -adb && + # jgit gc will barf if it does not like our bitmaps + jgit gc + ) + ' + + test_expect_success 'splitting packs does not generate bogus bitmaps' ' + test-tool genrandom foo $((1024 * 1024)) >rand && + git add rand && + git commit -m "commit with big file" && + git -c pack.packSizeLimit=500k repack -adb && + git init --bare no-bitmaps.git && + git -C no-bitmaps.git fetch .. HEAD + ' + + test_expect_success 'set up reusable pack' ' + rm -f .git/objects/pack/*.keep && + git repack -adb && + reusable_pack () { + git for-each-ref --format="%(objectname)" | + git pack-objects --delta-base-offset --revs --stdout "$@" + } + ' + + test_expect_success 'pack reuse respects --honor-pack-keep' ' + test_when_finished "rm -f .git/objects/pack/*.keep" && + for i in .git/objects/pack/*.pack + do + >${i%.pack}.keep || return 1 + done && + reusable_pack --honor-pack-keep >empty.pack && + git index-pack empty.pack && + git show-index <empty.idx >actual && + test_must_be_empty actual + ' + + test_expect_success 'pack reuse respects --local' ' + mv .git/objects/pack/* alt.git/objects/pack/ && + test_when_finished "mv alt.git/objects/pack/* .git/objects/pack/" && + reusable_pack --local >empty.pack && + git index-pack empty.pack && + git show-index <empty.idx >actual && + test_must_be_empty actual + ' + + test_expect_success 'pack reuse respects --incremental' ' + reusable_pack --incremental >empty.pack && + git index-pack empty.pack && + git show-index <empty.idx >actual && + test_must_be_empty actual + ' + + test_expect_success 'truncated bitmap fails gracefully (ewah)' ' + test_config pack.writebitmaphashcache false && + test_config pack.writebitmaplookuptable false && + git repack -ad && + git rev-list --use-bitmap-index --count --all >expect && + bitmap=$(ls .git/objects/pack/*.bitmap) && + test_when_finished "rm -f $bitmap" && + test_copy_bytes 256 <$bitmap >$bitmap.tmp && + mv -f $bitmap.tmp $bitmap && + git rev-list --use-bitmap-index --count --all >actual 2>stderr && + test_cmp expect actual && + test_i18ngrep corrupt.ewah.bitmap stderr + ' + + test_expect_success 'truncated bitmap fails gracefully (cache)' ' + git config pack.writeBitmapLookupTable '"$writeLookupTable"' && + git repack -ad && + git rev-list --use-bitmap-index --count --all >expect && + bitmap=$(ls .git/objects/pack/*.bitmap) && + test_when_finished "rm -f $bitmap" && + test_copy_bytes 512 <$bitmap >$bitmap.tmp && + mv -f $bitmap.tmp $bitmap && + git rev-list --use-bitmap-index --count --all >actual 2>stderr && + test_cmp expect actual && + test_i18ngrep corrupted.bitmap.index stderr + ' + + # Create a state of history with these properties: + # + # - refs that allow a client to fetch some new history, while sharing some old + # history with the server; we use branches delta-reuse-old and + # delta-reuse-new here + # + # - the new history contains an object that is stored on the server as a delta + # against a base that is in the old history + # + # - the base object is not immediately reachable from the tip of the old + # history; finding it would involve digging down through history we know the + # other side has + # + # This should result in a state where fetching from old->new would not + 
# traditionally reuse the on-disk delta (because we'd have to dig to realize + # that the client has it), but we will do so if bitmaps can tell us cheaply + # that the other side has it. + test_expect_success 'set up thin delta-reuse parent' ' + # This first commit contains the buried base object. + test-tool genrandom delta 16384 >file && + git add file && + git commit -m "delta base" && + base=$(git rev-parse --verify HEAD:file) && + + # These intermediate commits bury the base back in history. + # This becomes the "old" state. + for i in 1 2 3 4 5 + do + echo $i >file && + git commit -am "intermediate $i" || return 1 + done && + git branch delta-reuse-old && + + # And now our new history has a delta against the buried base. Note + # that this must be smaller than the original file, since pack-objects + # prefers to create deltas from smaller objects to larger. + test-tool genrandom delta 16300 >file && + git commit -am "delta result" && + delta=$(git rev-parse --verify HEAD:file) && + git branch delta-reuse-new && + + # Repack with bitmaps and double check that we have the expected delta + # relationship. + git repack -adb && + have_delta $delta $base + ' + + # Now we can sanity-check the non-bitmap behavior (that the server is not able + # to reuse the delta). This isn't strictly something we care about, so this + # test could be scrapped in the future. But it makes sure that the next test is + # actually triggering the feature we want. + # + # Note that our tools for working with on-the-wire "thin" packs are limited. So + # we actually perform the fetch, retain the resulting pack, and inspect the + # result. + test_expect_success 'fetch without bitmaps ignores delta against old base' ' + test_config pack.usebitmaps false && + test_when_finished "rm -rf client.git" && + git init --bare client.git && + ( + cd client.git && + git config transfer.unpackLimit 1 && + git fetch .. delta-reuse-old:delta-reuse-old && + git fetch .. delta-reuse-new:delta-reuse-new && + have_delta $delta $ZERO_OID + ) + ' + + # And do the same for the bitmap case, where we do expect to find the delta. + test_expect_success 'fetch with bitmaps can reuse old base' ' + test_config pack.usebitmaps true && + test_when_finished "rm -rf client.git" && + git init --bare client.git && + ( + cd client.git && + git config transfer.unpackLimit 1 && + git fetch .. delta-reuse-old:delta-reuse-old && + git fetch .. delta-reuse-new:delta-reuse-new && + have_delta $delta $base + ) + ' + + test_expect_success 'pack.preferBitmapTips' ' + git init repo && + test_when_finished "rm -fr repo" && + ( + cd repo && + git config pack.writeBitmapLookupTable '"$writeLookupTable"' && + + # create enough commits that not all are receive bitmap + # coverage even if they are all at the tip of some reference. + test_commit_bulk --message="%s" 103 && + + git rev-list HEAD >commits.raw && + sort <commits.raw >commits && + + git log --format="create refs/tags/%s %H" HEAD >refs && + git update-ref --stdin <refs && + + git repack -adb && + test-tool bitmap list-commits | sort >bitmaps && + + # remember which commits did not receive bitmaps + comm -13 bitmaps commits >before && + test_file_not_empty before && + + # mark the commits which did not receive bitmaps as preferred, + # and generate the bitmap again + perl -pe "s{^}{create refs/tags/include/$. 
}" <before | + git update-ref --stdin && + git -c pack.preferBitmapTips=refs/tags/include repack -adb && + + # finally, check that the commit(s) without bitmap coverage + # are not the same ones as before + test-tool bitmap list-commits | sort >bitmaps && + comm -13 bitmaps commits >after && + + ! test_cmp before after + ) + ' + + test_expect_success 'complains about multiple pack bitmaps' ' + rm -fr repo && + git init repo && + test_when_finished "rm -fr repo" && + ( + cd repo && + git config pack.writeBitmapLookupTable '"$writeLookupTable"' && + + test_commit base && + + git repack -adb && + bitmap="$(ls .git/objects/pack/pack-*.bitmap)" && + mv "$bitmap" "$bitmap.bak" && + + test_commit other && + git repack -ab && + + mv "$bitmap.bak" "$bitmap" && + + find .git/objects/pack -type f -name "*.pack" >packs && + find .git/objects/pack -type f -name "*.bitmap" >bitmaps && + test_line_count = 2 packs && + test_line_count = 2 bitmaps && + + git rev-list --use-bitmap-index HEAD 2>err && + grep "ignoring extra bitmap file" err + ) + ' +} -basic_bitmap_tests +test_bitmap_cases test_expect_success 'incremental repack fails when bitmaps are requested' ' test_commit more-1 && @@ -54,219 +447,24 @@ test_expect_success 'incremental repack can disable bitmaps' ' git repack -d --no-write-bitmap-index ' -test_expect_success 'pack-objects respects --local (non-local loose)' ' - git init --bare alt.git && - echo $(pwd)/alt.git/objects >.git/objects/info/alternates && - echo content1 >file1 && - # non-local loose object which is not present in bitmapped pack - altblob=$(GIT_DIR=alt.git git hash-object -w file1) && - # non-local loose object which is also present in bitmapped pack - git cat-file blob $blob | GIT_DIR=alt.git git hash-object -w --stdin && - git add file1 && - test_tick && - git commit -m commit_file1 && - echo HEAD | git pack-objects --local --stdout --revs >1.pack && - git index-pack 1.pack && - list_packed_objects 1.idx >1.objects && - printf "%s\n" "$altblob" "$blob" >nonlocal-loose && - ! has_any nonlocal-loose 1.objects -' - -test_expect_success 'pack-objects respects --honor-pack-keep (local non-bitmapped pack)' ' - echo content2 >file2 && - blob2=$(git hash-object -w file2) && - git add file2 && - test_tick && - git commit -m commit_file2 && - printf "%s\n" "$blob2" "$bitmaptip" >keepobjects && - pack2=$(git pack-objects pack2 <keepobjects) && - mv pack2-$pack2.* .git/objects/pack/ && - >.git/objects/pack/pack2-$pack2.keep && - rm $(objpath $blob2) && - echo HEAD | git pack-objects --honor-pack-keep --stdout --revs >2a.pack && - git index-pack 2a.pack && - list_packed_objects 2a.idx >2a.objects && - ! has_any keepobjects 2a.objects -' - -test_expect_success 'pack-objects respects --local (non-local pack)' ' - mv .git/objects/pack/pack2-$pack2.* alt.git/objects/pack/ && - echo HEAD | git pack-objects --local --stdout --revs >2b.pack && - git index-pack 2b.pack && - list_packed_objects 2b.idx >2b.objects && - ! 
has_any keepobjects 2b.objects -' - -test_expect_success 'pack-objects respects --honor-pack-keep (local bitmapped pack)' ' - ls .git/objects/pack/ | grep bitmap >output && - test_line_count = 1 output && - packbitmap=$(basename $(cat output) .bitmap) && - list_packed_objects .git/objects/pack/$packbitmap.idx >packbitmap.objects && - test_when_finished "rm -f .git/objects/pack/$packbitmap.keep" && - >.git/objects/pack/$packbitmap.keep && - echo HEAD | git pack-objects --honor-pack-keep --stdout --revs >3a.pack && - git index-pack 3a.pack && - list_packed_objects 3a.idx >3a.objects && - ! has_any packbitmap.objects 3a.objects -' - -test_expect_success 'pack-objects respects --local (non-local bitmapped pack)' ' - mv .git/objects/pack/$packbitmap.* alt.git/objects/pack/ && - rm -f .git/objects/pack/multi-pack-index && - test_when_finished "mv alt.git/objects/pack/$packbitmap.* .git/objects/pack/" && - echo HEAD | git pack-objects --local --stdout --revs >3b.pack && - git index-pack 3b.pack && - list_packed_objects 3b.idx >3b.objects && - ! has_any packbitmap.objects 3b.objects -' - -test_expect_success 'pack-objects to file can use bitmap' ' - # make sure we still have 1 bitmap index from previous tests - ls .git/objects/pack/ | grep bitmap >output && - test_line_count = 1 output && - # verify equivalent packs are generated with/without using bitmap index - packasha1=$(git pack-objects --no-use-bitmap-index --all packa </dev/null) && - packbsha1=$(git pack-objects --use-bitmap-index --all packb </dev/null) && - list_packed_objects packa-$packasha1.idx >packa.objects && - list_packed_objects packb-$packbsha1.idx >packb.objects && - test_cmp packa.objects packb.objects -' - -test_expect_success 'full repack, reusing previous bitmaps' ' - git repack -ad && - ls .git/objects/pack/ | grep bitmap >output && - test_line_count = 1 output -' - -test_expect_success 'fetch (full bitmap)' ' - git --git-dir=clone.git fetch origin second:second && - git rev-parse HEAD >expect && - git --git-dir=clone.git rev-parse HEAD >actual && - test_cmp expect actual -' - -test_expect_success 'create objects for missing-HAVE tests' ' - blob=$(echo "missing have" | git hash-object -w --stdin) && - tree=$(printf "100644 blob $blob\tfile\n" | git mktree) && - parent=$(echo parent | git commit-tree $tree) && - commit=$(echo commit | git commit-tree $tree -p $parent) && - cat >revs <<-EOF - HEAD - ^HEAD^ - ^$commit - EOF -' - -test_expect_success 'pack-objects respects --incremental' ' - cat >revs2 <<-EOF && - HEAD - $commit - EOF - git pack-objects --incremental --stdout --revs <revs2 >4.pack && - git index-pack 4.pack && - list_packed_objects 4.idx >4.objects && - test_line_count = 4 4.objects && - git rev-list --objects $commit >revlist && - cut -d" " -f1 revlist |sort >objects && - test_cmp 4.objects objects -' - -test_expect_success 'pack with missing blob' ' - rm $(objpath $blob) && - git pack-objects --stdout --revs <revs >/dev/null -' - -test_expect_success 'pack with missing tree' ' - rm $(objpath $tree) && - git pack-objects --stdout --revs <revs >/dev/null -' - -test_expect_success 'pack with missing parent' ' - rm $(objpath $parent) && - git pack-objects --stdout --revs <revs >/dev/null -' +test_bitmap_cases "pack.writeBitmapLookupTable" -test_expect_success JGIT,SHA1 'we can read jgit bitmaps' ' - git clone --bare . 
compat-jgit.git && - ( - cd compat-jgit.git && - rm -f objects/pack/*.bitmap && - jgit gc && - git rev-list --test-bitmap HEAD - ) -' - -test_expect_success JGIT,SHA1 'jgit can read our bitmaps' ' - git clone --bare . compat-us.git && - ( - cd compat-us.git && - git repack -adb && - # jgit gc will barf if it does not like our bitmaps - jgit gc - ) -' - -test_expect_success 'splitting packs does not generate bogus bitmaps' ' - test-tool genrandom foo $((1024 * 1024)) >rand && - git add rand && - git commit -m "commit with big file" && - git -c pack.packSizeLimit=500k repack -adb && - git init --bare no-bitmaps.git && - git -C no-bitmaps.git fetch .. HEAD +test_expect_success 'verify writing bitmap lookup table when enabled' ' + GIT_TRACE2_EVENT="$(pwd)/trace2" \ + git repack -ad && + grep "\"label\":\"writing_lookup_table\"" trace2 ' -test_expect_success 'set up reusable pack' ' - rm -f .git/objects/pack/*.keep && +test_expect_success 'lookup table is actually used to traverse objects' ' git repack -adb && - reusable_pack () { - git for-each-ref --format="%(objectname)" | - git pack-objects --delta-base-offset --revs --stdout "$@" - } + GIT_TRACE2_EVENT="$(pwd)/trace3" \ + git rev-list --use-bitmap-index --count --all && + grep "\"label\":\"reading_lookup_table\"" trace3 ' -test_expect_success 'pack reuse respects --honor-pack-keep' ' - test_when_finished "rm -f .git/objects/pack/*.keep" && - for i in .git/objects/pack/*.pack - do - >${i%.pack}.keep || return 1 - done && - reusable_pack --honor-pack-keep >empty.pack && - git index-pack empty.pack && - git show-index <empty.idx >actual && - test_must_be_empty actual -' - -test_expect_success 'pack reuse respects --local' ' - mv .git/objects/pack/* alt.git/objects/pack/ && - test_when_finished "mv alt.git/objects/pack/* .git/objects/pack/" && - reusable_pack --local >empty.pack && - git index-pack empty.pack && - git show-index <empty.idx >actual && - test_must_be_empty actual -' - -test_expect_success 'pack reuse respects --incremental' ' - reusable_pack --incremental >empty.pack && - git index-pack empty.pack && - git show-index <empty.idx >actual && - test_must_be_empty actual -' - -test_expect_success 'truncated bitmap fails gracefully (ewah)' ' +test_expect_success 'truncated bitmap fails gracefully (lookup table)' ' test_config pack.writebitmaphashcache false && - git repack -ad && - git rev-list --use-bitmap-index --count --all >expect && - bitmap=$(ls .git/objects/pack/*.bitmap) && - test_when_finished "rm -f $bitmap" && - test_copy_bytes 256 <$bitmap >$bitmap.tmp && - mv -f $bitmap.tmp $bitmap && - git rev-list --use-bitmap-index --count --all >actual 2>stderr && - test_cmp expect actual && - test_i18ngrep corrupt.ewah.bitmap stderr -' - -test_expect_success 'truncated bitmap fails gracefully (cache)' ' - git repack -ad && + git repack -adb && git rev-list --use-bitmap-index --count --all >expect && bitmap=$(ls .git/objects/pack/*.bitmap) && test_when_finished "rm -f $bitmap" && @@ -277,152 +475,4 @@ test_expect_success 'truncated bitmap fails gracefully (cache)' ' test_i18ngrep corrupted.bitmap.index stderr ' -# Create a state of history with these properties: -# -# - refs that allow a client to fetch some new history, while sharing some old -# history with the server; we use branches delta-reuse-old and -# delta-reuse-new here -# -# - the new history contains an object that is stored on the server as a delta -# against a base that is in the old history -# -# - the base object is not immediately reachable from the tip of the old -# 
history; finding it would involve digging down through history we know the -# other side has -# -# This should result in a state where fetching from old->new would not -# traditionally reuse the on-disk delta (because we'd have to dig to realize -# that the client has it), but we will do so if bitmaps can tell us cheaply -# that the other side has it. -test_expect_success 'set up thin delta-reuse parent' ' - # This first commit contains the buried base object. - test-tool genrandom delta 16384 >file && - git add file && - git commit -m "delta base" && - base=$(git rev-parse --verify HEAD:file) && - - # These intermediate commits bury the base back in history. - # This becomes the "old" state. - for i in 1 2 3 4 5 - do - echo $i >file && - git commit -am "intermediate $i" || return 1 - done && - git branch delta-reuse-old && - - # And now our new history has a delta against the buried base. Note - # that this must be smaller than the original file, since pack-objects - # prefers to create deltas from smaller objects to larger. - test-tool genrandom delta 16300 >file && - git commit -am "delta result" && - delta=$(git rev-parse --verify HEAD:file) && - git branch delta-reuse-new && - - # Repack with bitmaps and double check that we have the expected delta - # relationship. - git repack -adb && - have_delta $delta $base -' - -# Now we can sanity-check the non-bitmap behavior (that the server is not able -# to reuse the delta). This isn't strictly something we care about, so this -# test could be scrapped in the future. But it makes sure that the next test is -# actually triggering the feature we want. -# -# Note that our tools for working with on-the-wire "thin" packs are limited. So -# we actually perform the fetch, retain the resulting pack, and inspect the -# result. -test_expect_success 'fetch without bitmaps ignores delta against old base' ' - test_config pack.usebitmaps false && - test_when_finished "rm -rf client.git" && - git init --bare client.git && - ( - cd client.git && - git config transfer.unpackLimit 1 && - git fetch .. delta-reuse-old:delta-reuse-old && - git fetch .. delta-reuse-new:delta-reuse-new && - have_delta $delta $ZERO_OID - ) -' - -# And do the same for the bitmap case, where we do expect to find the delta. -test_expect_success 'fetch with bitmaps can reuse old base' ' - test_config pack.usebitmaps true && - test_when_finished "rm -rf client.git" && - git init --bare client.git && - ( - cd client.git && - git config transfer.unpackLimit 1 && - git fetch .. delta-reuse-old:delta-reuse-old && - git fetch .. delta-reuse-new:delta-reuse-new && - have_delta $delta $base - ) -' - -test_expect_success 'pack.preferBitmapTips' ' - git init repo && - test_when_finished "rm -fr repo" && - ( - cd repo && - - # create enough commits that not all are receive bitmap - # coverage even if they are all at the tip of some reference. - test_commit_bulk --message="%s" 103 && - - git rev-list HEAD >commits.raw && - sort <commits.raw >commits && - - git log --format="create refs/tags/%s %H" HEAD >refs && - git update-ref --stdin <refs && - - git repack -adb && - test-tool bitmap list-commits | sort >bitmaps && - - # remember which commits did not receive bitmaps - comm -13 bitmaps commits >before && - test_file_not_empty before && - - # mark the commits which did not receive bitmaps as preferred, - # and generate the bitmap again - perl -pe "s{^}{create refs/tags/include/$. 
}" <before | - git update-ref --stdin && - git -c pack.preferBitmapTips=refs/tags/include repack -adb && - - # finally, check that the commit(s) without bitmap coverage - # are not the same ones as before - test-tool bitmap list-commits | sort >bitmaps && - comm -13 bitmaps commits >after && - - ! test_cmp before after - ) -' - -test_expect_success 'complains about multiple pack bitmaps' ' - rm -fr repo && - git init repo && - test_when_finished "rm -fr repo" && - ( - cd repo && - - test_commit base && - - git repack -adb && - bitmap="$(ls .git/objects/pack/pack-*.bitmap)" && - mv "$bitmap" "$bitmap.bak" && - - test_commit other && - git repack -ab && - - mv "$bitmap.bak" "$bitmap" && - - find .git/objects/pack -type f -name "*.pack" >packs && - find .git/objects/pack -type f -name "*.bitmap" >bitmaps && - test_line_count = 2 packs && - test_line_count = 2 bitmaps && - - git rev-list --use-bitmap-index HEAD 2>err && - grep "ignoring extra bitmap file" err - ) -' - test_done diff --git a/t/t5311-pack-bitmaps-shallow.sh b/t/t5311-pack-bitmaps-shallow.sh index 872a95df33..9dae60f73e 100755 --- a/t/t5311-pack-bitmaps-shallow.sh +++ b/t/t5311-pack-bitmaps-shallow.sh @@ -17,23 +17,40 @@ test_description='check bitmap operation with shallow repositories' # the tree for A. But in a shallow one, we've grafted away # A, and fetching A to B requires that the other side send # us the tree for file=1. -test_expect_success 'setup shallow repo' ' - echo 1 >file && - git add file && - git commit -m orig && - echo 2 >file && - git commit -a -m update && - git clone --no-local --bare --depth=1 . shallow.git && - echo 1 >file && - git commit -a -m repeat -' - -test_expect_success 'turn on bitmaps in the parent' ' - git repack -adb -' - -test_expect_success 'shallow fetch from bitmapped repo' ' - (cd shallow.git && git fetch) -' +test_shallow_bitmaps () { + writeLookupTable=false + + for i in "$@" + do + case $i in + "pack.writeBitmapLookupTable") writeLookupTable=true;; + esac + done + + test_expect_success 'setup shallow repo' ' + rm -rf * .git && + git init && + git config pack.writeBitmapLookupTable '"$writeLookupTable"' && + echo 1 >file && + git add file && + git commit -m orig && + echo 2 >file && + git commit -a -m update && + git clone --no-local --bare --depth=1 . shallow.git && + echo 1 >file && + git commit -a -m repeat + ' + + test_expect_success 'turn on bitmaps in the parent' ' + git repack -adb + ' + + test_expect_success 'shallow fetch from bitmapped repo' ' + (cd shallow.git && git fetch) + ' +} + +test_shallow_bitmaps +test_shallow_bitmaps "pack.writeBitmapLookupTable" test_done diff --git a/t/t5318-commit-graph.sh b/t/t5318-commit-graph.sh index 1b0cd82359..049c5fc8ea 100755 --- a/t/t5318-commit-graph.sh +++ b/t/t5318-commit-graph.sh @@ -12,12 +12,12 @@ test_expect_success 'usage' ' test_expect_success 'usage shown without sub-command' ' test_expect_code 129 git commit-graph 2>err && - ! 
grep error: err + grep usage: err ' test_expect_success 'usage shown with an error on unknown sub-command' ' cat >expect <<-\EOF && - error: unrecognized subcommand: unknown + error: unknown subcommand: `unknown'\'' EOF test_expect_code 129 git commit-graph unknown 2>stderr && grep error stderr >actual && diff --git a/t/t5326-multi-pack-bitmaps.sh b/t/t5326-multi-pack-bitmaps.sh index 4fe57414c1..ad6eea5fa0 100755 --- a/t/t5326-multi-pack-bitmaps.sh +++ b/t/t5326-multi-pack-bitmaps.sh @@ -15,17 +15,24 @@ GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP=0 sane_unset GIT_TEST_MIDX_WRITE_REV sane_unset GIT_TEST_MIDX_READ_RIDX -midx_bitmap_core - bitmap_reuse_tests() { from=$1 to=$2 + writeLookupTable=false + + for i in $3-${$#} + do + case $i in + "pack.writeBitmapLookupTable") writeLookupTable=true;; + esac + done test_expect_success "setup pack reuse tests ($from -> $to)" ' rm -fr repo && git init repo && ( cd repo && + git config pack.writeBitmapLookupTable '"$writeLookupTable"' && test_commit_bulk 16 && git tag old-tip && @@ -43,6 +50,7 @@ bitmap_reuse_tests() { test_expect_success "build bitmap from existing ($from -> $to)" ' ( cd repo && + git config pack.writeBitmapLookupTable '"$writeLookupTable"' && test_commit_bulk --id=further 16 && git tag new-tip && @@ -59,6 +67,7 @@ bitmap_reuse_tests() { test_expect_success "verify resulting bitmaps ($from -> $to)" ' ( cd repo && + git config pack.writeBitmapLookupTable '"$writeLookupTable"' && git for-each-ref && git rev-list --test-bitmap refs/tags/old-tip && git rev-list --test-bitmap refs/tags/new-tip @@ -66,244 +75,338 @@ bitmap_reuse_tests() { ' } -bitmap_reuse_tests 'pack' 'MIDX' -bitmap_reuse_tests 'MIDX' 'pack' -bitmap_reuse_tests 'MIDX' 'MIDX' +test_midx_bitmap_cases () { + writeLookupTable=false + writeBitmapLookupTable= + + for i in "$@" + do + case $i in + "pack.writeBitmapLookupTable") + writeLookupTable=true + writeBitmapLookupTable="$i" + ;; + esac + done + + test_expect_success 'setup test_repository' ' + rm -rf * .git && + git init && + git config pack.writeBitmapLookupTable '"$writeLookupTable"' + ' -test_expect_success 'missing object closure fails gracefully' ' - rm -fr repo && - git init repo && - test_when_finished "rm -fr repo" && - ( - cd repo && + midx_bitmap_core - test_commit loose && - test_commit packed && + bitmap_reuse_tests 'pack' 'MIDX' "$writeBitmapLookupTable" + bitmap_reuse_tests 'MIDX' 'pack' "$writeBitmapLookupTable" + bitmap_reuse_tests 'MIDX' 'MIDX' "$writeBitmapLookupTable" - # Do not pass "--revs"; we want a pack without the "loose" - # commit. - git pack-objects $objdir/pack/pack <<-EOF && - $(git rev-parse packed) - EOF + test_expect_success 'missing object closure fails gracefully' ' + rm -fr repo && + git init repo && + test_when_finished "rm -fr repo" && + ( + cd repo && + git config pack.writeBitmapLookupTable '"$writeLookupTable"' && - test_must_fail git multi-pack-index write --bitmap 2>err && - grep "doesn.t have full closure" err && - test_path_is_missing $midx - ) -' + test_commit loose && + test_commit packed && -midx_bitmap_partial_tests + # Do not pass "--revs"; we want a pack without the "loose" + # commit. 
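+		# [Editor's note, not part of the original patch: without "--revs",
+		# pack-objects reads literal object names from stdin and packs only
+		# those objects; with "--revs" the input would instead be parsed
+		# like rev-list arguments and the whole reachable closure packed,
+		# roughly (illustrative only):
+		#
+		#	git pack-objects --revs $objdir/pack/pack <<-INPUT
+		#	$(git rev-parse packed)
+		#	INPUT
+		#
+		# which would also pull in the "loose" commit and defeat the
+		# purpose of this test.]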
+ git pack-objects $objdir/pack/pack <<-EOF && + $(git rev-parse packed) + EOF -test_expect_success 'removing a MIDX clears stale bitmaps' ' - rm -fr repo && - git init repo && - test_when_finished "rm -fr repo" && - ( - cd repo && - test_commit base && - git repack && - git multi-pack-index write --bitmap && + test_must_fail git multi-pack-index write --bitmap 2>err && + grep "doesn.t have full closure" err && + test_path_is_missing $midx + ) + ' - # Write a MIDX and bitmap; remove the MIDX but leave the bitmap. - stale_bitmap=$midx-$(midx_checksum $objdir).bitmap && - rm $midx && + midx_bitmap_partial_tests - # Then write a new MIDX. - test_commit new && - git repack && - git multi-pack-index write --bitmap && + test_expect_success 'removing a MIDX clears stale bitmaps' ' + rm -fr repo && + git init repo && + test_when_finished "rm -fr repo" && + ( + cd repo && + git config pack.writeBitmapLookupTable '"$writeLookupTable"' && + test_commit base && + git repack && + git multi-pack-index write --bitmap && + + # Write a MIDX and bitmap; remove the MIDX but leave the bitmap. + stale_bitmap=$midx-$(midx_checksum $objdir).bitmap && + rm $midx && + + # Then write a new MIDX. + test_commit new && + git repack && + git multi-pack-index write --bitmap && + + test_path_is_file $midx && + test_path_is_file $midx-$(midx_checksum $objdir).bitmap && + test_path_is_missing $stale_bitmap + ) + ' - test_path_is_file $midx && - test_path_is_file $midx-$(midx_checksum $objdir).bitmap && - test_path_is_missing $stale_bitmap - ) -' + test_expect_success 'pack.preferBitmapTips' ' + git init repo && + test_when_finished "rm -fr repo" && + ( + cd repo && + git config pack.writeBitmapLookupTable '"$writeLookupTable"' && -test_expect_success 'pack.preferBitmapTips' ' - git init repo && - test_when_finished "rm -fr repo" && - ( - cd repo && + test_commit_bulk --message="%s" 103 && - test_commit_bulk --message="%s" 103 && + git log --format="%H" >commits.raw && + sort <commits.raw >commits && - git log --format="%H" >commits.raw && - sort <commits.raw >commits && + git log --format="create refs/tags/%s %H" HEAD >refs && + git update-ref --stdin <refs && - git log --format="create refs/tags/%s %H" HEAD >refs && - git update-ref --stdin <refs && + git multi-pack-index write --bitmap && + test_path_is_file $midx && + test_path_is_file $midx-$(midx_checksum $objdir).bitmap && - git multi-pack-index write --bitmap && - test_path_is_file $midx && - test_path_is_file $midx-$(midx_checksum $objdir).bitmap && + test-tool bitmap list-commits | sort >bitmaps && + comm -13 bitmaps commits >before && + test_line_count = 1 before && - test-tool bitmap list-commits | sort >bitmaps && - comm -13 bitmaps commits >before && - test_line_count = 1 before && + perl -ne "printf(\"create refs/tags/include/%d \", $.); print" \ + <before | git update-ref --stdin && - perl -ne "printf(\"create refs/tags/include/%d \", $.); print" \ - <before | git update-ref --stdin && + rm -fr $midx-$(midx_checksum $objdir).bitmap && + rm -fr $midx && - rm -fr $midx-$(midx_checksum $objdir).bitmap && - rm -fr $midx && + git -c pack.preferBitmapTips=refs/tags/include \ + multi-pack-index write --bitmap && + test-tool bitmap list-commits | sort >bitmaps && + comm -13 bitmaps commits >after && - git -c pack.preferBitmapTips=refs/tags/include \ - multi-pack-index write --bitmap && - test-tool bitmap list-commits | sort >bitmaps && - comm -13 bitmaps commits >after && + ! test_cmp before after + ) + ' - ! 
test_cmp before after - ) -' + test_expect_success 'writing a bitmap with --refs-snapshot' ' + git init repo && + test_when_finished "rm -fr repo" && + ( + cd repo && + git config pack.writeBitmapLookupTable '"$writeLookupTable"' && -test_expect_success 'writing a bitmap with --refs-snapshot' ' - git init repo && - test_when_finished "rm -fr repo" && - ( - cd repo && + test_commit one && + test_commit two && - test_commit one && - test_commit two && + git rev-parse one >snapshot && - git rev-parse one >snapshot && + git repack -ad && - git repack -ad && + # First, write a MIDX which see both refs/tags/one and + # refs/tags/two (causing both of those commits to receive + # bitmaps). + git multi-pack-index write --bitmap && - # First, write a MIDX which see both refs/tags/one and - # refs/tags/two (causing both of those commits to receive - # bitmaps). - git multi-pack-index write --bitmap && + test_path_is_file $midx && + test_path_is_file $midx-$(midx_checksum $objdir).bitmap && - test_path_is_file $midx && - test_path_is_file $midx-$(midx_checksum $objdir).bitmap && + test-tool bitmap list-commits | sort >bitmaps && + grep "$(git rev-parse one)" bitmaps && + grep "$(git rev-parse two)" bitmaps && - test-tool bitmap list-commits | sort >bitmaps && - grep "$(git rev-parse one)" bitmaps && - grep "$(git rev-parse two)" bitmaps && + rm -fr $midx-$(midx_checksum $objdir).bitmap && + rm -fr $midx && - rm -fr $midx-$(midx_checksum $objdir).bitmap && - rm -fr $midx && + # Then again, but with a refs snapshot which only sees + # refs/tags/one. + git multi-pack-index write --bitmap --refs-snapshot=snapshot && - # Then again, but with a refs snapshot which only sees - # refs/tags/one. - git multi-pack-index write --bitmap --refs-snapshot=snapshot && + test_path_is_file $midx && + test_path_is_file $midx-$(midx_checksum $objdir).bitmap && - test_path_is_file $midx && - test_path_is_file $midx-$(midx_checksum $objdir).bitmap && + test-tool bitmap list-commits | sort >bitmaps && + grep "$(git rev-parse one)" bitmaps && + ! grep "$(git rev-parse two)" bitmaps + ) + ' - test-tool bitmap list-commits | sort >bitmaps && - grep "$(git rev-parse one)" bitmaps && - ! 
grep "$(git rev-parse two)" bitmaps - ) -' + test_expect_success 'write a bitmap with --refs-snapshot (preferred tips)' ' + git init repo && + test_when_finished "rm -fr repo" && + ( + cd repo && + git config pack.writeBitmapLookupTable '"$writeLookupTable"' && -test_expect_success 'write a bitmap with --refs-snapshot (preferred tips)' ' - git init repo && - test_when_finished "rm -fr repo" && - ( - cd repo && + test_commit_bulk --message="%s" 103 && - test_commit_bulk --message="%s" 103 && + git log --format="%H" >commits.raw && + sort <commits.raw >commits && - git log --format="%H" >commits.raw && - sort <commits.raw >commits && + git log --format="create refs/tags/%s %H" HEAD >refs && + git update-ref --stdin <refs && - git log --format="create refs/tags/%s %H" HEAD >refs && - git update-ref --stdin <refs && + git multi-pack-index write --bitmap && + test_path_is_file $midx && + test_path_is_file $midx-$(midx_checksum $objdir).bitmap && - git multi-pack-index write --bitmap && - test_path_is_file $midx && - test_path_is_file $midx-$(midx_checksum $objdir).bitmap && + test-tool bitmap list-commits | sort >bitmaps && + comm -13 bitmaps commits >before && + test_line_count = 1 before && + + ( + grep -vf before commits.raw && + # mark missing commits as preferred + sed "s/^/+/" before + ) >snapshot && + + rm -fr $midx-$(midx_checksum $objdir).bitmap && + rm -fr $midx && - test-tool bitmap list-commits | sort >bitmaps && - comm -13 bitmaps commits >before && - test_line_count = 1 before && + git multi-pack-index write --bitmap --refs-snapshot=snapshot && + test-tool bitmap list-commits | sort >bitmaps && + comm -13 bitmaps commits >after && + ! test_cmp before after + ) + ' + + test_expect_success 'hash-cache values are propagated from pack bitmaps' ' + rm -fr repo && + git init repo && + test_when_finished "rm -fr repo" && ( - grep -vf before commits.raw && - # mark missing commits as preferred - sed "s/^/+/" before - ) >snapshot && + cd repo && + git config pack.writeBitmapLookupTable '"$writeLookupTable"' && - rm -fr $midx-$(midx_checksum $objdir).bitmap && - rm -fr $midx && + test_commit base && + test_commit base2 && + git repack -adb && - git multi-pack-index write --bitmap --refs-snapshot=snapshot && - test-tool bitmap list-commits | sort >bitmaps && - comm -13 bitmaps commits >after && + test-tool bitmap dump-hashes >pack.raw && + test_file_not_empty pack.raw && + sort pack.raw >pack.hashes && - ! test_cmp before after - ) -' + test_commit new && + git repack && + git multi-pack-index write --bitmap && -test_expect_success 'hash-cache values are propagated from pack bitmaps' ' - rm -fr repo && - git init repo && - test_when_finished "rm -fr repo" && - ( - cd repo && + test-tool bitmap dump-hashes >midx.raw && + sort midx.raw >midx.hashes && - test_commit base && - test_commit base2 && - git repack -adb && + # ensure that every namehash in the pack bitmap can be found in + # the midx bitmap (i.e., that there are no oid-namehash pairs + # unique to the pack bitmap). 
+ comm -23 pack.hashes midx.hashes >dropped.hashes && + test_must_be_empty dropped.hashes + ) + ' - test-tool bitmap dump-hashes >pack.raw && - test_file_not_empty pack.raw && - sort pack.raw >pack.hashes && + test_expect_success 'no .bitmap is written without any objects' ' + rm -fr repo && + git init repo && + test_when_finished "rm -fr repo" && + ( + cd repo && + git config pack.writeBitmapLookupTable '"$writeLookupTable"' && - test_commit new && - git repack && - git multi-pack-index write --bitmap && + empty="$(git pack-objects $objdir/pack/pack </dev/null)" && + cat >packs <<-EOF && + pack-$empty.idx + EOF - test-tool bitmap dump-hashes >midx.raw && - sort midx.raw >midx.hashes && + git multi-pack-index write --bitmap --stdin-packs \ + <packs 2>err && - # ensure that every namehash in the pack bitmap can be found in - # the midx bitmap (i.e., that there are no oid-namehash pairs - # unique to the pack bitmap). - comm -23 pack.hashes midx.hashes >dropped.hashes && - test_must_be_empty dropped.hashes - ) -' + grep "bitmap without any objects" err && -test_expect_success 'no .bitmap is written without any objects' ' - rm -fr repo && - git init repo && - test_when_finished "rm -fr repo" && - ( - cd repo && + test_path_is_file $midx && + test_path_is_missing $midx-$(midx_checksum $objdir).bitmap + ) + ' + + test_expect_success 'graceful fallback when missing reverse index' ' + rm -fr repo && + git init repo && + test_when_finished "rm -fr repo" && + ( + cd repo && + git config pack.writeBitmapLookupTable '"$writeLookupTable"' && - empty="$(git pack-objects $objdir/pack/pack </dev/null)" && - cat >packs <<-EOF && - pack-$empty.idx - EOF + test_commit base && - git multi-pack-index write --bitmap --stdin-packs \ - <packs 2>err && + # write a pack and MIDX bitmap containing base + git repack -adb && + git multi-pack-index write --bitmap && - grep "bitmap without any objects" err && + GIT_TEST_MIDX_READ_RIDX=0 \ + git rev-list --use-bitmap-index HEAD 2>err && + ! grep "ignoring extra bitmap file" err + ) + ' +} - test_path_is_file $midx && - test_path_is_missing $midx-$(midx_checksum $objdir).bitmap - ) -' +test_midx_bitmap_cases -test_expect_success 'graceful fallback when missing reverse index' ' +test_midx_bitmap_cases "pack.writeBitmapLookupTable" + +test_expect_success 'multi-pack-index write writes lookup table if enabled' ' rm -fr repo && git init repo && test_when_finished "rm -fr repo" && ( cd repo && + test_commit base && + git config pack.writeBitmapLookupTable true && + git repack -ad && + GIT_TRACE2_EVENT="$(pwd)/trace" \ + git multi-pack-index write --bitmap && + grep "\"label\":\"writing_lookup_table\"" trace + ) +' + +test_expect_success 'preferred pack change with existing MIDX bitmap' ' + git init preferred-pack-with-existing && + ( + cd preferred-pack-with-existing && test_commit base && + test_commit other && + + git rev-list --objects --no-object-names base >p1.objects && + git rev-list --objects --no-object-names other >p2.objects && - # write a pack and MIDX bitmap containing base - git repack -adb && - git multi-pack-index write --bitmap && + p1="$(git pack-objects "$objdir/pack/pack" \ + --delta-base-offset <p1.objects)" && + p2="$(git pack-objects "$objdir/pack/pack" \ + --delta-base-offset <p2.objects)" && + + # Generate a MIDX containing the first two packs, + # marking p1 as preferred, and ensure that it can be + # successfully cloned. 
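+		# [Editor's note, not part of the original patch: the preferred
+		# pack acts as the tie-breaker when the same object appears in
+		# several packs, and its objects are laid out first in the
+		# pseudo-pack order the MIDX bitmap is built against; the steps
+		# below swap the preference to p2 and re-clone to show that the
+		# bitmap stays usable after the change.]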
+ git multi-pack-index write --bitmap \ + --preferred-pack="pack-$p1.pack" && + test_path_is_file $midx && + test_path_is_file $midx-$(midx_checksum $objdir).bitmap && + git clone --no-local . clone1 && + + # Then generate a new pack which sorts ahead of any + # existing pack (by tweaking the pack prefix). + test_commit foo && + git pack-objects --all --unpacked $objdir/pack/pack0 && + + # Generate a new MIDX which changes the preferred pack + # to a pack contained in the existing MIDX. + git multi-pack-index write --bitmap \ + --preferred-pack="pack-$p2.pack" && + test_path_is_file $midx && + test_path_is_file $midx-$(midx_checksum $objdir).bitmap && - GIT_TEST_MIDX_READ_RIDX=0 \ - git rev-list --use-bitmap-index HEAD 2>err && - ! grep "ignoring extra bitmap file" err + # When the above circumstances are met, the preferred + # pack should change appropriately and clones should + # (still) succeed. + git clone --no-local . clone2 ) ' diff --git a/t/t5327-multi-pack-bitmaps-rev.sh b/t/t5327-multi-pack-bitmaps-rev.sh index d30ba632c8..e65e311cd7 100755 --- a/t/t5327-multi-pack-bitmaps-rev.sh +++ b/t/t5327-multi-pack-bitmaps-rev.sh @@ -17,7 +17,27 @@ GIT_TEST_MIDX_READ_RIDX=0 export GIT_TEST_MIDX_WRITE_REV export GIT_TEST_MIDX_READ_RIDX -midx_bitmap_core rev -midx_bitmap_partial_tests rev +test_midx_bitmap_rev () { + writeLookupTable=false + + for i in "$@" + do + case $i in + "pack.writeBitmapLookupTable") writeLookupTable=true;; + esac + done + + test_expect_success 'setup bitmap config' ' + rm -rf * .git && + git init && + git config pack.writeBitmapLookupTable '"$writeLookupTable"' + ' + + midx_bitmap_core rev + midx_bitmap_partial_tests rev +} + +test_midx_bitmap_rev +test_midx_bitmap_rev "pack.writeBitmapLookupTable" test_done diff --git a/t/t5329-pack-objects-cruft.sh b/t/t5329-pack-objects-cruft.sh index 8968f7a08d..303f7a5d84 100755 --- a/t/t5329-pack-objects-cruft.sh +++ b/t/t5329-pack-objects-cruft.sh @@ -29,7 +29,8 @@ basic_cruft_pack_tests () { while read oid do path="$objdir/$(test_oid_to_path "$oid")" && - printf "%s %d\n" "$oid" "$(test-tool chmtime --get "$path")" + printf "%s %d\n" "$oid" "$(test-tool chmtime --get "$path")" || + echo "object list generation failed for $oid" done | sort -k1 ) >expect && @@ -232,7 +233,7 @@ test_expect_success 'cruft tags rescue tagged objects' ' while read oid do test-tool chmtime -1000 \ - "$objdir/$(test_oid_to_path $oid)" + "$objdir/$(test_oid_to_path $oid)" || exit 1 done <objects && test-tool chmtime -500 \ @@ -272,7 +273,7 @@ test_expect_success 'cruft commits rescue parents, trees' ' while read object do test-tool chmtime -1000 \ - "$objdir/$(test_oid_to_path $object)" + "$objdir/$(test_oid_to_path $object)" || exit 1 done <objects && test-tool chmtime +500 "$objdir/$(test_oid_to_path \ $(git rev-parse HEAD))" && @@ -345,7 +346,7 @@ test_expect_success 'expired objects are pruned' ' while read object do test-tool chmtime -1000 \ - "$objdir/$(test_oid_to_path $object)" + "$objdir/$(test_oid_to_path $object)" || exit 1 done <objects && keep="$(basename "$(ls $packdir/pack-*.pack)")" && diff --git a/t/t5500-fetch-pack.sh b/t/t5500-fetch-pack.sh index ee6d2dde9f..d18f2823d8 100755 --- a/t/t5500-fetch-pack.sh +++ b/t/t5500-fetch-pack.sh @@ -407,6 +407,7 @@ test_expect_success 'in_vain not triggered before first ACK' ' ' test_expect_success 'in_vain resetted upon ACK' ' + test_when_finished rm -f log trace2 && rm -rf myserver myclient && git init myserver && @@ -432,7 +433,8 @@ test_expect_success 'in_vain resetted upon ACK' ' # first. 
The 256th commit is common between the client and the server, # and should reset in_vain. This allows negotiation to continue until # the client reports that first_anotherbranch_commit is common. - git -C myclient fetch --progress origin main 2>log && + GIT_TRACE2_EVENT="$(pwd)/trace2" git -C myclient fetch --progress origin main 2>log && + grep \"key\":\"total_rounds\",\"value\":\"6\" trace2 && test_i18ngrep "Total 3 " log ' diff --git a/t/t5505-remote.sh b/t/t5505-remote.sh index 6c7370f87f..9006196ac6 100755 --- a/t/t5505-remote.sh +++ b/t/t5505-remote.sh @@ -241,6 +241,26 @@ test_expect_success 'add invalid foreign_vcs remote' ' test_cmp expect actual ' +test_expect_success 'without subcommand' ' + echo origin >expect && + git -C test remote >actual && + test_cmp expect actual +' + +test_expect_success 'without subcommand accepts -v' ' + cat >expect <<-EOF && + origin $(pwd)/one (fetch) + origin $(pwd)/one (push) + EOF + git -C test remote -v >actual && + test_cmp expect actual +' + +test_expect_success 'without subcommand does not take arguments' ' + test_expect_code 129 git -C test remote origin 2>err && + grep "^error: unknown subcommand:" err +' + cat >test/expect <<EOF * remote origin Fetch URL: $(pwd)/one diff --git a/t/t5516-fetch-push.sh b/t/t5516-fetch-push.sh index f3356f9ea8..3211002d46 100755 --- a/t/t5516-fetch-push.sh +++ b/t/t5516-fetch-push.sh @@ -200,7 +200,10 @@ test_expect_success 'push with negotiation' ' test_commit -C testrepo unrelated_commit && git -C testrepo config receive.hideRefs refs/remotes/origin/first_commit && test_when_finished "rm event" && - GIT_TRACE2_EVENT="$(pwd)/event" git -c protocol.version=2 -c push.negotiate=1 push testrepo refs/heads/main:refs/remotes/origin/main && + GIT_TRACE2_EVENT="$(pwd)/event" \ + git -c protocol.version=2 -c push.negotiate=1 \ + push testrepo refs/heads/main:refs/remotes/origin/main && + grep \"key\":\"total_rounds\",\"value\":\"1\" event && grep_wrote 2 event # 1 commit, 1 tree ' @@ -224,7 +227,10 @@ test_expect_success 'push with negotiation does not attempt to fetch submodules' git push testrepo $the_first_commit:refs/remotes/origin/first_commit && test_commit -C testrepo unrelated_commit && git -C testrepo config receive.hideRefs refs/remotes/origin/first_commit && - git -c submodule.recurse=true -c protocol.version=2 -c push.negotiate=1 push testrepo refs/heads/main:refs/remotes/origin/main 2>err && + GIT_TRACE2_EVENT="$(pwd)/event" git -c submodule.recurse=true \ + -c protocol.version=2 -c push.negotiate=1 \ + push testrepo refs/heads/main:refs/remotes/origin/main 2>err && + grep \"key\":\"total_rounds\",\"value\":\"1\" event && ! grep "Fetching submodule" err ' diff --git a/t/t5557-http-get.sh b/t/t5557-http-get.sh new file mode 100755 index 0000000000..76a4bbd16a --- /dev/null +++ b/t/t5557-http-get.sh @@ -0,0 +1,39 @@ +#!/bin/sh + +test_description='test downloading a file by URL' + +TEST_PASSES_SANITIZE_LEAK=true + +. ./test-lib.sh + +. 
"$TEST_DIRECTORY"/lib-httpd.sh +start_httpd + +test_expect_success 'get by URL: 404' ' + test_when_finished "rm -f file.temp" && + url="$HTTPD_URL/none.txt" && + cat >input <<-EOF && + capabilities + get $url file1 + EOF + + test_must_fail git remote-http $url <input 2>err && + test_path_is_missing file1 && + grep "failed to download file at URL" err +' + +test_expect_success 'get by URL: 200' ' + echo data >"$HTTPD_DOCUMENT_ROOT_PATH/exists.txt" && + + url="$HTTPD_URL/exists.txt" && + cat >input <<-EOF && + capabilities + get $url file2 + + EOF + + git remote-http $url <input && + test_cmp "$HTTPD_DOCUMENT_ROOT_PATH/exists.txt" file2 +' + +test_done diff --git a/t/t5558-clone-bundle-uri.sh b/t/t5558-clone-bundle-uri.sh new file mode 100755 index 0000000000..ad666a2d28 --- /dev/null +++ b/t/t5558-clone-bundle-uri.sh @@ -0,0 +1,81 @@ +#!/bin/sh + +test_description='test fetching bundles with --bundle-uri' + +. ./test-lib.sh + +test_expect_success 'fail to clone from non-existent file' ' + test_when_finished rm -rf test && + git clone --bundle-uri="$(pwd)/does-not-exist" . test 2>err && + grep "failed to download bundle from URI" err +' + +test_expect_success 'fail to clone from non-bundle file' ' + test_when_finished rm -rf test && + echo bogus >bogus && + git clone --bundle-uri="$(pwd)/bogus" . test 2>err && + grep "is not a bundle" err +' + +test_expect_success 'create bundle' ' + git init clone-from && + git -C clone-from checkout -b topic && + test_commit -C clone-from A && + test_commit -C clone-from B && + git -C clone-from bundle create B.bundle topic +' + +test_expect_success 'clone with path bundle' ' + git clone --bundle-uri="clone-from/B.bundle" \ + clone-from clone-path && + git -C clone-path rev-parse refs/bundles/topic >actual && + git -C clone-from rev-parse topic >expect && + test_cmp expect actual +' + +test_expect_success 'clone with file:// bundle' ' + git clone --bundle-uri="file://$(pwd)/clone-from/B.bundle" \ + clone-from clone-file && + git -C clone-file rev-parse refs/bundles/topic >actual && + git -C clone-from rev-parse topic >expect && + test_cmp expect actual +' + +######################################################################### +# HTTP tests begin here + +. "$TEST_DIRECTORY"/lib-httpd.sh +start_httpd + +test_expect_success 'fail to fetch from non-existent HTTP URL' ' + test_when_finished rm -rf test && + git clone --bundle-uri="$HTTPD_URL/does-not-exist" . test 2>err && + grep "failed to download bundle from URI" err +' + +test_expect_success 'fail to fetch from non-bundle HTTP URL' ' + test_when_finished rm -rf test && + echo bogus >"$HTTPD_DOCUMENT_ROOT_PATH/bogus" && + git clone --bundle-uri="$HTTPD_URL/bogus" . test 2>err && + grep "is not a bundle" err +' + +test_expect_success 'clone HTTP bundle' ' + cp clone-from/B.bundle "$HTTPD_DOCUMENT_ROOT_PATH/B.bundle" && + + git clone --no-local --mirror clone-from \ + "$HTTPD_DOCUMENT_ROOT_PATH/fetch.git" && + + git clone --bundle-uri="$HTTPD_URL/B.bundle" \ + "$HTTPD_URL/smart/fetch.git" clone-http && + git -C clone-http rev-parse refs/bundles/topic >actual && + git -C clone-from rev-parse topic >expect && + test_cmp expect actual && + + test_config -C clone-http log.excludedecoration refs/bundle/ +' + +# Do not add tests here unless they use the HTTP server, as they will +# not run unless the HTTP dependencies exist. 
+ +test_done diff --git a/t/t5601-clone.sh b/t/t5601-clone.sh index cf3be0584f..2e57de9c12 100755 --- a/t/t5601-clone.sh +++ b/t/t5601-clone.sh @@ -743,7 +743,11 @@ test_expect_success 'batch missing blob request during checkout' ' # Ensure that there is only one negotiation by checking that there is # only "done" line sent. ("done" marks the end of negotiation.) - GIT_TRACE_PACKET="$(pwd)/trace" git -C client checkout HEAD^ && + GIT_TRACE_PACKET="$(pwd)/trace" \ + GIT_TRACE2_EVENT="$(pwd)/trace2_event" \ + git -C client -c trace2.eventNesting=5 checkout HEAD^ && + grep \"key\":\"total_rounds\",\"value\":\"1\" trace2_event >trace_lines && + test_line_count = 1 trace_lines && grep "fetch> done" trace >done_lines && test_line_count = 1 done_lines ' diff --git a/t/t5606-clone-options.sh b/t/t5606-clone-options.sh index 8f676d6b0c..f6bb02ab94 100755 --- a/t/t5606-clone-options.sh +++ b/t/t5606-clone-options.sh @@ -58,6 +58,14 @@ test_expect_success 'disallows --bare with --separate-git-dir' ' ' +test_expect_success 'disallows --bundle-uri with shallow options' ' + for option in --depth=1 --shallow-since=01-01-2000 --shallow-exclude=HEAD + do + test_must_fail git clone --bundle-uri=bundle $option from to 2>err && + grep "bundle-uri is incompatible" err || return 1 + done +' + test_expect_success 'reject cloning shallow repository' ' test_when_finished "rm -rf repo" && test_must_fail git clone --reject-shallow shallow-repo out 2>err && diff --git a/t/t5616-partial-clone.sh b/t/t5616-partial-clone.sh index 4a3778d04a..9aeacc2f6a 100755 --- a/t/t5616-partial-clone.sh +++ b/t/t5616-partial-clone.sh @@ -49,6 +49,13 @@ test_expect_success 'do partial clone 1' ' test "$(git -C pc1 config --local remote.origin.partialclonefilter)" = "blob:none" ' +test_expect_success 'rev-list --missing=allow-promisor on partial clone' ' + git -C pc1 rev-list --objects --missing=allow-promisor HEAD >actual && + git -C pc1 rev-list --objects --missing=print HEAD >expect.raw && + grep -v "^?" 
expect.raw >expect && + test_cmp expect actual +' + test_expect_success 'verify that .promisor file contains refs fetched' ' ls pc1/.git/objects/pack/pack-*.promisor >promisorlist && test_line_count = 1 promisorlist && diff --git a/t/t5703-upload-pack-ref-in-want.sh b/t/t5703-upload-pack-ref-in-want.sh index 9d6cd7d986..df74f80061 100755 --- a/t/t5703-upload-pack-ref-in-want.sh +++ b/t/t5703-upload-pack-ref-in-want.sh @@ -229,14 +229,16 @@ test_expect_success 'setup repos for fetching with ref-in-want tests' ' ' test_expect_success 'fetching with exact OID' ' - test_when_finished "rm -f log" && + test_when_finished "rm -f log trace2" && rm -rf local && cp -r "$LOCAL_PRISTINE" local && oid=$(git -C "$REPO" rev-parse d) && - GIT_TRACE_PACKET="$(pwd)/log" git -C local fetch origin \ + GIT_TRACE_PACKET="$(pwd)/log" GIT_TRACE2_EVENT="$(pwd)/trace2" \ + git -C local fetch origin \ "$oid":refs/heads/actual && + grep \"key\":\"total_rounds\",\"value\":\"2\" trace2 && git -C "$REPO" rev-parse "d" >expected && git -C local rev-parse refs/heads/actual >actual && test_cmp expected actual && diff --git a/t/t6019-rev-list-ancestry-path.sh b/t/t6019-rev-list-ancestry-path.sh index af57a04b7f..738da23628 100755 --- a/t/t6019-rev-list-ancestry-path.sh +++ b/t/t6019-rev-list-ancestry-path.sh @@ -8,8 +8,13 @@ test_description='--ancestry-path' # / \ # A-------K---------------L--M # -# D..M == E F G H I J K L M -# --ancestry-path D..M == E F H I J L M +# D..M == E F G H I J K L M +# --ancestry-path D..M == E F H I J L M +# --ancestry-path=F D..M == E F J L M +# --ancestry-path=G D..M == G H I J L M +# --ancestry-path=H D..M == E G H I J L M +# --ancestry-path=K D..M == K L M +# --ancestry-path=K --ancestry-path=F D..M == E F J K L M # # D..M -- M.t == M # --ancestry-path D..M -- M.t == M @@ -50,73 +55,41 @@ test_expect_success setup ' test_commit M ' -test_expect_success 'rev-list D..M' ' - test_write_lines E F G H I J K L M >expect && - git rev-list --format=%s D..M | - sed -e "/^commit /d" | - sort >actual && - test_cmp expect actual -' - -test_expect_success 'rev-list --ancestry-path D..M' ' - test_write_lines E F H I J L M >expect && - git rev-list --ancestry-path --format=%s D..M | - sed -e "/^commit /d" | - sort >actual && - test_cmp expect actual -' - -test_expect_success 'rev-list D..M -- M.t' ' - echo M >expect && - git rev-list --format=%s D..M -- M.t | - sed -e "/^commit /d" >actual && - test_cmp expect actual -' - -test_expect_success 'rev-list --ancestry-path D..M -- M.t' ' - echo M >expect && - git rev-list --ancestry-path --format=%s D..M -- M.t | - sed -e "/^commit /d" >actual && - test_cmp expect actual -' +test_ancestry () { + args=$1 + expected=$2 + test_expect_success "log $args" " + test_write_lines $expected >expect && + git log --format=%s $args >raw && + + if test -n \"$expected\" + then + sort raw >actual && + test_cmp expect actual + else + test_must_be_empty raw + fi + " +} -test_expect_success 'rev-list F...I' ' - test_write_lines F G H I >expect && - git rev-list --format=%s F...I | - sed -e "/^commit /d" | - sort >actual && - test_cmp expect actual -' +test_ancestry "D..M" "E F G H I J K L M" -test_expect_success 'rev-list --ancestry-path F...I' ' - test_write_lines F H I >expect && - git rev-list --ancestry-path --format=%s F...I | - sed -e "/^commit /d" | - sort >actual && - test_cmp expect actual -' +test_ancestry "--ancestry-path D..M" "E F H I J L M" +test_ancestry "--ancestry-path=F D..M" "E F J L M" +test_ancestry "--ancestry-path=G D..M" "G H I J L M" +test_ancestry 
"--ancestry-path=H D..M" "E G H I J L M" +test_ancestry "--ancestry-path=K D..M" "K L M" +test_ancestry "--ancestry-path=F --ancestry-path=K D..M" "E F J K L M" -# G.t is dropped in an "-s ours" merge -test_expect_success 'rev-list G..M -- G.t' ' - git rev-list --format=%s G..M -- G.t | - sed -e "/^commit /d" >actual && - test_must_be_empty actual -' +test_ancestry "D..M -- M.t" "M" +test_ancestry "--ancestry-path D..M -- M.t" "M" -test_expect_success 'rev-list --ancestry-path G..M -- G.t' ' - echo L >expect && - git rev-list --ancestry-path --format=%s G..M -- G.t | - sed -e "/^commit /d" >actual && - test_cmp expect actual -' +test_ancestry "F...I" "F G H I" +test_ancestry "--ancestry-path F...I" "F H I" -test_expect_success 'rev-list --ancestry-path --simplify-merges G^..M -- G.t' ' - test_write_lines G L >expect && - git rev-list --ancestry-path --simplify-merges --format=%s G^..M -- G.t | - sed -e "/^commit /d" | - sort >actual && - test_cmp expect actual -' +test_ancestry "G..M -- G.t" "" +test_ancestry "--ancestry-path G..M -- G.t" "L" +test_ancestry "--ancestry-path --simplify-merges G^..M -- G.t" "G L" # b---bc # / \ / diff --git a/t/t6132-pathspec-exclude.sh b/t/t6132-pathspec-exclude.sh index 9fdafeb1e9..cada952f9a 100755 --- a/t/t6132-pathspec-exclude.sh +++ b/t/t6132-pathspec-exclude.sh @@ -293,7 +293,11 @@ test_expect_success 'add with all negative' ' test_cmp expect actual ' -test_expect_success 'add -p with all negative' ' +test_lazy_prereq ADD_I_USE_BUILTIN_OR_PERL ' + test_have_prereq ADD_I_USE_BUILTIN || test_have_prereq PERL +' + +test_expect_success ADD_I_USE_BUILTIN_OR_PERL 'add -p with all negative' ' H=$(git rev-parse HEAD) && git reset --hard $H && git clean -f && diff --git a/t/t6400-merge-df.sh b/t/t6400-merge-df.sh index 57a67cf362..3de4ef6bd9 100755 --- a/t/t6400-merge-df.sh +++ b/t/t6400-merge-df.sh @@ -126,7 +126,7 @@ test_expect_success 'Simple merge in repo with interesting pathnames' ' # foo/bar-2/baz # The fact that foo/bar-2 appears between foo/bar and foo/bar/baz # can trip up some codepaths, and is the point of this test. 
- test_create_repo name-ordering && + git init name-ordering && ( cd name-ordering && diff --git a/t/t6406-merge-attr.sh b/t/t6406-merge-attr.sh index 99abefd44b..8650a88c40 100755 --- a/t/t6406-merge-attr.sh +++ b/t/t6406-merge-attr.sh @@ -162,8 +162,8 @@ test_expect_success 'custom merge backend' ' ' test_expect_success 'up-to-date merge without common ancestor' ' - test_create_repo repo1 && - test_create_repo repo2 && + git init repo1 && + git init repo2 && test_tick && ( cd repo1 && diff --git a/t/t6416-recursive-corner-cases.sh b/t/t6416-recursive-corner-cases.sh index 690c8482b1..17b54d625d 100755 --- a/t/t6416-recursive-corner-cases.sh +++ b/t/t6416-recursive-corner-cases.sh @@ -19,7 +19,7 @@ export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME # test_expect_success 'setup basic criss-cross + rename with no modifications' ' - test_create_repo basic-rename && + git init basic-rename && ( cd basic-rename && @@ -85,7 +85,7 @@ test_expect_success 'merge simple rename+criss-cross with no modifications' ' # test_expect_success 'setup criss-cross + rename merges with basic modification' ' - test_create_repo rename-modify && + git init rename-modify && ( cd rename-modify && @@ -160,7 +160,7 @@ test_expect_success 'merge criss-cross + rename merges with basic modification' # test_expect_success 'setup differently handled merges of rename/add conflict' ' - test_create_repo rename-add && + git init rename-add && ( cd rename-add && @@ -324,7 +324,7 @@ test_expect_success 'git detects differently handled merges conflict, swapped' ' # Merging commits D & E should result in modify/delete conflict. test_expect_success 'setup criss-cross + modify/delete resolved differently' ' - test_create_repo modify-delete && + git init modify-delete && ( cd modify-delete && @@ -499,7 +499,7 @@ test_expect_success 'git detects conflict merging criss-cross+modify/delete, rev # test_expect_success 'setup differently handled merges of directory/file conflict' ' - test_create_repo directory-file && + git init directory-file && ( cd directory-file && @@ -867,7 +867,7 @@ test_expect_failure 'merge of D2 & E4 merges a2s & reports conflict for a/file' # but that may cancel out at the final merge stage". test_expect_success 'setup rename/rename(1to2)/modify followed by what looks like rename/rename(2to1)/modify' ' - test_create_repo rename-squared-squared && + git init rename-squared-squared && ( cd rename-squared-squared && @@ -944,7 +944,7 @@ test_expect_success 'handle rename/rename(1to2)/modify followed by what looks li # content merge handled. test_expect_success 'setup criss-cross + rename/rename/add-source + modify/modify' ' - test_create_repo rename-rename-add-source && + git init rename-rename-add-source && ( cd rename-rename-add-source && @@ -1032,7 +1032,7 @@ test_expect_failure 'detect rename/rename/add-source for virtual merge-base' ' # base of B & C needs to not delete B:c for that to work, though... test_expect_success 'setup criss-cross+rename/rename/add-dest + simple modify' ' - test_create_repo rename-rename-add-dest && + git init rename-rename-add-dest && ( cd rename-rename-add-dest && @@ -1111,7 +1111,7 @@ test_expect_success 'virtual merge base handles rename/rename(1to2)/add-dest' ' # git detect it? test_expect_success 'setup symlink modify/modify' ' - test_create_repo symlink-modify-modify && + git init symlink-modify-modify && ( cd symlink-modify-modify && @@ -1178,7 +1178,7 @@ test_expect_merge_algorithm failure success 'check symlink modify/modify' ' # git detect it? 
test_expect_success 'setup symlink add/add' ' - test_create_repo symlink-add-add && + git init symlink-add-add && ( cd symlink-add-add && @@ -1244,11 +1244,11 @@ test_expect_merge_algorithm failure success 'check symlink add/add' ' # git detect it? test_expect_success 'setup submodule modify/modify' ' - test_create_repo submodule-modify-modify && + git init submodule-modify-modify && ( cd submodule-modify-modify && - test_create_repo submod && + git init submod && ( cd submod && touch file-A && @@ -1332,11 +1332,11 @@ test_expect_merge_algorithm failure success 'check submodule modify/modify' ' # git detect it? test_expect_success 'setup submodule add/add' ' - test_create_repo submodule-add-add && + git init submodule-add-add && ( cd submodule-add-add && - test_create_repo submod && + git init submod && ( cd submod && touch file-A && @@ -1419,11 +1419,11 @@ test_expect_merge_algorithm failure success 'check submodule add/add' ' # This is an obvious add/add conflict for 'path'. Can git detect it? test_expect_success 'setup conflicting entry types (submodule vs symlink)' ' - test_create_repo submodule-symlink-add-add && + git init submodule-symlink-add-add && ( cd submodule-symlink-add-add && - test_create_repo path && + git init path && ( cd path && touch file-B && @@ -1494,7 +1494,7 @@ test_expect_merge_algorithm failure success 'check conflicting entry types (subm # This is an obvious add/add mode conflict. Can git detect it? test_expect_success 'setup conflicting modes for regular file' ' - test_create_repo regular-file-mode-conflict && + git init regular-file-mode-conflict && ( cd regular-file-mode-conflict && @@ -1571,7 +1571,7 @@ test_expect_failure 'check conflicting modes for regular file' ' # to ensure that we handle it as well as practical. test_expect_success 'setup nested conflicts' ' - test_create_repo nested_conflicts && + git init nested_conflicts && ( cd nested_conflicts && @@ -1757,7 +1757,7 @@ test_expect_success 'check nested conflicts' ' # have three levels of conflict markers. Can we distinguish all three? test_expect_success 'setup virtual merge base with nested conflicts' ' - test_create_repo virtual_merge_base_has_nested_conflicts && + git init virtual_merge_base_has_nested_conflicts && ( cd virtual_merge_base_has_nested_conflicts && diff --git a/t/t6421-merge-partial-clone.sh b/t/t6421-merge-partial-clone.sh index 36bcd7c328..5413e5dd9d 100755 --- a/t/t6421-merge-partial-clone.sh +++ b/t/t6421-merge-partial-clone.sh @@ -31,7 +31,7 @@ test_description="limiting blob downloads when merging with partial clones" test_setup_repo () { test -d server && return - test_create_repo server && + git init server && ( cd server && diff --git a/t/t6422-merge-rename-corner-cases.sh b/t/t6422-merge-rename-corner-cases.sh index 9b65768aed..346253c7c8 100755 --- a/t/t6422-merge-rename-corner-cases.sh +++ b/t/t6422-merge-rename-corner-cases.sh @@ -11,7 +11,7 @@ TEST_PASSES_SANITIZE_LEAK=true . "$TEST_DIRECTORY"/lib-merge.sh test_setup_rename_delete_untracked () { - test_create_repo rename-delete-untracked && + git init rename-delete-untracked && ( cd rename-delete-untracked && @@ -56,7 +56,7 @@ test_expect_success "Does git preserve Gollum's precious artifact?" 
' # We should be able to merge B & C cleanly test_setup_rename_modify_add_source () { - test_create_repo rename-modify-add-source && + git init rename-modify-add-source && ( cd rename-modify-add-source && @@ -96,7 +96,7 @@ test_expect_failure 'rename/modify/add-source conflict resolvable' ' ' test_setup_break_detection_1 () { - test_create_repo break-detection-1 && + git init break-detection-1 && ( cd break-detection-1 && @@ -144,7 +144,7 @@ test_expect_failure 'conflict caused if rename not detected' ' ' test_setup_break_detection_2 () { - test_create_repo break-detection-2 && + git init break-detection-2 && ( cd break-detection-2 && @@ -192,7 +192,7 @@ test_expect_failure 'missed conflict if rename not detected' ' # Commit C: rename a->b, add unrelated a test_setup_break_detection_3 () { - test_create_repo break-detection-3 && + git init break-detection-3 && ( cd break-detection-3 && @@ -268,7 +268,7 @@ test_expect_failure 'detect rename/add-source and preserve all data, merge other ' test_setup_rename_directory () { - test_create_repo rename-directory-$1 && + git init rename-directory-$1 && ( cd rename-directory-$1 && @@ -386,7 +386,7 @@ test_expect_success 'rename/directory conflict + content merge conflict' ' ' test_setup_rename_directory_2 () { - test_create_repo rename-directory-2 && + git init rename-directory-2 && ( cd rename-directory-2 && @@ -445,7 +445,7 @@ test_expect_success 'disappearing dir in rename/directory conflict handled' ' # Commit B: modify a, add different b test_setup_rename_with_content_merge_and_add () { - test_create_repo rename-with-content-merge-and-add-$1 && + git init rename-with-content-merge-and-add-$1 && ( cd rename-with-content-merge-and-add-$1 && @@ -570,7 +570,7 @@ test_expect_success 'handle rename-with-content-merge vs. add, merge other way' # * Nothing else should be present. Is anything? 
test_setup_rename_rename_2to1 () { - test_create_repo rename-rename-2to1 && + git init rename-rename-2to1 && ( cd rename-rename-2to1 && @@ -642,7 +642,7 @@ test_expect_success 'handle rename/rename (2to1) conflict correctly' ' # Commit B: rename a->b # Commit C: rename a->c test_setup_rename_rename_1to2 () { - test_create_repo rename-rename-1to2 && + git init rename-rename-1to2 && ( cd rename-rename-1to2 && @@ -700,7 +700,7 @@ test_expect_success 'merge has correct working tree contents' ' # Merging of B & C should NOT be clean; there's a rename/rename conflict test_setup_rename_rename_1to2_add_source_1 () { - test_create_repo rename-rename-1to2-add-source-1 && + git init rename-rename-1to2-add-source-1 && ( cd rename-rename-1to2-add-source-1 && @@ -748,7 +748,7 @@ test_expect_failure 'detect conflict with rename/rename(1to2)/add-source merge' ' test_setup_rename_rename_1to2_add_source_2 () { - test_create_repo rename-rename-1to2-add-source-2 && + git init rename-rename-1to2-add-source-2 && ( cd rename-rename-1to2-add-source-2 && @@ -794,7 +794,7 @@ test_expect_failure 'rename/rename/add-source still tracks new a file' ' ' test_setup_rename_rename_1to2_add_dest () { - test_create_repo rename-rename-1to2-add-dest && + git init rename-rename-1to2-add-dest && ( cd rename-rename-1to2-add-dest && @@ -874,7 +874,7 @@ test_expect_success 'rename/rename/add-dest merge still knows about conflicting # Expected: CONFLICT (rename/add/delete), two-way merged bar test_setup_rad () { - test_create_repo rad && + git init rad && ( cd rad && echo "original file" >foo && @@ -946,7 +946,7 @@ test_expect_merge_algorithm failure success 'rad-check: rename/add/delete confli # Expected: CONFLICT (rename/rename/delete/delete), two-way merged baz test_setup_rrdd () { - test_create_repo rrdd && + git init rrdd && ( cd rrdd && echo foo >foo && @@ -1022,7 +1022,7 @@ test_expect_merge_algorithm failure success 'rrdd-check: rename/rename(2to1)/del # multi-way merged contents found in two, four, six test_setup_mod6 () { - test_create_repo mod6 && + git init mod6 && ( cd mod6 && test_seq 11 19 >one && @@ -1160,7 +1160,7 @@ test_conflicts_with_adds_and_renames() { # tree test_setup_collision_conflict () { #test_expect_success "setup simple $sideL/$sideR conflict" ' - test_create_repo simple_${sideL}_${sideR} && + git init simple_${sideL}_${sideR} && ( cd simple_${sideL}_${sideR} && @@ -1308,7 +1308,7 @@ test_conflicts_with_adds_and_renames add add # So, we have four different conflicting files that all end up at path # 'three'. 
test_setup_nested_conflicts_from_rename_rename () { - test_create_repo nested_conflicts_from_rename_rename && + git init nested_conflicts_from_rename_rename && ( cd nested_conflicts_from_rename_rename && @@ -1417,7 +1417,7 @@ test_expect_success 'check nested conflicts from rename/rename(2to1)' ' # Expected: CONFLICT(rename/rename) message, three unstaged entries in the # index, and contents of orig-[AB] at path orig-[AB] test_setup_rename_rename_1_to_2_binary () { - test_create_repo rename_rename_1_to_2_binary && + git init rename_rename_1_to_2_binary && ( cd rename_rename_1_to_2_binary && diff --git a/t/t6423-merge-rename-directories.sh b/t/t6423-merge-rename-directories.sh index 99baf77cbf..a4941878fe 100755 --- a/t/t6423-merge-rename-directories.sh +++ b/t/t6423-merge-rename-directories.sh @@ -40,7 +40,7 @@ test_description="recursive merge with directory renames" # Expected: y/{b,c,d,e/f} test_setup_1a () { - test_create_repo 1a && + git init 1a && ( cd 1a && @@ -106,7 +106,7 @@ test_expect_success '1a: Simple directory rename detection' ' # Expected: y/{b,c,d,e} test_setup_1b () { - test_create_repo 1b && + git init 1b && ( cd 1b && @@ -169,7 +169,7 @@ test_expect_success '1b: Merge a directory with another' ' # Expected: y/{b,c,d} (because x/d -> z/d -> y/d) test_setup_1c () { - test_create_repo 1c && + git init 1c && ( cd 1c && @@ -232,7 +232,7 @@ test_expect_success '1c: Transitive renaming' ' # y/wham_1 & z/wham_2 should too...giving us a conflict. test_setup_1d () { - test_create_repo 1d && + git init 1d && ( cd 1d && @@ -328,7 +328,7 @@ test_expect_success '1d: Directory renames cause a rename/rename(2to1) conflict' # Expected: y/{newb,newc,d} test_setup_1e () { - test_create_repo 1e && + git init 1e && ( cd 1e && @@ -387,7 +387,7 @@ test_expect_success '1e: Renamed directory, with all files being renamed too' ' # Expected: y/{b,c}, x/{d,e,f,g} test_setup_1f () { - test_create_repo 1f && + git init 1f && ( cd 1f && @@ -476,7 +476,7 @@ test_expect_success '1f: Split a directory into two other directories' ' # Commit B: z/{b,c,d} # Expected: y/b, w/c, z/d, with warning about z/ -> (y/ vs. w/) conflict test_setup_2a () { - test_create_repo 2a && + git init 2a && ( cd 2a && @@ -538,7 +538,7 @@ test_expect_success '2a: Directory split into two on one side, with equal number # Commit B: z/{b,c}, x/d # Expected: y/b, w/c, x/d; No warning about z/ -> (y/ vs. w/) conflict test_setup_2b () { - test_create_repo 2b && + git init 2b && ( cd 2b && @@ -620,7 +620,7 @@ test_expect_success '2b: Directory split into two on one side, with equal number # Commit B: y/{b,c}, x/d # Expected: y/{b,c}, x/d test_setup_3a () { - test_create_repo 3a && + git init 3a && ( cd 3a && @@ -684,7 +684,7 @@ test_expect_success '3a: Avoid implicit rename if involved as source on other si # end up with CONFLICT:(z/d -> y/d vs. x/d vs. w/d), i.e. a # rename/rename/rename(1to3) conflict, which is just weird. test_setup_3b () { - test_create_repo 3b && + git init 3b && ( cd 3b && @@ -807,7 +807,7 @@ test_expect_success '3b: Avoid implicit rename if involved as source on current # NOTE: Even though most files from z moved to y, we don't want f to follow. test_setup_4a () { - test_create_repo 4a && + git init 4a && ( cd 4a && @@ -896,7 +896,7 @@ test_expect_success '4a: Directory split, with original directory still present' # index. 
test_setup_5a () { - test_create_repo 5a && + git init 5a && ( cd 5a && @@ -971,7 +971,7 @@ test_expect_success '5a: Merge directories, other side adds files to original an # back to git behavior without the directory rename detection. test_setup_5b () { - test_create_repo 5b && + git init 5b && ( cd 5b && @@ -1048,7 +1048,7 @@ test_expect_success '5b: Rename/delete in order to get add/add/add conflict' ' # though, because it doesn't have anything in the way. test_setup_5c () { - test_create_repo 5c && + git init 5c && ( cd 5c && @@ -1138,7 +1138,7 @@ test_expect_success '5c: Transitive rename would cause rename/rename/rename/add/ # directory rename detection for z/f -> y/f. test_setup_5d () { - test_create_repo 5d && + git init 5d && ( cd 5d && @@ -1239,7 +1239,7 @@ test_expect_success '5d: Directory/file/file conflict due to directory rename' ' # it is also involved in a rename/delete conflict. test_setup_6a () { - test_create_repo 6a && + git init 6a && ( cd 6a && @@ -1337,7 +1337,7 @@ test_expect_success '6a: Tricky rename/delete' ' # the behavior on testcases 6b2 and 8e, and introduced this 6b1 testcase. test_setup_6b1 () { - test_create_repo 6b1 && + git init 6b1 && ( cd 6b1 && @@ -1415,7 +1415,7 @@ test_expect_merge_algorithm failure success '6b1: Same renames done on both side # the z/ -> y/ rename. test_setup_6b2 () { - test_create_repo 6b2 && + git init 6b2 && ( cd 6b2 && @@ -1479,7 +1479,7 @@ test_expect_merge_algorithm failure success '6b2: Same rename done on both sides # "accidentally detect a rename" and give us y/{b,c,d}. test_setup_6c () { - test_create_repo 6c && + git init 6c && ( cd 6c && @@ -1542,7 +1542,7 @@ test_expect_success '6c: Rename only done on same side' ' # doesn't "accidentally detect a rename" and give us y/{b,c,d}. test_setup_6d () { - test_create_repo 6d && + git init 6d && ( cd 6d && @@ -1605,7 +1605,7 @@ test_expect_success '6d: We do not always want transitive renaming' ' # add/add conflict on y/d_1 vs y/d_2. test_setup_6e () { - test_create_repo 6e && + git init 6e && ( cd 6e && @@ -1700,7 +1700,7 @@ test_expect_success '6e: Add/add from one side' ' # NOTE: There's a rename of z/ here, y/ has more renames, so z/d -> y/d. test_setup_7a () { - test_create_repo 7a && + git init 7a && ( cd 7a && @@ -1772,7 +1772,7 @@ test_expect_success '7a: rename-dir vs. rename-dir (NOT split evenly) PLUS add-o # Expected: y/{b,c}, CONFLICT(rename/rename(2to1): x/d_1, w/d_2 -> y_d) test_setup_7b () { - test_create_repo 7b && + git init 7b && ( cd 7b && @@ -1861,7 +1861,7 @@ test_expect_success '7b: rename/rename(2to1), but only due to transitive rename' # nor CONFLiCT x/d -> w/d vs. y/d vs. z/d) test_setup_7c () { - test_create_repo 7c && + git init 7c && ( cd 7c && @@ -1926,7 +1926,7 @@ test_expect_success '7c: rename/rename(1to...2or3); transitive rename may add co # NOTE: z->y so NOT CONFLICT(delete x/d vs rename to z/d) test_setup_7d () { - test_create_repo 7d && + git init 7d && ( cd 7d && @@ -2027,7 +2027,7 @@ test_expect_success '7d: transitive rename involved in rename/delete; how is it # how it's resolved. test_setup_7e () { - test_create_repo 7e && + git init 7e && ( cd 7e && @@ -2137,7 +2137,7 @@ test_expect_success '7e: transitive rename in rename/delete AND dirs in the way' # we potentially could. test_setup_8a () { - test_create_repo 8a && + git init 8a && ( cd 8a && @@ -2216,7 +2216,7 @@ test_expect_success '8a: Dual-directory rename, one into the others way' ' # e_1 and e_2. 
test_setup_8b () { - test_create_repo 8b && + git init 8b && ( cd 8b && @@ -2290,7 +2290,7 @@ test_expect_success '8b: Dual-directory rename, one into the others way, with co # notes in 8d. test_setup_8c () { - test_create_repo 8c && + git init 8c && ( cd 8c && @@ -2370,7 +2370,7 @@ test_expect_success '8c: modify/delete or rename+modify/delete' ' # differently. test_setup_8d () { - test_create_repo 8d && + git init 8d && ( cd 8d && @@ -2453,7 +2453,7 @@ test_expect_success '8d: rename/delete...or not?' ' # the behavior, and predict it without computing as many details. test_setup_8e () { - test_create_repo 8e && + git init 8e && ( cd 8e && @@ -2537,7 +2537,7 @@ test_expect_success '8e: Both sides rename, one side adds to original directory' # of that could take the new file in commit B at z/i to x/w/i or x/i. test_setup_9a () { - test_create_repo 9a && + git init 9a && ( cd 9a && @@ -2609,7 +2609,7 @@ test_expect_success '9a: Inner renamed directory within outer renamed directory' # Expected: y/{b,c,d_merged} test_setup_9b () { - test_create_repo 9b && + git init 9b && ( cd 9b && @@ -2697,7 +2697,7 @@ test_expect_success '9b: Transitive rename with content merge' ' # history for any implicit directory renames. test_setup_9c () { - test_create_repo 9c && + git init 9c && ( cd 9c && @@ -2786,7 +2786,7 @@ test_expect_success '9c: Doubly transitive rename?' ' # testcases and simplifies things for the user. test_setup_9d () { - test_create_repo 9d && + git init 9d && ( cd 9d && @@ -2861,7 +2861,7 @@ test_expect_success '9d: N-way transitive rename?' ' # dir1/yo, dir2/yo, dir3/yo, dirN/yo test_setup_9e () { - test_create_repo 9e && + git init 9e && ( cd 9e && @@ -2954,7 +2954,7 @@ test_expect_success '9e: N-to-1 whammo' ' # Expected: priority/{a,b}/$more_files, priority/c test_setup_9f () { - test_create_repo 9f && + git init 9f && ( cd 9f && @@ -3027,7 +3027,7 @@ test_expect_success '9f: Renamed directory that only contained immediate subdirs # viewpoint... test_setup_9g () { - test_create_repo 9g && + git init 9g && ( cd 9g && @@ -3096,7 +3096,7 @@ test_expect_failure '9g: Renamed directory that only contained immediate subdirs # NOTE: If we applied the z/ -> y/ rename to z/d, then we'd end up with # a rename/rename(1to2) conflict (z/d -> y/d vs. 
x/d) test_setup_9h () { - test_create_repo 9h && + git init 9h && ( cd 9h && @@ -3177,7 +3177,7 @@ test_expect_success '9h: Avoid dir rename on merely modified path' ' # ERROR_MSG(untracked working tree files would be overwritten by merge) test_setup_10a () { - test_create_repo 10a && + git init 10a && ( cd 10a && @@ -3243,7 +3243,7 @@ test_expect_success '10a: Overwrite untracked with normal rename/delete' ' # ERROR_MSG(refusing to lose untracked file at 'y/d') test_setup_10b () { - test_create_repo 10b && + git init 10b && ( cd 10b && @@ -3334,7 +3334,7 @@ test_expect_success '10b: Overwrite untracked with dir rename + delete' ' # ERROR_MSG(Refusing to lose untracked file at y/c) test_setup_10c () { - test_create_repo 10c_$1 && + git init 10c_$1 && ( cd 10c_$1 && @@ -3472,7 +3472,7 @@ test_expect_success '10c2: Overwrite untracked with dir rename/rename(1to2), oth # ERROR_MSG(Refusing to lose untracked file at y/wham) test_setup_10d () { - test_create_repo 10d && + git init 10d && ( cd 10d && @@ -3568,7 +3568,7 @@ test_expect_success '10d: Delete untracked with dir rename/rename(2to1)' ' # Expected: y/{a,b,c} + untracked z/c test_setup_10e () { - test_create_repo 10e && + git init 10e && ( cd 10e && @@ -3650,7 +3650,7 @@ test_expect_merge_algorithm failure success '10e: Does git complain about untrac # z/c with uncommitted mods on top of A:z/c_v1 test_setup_11a () { - test_create_repo 11a && + git init 11a && ( cd 11a && @@ -3728,7 +3728,7 @@ test_expect_success '11a: Avoid losing dirty contents with simple rename' ' test_setup_11b () { - test_create_repo 11b && + git init 11b && ( cd 11b && @@ -3810,7 +3810,7 @@ test_expect_success '11b: Avoid losing dirty file involved in directory rename' # y/c left untouched (still has uncommitted mods) test_setup_11c () { - test_create_repo 11c && + git init 11c && ( cd 11c && @@ -3883,7 +3883,7 @@ test_expect_success '11c: Avoid losing not-uptodate with rename + D/F conflict' # y/{a,c~HEAD,c/d}, x/b, now-untracked z/c_v1 with uncommitted mods test_setup_11d () { - test_create_repo 11d && + git init 11d && ( cd 11d && @@ -3968,7 +3968,7 @@ test_expect_success '11d: Avoid losing not-uptodate with rename + D/F conflict' # y/c has dirty file from before merge test_setup_11e () { - test_create_repo 11e && + git init 11e && ( cd 11e && @@ -4060,7 +4060,7 @@ test_expect_success '11e: Avoid deleting not-uptodate with dir rename/rename(1to # ERROR_MSG(Refusing to lose dirty file at y/wham) test_setup_11f () { - test_create_repo 11f && + git init 11f && ( cd 11f && @@ -4155,7 +4155,7 @@ test_expect_success '11f: Avoid deleting not-uptodate with dir rename/rename(2to # Expected: node1/{leaf1,leaf2,leaf5,node2/{leaf3,leaf4,leaf6}} test_setup_12a () { - test_create_repo 12a && + git init 12a && ( cd 12a && @@ -4238,7 +4238,7 @@ test_expect_success '12a: Moving one directory hierarchy into another' ' # node2/node1/{leaf1, leaf2} test_setup_12b1 () { - test_create_repo 12b1 && + git init 12b1 && ( cd 12b1 && @@ -4327,7 +4327,7 @@ test_expect_merge_algorithm failure success '12b1: Moving two directory hierarch # even simple rules give weird results when given weird inputs. test_setup_12b2 () { - test_create_repo 12b2 && + git init 12b2 && ( cd 12b2 && @@ -4402,7 +4402,7 @@ test_expect_success '12b2: Moving two directory hierarchies into each other' ' # each side of the merge. 
test_setup_12c1 () { - test_create_repo 12c1 && + git init 12c1 && ( cd 12c1 && @@ -4492,7 +4492,7 @@ test_expect_merge_algorithm failure success '12c1: Moving one directory hierarch # on each side of the merge. test_setup_12c2 () { - test_create_repo 12c2 && + git init 12c2 && ( cd 12c2 && @@ -4584,7 +4584,7 @@ test_expect_success '12c2: Moving one directory hierarchy into another w/ conten # Expected: subdir/foo, bar test_setup_12d () { - test_create_repo 12d && + git init 12d && ( cd 12d && @@ -4642,7 +4642,7 @@ test_expect_success '12d: Rename/merge subdir into the root, variant 1' ' # Expected: foo, bar test_setup_12e () { - test_create_repo 12e && + git init 12e && ( cd 12e && @@ -4743,7 +4743,7 @@ test_expect_success '12e: Rename/merge subdir into the root, variant 2' ' # pick and re-applying them in the subsequent one. test_setup_12f () { - test_create_repo 12f && + git init 12f && ( cd 12f && @@ -4902,7 +4902,7 @@ test_expect_merge_algorithm failure success '12f: Trivial directory resolve, cac # Expected: newfile_{merged}, newdir/{a_B,b_B,c_A} test_setup_12g () { - test_create_repo 12g && + git init 12g && ( cd 12g && @@ -4973,7 +4973,7 @@ test_expect_success '12g: Testcase with two kinds of "relevant" renames' ' # Expected: newdir/{alpha_2, b} test_setup_12h () { - test_create_repo 12h && + git init 12h && ( cd 12h && @@ -5032,7 +5032,7 @@ test_expect_failure '12h: renaming a file within a renamed directory' ' # source/bar vs. source/subdir/bar test_setup_12i () { - test_create_repo 12i && + git init 12i && ( cd 12i && @@ -5090,7 +5090,7 @@ test_expect_success '12i: Directory rename causes rename-to-self' ' # Expected: {foo, bar, baz_2}, with conflicts on bar vs. subdir/bar test_setup_12j () { - test_create_repo 12j && + git init 12j && ( cd 12j && @@ -5148,7 +5148,7 @@ test_expect_success '12j: Directory rename to root causes rename-to-self' ' # Expected: dirA/{foo, bar, baz_2}, with conflicts on dirA/bar vs. dirB/bar test_setup_12k () { - test_create_repo 12k && + git init 12k && ( cd 12k && @@ -5218,7 +5218,7 @@ test_expect_success '12k: Directory rename with sibling causes rename-to-self' ' # is needed for there to be a sub1/ -> sub3/ rename. test_setup_12l () { - test_create_repo 12l_$1 && + git init 12l_$1 && ( cd 12l_$1 && @@ -5322,7 +5322,7 @@ test_expect_merge_algorithm failure success '12l (A into B): Rename into each ot # Expected: y/{b,c,d,e/f}, with notices/conflicts for both y/d and y/e/f test_setup_13a () { - test_create_repo 13a_$1 && + git init 13a_$1 && ( cd 13a_$1 && @@ -5409,7 +5409,7 @@ test_expect_success '13a(info): messages for newly added files' ' # one about content, and one about file location test_setup_13b () { - test_create_repo 13b_$1 && + git init 13b_$1 && ( cd 13b_$1 && @@ -5496,7 +5496,7 @@ test_expect_success '13b(info): messages for transitive rename with conflicted c # shown in testcase 13d. test_setup_13c () { - test_create_repo 13c_$1 && + git init 13c_$1 && ( cd 13c_$1 && @@ -5584,7 +5584,7 @@ test_expect_success '13c(info): messages for rename/rename(1to1) via transitive # No conflict in where a/y ends up, so put it in d/y. test_setup_13d () { - test_create_repo 13d_$1 && + git init 13d_$1 && ( cd 13d_$1 && @@ -5710,7 +5710,7 @@ test_expect_success '13d(info): messages for rename/rename(1to1) via dual transi # least avoids hitting a BUG(). 
# test_setup_13e () { - test_create_repo 13e && + git init 13e && ( cd 13e && diff --git a/t/t6426-merge-skip-unneeded-updates.sh b/t/t6426-merge-skip-unneeded-updates.sh index 7b5f1c1dcd..2bb8e7f09b 100755 --- a/t/t6426-merge-skip-unneeded-updates.sh +++ b/t/t6426-merge-skip-unneeded-updates.sh @@ -38,7 +38,7 @@ test_description="merge cases" # Expected: b_2 test_setup_1a () { - test_create_repo 1a_$1 && + git init 1a_$1 && ( cd 1a_$1 && @@ -136,7 +136,7 @@ test_expect_success '1a-R: Modify(A)/Modify(B), change on B subset of A' ' # Expected: c_2 test_setup_2a () { - test_create_repo 2a_$1 && + git init 2a_$1 && ( cd 2a_$1 && @@ -229,7 +229,7 @@ test_expect_success '2a-R: Modify/rename, merge into rename side' ' # Expected: c_2 test_setup_2b () { - test_create_repo 2b_$1 && + git init 2b_$1 && ( cd 2b_$1 && @@ -336,7 +336,7 @@ test_expect_success '2b-R: Rename+Mod(A)/Mod(B), B mods subset of A' ' # not make that particular mistake. test_setup_2c () { - test_create_repo 2c && + git init 2c && ( cd 2c && @@ -437,7 +437,7 @@ test_expect_success '2c: Modify b & add c VS rename b->c' ' # Expected: bar/{bq_2, whatever} test_setup_3a () { - test_create_repo 3a_$1 && + git init 3a_$1 && ( cd 3a_$1 && @@ -537,7 +537,7 @@ test_expect_success '3a-R: bq_1->foo/bq_2 on A, foo/->bar/ on B' ' # Expected: bar/{bq_2, whatever} test_setup_3b () { - test_create_repo 3b_$1 && + git init 3b_$1 && ( cd 3b_$1 && @@ -642,7 +642,7 @@ test_expect_success '3b-R: bq_1->foo/bq_2 on A, foo/->bar/ on B' ' # Expected: b_2 for merge, b_4 in working copy test_setup_4a () { - test_create_repo 4a && + git init 4a && ( cd 4a && @@ -714,7 +714,7 @@ test_expect_merge_algorithm failure success '4a: Change on A, change on B subset # Expected: c_2 test_setup_4b () { - test_create_repo 4b && + git init 4b && ( cd 4b && diff --git a/t/t6427-diff3-conflict-markers.sh b/t/t6427-diff3-conflict-markers.sh index a9ee4cb207..dd5fe6a402 100755 --- a/t/t6427-diff3-conflict-markers.sh +++ b/t/t6427-diff3-conflict-markers.sh @@ -19,7 +19,7 @@ export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME # test_expect_success 'setup no merge base' ' - test_create_repo no_merge_base && + git init no_merge_base && ( cd no_merge_base && @@ -55,7 +55,7 @@ test_expect_success 'check no merge base' ' # test_expect_success 'setup unique merge base' ' - test_create_repo unique_merge_base && + git init unique_merge_base && ( cd unique_merge_base && @@ -116,7 +116,7 @@ test_expect_success 'check unique merge base' ' # test_expect_success 'setup multiple merge bases' ' - test_create_repo multiple_merge_bases && + git init multiple_merge_bases && ( cd multiple_merge_bases && @@ -190,7 +190,7 @@ test_expect_success 'check multiple merge bases' ' ' test_expect_success 'rebase --merge describes parent of commit being picked' ' - test_create_repo rebase && + git init rebase && ( cd rebase && test_commit base file && @@ -212,7 +212,7 @@ test_expect_success 'rebase --apply describes fake ancestor base' ' ' test_setup_zdiff3 () { - test_create_repo zdiff3 && + git init zdiff3 && ( cd zdiff3 && diff --git a/t/t6428-merge-conflicts-sparse.sh b/t/t6428-merge-conflicts-sparse.sh index 064be1b629..9919c3fa7c 100755 --- a/t/t6428-merge-conflicts-sparse.sh +++ b/t/t6428-merge-conflicts-sparse.sh @@ -29,7 +29,7 @@ test_description="merge cases" # Testcase basic, conflicting changes in 'numerals' test_setup_numerals () { - test_create_repo numerals_$1 && + git init numerals_$1 && ( cd numerals_$1 && diff --git a/t/t6429-merge-sequence-rename-caching.sh 
b/t/t6429-merge-sequence-rename-caching.sh index e1ce919916..d02fa16614 100755 --- a/t/t6429-merge-sequence-rename-caching.sh +++ b/t/t6429-merge-sequence-rename-caching.sh @@ -35,7 +35,7 @@ test_description="remember regular & dir renames in sequence of merges" # preventing us from finding new renames. # test_expect_success 'caching renames does not preclude finding new ones' ' - test_create_repo caching-renames-and-new-renames && + git init caching-renames-and-new-renames && ( cd caching-renames-and-new-renames && @@ -106,7 +106,7 @@ test_expect_success 'caching renames does not preclude finding new ones' ' # should be able to only run rename detection on the upstream side one # time.) test_expect_success 'cherry-pick both a commit and its immediate revert' ' - test_create_repo pick-commit-and-its-immediate-revert && + git init pick-commit-and-its-immediate-revert && ( cd pick-commit-and-its-immediate-revert && @@ -162,7 +162,7 @@ test_expect_success 'cherry-pick both a commit and its immediate revert' ' # could cause a spurious rename/add conflict. # test_expect_success 'rename same file identically, then reintroduce it' ' - test_create_repo rename-rename-1to1-then-add-old-filename && + git init rename-rename-1to1-then-add-old-filename && ( cd rename-rename-1to1-then-add-old-filename && @@ -229,7 +229,7 @@ test_expect_success 'rename same file identically, then reintroduce it' ' # cached, the directory rename could put newfile in the wrong directory. # test_expect_success 'rename same file identically, then add file to old dir' ' - test_create_repo rename-rename-1to1-then-add-file-to-old-dir && + git init rename-rename-1to1-then-add-file-to-old-dir && ( cd rename-rename-1to1-then-add-file-to-old-dir && @@ -311,7 +311,7 @@ test_expect_success 'rename same file identically, then add file to old dir' ' # should avoid the need to re-detect upstream renames.) 
# test_expect_success 'cached dir rename does not prevent noticing later conflict' ' - test_create_repo dir-rename-cache-not-occluding-later-conflict && + git init dir-rename-cache-not-occluding-later-conflict && ( cd dir-rename-cache-not-occluding-later-conflict && @@ -365,7 +365,7 @@ test_expect_success 'cached dir rename does not prevent noticing later conflict' # Helper for the next two tests test_setup_upstream_rename () { - test_create_repo $1 && + git init $1 && ( cd $1 && @@ -537,7 +537,7 @@ test_expect_success 'dir rename unneeded, then rename existing file into old dir # Helper for the next two tests test_setup_topic_rename () { - test_create_repo $1 && + git init $1 && ( cd $1 && @@ -725,7 +725,7 @@ test_expect_success 'avoid assuming we detected renames' ' mkdir unrelated && for i in $(test_seq 1 10) do - >unrelated/$i + >unrelated/$i || exit 1 done && test_seq 2 10 >numbers && test_seq 12 20 >values && diff --git a/t/t6437-submodule-merge.sh b/t/t6437-submodule-merge.sh index c253bf759a..c9a86f2e94 100755 --- a/t/t6437-submodule-merge.sh +++ b/t/t6437-submodule-merge.sh @@ -103,8 +103,25 @@ test_expect_success 'setup for merge search' ' echo "file-c" > file-c && git add file-c && git commit -m "sub-c") && - git commit -a -m "c" && + git commit -a -m "c") +' +test_expect_success 'merging should conflict for non fast-forward' ' + test_when_finished "git -C merge-search reset --hard" && + (cd merge-search && + git checkout -b test-nonforward-a b && + if test "$GIT_TEST_MERGE_ALGORITHM" = ort + then + test_must_fail git merge c >actual && + sub_expect="go to submodule (sub), and either merge commit $(git -C sub rev-parse --short sub-c)" && + grep "$sub_expect" actual + else + test_must_fail git merge c 2> actual + fi) +' + +test_expect_success 'finish setup for merge-search' ' + (cd merge-search && git checkout -b d a && (cd sub && git checkout -b sub-d sub-b && @@ -129,14 +146,16 @@ test_expect_success 'merge with one side as a fast-forward of the other' ' test_cmp expect actual) ' -test_expect_success 'merging should conflict for non fast-forward' ' +test_expect_success 'merging should conflict for non fast-forward (resolution exists)' ' (cd merge-search && - git checkout -b test-nonforward b && + git checkout -b test-nonforward-b b && (cd sub && git rev-parse --short sub-d > ../expect) && if test "$GIT_TEST_MERGE_ALGORITHM" = ort then - test_must_fail git merge c >actual + test_must_fail git merge c >actual && + sub_expect="go to submodule (sub), and either merge commit $(git -C sub rev-parse --short sub-c)" && + grep "$sub_expect" actual else test_must_fail git merge c 2> actual fi && @@ -161,7 +180,9 @@ test_expect_success 'merging should fail for ambiguous common parent' ' ) && if test "$GIT_TEST_MERGE_ALGORITHM" = ort then - test_must_fail git merge c >actual + test_must_fail git merge c >actual && + sub_expect="go to submodule (sub), and either merge commit $(git -C sub rev-parse --short sub-c)" && + grep "$sub_expect" actual else test_must_fail git merge c 2> actual fi && @@ -205,7 +226,12 @@ test_expect_success 'merging should fail for changes that are backwards' ' git commit -a -m "f" && git checkout -b test-backward e && - test_must_fail git merge f) + test_must_fail git merge f >actual && + if test "$GIT_TEST_MERGE_ALGORITHM" = ort + then + sub_expect="go to submodule (sub), and either merge commit $(git -C sub rev-parse --short sub-d)" && + grep "$sub_expect" actual + fi) ' @@ -310,7 +336,7 @@ test_expect_success 'recursive merge with submodule' ' # Expected: path/ 
is submodule and file contents for B's path are somewhere test_expect_success 'setup file/submodule conflict' ' - test_create_repo file-submodule && + git init file-submodule && ( cd file-submodule && @@ -325,7 +351,7 @@ test_expect_success 'setup file/submodule conflict' ' git commit -m B && git checkout A && - test_create_repo path && + git init path && test_commit -C path world && git submodule add ./path && git commit -m A @@ -385,7 +411,7 @@ test_expect_success 'file/submodule conflict; merge --abort works afterward' ' # under the submodule to be treated as untracked or in the way. test_expect_success 'setup directory/submodule conflict' ' - test_create_repo directory-submodule && + git init directory-submodule && ( cd directory-submodule && @@ -408,7 +434,7 @@ test_expect_success 'setup directory/submodule conflict' ' git commit -m B2 && git checkout A && - test_create_repo path && + git init path && test_commit -C path hello world && git submodule add ./path && git commit -m A @@ -476,4 +502,44 @@ test_expect_failure 'directory/submodule conflict; merge --abort works afterward ) ' +# Setup: +# - Submodule has 2 commits: a and b +# - Superproject branch 'a' adds and commits submodule pointing to 'commit a' +# - Superproject branch 'b' adds and commits submodule pointing to 'commit b' +# If these two branches are now merged, there is no merge base +test_expect_success 'setup for null merge base' ' + mkdir no-merge-base && + (cd no-merge-base && + git init && + mkdir sub && + (cd sub && + git init && + echo "file-a" > file-a && + git add file-a && + git commit -m "commit a") && + git commit --allow-empty -m init && + git branch init && + git checkout -b a init && + git add sub && + git commit -m "a" && + git switch main && + (cd sub && + echo "file-b" > file-b && + git add file-b && + git commit -m "commit b")) +' + +test_expect_success 'merging should fail with no merge base' ' + (cd no-merge-base && + git checkout -b b init && + git add sub && + git commit -m "b" && + test_must_fail git merge a >actual && + if test "$GIT_TEST_MERGE_ALGORITHM" = ort + then + sub_expect="go to submodule (sub), and either merge commit $(git -C sub rev-parse --short HEAD^1)" && + grep "$sub_expect" actual + fi) +' + test_done diff --git a/t/t7400-submodule-basic.sh b/t/t7400-submodule-basic.sh index e7cec2e457..b50db3f103 100755 --- a/t/t7400-submodule-basic.sh +++ b/t/t7400-submodule-basic.sh @@ -14,6 +14,32 @@ export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME . 
./test-lib.sh +test_expect_success 'submodule usage: -h' ' + git submodule -h >out 2>err && + grep "^usage: git submodule" out && + test_must_be_empty err +' + +test_expect_success 'submodule usage: --recursive' ' + test_expect_code 1 git submodule --recursive >out 2>err && + grep "^usage: git submodule" err && + test_must_be_empty out +' + +test_expect_success 'submodule usage: status --' ' + test_expect_code 1 git submodule -- && + test_expect_code 1 git submodule --end-of-options +' + +for opt in '--quiet' '--cached' +do + test_expect_success "submodule usage: status $opt" ' + git submodule $opt && + git submodule status $opt && + git submodule $opt status + ' +done + test_expect_success 'submodule deinit works on empty repository' ' git submodule deinit --all ' @@ -152,6 +178,11 @@ test_expect_success 'submodule add' ' test_must_be_empty untracked ' +test_expect_success !WINDOWS 'submodule add (absolute path)' ' + test_when_finished "git reset --hard" && + git submodule add "$submodurl" "$submodurl/add-abs" +' + test_expect_success 'setup parent and one repository' ' test_create_repo parent && test_commit -C parent one @@ -1224,31 +1255,6 @@ test_expect_success 'submodule add clone shallow submodule' ' ) ' -test_expect_success 'submodule helper list is not confused by common prefixes' ' - mkdir -p dir1/b && - ( - cd dir1/b && - git init && - echo hi >testfile2 && - git add . && - git commit -m "test1" - ) && - mkdir -p dir2/b && - ( - cd dir2/b && - git init && - echo hello >testfile1 && - git add . && - git commit -m "test2" - ) && - git submodule add /dir1/b dir1/b && - git submodule add /dir2/b dir2/b && - git commit -m "first submodule commit" && - git submodule--helper list dir1/b | cut -f 2 >actual && - echo "dir1/b" >expect && - test_cmp expect actual -' - test_expect_success 'setup superproject with submodules' ' git init sub1 && test_commit -C sub1 test && diff --git a/t/t7402-submodule-rebase.sh b/t/t7402-submodule-rebase.sh index 8e32f19007..ebeca12a71 100755 --- a/t/t7402-submodule-rebase.sh +++ b/t/t7402-submodule-rebase.sh @@ -104,7 +104,7 @@ test_expect_success 'rebasing submodule that should conflict' ' test_tick && git commit -m fourth && - test_must_fail git rebase --onto HEAD^^ HEAD^ HEAD^0 && + test_must_fail git rebase --onto HEAD^^ HEAD^ HEAD^0 >actual_output && git ls-files -s submodule >actual && ( cd submodule && @@ -112,7 +112,12 @@ test_expect_success 'rebasing submodule that should conflict' ' echo "160000 $(git rev-parse HEAD^^) 2 submodule" && echo "160000 $(git rev-parse HEAD) 3 submodule" ) >expect && - test_cmp expect actual + test_cmp expect actual && + if test "$GIT_TEST_MERGE_ALGORITHM" = ort + then + sub_expect="go to submodule (submodule), and either merge commit $(git -C submodule rev-parse --short HEAD^0)" && + grep "$sub_expect" actual_output + fi ' test_done diff --git a/t/t7406-submodule-update.sh b/t/t7406-submodule-update.sh index 6cc07460dd..c5f5dbe55e 100755 --- a/t/t7406-submodule-update.sh +++ b/t/t7406-submodule-update.sh @@ -769,7 +769,7 @@ test_expect_success 'submodule update continues after recursive checkout error' echo "" > file ) ) && - test_must_fail git submodule update --recursive && + test_expect_code 1 git submodule update --recursive && (cd submodule2 && git rev-parse --verify HEAD >../actual ) && diff --git a/t/t7413-submodule-is-active.sh b/t/t7413-submodule-is-active.sh index c8e7e98331..ede6f02dbd 100755 --- a/t/t7413-submodule-is-active.sh +++ b/t/t7413-submodule-is-active.sh @@ -1,9 +1,12 @@ #!/bin/sh 
-test_description='Test submodule--helper is-active +test_description='Test with test-tool submodule is-active -This test verifies that `git submodue--helper is-active` correctly identifies +This test verifies that `test-tool submodule is-active` correctly identifies submodules which are "active" and interesting to the user. + +This is a unit test of the submodule.c is_submodule_active() function, +which is also indirectly tested elsewhere. ' . ./test-lib.sh @@ -25,13 +28,13 @@ test_expect_success 'setup' ' ' test_expect_success 'is-active works with urls' ' - git -C super submodule--helper is-active sub1 && - git -C super submodule--helper is-active sub2 && + test-tool -C super submodule is-active sub1 && + test-tool -C super submodule is-active sub2 && git -C super config --unset submodule.sub1.URL && - test_must_fail git -C super submodule--helper is-active sub1 && + test_must_fail test-tool -C super submodule is-active sub1 && git -C super config submodule.sub1.URL ../sub && - git -C super submodule--helper is-active sub1 + test-tool -C super submodule is-active sub1 ' test_expect_success 'is-active works with submodule.<name>.active config' ' @@ -39,11 +42,11 @@ test_expect_success 'is-active works with submodule.<name>.active config' ' test_when_finished "git -C super config submodule.sub1.URL ../sub" && git -C super config --bool submodule.sub1.active "false" && - test_must_fail git -C super submodule--helper is-active sub1 && + test_must_fail test-tool -C super submodule is-active sub1 && git -C super config --bool submodule.sub1.active "true" && git -C super config --unset submodule.sub1.URL && - git -C super submodule--helper is-active sub1 + test-tool -C super submodule is-active sub1 ' test_expect_success 'is-active works with basic submodule.active config' ' @@ -53,17 +56,17 @@ test_expect_success 'is-active works with basic submodule.active config' ' git -C super config --add submodule.active "." && git -C super config --unset submodule.sub1.URL && - git -C super submodule--helper is-active sub1 && - git -C super submodule--helper is-active sub2 + test-tool -C super submodule is-active sub1 && + test-tool -C super submodule is-active sub2 ' test_expect_success 'is-active correctly works with paths that are not submodules' ' test_when_finished "git -C super config --unset-all submodule.active" && - test_must_fail git -C super submodule--helper is-active not-a-submodule && + test_must_fail test-tool -C super submodule is-active not-a-submodule && git -C super config --add submodule.active "." && - test_must_fail git -C super submodule--helper is-active not-a-submodule + test_must_fail test-tool -C super submodule is-active not-a-submodule ' test_expect_success 'is-active works with exclusions in submodule.active config' ' @@ -72,8 +75,8 @@ test_expect_success 'is-active works with exclusions in submodule.active config' git -C super config --add submodule.active "." 
&& git -C super config --add submodule.active ":(exclude)sub1" && - test_must_fail git -C super submodule--helper is-active sub1 && - git -C super submodule--helper is-active sub2 + test_must_fail test-tool -C super submodule is-active sub1 && + test-tool -C super submodule is-active sub2 ' test_expect_success 'is-active with submodule.active and submodule.<name>.active' ' @@ -85,8 +88,8 @@ test_expect_success 'is-active with submodule.active and submodule.<name>.active git -C super config --bool submodule.sub1.active "false" && git -C super config --bool submodule.sub2.active "true" && - test_must_fail git -C super submodule--helper is-active sub1 && - git -C super submodule--helper is-active sub2 + test_must_fail test-tool -C super submodule is-active sub1 && + test-tool -C super submodule is-active sub2 ' test_expect_success 'is-active, submodule.active and submodule add' ' diff --git a/t/t7450-bad-git-dotfiles.sh b/t/t7450-bad-git-dotfiles.sh index 41706c1c9f..2c24f120da 100755 --- a/t/t7450-bad-git-dotfiles.sh +++ b/t/t7450-bad-git-dotfiles.sh @@ -21,7 +21,7 @@ test_expect_success 'check names' ' valid/with/paths EOF - git submodule--helper check-name >actual <<-\EOF && + test-tool submodule check-name >actual <<-\EOF && valid valid/with/paths diff --git a/t/t7600-merge.sh b/t/t7600-merge.sh index f0f6fda150..7c3f6ed994 100755 --- a/t/t7600-merge.sh +++ b/t/t7600-merge.sh @@ -255,6 +255,15 @@ test_expect_success 'merge --squash c3 with c7' ' test_cmp expect actual ' +test_expect_success 'merge --squash --autostash conflict does not attempt to apply autostash' ' + git reset --hard c3 && + >unrelated && + git add unrelated && + test_must_fail git merge --squash c7 --autostash >out 2>err && + ! grep "Applying autostash resulted in conflicts." err && + grep "When finished, apply stashed changes with \`git stash pop\`" out +' + test_expect_success 'merge c3 with c7 with commit.cleanup = scissors' ' git config commit.cleanup scissors && git reset --hard c3 && diff --git a/t/t7900-maintenance.sh b/t/t7900-maintenance.sh index 74aa638475..2724a44fe3 100755 --- a/t/t7900-maintenance.sh +++ b/t/t7900-maintenance.sh @@ -32,11 +32,13 @@ test_systemd_analyze_verify () { } test_expect_success 'help text' ' - test_expect_code 129 git maintenance -h 2>err && - test_i18ngrep "usage: git maintenance <subcommand>" err && - test_expect_code 128 git maintenance barf 2>err && - test_i18ngrep "invalid subcommand: barf" err && + test_expect_code 129 git maintenance -h >actual && + test_i18ngrep "usage: git maintenance <subcommand>" actual && + test_expect_code 129 git maintenance barf 2>err && + test_i18ngrep "unknown subcommand: \`barf'\''" err && + test_i18ngrep "usage: git maintenance" err && test_expect_code 129 git maintenance 2>err && + test_i18ngrep "error: need a subcommand" err && test_i18ngrep "usage: git maintenance" err ' @@ -162,7 +164,6 @@ test_expect_success 'prefetch multiple remotes' ' test_cmp_rev refs/remotes/remote1/one refs/prefetch/remotes/remote1/one && test_cmp_rev refs/remotes/remote2/two refs/prefetch/remotes/remote2/two && - test_cmp_config refs/prefetch/ log.excludedecoration && git log --oneline --decorate --all >log && ! 
grep "prefetch" log && @@ -173,26 +174,6 @@ test_expect_success 'prefetch multiple remotes' ' test_subcommand git fetch remote2 $fetchargs <skip-remote1.txt ' -test_expect_success 'prefetch and existing log.excludeDecoration values' ' - git config --unset-all log.excludeDecoration && - git config log.excludeDecoration refs/remotes/remote1/ && - git maintenance run --task=prefetch && - - git config --get-all log.excludeDecoration >out && - grep refs/remotes/remote1/ out && - grep refs/prefetch/ out && - - git log --oneline --decorate --all >log && - ! grep "prefetch" log && - ! grep "remote1" log && - grep "remote2" log && - - # a second run does not change the config - git maintenance run --task=prefetch && - git log --oneline --decorate --all >log2 && - test_cmp log log2 -' - test_expect_success 'loose-objects task' ' # Repack everything so we know the state of the object dir git repack -adk && diff --git a/t/t9903-bash-prompt.sh b/t/t9903-bash-prompt.sh index 6a30f5719c..d459fae655 100755 --- a/t/t9903-bash-prompt.sh +++ b/t/t9903-bash-prompt.sh @@ -759,4 +759,20 @@ test_expect_success 'prompt - hide if pwd ignored - inside gitdir' ' test_cmp expected "$actual" ' +test_expect_success 'prompt - conflict indicator' ' + printf " (main|CONFLICT)" >expected && + echo "stash" >file && + git stash && + test_when_finished "git stash drop" && + echo "commit" >file && + git commit -m "commit" file && + test_when_finished "git reset --hard HEAD~" && + test_must_fail git stash apply && + ( + GIT_PS1_SHOWCONFLICTSTATE="yes" && + __git_ps1 >"$actual" + ) && + test_cmp expected "$actual" +' + test_done diff --git a/tempfile.c b/tempfile.c index 2024c82691..e27048f970 100644 --- a/tempfile.c +++ b/tempfile.c @@ -14,16 +14,14 @@ * * The possible states of a `tempfile` object are as follows: * - * - Uninitialized. In this state the object's `on_list` field must be - * zero but the rest of its contents need not be initialized. As - * soon as the object is used in any way, it is irrevocably - * registered in `tempfile_list`, and `on_list` is set. + * - Inactive/unallocated. The only way to get a tempfile is via a creation + * function like create_tempfile(). Once allocated, the tempfile is on the + * global tempfile_list and considered active. * * - Active, file open (after `create_tempfile()` or * `reopen_tempfile()`). In this state: * * - the temporary file exists - * - `active` is set * - `filename` holds the filename of the temporary file * - `fd` holds a file descriptor open for writing to it * - `fp` holds a pointer to an open `FILE` object if and only if @@ -35,14 +33,8 @@ * `fd` is -1, and `fp` is `NULL`. * * - Inactive (after `delete_tempfile()`, `rename_tempfile()`, or a - * failed attempt to create a temporary file). In this state: - * - * - `active` is unset - * - `filename` is empty (usually, though there are transitory - * states in which this condition doesn't hold). Client code should - * *not* rely on the filename being empty in this state. - * - `fd` is -1 and `fp` is `NULL` - * - the object is removed from `tempfile_list` (but could be used again) + * failed attempt to create a temporary file). The struct is removed from + * the global tempfile_list and deallocated. * * A temporary file is owned by the process that created it. The * `tempfile` has an `owner` field that records the owner's PID. 
@@ -59,14 +51,11 @@ static VOLATILE_LIST_HEAD(tempfile_list);
 static void remove_template_directory(struct tempfile *tempfile,
 				      int in_signal_handler)
 {
-	if (tempfile->directorylen > 0 &&
-	    tempfile->directorylen < tempfile->filename.len &&
-	    tempfile->filename.buf[tempfile->directorylen] == '/') {
-		strbuf_setlen(&tempfile->filename, tempfile->directorylen);
+	if (tempfile->directory) {
 		if (in_signal_handler)
-			rmdir(tempfile->filename.buf);
+			rmdir(tempfile->directory);
 		else
-			rmdir_or_warn(tempfile->filename.buf);
+			rmdir_or_warn(tempfile->directory);
 	}
 }
 
@@ -89,8 +78,6 @@ static void remove_tempfiles(int in_signal_handler)
 		else
 			unlink_or_warn(p->filename.buf);
 		remove_template_directory(p, in_signal_handler);
-
-		p->active = 0;
 	}
 }
 
@@ -111,11 +98,10 @@ static struct tempfile *new_tempfile(void)
 	struct tempfile *tempfile = xmalloc(sizeof(*tempfile));
 	tempfile->fd = -1;
 	tempfile->fp = NULL;
-	tempfile->active = 0;
 	tempfile->owner = 0;
 	INIT_LIST_HEAD(&tempfile->list);
 	strbuf_init(&tempfile->filename, 0);
-	tempfile->directorylen = 0;
+	tempfile->directory = NULL;
 	return tempfile;
 }
 
@@ -123,9 +109,6 @@ static void activate_tempfile(struct tempfile *tempfile)
 {
 	static int initialized;
 
-	if (is_tempfile_active(tempfile))
-		BUG("activate_tempfile called for active object");
-
 	if (!initialized) {
 		sigchain_push_common(remove_tempfiles_on_signal);
 		atexit(remove_tempfiles_on_exit);
@@ -134,14 +117,13 @@ static void activate_tempfile(struct tempfile *tempfile)
 
 	volatile_list_add(&tempfile->list, &tempfile_list);
 	tempfile->owner = getpid();
-	tempfile->active = 1;
 }
 
 static void deactivate_tempfile(struct tempfile *tempfile)
 {
-	tempfile->active = 0;
-	strbuf_release(&tempfile->filename);
 	volatile_list_del(&tempfile->list);
+	strbuf_release(&tempfile->filename);
+	free(tempfile->directory);
 	free(tempfile);
 }
 
@@ -254,7 +236,7 @@ struct tempfile *mks_tempfile_dt(const char *directory_template,
 
 	tempfile = new_tempfile();
 	strbuf_swap(&tempfile->filename, &sb);
-	tempfile->directorylen = directorylen;
+	tempfile->directory = xmemdupz(tempfile->filename.buf, directorylen);
 	tempfile->fd = fd;
 	activate_tempfile(tempfile);
 	return tempfile;
diff --git a/tempfile.h b/tempfile.h
index d7804a214a..d0413af733 100644
--- a/tempfile.h
+++ b/tempfile.h
@@ -77,12 +77,11 @@
 struct tempfile {
 	volatile struct volatile_list_head list;
-	volatile sig_atomic_t active;
 	volatile int fd;
 	FILE *volatile fp;
 	volatile pid_t owner;
 	struct strbuf filename;
-	size_t directorylen;
+	char *directory;
 };
 
 /*
@@ -221,7 +220,7 @@ FILE *fdopen_tempfile(struct tempfile *tempfile, const char *mode);
 
 static inline int is_tempfile_active(struct tempfile *tempfile)
 {
-	return tempfile && tempfile->active;
+	return !!tempfile;
 }
 
 /*
diff --git a/trace2/tr2_tgt_event.c b/trace2/tr2_tgt_event.c
index c5c8cfbbaa..37a3163be1 100644
--- a/trace2/tr2_tgt_event.c
+++ b/trace2/tr2_tgt_event.c
@@ -479,9 +479,12 @@ static void fn_param_fl(const char *file, int line, const char *param,
 {
 	const char *event_name = "def_param";
 	struct json_writer jw = JSON_WRITER_INIT;
+	enum config_scope scope = current_config_scope();
+	const char *scope_name = config_scope_name(scope);
 
 	jw_object_begin(&jw, 0);
 	event_fmt_prepare(event_name, file, line, NULL, &jw);
+	jw_object_string(&jw, "scope", scope_name);
 	jw_object_string(&jw, "param", param);
 	jw_object_string(&jw, "value", value);
 	jw_end(&jw);
diff --git a/trace2/tr2_tgt_normal.c b/trace2/tr2_tgt_normal.c
index c42fbade7f..69f8033077 100644
--- a/trace2/tr2_tgt_normal.c
+++ b/trace2/tr2_tgt_normal.c
@@ -298,8 +298,11 @@ static void fn_param_fl(const char *file, int line, const char *param,
 			const char *value)
 {
 	struct strbuf buf_payload = STRBUF_INIT;
+	enum config_scope scope = current_config_scope();
+	const char *scope_name = config_scope_name(scope);
 
-	strbuf_addf(&buf_payload, "def_param %s=%s", param, value);
+	strbuf_addf(&buf_payload, "def_param scope:%s %s=%s", scope_name, param,
+		    value);
 	normal_io_write_fl(file, line, &buf_payload);
 	strbuf_release(&buf_payload);
 }
diff --git a/trace2/tr2_tgt_perf.c b/trace2/tr2_tgt_perf.c
index a1eff8bea3..8cb792488c 100644
--- a/trace2/tr2_tgt_perf.c
+++ b/trace2/tr2_tgt_perf.c
@@ -441,12 +441,17 @@ static void fn_param_fl(const char *file, int line, const char *param,
 {
 	const char *event_name = "def_param";
 	struct strbuf buf_payload = STRBUF_INIT;
+	struct strbuf scope_payload = STRBUF_INIT;
+	enum config_scope scope = current_config_scope();
+	const char *scope_name = config_scope_name(scope);
 
 	strbuf_addf(&buf_payload, "%s:%s", param, value);
+	strbuf_addf(&scope_payload, "%s:%s", "scope", scope_name);
 
-	perf_io_write_fl(file, line, event_name, NULL, NULL, NULL, NULL,
-			 &buf_payload);
+	perf_io_write_fl(file, line, event_name, NULL, NULL, NULL,
+			 scope_payload.buf, &buf_payload);
 	strbuf_release(&buf_payload);
+	strbuf_release(&scope_payload);
 }
 
 static void fn_repo_fl(const char *file, int line,
diff --git a/unpack-trees.c b/unpack-trees.c
index 90b92114be..bae812156c 100644
--- a/unpack-trees.c
+++ b/unpack-trees.c
@@ -1423,7 +1423,7 @@ static void debug_unpack_callback(int n,
  * from the tree walk at the given traverse_info.
  */
 static int is_sparse_directory_entry(struct cache_entry *ce,
-				     struct name_entry *name,
+				     const struct name_entry *name,
 				     struct traverse_info *info)
 {
 	if (!ce || !name || !S_ISSPARSEDIR(ce->ce_mode))
@@ -1562,7 +1562,7 @@ static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, str
 		}
 	}
 
-	if (!is_sparse_directory_entry(src[0], names, info) &&
+	if (!is_sparse_directory_entry(src[0], p, info) &&
 	    !is_new_sparse_dir &&
 	    traverse_trees_recursive(n, dirmask, mask & ~dirmask,
				     names, info) < 0) {
diff --git a/upload-pack.c b/upload-pack.c
index 3bdbd142b7..abf2c11cfe 100644
--- a/upload-pack.c
+++ b/upload-pack.c
@@ -1410,18 +1410,14 @@ static int parse_want(struct packet_writer *writer, const char *line,
 	const char *arg;
 
 	if (skip_prefix(line, "want ", &arg)) {
 		struct object_id oid;
-		struct commit *commit;
 		struct object *o;
 		if (get_oid_hex(arg, &oid))
 			die("git upload-pack: protocol error, "
 			    "expected to get oid, not '%s'", line);
 
-		commit = lookup_commit_in_graph(the_repository, &oid);
-		if (commit)
-			o = &commit->object;
-		else
-			o = parse_object(the_repository, &oid);
+		o = parse_object_with_flags(the_repository, &oid,
+					    PARSE_OBJECT_SKIP_HASH_CHECK);
 
 		if (!o) {
 			packet_writer_error(writer,
@@ -161,28 +161,6 @@ void xsetenv(const char *name, const char *value, int overwrite)
 		die_errno(_("could not setenv '%s'"), name ? name : "(null)");
 }
 
-/*
- * Limit size of IO chunks, because huge chunks only cause pain. OS X
- * 64-bit is buggy, returning EINVAL if len >= INT_MAX; and even in
- * the absence of bugs, large chunks can result in bad latencies when
- * you decide to kill the process.
- *
- * We pick 8 MiB as our default, but if the platform defines SSIZE_MAX
- * that is smaller than that, clip it to SSIZE_MAX, as a call to
- * read(2) or write(2) larger than that is allowed to fail. As the last
- * resort, we allow a port to pass via CFLAGS e.g. "-DMAX_IO_SIZE=value"
"-DMAX_IO_SIZE=value" - * to override this, if the definition of SSIZE_MAX given by the platform - * is broken. - */ -#ifndef MAX_IO_SIZE -# define MAX_IO_SIZE_DEFAULT (8*1024*1024) -# if defined(SSIZE_MAX) && (SSIZE_MAX < MAX_IO_SIZE_DEFAULT) -# define MAX_IO_SIZE SSIZE_MAX -# else -# define MAX_IO_SIZE MAX_IO_SIZE_DEFAULT -# endif -#endif - /** * xopen() is the same as open(), but it die()s if the open() fails. */ diff --git a/xdiff/xdiffi.c b/xdiff/xdiffi.c index 53e803e6bc..32652ded2d 100644 --- a/xdiff/xdiffi.c +++ b/xdiff/xdiffi.c @@ -321,12 +321,12 @@ int xdl_do_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp, return -1; if (XDF_DIFF_ALG(xpp->flags) == XDF_PATIENCE_DIFF) { - res = xdl_do_patience_diff(mf1, mf2, xpp, xe); + res = xdl_do_patience_diff(xpp, xe); goto out; } if (XDF_DIFF_ALG(xpp->flags) == XDF_HISTOGRAM_DIFF) { - res = xdl_do_histogram_diff(mf1, mf2, xpp, xe); + res = xdl_do_histogram_diff(xpp, xe); goto out; } diff --git a/xdiff/xdiffi.h b/xdiff/xdiffi.h index 8f1c7c8b04..126c9d8ff4 100644 --- a/xdiff/xdiffi.h +++ b/xdiff/xdiffi.h @@ -56,9 +56,7 @@ int xdl_build_script(xdfenv_t *xe, xdchange_t **xscr); void xdl_free_script(xdchange_t *xscr); int xdl_emit_diff(xdfenv_t *xe, xdchange_t *xscr, xdemitcb_t *ecb, xdemitconf_t const *xecfg); -int xdl_do_patience_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp, - xdfenv_t *env); -int xdl_do_histogram_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp, - xdfenv_t *env); +int xdl_do_patience_diff(xpparam_t const *xpp, xdfenv_t *env); +int xdl_do_histogram_diff(xpparam_t const *xpp, xdfenv_t *env); #endif /* #if !defined(XDIFFI_H) */ diff --git a/xdiff/xhistogram.c b/xdiff/xhistogram.c index df909004c1..16a8fe2f3f 100644 --- a/xdiff/xhistogram.c +++ b/xdiff/xhistogram.c @@ -362,8 +362,7 @@ out: return result; } -int xdl_do_histogram_diff(mmfile_t *file1, mmfile_t *file2, - xpparam_t const *xpp, xdfenv_t *env) +int xdl_do_histogram_diff(xpparam_t const *xpp, xdfenv_t *env) { return histogram_diff(xpp, env, env->xdf1.dstart + 1, env->xdf1.dend - env->xdf1.dstart + 1, diff --git a/xdiff/xpatience.c b/xdiff/xpatience.c index fe39c2978c..a2d8955537 100644 --- a/xdiff/xpatience.c +++ b/xdiff/xpatience.c @@ -69,7 +69,6 @@ struct hashmap { } *entries, *first, *last; /* were common records found? */ unsigned long has_matches; - mmfile_t *file1, *file2; xdfenv_t *env; xpparam_t const *xpp; }; @@ -139,13 +138,10 @@ static void insert_record(xpparam_t const *xpp, int line, struct hashmap *map, * * It is assumed that env has been prepared using xdl_prepare(). 
  */
-static int fill_hashmap(mmfile_t *file1, mmfile_t *file2,
-			xpparam_t const *xpp, xdfenv_t *env,
+static int fill_hashmap(xpparam_t const *xpp, xdfenv_t *env,
 			struct hashmap *result, int line1, int count1,
 			int line2, int count2)
 {
-	result->file1 = file1;
-	result->file2 = file2;
 	result->xpp = xpp;
 	result->env = env;
 
@@ -254,8 +250,7 @@ static int match(struct hashmap *map, int line1, int line2)
 	return record1->ha == record2->ha;
 }
 
-static int patience_diff(mmfile_t *file1, mmfile_t *file2,
-			 xpparam_t const *xpp, xdfenv_t *env,
+static int patience_diff(xpparam_t const *xpp, xdfenv_t *env,
 			 int line1, int count1, int line2, int count2);
 
 static int walk_common_sequence(struct hashmap *map, struct entry *first,
@@ -286,8 +281,7 @@ static int walk_common_sequence(struct hashmap *map, struct entry *first,
 
 		/* Recurse */
 		if (next1 > line1 || next2 > line2) {
-			if (patience_diff(map->file1, map->file2,
-					  map->xpp, map->env,
+			if (patience_diff(map->xpp, map->env,
 					  line1, next1 - line1,
 					  line2, next2 - line2))
 				return -1;
@@ -326,8 +320,7 @@ static int fall_back_to_classic_diff(struct hashmap *map,
  *
  * This function assumes that env was prepared with xdl_prepare_env().
  */
-static int patience_diff(mmfile_t *file1, mmfile_t *file2,
-			 xpparam_t const *xpp, xdfenv_t *env,
+static int patience_diff(xpparam_t const *xpp, xdfenv_t *env,
 			 int line1, int count1, int line2, int count2)
 {
 	struct hashmap map;
@@ -346,7 +339,7 @@ static int patience_diff(mmfile_t *file1, mmfile_t *file2,
 	}
 
 	memset(&map, 0, sizeof(map));
-	if (fill_hashmap(file1, file2, xpp, env, &map,
+	if (fill_hashmap(xpp, env, &map,
 			 line1, count1, line2, count2))
 		return -1;
 
@@ -374,9 +367,7 @@ static int patience_diff(mmfile_t *file1, mmfile_t *file2,
 	return result;
 }
 
-int xdl_do_patience_diff(mmfile_t *file1, mmfile_t *file2,
-			 xpparam_t const *xpp, xdfenv_t *env)
+int xdl_do_patience_diff(xpparam_t const *xpp, xdfenv_t *env)
 {
-	return patience_diff(file1, file2, xpp, env,
-			     1, env->xdf1.nrec, 1, env->xdf2.nrec);
+	return patience_diff(xpp, env, 1, env->xdf1.nrec, 1, env->xdf2.nrec);
 }
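
The tempfile.c/tempfile.h changes above make allocation itself the liveness signal: a `struct tempfile` exists only while it is active, deactivation both unregisters and frees it, and `is_tempfile_active()` reduces to a NULL check. A minimal caller-side sketch follows. It is not part of the patch; the mks_tempfile(), delete_tempfile() and rename_tempfile() signatures are assumed from tempfile.h, and write_in_full()/error_errno() are Git's usual helpers.

#include "cache.h"
#include "tempfile.h"

/*
 * Sketch only, not from this patch: write "payload" to "path" through a
 * temporary file, leaving signal/exit cleanup to the tempfile machinery.
 * After the change above, a non-NULL struct tempfile is by definition
 * active, and delete_tempfile()/rename_tempfile() also free the struct.
 */
static int write_via_tempfile(const char *path, const char *payload)
{
	struct strbuf template = STRBUF_INIT;
	struct tempfile *t;

	strbuf_addf(&template, "%s.XXXXXX", path);
	t = mks_tempfile(template.buf);	/* assumed mkstemp-style helper from tempfile.h */
	strbuf_release(&template);
	if (!is_tempfile_active(t))	/* equivalent to "!t" after this series */
		return error_errno("could not create temporary file for '%s'", path);

	if (write_in_full(t->fd, payload, strlen(payload)) < 0) {
		delete_tempfile(&t);	/* unlinks the file and frees the struct */
		return error_errno("could not write '%s'", path);
	}

	return rename_tempfile(&t, path);	/* publish atomically, then free */
}

Whether the temporary file sits next to the final path (as in this sketch) or inside a scratch directory created with mks_tempfile_dt() is the caller's choice; the patch only changes how the bookkeeping struct is allocated, tracked on tempfile_list, and torn down.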
