401 files changed, 10441 insertions, 3050 deletions
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index cd1f52692a..831f4df56c 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -309,7 +309,7 @@ jobs: if: needs.ci-config.outputs.enabled == 'yes' env: jobname: StaticAnalysis - runs-on: ubuntu-18.04 + runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v2 - run: ci/install-dependencies.sh diff --git a/.gitignore b/.gitignore index a452215764..80b530bbed 100644 --- a/.gitignore +++ b/.gitignore @@ -53,6 +53,7 @@ /git-cvsimport /git-cvsserver /git-daemon +/git-diagnose /git-diff /git-diff-files /git-diff-index @@ -185,6 +186,7 @@ /git-worktree /git-write-tree /git-core-*/?* +/git.res /gitweb/GITWEB-BUILD-OPTIONS /gitweb/gitweb.cgi /gitweb/static/gitweb.js @@ -225,7 +227,6 @@ *.hcc *.obj *.lib -*.res *.sln *.sp *.suo diff --git a/Documentation/Makefile b/Documentation/Makefile index 4f801f4e4c..bd6b6fcb93 100644 --- a/Documentation/Makefile +++ b/Documentation/Makefile @@ -24,10 +24,21 @@ MAN1_TXT += gitweb.txt # man5 / man7 guides (note: new guides should also be added to command-list.txt) MAN5_TXT += gitattributes.txt +MAN5_TXT += gitformat-bundle.txt +MAN5_TXT += gitformat-chunk.txt +MAN5_TXT += gitformat-commit-graph.txt +MAN5_TXT += gitformat-index.txt +MAN5_TXT += gitformat-pack.txt +MAN5_TXT += gitformat-signature.txt MAN5_TXT += githooks.txt MAN5_TXT += gitignore.txt MAN5_TXT += gitmailmap.txt MAN5_TXT += gitmodules.txt +MAN5_TXT += gitprotocol-capabilities.txt +MAN5_TXT += gitprotocol-common.txt +MAN5_TXT += gitprotocol-http.txt +MAN5_TXT += gitprotocol-pack.txt +MAN5_TXT += gitprotocol-v2.txt MAN5_TXT += gitrepository-layout.txt MAN5_TXT += gitweb.conf.txt @@ -95,26 +106,17 @@ TECH_DOCS += MyFirstObjectWalk TECH_DOCS += SubmittingPatches TECH_DOCS += ToolsForGit TECH_DOCS += technical/bitmap-format -TECH_DOCS += technical/bundle-format -TECH_DOCS += technical/cruft-packs +TECH_DOCS += technical/bundle-uri TECH_DOCS += technical/hash-function-transition -TECH_DOCS += technical/http-protocol -TECH_DOCS += technical/index-format TECH_DOCS += technical/long-running-process-protocol TECH_DOCS += technical/multi-pack-index -TECH_DOCS += technical/pack-format TECH_DOCS += technical/pack-heuristics -TECH_DOCS += technical/pack-protocol TECH_DOCS += technical/parallel-checkout TECH_DOCS += technical/partial-clone -TECH_DOCS += technical/protocol-capabilities -TECH_DOCS += technical/protocol-common -TECH_DOCS += technical/protocol-v2 TECH_DOCS += technical/racy-git TECH_DOCS += technical/reftable TECH_DOCS += technical/send-pack-pipeline TECH_DOCS += technical/shallow -TECH_DOCS += technical/signature-format TECH_DOCS += technical/trivial-merge SP_ARTICLES += $(TECH_DOCS) SP_ARTICLES += technical/api-index @@ -290,6 +292,8 @@ cmds_txt = cmds-ancillaryinterrogators.txt \ cmds-synchingrepositories.txt \ cmds-synchelpers.txt \ cmds-guide.txt \ + cmds-developerinterfaces.txt \ + cmds-userinterfaces.txt \ cmds-purehelpers.txt \ cmds-foreignscminterface.txt diff --git a/Documentation/RelNotes/2.38.0.txt b/Documentation/RelNotes/2.38.0.txt new file mode 100644 index 0000000000..91a822e62b --- /dev/null +++ b/Documentation/RelNotes/2.38.0.txt @@ -0,0 +1,300 @@ +Git v2.38 Release Notes +======================= + +UI, Workflows & Features + + * "git remote show [-n] frotz" now pays attention to negative + pathspec. + + * "git push" sometimes perform poorly when reachability bitmaps are + used, even in a repository where other operations are helped by + bitmaps. 
The push.useBitmaps configuration variable is introduced + to allow disabling use of reachability bitmaps only for "git push". + + * "git grep -m<max-hits>" is a way to limit the hits shown per file. + + * "git merge-tree" learned a new mode where it takes two commits and + computes a tree that would result in the merge commit, if the + histories leading to these two commits were to be merged. + + * "git mv A B" in a sparsely populated working tree can be asked to + move a path between directories that are "in cone" (i.e. expected + to be materialized in the working tree) and "out of cone" + (i.e. expected to be hidden). The handling of such cases has been + improved. + + * Earlier, HTTP transport clients learned to tell the server side + what locale they are in by sending Accept-Language HTTP header, but + this was done only for some requests but not others. + + * Introduce a discovery.barerepository configuration variable that + allows users to forbid discovery of bare repositories. + + * Various messages that come from the pack-bitmap codepaths have been + tweaked. + + * "git rebase -i" learns to update branches whose tip appear in the + rebased range with "--update-refs" option. + + * "git ls-files" learns the "--format" option to tweak its output. + + * "git cat-file" learned an option to use the mailmap when showing + commit and tag objects. + + * When "git merge" finds that it cannot perform a merge, it should + restore the working tree to the state before the command was + initiated, but in some corner cases it didn't. + + * Operating modes like "--batch" of "git cat-file" command learned to + take NUL-terminated input, instead of one-item-per-line. + + * "git rm" has become more aware of the sparse-index feature. + + * "git rev-list --disk-usage" learned to take an optional value + "human" to show the reported value in human-readable format, like + "3.40MiB". + + * The "diagnose" feature to create a zip archive for diagnostic + material has been lifted from "scalar" and made into a feature of + "git bugreport". + + * The namespaces used by "log --decorate" from "refs/" hierarchy by + default has been tightened. + + * "git rev-list --ancestry-path=C A..B" is a natural extension of + "git rev-list A..B"; instead of choosing a subset of A..B to those + that have ancestry relationship with A, it lets a subset with + ancestry relationship with C. + + * "scalar" now enables built-in fsmonitor on enlisted repositories, + when able. + + * The bash prompt (in contrib/) learned to optionally indicate when + the index is unmerged. + + +Performance, Internal Implementation, Development Support etc. + + * Collection of what is referenced by objects in promisor packs have + been optimized to inspect these objects in the in-pack order. + + * Introduce a helper to see if a branch is already being worked on + (hence should not be newly checked out in a working tree), which + performs much better than the existing find_shared_symref() to + replace many uses of the latter. + + * Teach "git archive" to (optionally and then by default) avoid + spawning an external "gzip" process when creating ".tar.gz" (and + ".tgz") archives. + + * Allow large objects read from a packstream to be streamed into a + loose object file straight, without having to keep it in-core as a + whole. + + * Further preparation to turn git-submodule.sh into a builtin + continues. + + * Apply Coccinelle rule to turn raw memmove() into MOVE_ARRAY() cpp + macro, which would improve maintainability and readability. 
+ + * Teach "make all" to build gitweb as well. + + * Tweak tests so that they still work when the "git init" template + did not create .git/info directory. + + * Add Coccinelle rules to detect the pattern of initializing and then + finalizing a structure without using it in between at all, which + happens after code restructuring and the compilers fail to + recognize as an unused variable. + + * The code to convert between GPG trust level strings and internal + constants we use to represent them have been cleaned up. + + * Support for libnettle as SHA256 implementation has been added. + + * The way "git multi-pack" uses parse-options API has been improved. + + * A coccinelle rule (in contrib/) to encourage use of COPY_ARRAY + macro has been improved. + + * API tweak to make it easier to run fuzz testing on commit-graph parser. + + * Omit fsync-related trace2 entries when their values are all zero. + + * The codepath to write multi-pack index has been taught to release a + large chunk of memory that holds an array of objects in the packs, + as soon as it is done with the array, to reduce memory consumption. + + * Add a level of redirection to array allocation API in xdiff part, + to make it easier to share with the libgit2 project. + + * "git fetch" client logs the partial clone filter used in the trace2 + output. + + * The "bundle URI" design gets documented. + + * The common ancestor negotiation exchange during a "git fetch" + session now leaves trace log. + + * Test portability improvements. + (merge 4d1d843be7 mt/rot13-in-c later to maint). + + +Fixes since v2.37 +----------------- + + * Rewrite of "git add -i" in C that appeared in Git 2.25 didn't + correctly record a removed file to the index, which was fixed. + + * Certain diff options are currently ignored when combined-diff is + shown; mark them as incompatible with the feature. + + * Adjust technical/bitmap-format to be formatted by AsciiDoc, and + add some missing information to the documentation. + + * Fixes for tests when the source directory has unusual characters in + its path, e.g. whitespaces, double-quotes, etc. + + * "git mktree --missing" lazily fetched objects that are missing from + the local object store, which was totally unnecessary for the purpose + of creating the tree object(s) from its input. + + * Give _() markings to fatal/warning/usage: labels that are shown in + front of these messages. + + * References to commands-to-be-typed-literally in "git rebase" + documentation mark-up have been corrected. + + * In a non-bare repository, the behavior of Git when the + core.worktree configuration variable points at a directory that has + a repository as its subdirectory, regressed in Git 2.27 days. + + * Recent update to vimdiff layout code has been made more robust + against different end-user vim settings. + + * Plug various memory leaks, both in the main code and in test-tool + commands. + + * Fixes a long-standing corner case bug around directory renames in + the merge-ort strategy. + + * The resolve-undo information in the index was not protected against + GC, which has been corrected. + + * A corner case bug where lazily fetching objects from a promisor + remote resulted in infinite recursion has been corrected. + + * "git clone" from a repository with some ref whose HEAD is unborn + did not set the HEAD in the resulting repository correctly, which + has been corrected. + + * An earlier attempt to plug leaks placed a clean-up label to jump to + at a bogus place, which as been corrected. 
+ + * Variable quoting fix in the vimdiff driver of "git mergetool" + + * "git shortlog -n" relied on the underlying qsort() to be stable, + which shouldn't have. Fixed. + + * A fix for a regression in test framework. + + * mkstemp() emulation on Windows has been improved. + + * Add missing documentation for "include" and "includeIf" features in + "git config" file format, which incidentally teaches the command + line completion to include them in its offerings. + + * Avoid "white/black-list" in documentation and code comments. + + * Workaround for a compiler warning against use of die() in + osx-keychain (in contrib/). + + * Workaround for a false positive compiler warning. + + * "git p4" working on UTF-16 files on Windows did not implement + CRLF-to-LF conversion correctly, which has been corrected. + + * "git p4" did not handle non-ASCII client name well, which has been + corrected. + + * "rerere-train" script (in contrib/) used to honor commit.gpgSign + while recreating the throw-away merges. + + * "git checkout" miscounted the paths it updated, which has been + corrected. + + * Fix for a bug that makes write-tree to fail to write out a + non-existent index as a tree, introduced in 2.37. + + * There was a bug in the codepath to upgrade generation information + in commit-graph from v1 to v2 format, which has been corrected. + + * Gitweb had legacy URL shortener that is specific to the way + projects hosted on kernel.org used to (but no longer) work, which + has been removed. + + * Fix build procedure for Windows that uses CMake so that it can pick + up the shell interpreter from local installation location. + + * Conditionally allow building Python interpreter on Windows + + * Fix to lstat() emulation on Windows. + + * Older gcc with -Wall complains about the universal zero initializer + "struct s = { 0 };" idiom, which makes developers' lives + inconvenient (as -Werror is enabled by DEVELOPER=YesPlease). The + build procedure has been tweaked to help these compilers. + + * Plug memory leaks in the failure code path in the "merge-ort" merge + strategy backend. + + * "git symbolic-ref symref non..sen..se" is now diagnosed as an error. + + * A follow-up fix to a fix for a regression in 2.36 around hooks. + + * Avoid repeatedly running getconf to ask libc version in the test + suite, and instead just as it once per script. + + * Platform-specific code that determines if a directory is OK to use + as a repository has been taught to report more details, especially + on Windows. + + * "vimdiff3" regression fix. + + * "git fsck" reads mode from tree objects but canonicalizes the mode + before passing it to the logic to check object sanity, which has + hid broken tree objects from the checking logic. This has been + corrected, but to help exiting projects with broken tree objects + that they cannot fix retroactively, the severity of anomalies this + code detects has been demoted to "info" for now. + + * Fixes to sparse index compatibility work for "reset" and "checkout" + commands. + + * An earlier optimization discarded a tree-object buffer that is + still in use, which has been corrected. + (merge 1490d7d82d jk/is-promisor-object-keep-tree-in-use later to maint). + + * Fix deadlocks between main Git process and subprocess spawned via + the pipe_command() API, that can kill "git add -p" that was + reimplemented in C recently. + (merge 716c1f649e jk/pipe-command-nonblock later to maint). + + * The sequencer machinery translated messages left in the reflog by + mistake, which has been corrected. 
+ + * xcalloc(), imitating calloc(), takes "number of elements of the + array", and "size of a single element", in this order. A call that + does not follow this ordering has been corrected. + (merge c4bbd9bb8f sg/xcalloc-cocci-fix later to maint). + + * The preload-index codepath made copies of pathspec to give to + multiple threads, which were left leaked. + (merge 23578904da ad/preload-plug-memleak later to maint). + + * Update the version of Ubuntu used for GitHub Actions CI from 18.04 + to 22.04. + (merge ef46584831 ds/github-actions-use-newer-ubuntu later to maint). + + * Other code cleanup, docfix, build fix, etc. + (merge 77b9e85c0f vd/fix-perf-tests later to maint).
\ No newline at end of file diff --git a/Documentation/config/core.txt b/Documentation/config/core.txt index 88bc799cf3..37afbaf5a4 100644 --- a/Documentation/config/core.txt +++ b/Documentation/config/core.txt @@ -444,17 +444,32 @@ You probably do not need to adjust this value. Common unit suffixes of 'k', 'm', or 'g' are supported. core.bigFileThreshold:: - Files larger than this size are stored deflated, without - attempting delta compression. Storing large files without - delta compression avoids excessive memory usage, at the - slight expense of increased disk usage. Additionally files - larger than this size are always treated as binary. + The size of files considered "big", which as discussed below + changes the behavior of numerous git commands, as well as how + such files are stored within the repository. The default is + 512 MiB. Common unit suffixes of 'k', 'm', or 'g' are + supported. + -Default is 512 MiB on all platforms. This should be reasonable -for most projects as source code and other text files can still -be delta compressed, but larger binary media files won't be. +Files above the configured limit will be: + -Common unit suffixes of 'k', 'm', or 'g' are supported. +* Stored deflated in packfiles, without attempting delta compression. ++ +The default limit is primarily set with this use-case in mind. With it, +most projects will have their source code and other text files delta +compressed, but not larger binary media files. ++ +Storing large files without delta compression avoids excessive memory +usage, at the slight expense of increased disk usage. ++ +* Will be treated as if they were labeled "binary" (see + linkgit:gitattributes[5]). e.g. linkgit:git-log[1] and + linkgit:git-diff[1] will not compute diffs for files above this limit. ++ +* Will generally be streamed when written, which avoids excessive +memory usage, at the cost of some fixed overhead. Commands that make +use of this include linkgit:git-archive[1], +linkgit:git-fast-import[1], linkgit:git-index-pack[1], +linkgit:git-unpack-objects[1] and linkgit:git-fsck[1]. core.excludesFile:: Specifies the pathname to the file that contains patterns to diff --git a/Documentation/config/log.txt b/Documentation/config/log.txt index 456eb07800..5250ba45fb 100644 --- a/Documentation/config/log.txt +++ b/Documentation/config/log.txt @@ -18,6 +18,11 @@ log.decorate:: names are shown. This is the same as the `--decorate` option of the `git log`. +log.initialDecorationSet:: + By default, `git log` only shows decorations for certain known ref + namespaces. If 'all' is specified, then show all refs as + decorations. + log.excludeDecoration:: Exclude the specified patterns from the log decorations. This is similar to the `--decorate-refs-exclude` command-line option, but diff --git a/Documentation/config/lsrefs.txt b/Documentation/config/lsrefs.txt index adeda0f24d..3d88fb0bad 100644 --- a/Documentation/config/lsrefs.txt +++ b/Documentation/config/lsrefs.txt @@ -1,7 +1,7 @@ lsrefs.unborn:: May be "advertise" (the default), "allow", or "ignore". If "advertise", the server will respond to the client sending "unborn" (as described in - protocol-v2.txt) and will advertise support for this feature during the + linkgit:gitprotocol-v2[5]) and will advertise support for this feature during the protocol v2 capability advertisement. 
"allow" is the same as "advertise" except that the server will not advertise support for this feature; this is useful for load-balanced servers that cannot be diff --git a/Documentation/config/pack.txt b/Documentation/config/pack.txt index ad7f73a1ea..3e581eab84 100644 --- a/Documentation/config/pack.txt +++ b/Documentation/config/pack.txt @@ -166,7 +166,7 @@ permuted into their appropriate location when writing a new bitmap. pack.writeReverseIndex:: When true, git will write a corresponding .rev file (see: - link:../technical/pack-format.html[Documentation/technical/pack-format.txt]) + linkgit:gitformat-pack[5]) for each new packfile that it writes in all places except for linkgit:git-fast-import[1] and in the bulk checkin mechanism. Defaults to false. diff --git a/Documentation/config/protocol.txt b/Documentation/config/protocol.txt index 756591d77b..5760381851 100644 --- a/Documentation/config/protocol.txt +++ b/Documentation/config/protocol.txt @@ -58,6 +58,6 @@ protocol.version:: * `1` - the original wire protocol with the addition of a version string in the initial response from the server. -* `2` - link:technical/protocol-v2.html[wire protocol version 2]. +* `2` - Wire protocol version 2, see linkgit:gitprotocol-v2[5]. -- diff --git a/Documentation/config/push.txt b/Documentation/config/push.txt index e32801e6c9..7386fea225 100644 --- a/Documentation/config/push.txt +++ b/Documentation/config/push.txt @@ -137,3 +137,8 @@ push.negotiate:: server attempt to find commits in common. If "false", Git will rely solely on the server's ref advertisement to find commits in common. + +push.useBitmaps:: + If set to "false", disable use of bitmaps for "git push" even if + `pack.useBitmaps` is "true", without preventing other git operations + from using bitmaps. Default is true. diff --git a/Documentation/config/rebase.txt b/Documentation/config/rebase.txt index 8c979cb20f..f19bd0e040 100644 --- a/Documentation/config/rebase.txt +++ b/Documentation/config/rebase.txt @@ -21,6 +21,9 @@ rebase.autoStash:: `--autostash` options of linkgit:git-rebase[1]. Defaults to false. +rebase.updateRefs:: + If set to true enable `--update-refs` option by default. + rebase.missingCommitsCheck:: If set to "warn", git rebase -i will print a warning if some commits are removed (e.g. a line was deleted), however the diff --git a/Documentation/config/safe.txt b/Documentation/config/safe.txt index fa02f3ccc5..bde7f31459 100644 --- a/Documentation/config/safe.txt +++ b/Documentation/config/safe.txt @@ -1,3 +1,22 @@ +safe.bareRepository:: + Specifies which bare repositories Git will work with. The currently + supported values are: ++ +* `all`: Git works with all bare repositories. This is the default. +* `explicit`: Git only works with bare repositories specified via + the top-level `--git-dir` command-line option, or the `GIT_DIR` + environment variable (see linkgit:git[1]). ++ +If you do not use bare repositories in your workflow, then it may be +beneficial to set `safe.bareRepository` to `explicit` in your global +config. This will protect you from attacks that involve cloning a +repository that contains a bare repository and running a Git command +within that directory. ++ +This config setting is only respected in protected configuration (see +<<SCOPES>>). This prevents the untrusted repository from tampering with +this value. + safe.directory:: These config entries specify Git-tracked directories that are considered safe even if they are owned by someone other than the @@ -12,9 +31,9 @@ via `git config --add`. 
To reset the list of safe directories (e.g. to override any such directories specified in the system config), add a `safe.directory` entry with an empty value. + -This config setting is only respected when specified in a system or global -config, not when it is specified in a repository config, via the command -line option `-c safe.directory=<path>`, or in environment variables. +This config setting is only respected in protected configuration (see +<<SCOPES>>). This prevents the untrusted repository from tampering with this +value. + The value of this setting is interpolated, i.e. `~/<path>` expands to a path relative to the home directory and `%(prefix)/<path>` expands to a diff --git a/Documentation/config/uploadpack.txt b/Documentation/config/uploadpack.txt index 32fad5bbe8..16264d82a7 100644 --- a/Documentation/config/uploadpack.txt +++ b/Documentation/config/uploadpack.txt @@ -49,9 +49,9 @@ uploadpack.packObjectsHook:: `pack-objects` to the hook, and expects a completed packfile on stdout. + -Note that this configuration variable is ignored if it is seen in the -repository-level config (this is a safety measure against fetching from -untrusted repositories). +Note that this configuration variable is only respected when it is specified +in protected configuration (see <<SCOPES>>). This is a safety measure +against fetching from untrusted repositories. uploadpack.allowFilter:: If this option is set, `upload-pack` will support partial diff --git a/Documentation/git-am.txt b/Documentation/git-am.txt index 09107fb106..320da6c4f7 100644 --- a/Documentation/git-am.txt +++ b/Documentation/git-am.txt @@ -112,10 +112,7 @@ default. You can use `--no-utf8` to override this. am.threeWay configuration variable. For more information, see am.threeWay in linkgit:git-config[1]. ---rerere-autoupdate:: ---no-rerere-autoupdate:: - Allow the rerere mechanism to update the index with the - result of auto-conflict resolution if possible. +include::rerere-options.txt[] --ignore-space-change:: --ignore-whitespace:: diff --git a/Documentation/git-archive.txt b/Documentation/git-archive.txt index 56989a2f34..60c040988b 100644 --- a/Documentation/git-archive.txt +++ b/Documentation/git-archive.txt @@ -34,10 +34,12 @@ OPTIONS ------- --format=<fmt>:: - Format of the resulting archive: 'tar' or 'zip'. If this option + Format of the resulting archive. Possible values are `tar`, + `zip`, `tar.gz`, `tgz`, and any format defined using the + configuration option `tar.<format>.command`. If `--format` is not given, and the output file is specified, the format is - inferred from the filename if possible (e.g. writing to "foo.zip" - makes the output to be in the zip format). Otherwise the output + inferred from the filename if possible (e.g. writing to `foo.zip` + makes the output to be in the `zip` format). Otherwise the output format is `tar`. -l:: @@ -143,17 +145,16 @@ tar.<format>.command:: is executed using the shell with the generated tar file on its standard input, and should produce the final output on its standard output. Any compression-level options will be passed - to the command (e.g., "-9"). An output file with the same - extension as `<format>` will be use this format if no other - format is given. + to the command (e.g., `-9`). + -The "tar.gz" and "tgz" formats are defined automatically and default to -`gzip -cn`. You may override them with custom commands. 
+The `tar.gz` and `tgz` formats are defined automatically and use the +magic command `git archive gzip` by default, which invokes an internal +implementation of gzip. tar.<format>.remote:: - If true, enable `<format>` for use by remote clients via + If true, enable the format for use by remote clients via linkgit:git-upload-archive[1]. Defaults to false for - user-defined formats, but true for the "tar.gz" and "tgz" + user-defined formats, but true for the `tar.gz` and `tgz` formats. [[ATTRIBUTES]] diff --git a/Documentation/git-bugreport.txt b/Documentation/git-bugreport.txt index d8817bf3ce..eca726e579 100644 --- a/Documentation/git-bugreport.txt +++ b/Documentation/git-bugreport.txt @@ -9,6 +9,7 @@ SYNOPSIS -------- [verse] 'git bugreport' [(-o | --output-directory) <path>] [(-s | --suffix) <format>] + [--diagnose[=<mode>]] DESCRIPTION ----------- @@ -31,6 +32,10 @@ The following information is captured automatically: - A list of enabled hooks - $SHELL +Additional information may be gathered into a separate zip archive using the +`--diagnose` option, and can be attached alongside the bugreport document to +provide additional context to readers. + This tool is invoked via the typical Git setup process, which means that in some cases, it might not be able to launch - for example, if a relevant config file is unreadable. In this kind of scenario, it may be helpful to manually gather @@ -49,6 +54,19 @@ OPTIONS named 'git-bugreport-<formatted suffix>'. This should take the form of a strftime(3) format string; the current local time will be used. +--no-diagnose:: +--diagnose[=<mode>]:: + Create a zip archive of supplemental information about the user's + machine, Git client, and repository state. The archive is written to the + same output directory as the bug report and is named + 'git-diagnostics-<formatted suffix>'. ++ +Without `mode` specified, the diagnostic archive will contain the default set of +statistics reported by `git diagnose`. An optional `mode` value may be specified +to change which information is included in the archive. See +linkgit:git-diagnose[1] for the list of valid values for `mode` and details +about their usage. + GIT --- Part of the linkgit:git[1] suite diff --git a/Documentation/git-bundle.txt b/Documentation/git-bundle.txt index 7685b57045..6da6172243 100644 --- a/Documentation/git-bundle.txt +++ b/Documentation/git-bundle.txt @@ -56,10 +56,8 @@ using "thin packs", bundles created using exclusions are smaller in size. That they're "thin" under the hood is merely noted here as a curiosity, and as a reference to other documentation. -See link:technical/bundle-format.html[the `bundle-format` -documentation] for more details and the discussion of "thin pack" in -link:technical/pack-format.html[the pack format documentation] for -further details. +See linkgit:gitformat-bundle[5] for more details and the discussion of +"thin pack" in linkgit:gitformat-pack[5] for further details. OPTIONS ------- @@ -77,7 +75,7 @@ verify <file>:: commits exist and are fully linked in the current repository. Then, 'git bundle' prints a list of missing commits, if any. Finally, information about additional capabilities, such as "object - filter", is printed. See "Capabilities" in link:technical/bundle-format.html + filter", is printed. See "Capabilities" in linkgit:gitformat-bundle[5] for more information. The exit code is zero for success, but will be nonzero if the bundle file is invalid. 
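As a quick illustration of the exit-code behaviour of `verify` described above, a minimal sketch (the bundle file name and revision range are hypothetical):

    $ git bundle create hotfix.bundle main~10..main
    $ git bundle verify hotfix.bundle && echo "bundle and its prerequisites check out"

`verify` exits zero only when the bundle is well-formed and every prerequisite commit is present in the current repository, so it can be used directly as a gate in scripts.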
@@ -337,6 +335,11 @@ You can also see what references it offers: $ git ls-remote mybundle ---------------- +FILE FORMAT +----------- + +See linkgit:gitformat-bundle[5]. + GIT --- Part of the linkgit:git[1] suite diff --git a/Documentation/git-cat-file.txt b/Documentation/git-cat-file.txt index 24a811f0ef..ec30b5c574 100644 --- a/Documentation/git-cat-file.txt +++ b/Documentation/git-cat-file.txt @@ -14,7 +14,7 @@ SYNOPSIS 'git cat-file' (-t | -s) [--allow-unknown-type] <object> 'git cat-file' (--batch | --batch-check | --batch-command) [--batch-all-objects] [--buffer] [--follow-symlinks] [--unordered] - [--textconv | --filters] + [--textconv | --filters] [-z] 'git cat-file' (--textconv | --filters) [<rev>:<path|tree-ish> | --path=<path|tree-ish> <rev>] @@ -63,6 +63,12 @@ OPTIONS or to ask for a "blob" with `<object>` being a tag object that points at it. +--[no-]mailmap:: +--[no-]use-mailmap:: + Use mailmap file to map author, committer and tagger names + and email addresses to canonical real names and email addresses. + See linkgit:git-shortlog[1]. + --textconv:: Show the content as transformed by a textconv filter. In this case, `<object>` has to be of the form `<tree-ish>:<path>`, or `:<path>` in @@ -207,6 +213,11 @@ respectively print: /etc/passwd -- +-z:: + Only meaningful with `--batch`, `--batch-check`, or + `--batch-command`; input is NUL-delimited instead of + newline-delimited. + OUTPUT ------ diff --git a/Documentation/git-cherry-pick.txt b/Documentation/git-cherry-pick.txt index 78dcc9171f..1e8ac9df60 100644 --- a/Documentation/git-cherry-pick.txt +++ b/Documentation/git-cherry-pick.txt @@ -156,10 +156,7 @@ effect to your index in a row. Pass the merge strategy-specific option through to the merge strategy. See linkgit:git-merge[1] for details. ---rerere-autoupdate:: ---no-rerere-autoupdate:: - Allow the rerere mechanism to update the index with the - result of auto-conflict resolution if possible. +include::rerere-options.txt[] SEQUENCER SUBCOMMANDS --------------------- diff --git a/Documentation/git-commit-graph.txt b/Documentation/git-commit-graph.txt index e1f48c95b3..047decdb65 100644 --- a/Documentation/git-commit-graph.txt +++ b/Documentation/git-commit-graph.txt @@ -143,6 +143,11 @@ $ git rev-parse HEAD | git commit-graph write --stdin-commits --append ------------------------------------------------ +FILE FORMAT +----------- + +see linkgit:gitformat-commit-graph[5]. + GIT --- Part of the linkgit:git[1] suite diff --git a/Documentation/git-config.txt b/Documentation/git-config.txt index 9376e39aef..7a2bcb2f6c 100644 --- a/Documentation/git-config.txt +++ b/Documentation/git-config.txt @@ -297,23 +297,20 @@ The default is to use a pager. FILES ----- -If not set explicitly with `--file`, there are four files where -'git config' will search for configuration options: +By default, 'git config' will read configuration options from multiple +files: $(prefix)/etc/gitconfig:: System-wide configuration file. $XDG_CONFIG_HOME/git/config:: - Second user-specific configuration file. If $XDG_CONFIG_HOME is not set - or empty, `$HOME/.config/git/config` will be used. Any single-valued - variable set in this file will be overwritten by whatever is in - `~/.gitconfig`. It is a good idea not to create this file if - you sometimes use older versions of Git, as support for this - file was added fairly recently. - ~/.gitconfig:: - User-specific configuration file. Also called "global" - configuration file. + User-specific configuration files. 
When the XDG_CONFIG_HOME environment + variable is not set or empty, $HOME/.config/ is used as + $XDG_CONFIG_HOME. ++ +These are also called "global" configuration files. If both files exist, both +files are read in the order given above. $GIT_DIR/config:: Repository specific configuration file. @@ -322,28 +319,80 @@ $GIT_DIR/config.worktree:: This is optional and is only searched when `extensions.worktreeConfig` is present in $GIT_DIR/config. -If no further options are given, all reading options will read all of these -files that are available. If the global or the system-wide configuration -file are not available they will be ignored. If the repository configuration -file is not available or readable, 'git config' will exit with a non-zero -error code. However, in neither case will an error message be issued. +You may also provide additional configuration parameters when running any +git command by using the `-c` option. See linkgit:git[1] for details. + +Options will be read from all of these files that are available. If the +global or the system-wide configuration files are missing or unreadable they +will be ignored. If the repository configuration file is missing or unreadable, +'git config' will exit with a non-zero error code. An error message is produced +if the file is unreadable, but not if it is missing. The files are read in the order given above, with last value found taking precedence over values read earlier. When multiple values are taken then all values of a key from all files will be used. -You may override individual configuration parameters when running any git -command by using the `-c` option. See linkgit:git[1] for details. - -All writing options will per default write to the repository specific +By default, options are only written to the repository specific configuration file. Note that this also affects options like `--replace-all` and `--unset`. *'git config' will only ever change one file at a time*. -You can override these rules using the `--global`, `--system`, -`--local`, `--worktree`, and `--file` command-line options; see -<<OPTIONS>> above. +You can limit which configuration sources are read from or written to by +specifying the path of a file with the `--file` option, or by specifying a +configuration scope with `--system`, `--global`, `--local`, or `--worktree`. +For more, see <<OPTIONS>> above. + +[[SCOPES]] +SCOPES +------ + +Each configuration source falls within a configuration scope. The scopes +are: + +system:: + $(prefix)/etc/gitconfig + +global:: + $XDG_CONFIG_HOME/git/config ++ +~/.gitconfig + +local:: + $GIT_DIR/config + +worktree:: + $GIT_DIR/config.worktree + +command:: + GIT_CONFIG_{COUNT,KEY,VALUE} environment variables (see <<ENVIRONMENT>> + below) ++ +the `-c` option + +With the exception of 'command', each scope corresponds to a command line +option: `--system`, `--global`, `--local`, `--worktree`. + +When reading options, specifying a scope will only read options from the +files within that scope. When writing options, specifying a scope will write +to the files within that scope (instead of the repository specific +configuration file). See <<OPTIONS>> above for a complete description. + +Most configuration options are respected regardless of the scope it is +defined in, but some options are only respected in certain scopes. See the +respective option's documentation for the full details. + +Protected configuration +~~~~~~~~~~~~~~~~~~~~~~~ + +Protected configuration refers to the 'system', 'global', and 'command' scopes. 
+For security reasons, certain options are only respected when they are +specified in protected configuration, and ignored otherwise. +Git treats these scopes as if they are controlled by the user or a trusted +administrator. This is because an attacker who controls these scopes can do +substantial harm without using Git, so it is assumed that the user's environment +protects these scopes against attackers. +[[ENVIRONMENT]] ENVIRONMENT ----------- diff --git a/Documentation/git-diagnose.txt b/Documentation/git-diagnose.txt new file mode 100644 index 0000000000..3ec8cc7ad7 --- /dev/null +++ b/Documentation/git-diagnose.txt @@ -0,0 +1,65 @@ +git-diagnose(1) +================ + +NAME +---- +git-diagnose - Generate a zip archive of diagnostic information + +SYNOPSIS +-------- +[verse] +'git diagnose' [(-o | --output-directory) <path>] [(-s | --suffix) <format>] + [--mode=<mode>] + +DESCRIPTION +----------- +Collects detailed information about the user's machine, Git client, and +repository state and packages that information into a zip archive. The +generated archive can then, for example, be shared with the Git mailing list to +help debug an issue or serve as a reference for independent debugging. + +By default, the following information is captured in the archive: + + * 'git version --build-options' + * The path to the repository root + * The available disk space on the filesystem + * The name and size of each packfile, including those in alternate object + stores + * The total count of loose objects, as well as counts broken down by + `.git/objects` subdirectory + +Additional information can be collected by selecting a different diagnostic mode +using the `--mode` option. + +This tool differs from linkgit:git-bugreport[1] in that it collects much more +detailed information with a greater focus on reporting the size and data shape +of repository contents. + +OPTIONS +------- +-o <path>:: +--output-directory <path>:: + Place the resulting diagnostics archive in `<path>` instead of the + current directory. + +-s <format>:: +--suffix <format>:: + Specify an alternate suffix for the diagnostics archive name, to create + a file named 'git-diagnostics-<formatted suffix>'. This should take the + form of a strftime(3) format string; the current local time will be + used. + +--mode=(stats|all):: + Specify the type of diagnostics that should be collected. The default behavior + of 'git diagnose' is equivalent to `--mode=stats`. ++ +The `--mode=all` option collects everything included in `--mode=stats`, as well +as copies of `.git`, `.git/hooks`, `.git/info`, `.git/logs`, and +`.git/objects/info` directories. This additional information may be sensitive, +as it can be used to reconstruct the full contents of the diagnosed repository. +Users should exercise caution when sharing an archive generated with +`--mode=all`. + +GIT +--- +Part of the linkgit:git[1] suite diff --git a/Documentation/git-grep.txt b/Documentation/git-grep.txt index 3d393fbac1..58d944bd57 100644 --- a/Documentation/git-grep.txt +++ b/Documentation/git-grep.txt @@ -23,6 +23,7 @@ SYNOPSIS [--break] [--heading] [-p | --show-function] [-A <post-context>] [-B <pre-context>] [-C <context>] [-W | --function-context] + [(-m | --max-count) <num>] [--threads <num>] [-f <file>] [-e] <pattern> [--and|--or|--not|(|)|-e <pattern>...] @@ -238,6 +239,14 @@ providing this option will cause it to die. `git diff` works out patch hunk headers (see 'Defining a custom hunk-header' in linkgit:gitattributes[5]). 
+-m <num>:: +--max-count <num>:: + Limit the amount of matches per file. When using the `-v` or + `--invert-match` option, the search stops after the specified + number of non-matches. A value of -1 will return unlimited + results (the default). A value of 0 will exit immediately with + a non-zero status. + --threads <num>:: Number of grep worker threads to use. See `grep.threads` in 'CONFIGURATION' for more information. diff --git a/Documentation/git-help.txt b/Documentation/git-help.txt index 239c68db45..2b0b5e390d 100644 --- a/Documentation/git-help.txt +++ b/Documentation/git-help.txt @@ -9,14 +9,16 @@ SYNOPSIS -------- [verse] 'git help' [-a|--all] [--[no-]verbose] [--[no-]external-commands] [--[no-]aliases] -'git help' [[-i|--info] [-m|--man] [-w|--web]] [<command>|<guide>] +'git help' [[-i|--info] [-m|--man] [-w|--web]] [<command>|<doc>] 'git help' [-g|--guides] 'git help' [-c|--config] +'git help' [--user-interfaces] +'git help' [--developer-interfaces] DESCRIPTION ----------- -With no options and no '<command>' or '<guide>' given, the synopsis of the 'git' +With no options and no '<command>' or '<doc>' given, the synopsis of the 'git' command and a list of the most commonly used Git commands are printed on the standard output. @@ -26,8 +28,8 @@ printed on the standard output. If the option `--guides` or `-g` is given, a list of the Git concept guides is also printed on the standard output. -If a command, or a guide, is given, a manual page for that command or -guide is brought up. The 'man' program is used by default for this +If a command or other documentation is given, the relevant manual page +will be brought up. The 'man' program is used by default for this purpose, but this can be overridden by other options or configuration variables. @@ -69,6 +71,23 @@ OPTIONS --guides:: Prints a list of the Git concept guides on the standard output. +--user-interfaces:: + Prints a list of the repository, command and file interfaces + documentation on the standard output. ++ +In-repository file interfaces such as `.git/info/exclude` are +documented here (see linkgit:gitrepository-layout[5]), as well as +in-tree configuration such as `.mailmap` (see linkgit:gitmailmap[5]). ++ +This section of the documentation also covers general or widespread +user-interface conventions (e.g. linkgit:gitcli[7]), and +pseudo-configuration such as the file-based `.git/hooks/*` interface +described in linkgit:githooks[5]. + +--developer-interfaces:: + Print list of file formats, protocols and other developer + interfaces documentation on the standard output. + -i:: --info:: Display manual page for the command in the 'info' format. The diff --git a/Documentation/git-log.txt b/Documentation/git-log.txt index 20e87cecf4..b1285aee3c 100644 --- a/Documentation/git-log.txt +++ b/Documentation/git-log.txt @@ -45,13 +45,23 @@ OPTIONS --decorate-refs=<pattern>:: --decorate-refs-exclude=<pattern>:: - If no `--decorate-refs` is given, pretend as if all refs were - included. For each candidate, do not use it for decoration if it + For each candidate reference, do not use it for decoration if it matches any patterns given to `--decorate-refs-exclude` or if it doesn't match any of the patterns given to `--decorate-refs`. The `log.excludeDecoration` config option allows excluding refs from the decorations, but an explicit `--decorate-refs` pattern will override a match in `log.excludeDecoration`. 
++ +If none of these options or config settings are given, then references are +used as decoration if they match `HEAD`, `refs/heads/`, `refs/remotes/`, +`refs/stash/`, or `refs/tags/`. + +--clear-decorations:: + When specified, this option clears all previous `--decorate-refs` + or `--decorate-refs-exclude` options and relaxes the default + decoration filter to include all references. This option is + assumed if the config value `log.initialDecorationSet` is set to + `all`. --source:: Print out the ref name given on the command line by which each diff --git a/Documentation/git-ls-files.txt b/Documentation/git-ls-files.txt index 0dabf3f0dd..d7986419c2 100644 --- a/Documentation/git-ls-files.txt +++ b/Documentation/git-ls-files.txt @@ -20,7 +20,7 @@ SYNOPSIS [--exclude-standard] [--error-unmatch] [--with-tree=<tree-ish>] [--full-name] [--recurse-submodules] - [--abbrev[=<n>]] [--] [<file>...] + [--abbrev[=<n>]] [--format=<format>] [--] [<file>...] DESCRIPTION ----------- @@ -192,6 +192,13 @@ followed by the ("attr/<eolattr>"). to the contained files. Sparse directories will be shown with a trailing slash, such as "x/" for a sparse directory "x". +--format=<format>:: + A string that interpolates `%(fieldname)` from the result being shown. + It also interpolates `%%` to `%`, and `%xx` where `xx` are hex digits + interpolates to character with hex code `xx`; for example `%00` + interpolates to `\0` (NUL), `%09` to `\t` (TAB) and %0a to `\n` (LF). + --format cannot be combined with `-s`, `-o`, `-k`, `-t`, `--resolve-undo` + and `--eol`. \--:: Do not interpret any more arguments as options. @@ -223,6 +230,36 @@ quoted as explained for the configuration variable `core.quotePath` (see linkgit:git-config[1]). Using `-z` the filename is output verbatim and the line is terminated by a NUL byte. +It is possible to print in a custom format by using the `--format` +option, which is able to interpolate different fields using +a `%(fieldname)` notation. For example, if you only care about the +"objectname" and "path" fields, you can execute with a specific +"--format" like + + git ls-files --format='%(objectname) %(path)' + +FIELD NAMES +----------- +The way each path is shown can be customized by using the +`--format=<format>` option, where the %(fieldname) in the +<format> string for various aspects of the index entry are +interpolated. The following "fieldname" are understood: + +objectmode:: + The mode of the file which is recorded in the index. +objectname:: + The name of the file which is recorded in the index. +stage:: + The stage of the file which is recorded in the index. +eolinfo:index:: +eolinfo:worktree:: + The <eolinfo> (see the description of the `--eol` option) of + the contents in the index or in the worktree for the path. +eolattr:: + The <eolattr> (see the description of the `--eol` option) + that applies to the path. +path:: + The pathname of the file which is recorded in the index. 
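As a sketch of how the field names above combine (repository contents hypothetical), the following `--format` string reproduces roughly the same columns as `git ls-files -s`, using `%09` for a TAB separator:

    $ git ls-files --format='%(objectmode) %(objectname) %(stage)%09%(path)'

Fields can be mixed freely with literal text in the same way, as in the `%(objectname) %(path)` example shown earlier.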
EXCLUDE PATTERNS ---------------- diff --git a/Documentation/git-merge-tree.txt b/Documentation/git-merge-tree.txt index 58731c1942..d6c356740e 100644 --- a/Documentation/git-merge-tree.txt +++ b/Documentation/git-merge-tree.txt @@ -3,26 +3,237 @@ git-merge-tree(1) NAME ---- -git-merge-tree - Show three-way merge without touching index +git-merge-tree - Perform merge without touching index or working tree SYNOPSIS -------- [verse] -'git merge-tree' <base-tree> <branch1> <branch2> +'git merge-tree' [--write-tree] [<options>] <branch1> <branch2> +'git merge-tree' [--trivial-merge] <base-tree> <branch1> <branch2> (deprecated) +[[NEWMERGE]] DESCRIPTION ----------- -Reads three tree-ish, and output trivial merge results and -conflicting stages to the standard output. This is similar to -what three-way 'git read-tree -m' does, but instead of storing the -results in the index, the command outputs the entries to the -standard output. - -This is meant to be used by higher level scripts to compute -merge results outside of the index, and stuff the results back into the -index. For this reason, the output from the command omits -entries that match the <branch1> tree. + +This command has a modern `--write-tree` mode and a deprecated +`--trivial-merge` mode. With the exception of the +<<DEPMERGE,DEPRECATED DESCRIPTION>> section at the end, the rest of +this documentation describes modern `--write-tree` mode. + +Performs a merge, but does not make any new commits and does not read +from or write to either the working tree or index. + +The performed merge will use the same feature as the "real" +linkgit:git-merge[1], including: + + * three way content merges of individual files + * rename detection + * proper directory/file conflict handling + * recursive ancestor consolidation (i.e. when there is more than one + merge base, creating a virtual merge base by merging the merge bases) + * etc. + +After the merge completes, a new toplevel tree object is created. See +`OUTPUT` below for details. + +OPTIONS +------- + +-z:: + Do not quote filenames in the <Conflicted file info> section, + and end each filename with a NUL character rather than + newline. Also begin the messages section with a NUL character + instead of a newline. See <<OUTPUT>> below for more information. + +--name-only:: + In the Conflicted file info section, instead of writing a list + of (mode, oid, stage, path) tuples to output for conflicted + files, just provide a list of filenames with conflicts (and + do not list filenames multiple times if they have multiple + conflicting stages). + +--[no-]messages:: + Write any informational messages such as "Auto-merging <path>" + or CONFLICT notices to the end of stdout. If unspecified, the + default is to include these messages if there are merge + conflicts, and to omit them otherwise. + +--allow-unrelated-histories:: + merge-tree will by default error out if the two branches specified + share no common history. This flag can be given to override that + check and make the merge proceed anyway. + +[[OUTPUT]] +OUTPUT +------ + +For a successful merge, the output from git-merge-tree is simply one +line: + + <OID of toplevel tree> + +Whereas for a conflicted merge, the output is by default of the form: + + <OID of toplevel tree> + <Conflicted file info> + <Informational messages> + +These are discussed individually below. + +[[OIDTLT]] +OID of toplevel tree +~~~~~~~~~~~~~~~~~~~~ + +This is a tree object that represents what would be checked out in the +working tree at the end of `git merge`. 
If there were conflicts, then +files within this tree may have embedded conflict markers. This section +is always followed by a newline (or NUL if `-z` is passed). + +[[CFI]] +Conflicted file info +~~~~~~~~~~~~~~~~~~~~ + +This is a sequence of lines with the format + + <mode> <object> <stage> <filename> + +The filename will be quoted as explained for the configuration +variable `core.quotePath` (see linkgit:git-config[1]). However, if +the `--name-only` option is passed, the mode, object, and stage will +be omitted. If `-z` is passed, the "lines" are terminated by a NUL +character instead of a newline character. + +[[IM]] +Informational messages +~~~~~~~~~~~~~~~~~~~~~~ + +This always starts with a blank line (or NUL if `-z` is passed) to +separate it from the previous sections, and then has free-form +messages about the merge, such as: + + * "Auto-merging <file>" + * "CONFLICT (rename/delete): <oldfile> renamed...but deleted in..." + * "Failed to merge submodule <submodule> (<reason>)" + * "Warning: cannot merge binary files: <filename>" + +Note that these free-form messages will never have a NUL character +in or between them, even if -z is passed. It is simply a large block +of text taking up the remainder of the output. + +EXIT STATUS +----------- + +For a successful, non-conflicted merge, the exit status is 0. When the +merge has conflicts, the exit status is 1. If the merge is not able to +complete (or start) due to some kind of error, the exit status is +something other than 0 or 1 (and the output is unspecified). + +USAGE NOTES +----------- + +This command is intended as low-level plumbing, similar to +linkgit:git-hash-object[1], linkgit:git-mktree[1], +linkgit:git-commit-tree[1], linkgit:git-write-tree[1], +linkgit:git-update-ref[1], and linkgit:git-mktag[1]. Thus, it can be +used as a part of a series of steps such as: + + NEWTREE=$(git merge-tree --write-tree $BRANCH1 $BRANCH2) + test $? -eq 0 || die "There were conflicts..." + NEWCOMMIT=$(git commit-tree $NEWTREE -p $BRANCH1 -p $BRANCH2) + git update-ref $BRANCH1 $NEWCOMMIT + +Note that when the exit status is non-zero, `NEWTREE` in this sequence +will contain a lot more output than just a tree. + +For conflicts, the output includes the same information that you'd get +with linkgit:git-merge[1]: + + * what would be written to the working tree (the + <<OIDTLT,OID of toplevel tree>>) + * the higher order stages that would be written to the index (the + <<CFI,Conflicted file info>>) + * any messages that would have been printed to stdout (the + <<IM,Informational messages>>) + +MISTAKES TO AVOID +----------------- + +Do NOT look through the resulting toplevel tree to try to find which +files conflict; parse the <<CFI,Conflicted file info>> section instead. +Not only would parsing an entire tree be horrendously slow in large +repositories, there are numerous types of conflicts not representable by +conflict markers (modify/delete, mode conflict, binary file changed on +both sides, file/directory conflicts, various rename conflict +permutations, etc.) + +Do NOT interpret an empty <<CFI,Conflicted file info>> list as a clean +merge; check the exit status. A merge can have conflicts without having +individual files conflict (there are a few types of directory rename +conflicts that fall into this category, and others might also be added +in the future). + +Do NOT attempt to guess or make the user guess the conflict types from +the <<CFI,Conflicted file info>> list. The information there is +insufficient to do so. 
For example: Rename/rename(1to2) conflicts (both +sides renamed the same file differently) will result in three different +file having higher order stages (but each only has one higher order +stage), with no way (short of the <<IM,Informational messages>> section) +to determine which three files are related. File/directory conflicts +also result in a file with exactly one higher order stage. +Possibly-involved-in-directory-rename conflicts (when +"merge.directoryRenames" is unset or set to "conflicts") also result in +a file with exactly one higher order stage. In all cases, the +<<IM,Informational messages>> section has the necessary info, though it +is not designed to be machine parseable. + +Do NOT assume that each paths from <<CFI,Conflicted file info>>, and +the logical conflicts in the <<IM,Informational messages>> have a +one-to-one mapping, nor that there is a one-to-many mapping, nor a +many-to-one mapping. Many-to-many mappings exist, meaning that each +path can have many logical conflict types in a single merge, and each +logical conflict type can affect many paths. + +Do NOT assume all filenames listed in the <<IM,Informational messages>> +section had conflicts. Messages can be included for files that have no +conflicts, such as "Auto-merging <file>". + +AVOID taking the OIDS from the <<CFI,Conflicted file info>> and +re-merging them to present the conflicts to the user. This will lose +information. Instead, look up the version of the file found within the +<<OIDTLT,OID of toplevel tree>> and show that instead. In particular, +the latter will have conflict markers annotated with the original +branch/commit being merged and, if renames were involved, the original +filename. While you could include the original branch/commit in the +conflict marker annotations when re-merging, the original filename is +not available from the <<CFI,Conflicted file info>> and thus you would +be losing information that might help the user resolve the conflict. + +[[DEPMERGE]] +DEPRECATED DESCRIPTION +---------------------- + +Per the <<NEWMERGE,DESCRIPTION>> and unlike the rest of this +documentation, this section describes the deprecated `--trivial-merge` +mode. + +Other than the optional `--trivial-merge`, this mode accepts no +options. + +This mode reads three tree-ish, and outputs trivial merge results and +conflicting stages to the standard output in a semi-diff format. +Since this was designed for higher level scripts to consume and merge +the results back into the index, it omits entries that match +<branch1>. The result of this second form is similar to what +three-way 'git read-tree -m' does, but instead of storing the results +in the index, the command outputs the entries to the standard output. + +This form not only has limited applicability (a trivial merge cannot +handle content merges of individual files, rename detection, proper +directory/file conflict handling, etc.), the output format is also +difficult to work with, and it will generally be less performant than +the first form even on successful merges (especially if working in +large repositories). GIT --- diff --git a/Documentation/git-merge.txt b/Documentation/git-merge.txt index 3125473cc1..fee1dc2df2 100644 --- a/Documentation/git-merge.txt +++ b/Documentation/git-merge.txt @@ -90,10 +90,7 @@ invocations. The automated message can include the branch description. If `--log` is specified, a shortlog of the commits being merged will be appended to the specified message. 
---rerere-autoupdate:: ---no-rerere-autoupdate:: - Allow the rerere mechanism to update the index with the - result of auto-conflict resolution if possible. +include::rerere-options.txt[] --overwrite-ignore:: --no-overwrite-ignore:: diff --git a/Documentation/git-multi-pack-index.txt b/Documentation/git-multi-pack-index.txt index c588fb91af..a48c3d5ea6 100644 --- a/Documentation/git-multi-pack-index.txt +++ b/Documentation/git-multi-pack-index.txt @@ -128,8 +128,8 @@ $ git multi-pack-index verify SEE ALSO -------- See link:technical/multi-pack-index.html[The Multi-Pack-Index Design -Document] and link:technical/pack-format.html[The Multi-Pack-Index -Format] for more information on the multi-pack-index feature. +Document] and linkgit:gitformat-pack[5] for more information on the +multi-pack-index feature and its file format. GIT diff --git a/Documentation/git-rebase.txt b/Documentation/git-rebase.txt index a872ab0fbd..1877942180 100644 --- a/Documentation/git-rebase.txt +++ b/Documentation/git-rebase.txt @@ -376,10 +376,7 @@ See also INCOMPATIBLE OPTIONS below. + See also INCOMPATIBLE OPTIONS below. ---rerere-autoupdate:: ---no-rerere-autoupdate:: - Allow the rerere mechanism to update the index with the - result of auto-conflict resolution if possible. +include::rerere-options.txt[] -S[<keyid>]:: --gpg-sign[=<keyid>]:: @@ -612,6 +609,15 @@ provided. Otherwise an explicit `--no-reschedule-failed-exec` at the start would be overridden by the presence of `rebase.rescheduleFailedExec=true` configuration. +--update-refs:: +--no-update-refs:: + Automatically force-update any branches that point to commits that + are being rebased. Any branches that are checked out in a worktree + are not updated in this way. ++ +If the configuration variable `rebase.updateRefs` is set, then this option +can be used to override and disable this setting. + INCOMPATIBLE OPTIONS -------------------- @@ -635,6 +641,7 @@ are incompatible with the following options: * --empty= * --reapply-cherry-picks * --edit-todo + * --update-refs * --root when used in combination with --onto In addition, the following pairs of options are incompatible: diff --git a/Documentation/git-revert.txt b/Documentation/git-revert.txt index 8463fe9cf7..0105a54c1a 100644 --- a/Documentation/git-revert.txt +++ b/Documentation/git-revert.txt @@ -112,10 +112,7 @@ effect to your index in a row. Pass the merge strategy-specific option through to the merge strategy. See linkgit:git-merge[1] for details. ---rerere-autoupdate:: ---no-rerere-autoupdate:: - Allow the rerere mechanism to update the index with the - result of auto-conflict resolution if possible. +include::rerere-options.txt[] --reference:: Instead of starting the body of the log message with "This diff --git a/Documentation/git-upload-pack.txt b/Documentation/git-upload-pack.txt index 8f87b23ea8..3f89d64077 100644 --- a/Documentation/git-upload-pack.txt +++ b/Documentation/git-upload-pack.txt @@ -39,10 +39,9 @@ OPTIONS --http-backend-info-refs:: Used by linkgit:git-http-backend[1] to serve up `$GIT_URL/info/refs?service=git-upload-pack` requests. See - "Smart Clients" in link:technical/http-protocol.html[the HTTP - transfer protocols] documentation and "HTTP Transport" in - link:technical/protocol-v2.html[the Git Wire Protocol, Version - 2] documentation. Also understood by + "Smart Clients" in linkgit:gitprotocol-http[5] and "HTTP + Transport" in in the linkgit:gitprotocol-v2[5] + documentation. Also understood by linkgit:git-receive-pack[1]. 
<directory>:: diff --git a/Documentation/git.txt b/Documentation/git.txt index 47a6095ff4..0ef7f5e4ec 100644 --- a/Documentation/git.txt +++ b/Documentation/git.txt @@ -339,6 +339,23 @@ The following documentation pages are guides about Git concepts. include::cmds-guide.txt[] +Repository, command and file interfaces +--------------------------------------- + +This documentation discusses repository and command interfaces which +users are expected to interact with directly. See `--user-formats` in +linkgit:git-help[1] for more details on the critera. + +include::cmds-userinterfaces.txt[] + +File formats, protocols and other developer interfaces +------------------------------------------------------ + +This documentation discusses file formats, over-the-wire protocols and +other git developer interfaces. See `--developer-interfaces` in +linkgit:git-help[1]. + +include::cmds-developerinterfaces.txt[] Configuration Mechanism ----------------------- diff --git a/Documentation/technical/bundle-format.txt b/Documentation/gitformat-bundle.txt index b9be8644cf..00e0a20e65 100644 --- a/Documentation/technical/bundle-format.txt +++ b/Documentation/gitformat-bundle.txt @@ -1,11 +1,33 @@ -= Git bundle v2 format +gitformat-bundle(5) +=================== -The Git bundle format is a format that represents both refs and Git objects. +NAME +---- +gitformat-bundle - The bundle file format + + +SYNOPSIS +-------- +[verse] +*.bundle +*.bdl + +DESCRIPTION +----------- + +The Git bundle format is a format that represents both refs and Git +objects. A bundle is a header in a format similar to +linkgit:git-show-ref[1] followed by a pack in *.pack format. -== Format +The format is created and read by the linkgit:git-bundle[1] command, +and supported by e.g. linkgit:git-fetch[1] and linkgit:git-clone[1]. + + +FORMAT +------ We will use ABNF notation to define the Git bundle format. See -protocol-common.txt for the details. +linkgit:gitprotocol-common[5] for the details. A v2 bundle looks like this: @@ -36,7 +58,9 @@ value = *(%01-09 / %0b-FF) pack = ... ; packfile ---- -== Semantics + +SEMANTICS +--------- A Git bundle consists of several parts. @@ -62,13 +86,15 @@ In the bundle format, there can be a comment following a prerequisite obj-id. This is a comment and it has no specific meaning. The writer of the bundle MAY put any string here. The reader of the bundle MUST ignore the comment. -=== Note on the shallow clone and a Git bundle +Note on the shallow clone and a Git bundle +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Note that the prerequisites does not represent a shallow-clone boundary. The semantics of the prerequisites and the shallow-clone boundaries are different, and the Git bundle v2 format cannot represent a shallow clone repository. -== Capabilities +CAPABILITIES +------------ Because there is no opportunity for negotiation, unknown capabilities cause 'git bundle' to abort. @@ -79,3 +105,7 @@ bundle' to abort. * `filter` specifies an object filter as in the `--filter` option in linkgit:git-rev-list[1]. The resulting pack-file must be marked as a `.promisor` pack-file after it is unbundled. 
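For readers who want to see the bundle format described above in practice, the following sketch creates a bundle and inspects its header with stock commands; nothing here is specific to the documentation rename in this patch:

----
$ git bundle create repo.bundle --all     # writes the header (refs) followed by a *.pack stream
$ git bundle list-heads repo.bundle       # prints the reference listing from the header
$ git bundle verify repo.bundle           # checks that any prerequisites are present locally
----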
+ +GIT +--- +Part of the linkgit:git[1] suite diff --git a/Documentation/technical/chunk-format.txt b/Documentation/gitformat-chunk.txt index 593614fced..57202ede27 100644 --- a/Documentation/technical/chunk-format.txt +++ b/Documentation/gitformat-chunk.txt @@ -1,12 +1,25 @@ -Chunk-based file formats -======================== +gitformat-chunk(5) +================== + +NAME +---- +gitformat-chunk - Chunk-based file formats + +SYNOPSIS +-------- + +Used by linkgit:gitformat-commit-graph[5] and the "MIDX" format (see +the pack format documentation in linkgit:gitformat-pack[5]). + +DESCRIPTION +----------- Some file formats in Git use a common concept of "chunks" to describe sections of the file. This allows structured access to a large file by scanning a small "table of contents" for the remaining data. This common format is used by the `commit-graph` and `multi-pack-index` files. See -link:technical/pack-format.html[the `multi-pack-index` format] and -link:technical/commit-graph-format.html[the `commit-graph` format] for +the `multi-pack-index` format in linkgit:gitformat-pack[5] and +the `commit-graph` format in linkgit:gitformat-commit-graph[5] for how they use the chunks to describe structured data. A chunk-based file format begins with some header information custom to @@ -108,9 +121,13 @@ for future formats: * *commit-graph:* see `write_commit_graph_file()` and `parse_commit_graph()` in `commit-graph.c` for how the chunk-format API is used to write and parse the commit-graph file format documented in - link:technical/commit-graph-format.html[the commit-graph file format]. + the commit-graph file format in linkgit:gitformat-commit-graph[5]. * *multi-pack-index:* see `write_midx_internal()` and `load_multi_pack_index()` in `midx.c` for how the chunk-format API is used to write and parse the multi-pack-index file format documented in - link:technical/pack-format.html[the multi-pack-index file format]. + the multi-pack-index file format section of linkgit:gitformat-pack[5]. + +GIT +--- +Part of the linkgit:git[1] suite diff --git a/Documentation/technical/commit-graph-format.txt b/Documentation/gitformat-commit-graph.txt index 484b185ba9..7324665716 100644 --- a/Documentation/technical/commit-graph-format.txt +++ b/Documentation/gitformat-commit-graph.txt @@ -1,5 +1,18 @@ -Git commit graph format -======================= +gitformat-commit-graph(5) +========================= + +NAME +---- +gitformat-commit-graph - Git commit graph format + +SYNOPSIS +-------- +[verse] +$GIT_DIR/objects/info/commit-graph +$GIT_DIR/objects/info/commit-graphs/* + +DESCRIPTION +----------- The Git commit graph stores a list of commit OIDs and some associated metadata, including: @@ -30,7 +43,7 @@ and hash type. All multi-byte numbers are in network byte order. -HEADER: +=== HEADER: 4-byte signature: The signature is: {'C', 'G', 'P', 'H'} @@ -52,7 +65,7 @@ HEADER: We infer the length (H*B) of the Base Graphs chunk from this value. -CHUNK LOOKUP: +=== CHUNK LOOKUP: (C + 1) * 12 bytes listing the table of contents for the chunks: First 4 bytes describe the chunk id. Value 0 is a terminating label. @@ -62,23 +75,23 @@ CHUNK LOOKUP: ID appears at most once. The CHUNK LOOKUP matches the table of contents from - link:technical/chunk-format.html[the chunk-based file format]. + the chunk-based file format, see linkgit:gitformat-chunk[5] The remaining data in the body is described one chunk at a time, and these chunks may be given in any order. Chunks are required unless otherwise specified. 
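As a practical companion to the commit-graph format described here, the file can be generated and checked with the commit-graph builtin; a minimal sketch, assuming a repository with some history:

----
$ git commit-graph write --reachable      # writes $GIT_DIR/objects/info/commit-graph
$ git commit-graph verify                 # re-reads the file and validates its contents
$ ls .git/objects/info/commit-graph*      # the file(s) documented in this page
----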
-CHUNK DATA: +=== CHUNK DATA: - OID Fanout (ID: {'O', 'I', 'D', 'F'}) (256 * 4 bytes) +==== OID Fanout (ID: {'O', 'I', 'D', 'F'}) (256 * 4 bytes) The ith entry, F[i], stores the number of OIDs with first byte at most i. Thus F[255] stores the total number of commits (N). - OID Lookup (ID: {'O', 'I', 'D', 'L'}) (N * H bytes) +==== OID Lookup (ID: {'O', 'I', 'D', 'L'}) (N * H bytes) The OIDs for all commits in the graph, sorted in ascending order. - Commit Data (ID: {'C', 'D', 'A', 'T' }) (N * (H + 16) bytes) +==== Commit Data (ID: {'C', 'D', 'A', 'T' }) (N * (H + 16) bytes) * The first H bytes are for the OID of the root tree. * The next 8 bytes are for the positions of the first two parents of the ith commit. Stores value 0x70000000 if no parent in that @@ -93,7 +106,7 @@ CHUNK DATA: 2 bits of the lowest byte, storing the 33rd and 34th bit of the commit time. - Generation Data (ID: {'G', 'D', 'A', '2' }) (N * 4 bytes) [Optional] +==== Generation Data (ID: {'G', 'D', 'A', '2' }) (N * 4 bytes) [Optional] * This list of 4-byte values store corrected commit date offsets for the commits, arranged in the same order as commit data chunk. * If the corrected commit date offset cannot be stored within 31 bits, @@ -104,7 +117,7 @@ CHUNK DATA: by compatible versions of Git and in case of split commit-graph chains, the topmost layer also has Generation Data chunk. - Generation Data Overflow (ID: {'G', 'D', 'O', '2' }) [Optional] +==== Generation Data Overflow (ID: {'G', 'D', 'O', '2' }) [Optional] * This list of 8-byte values stores the corrected commit date offsets for commits with corrected commit date offsets that cannot be stored within 31 bits. @@ -112,7 +125,7 @@ CHUNK DATA: chunk is present and atleast one corrected commit date offset cannot be stored within 31 bits. - Extra Edge List (ID: {'E', 'D', 'G', 'E'}) [Optional] +==== Extra Edge List (ID: {'E', 'D', 'G', 'E'}) [Optional] This list of 4-byte values store the second through nth parents for all octopus merges. The second parent value in the commit data stores an array position within this list along with the most-significant bit @@ -120,14 +133,14 @@ CHUNK DATA: positions for the parents until reaching a value with the most-significant bit on. The other bits correspond to the position of the last parent. - Bloom Filter Index (ID: {'B', 'I', 'D', 'X'}) (N * 4 bytes) [Optional] +==== Bloom Filter Index (ID: {'B', 'I', 'D', 'X'}) (N * 4 bytes) [Optional] * The ith entry, BIDX[i], stores the number of bytes in all Bloom filters from commit 0 to commit i (inclusive) in lexicographic order. The Bloom filter for the i-th commit spans from BIDX[i-1] to BIDX[i] (plus header length), where BIDX[-1] is 0. * The BIDX chunk is ignored if the BDAT chunk is not present. - Bloom Filter Data (ID: {'B', 'D', 'A', 'T'}) [Optional] +==== Bloom Filter Data (ID: {'B', 'D', 'A', 'T'}) [Optional] * It starts with header consisting of three unsigned 32-bit integers: - Version of the hash algorithm being used. We currently only support value 1 which corresponds to the 32-bit version of the murmur3 hash @@ -147,13 +160,13 @@ CHUNK DATA: of length one, with either all bits set to zero or one respectively. * The BDAT chunk is present if and only if BIDX is present. - Base Graphs List (ID: {'B', 'A', 'S', 'E'}) [Optional] +==== Base Graphs List (ID: {'B', 'A', 'S', 'E'}) [Optional] This list of H-byte hashes describe a set of B commit-graph files that form a commit-graph chain. 
The graph position for the ith commit in this file's OID Lookup chunk is equal to i plus the number of commits in all base graphs. If B is non-zero, this chunk must exist. -TRAILER: +=== TRAILER: H-byte HASH-checksum of all of the above. @@ -164,3 +177,7 @@ the number '2' in their chunk IDs because a previous version of Git wrote possibly erroneous data in these chunks with the IDs "GDAT" and "GDOV". By changing the IDs, newer versions of Git will silently ignore those older chunks and write the new information without trusting the incorrect data. + +GIT +--- +Part of the linkgit:git[1] suite diff --git a/Documentation/technical/index-format.txt b/Documentation/gitformat-index.txt index f691c20ab0..015cb21bdc 100644 --- a/Documentation/technical/index-format.txt +++ b/Documentation/gitformat-index.txt @@ -1,5 +1,19 @@ +gitformat-index(5) +================== + +NAME +---- +gitformat-index - Git index format + +SYNOPSIS +-------- +[verse] +$GIT_DIR/index + +DESCRIPTION +----------- + Git index format -================ == The Git index file has the following format @@ -125,7 +139,7 @@ Git index format entry is encoded as if the path name for the previous entry is an empty string). At the beginning of an entry, an integer N in the variable width encoding (the same encoding as the offset is encoded - for OFS_DELTA pack entries; see pack-format.txt) is stored, followed + for OFS_DELTA pack entries; see linkgit:gitformat-pack[5]) is stored, followed by a NUL-terminated string S. Removing N bytes from the end of the path name for the previous entry, and replacing it with the string S yields the path name for this entry. @@ -402,3 +416,7 @@ The remaining data of each directory block is grouped by type: with signature { 's', 'd', 'i', 'r' }. Like the split-index extension, tools should avoid interacting with a sparse index unless they understand this extension. + +GIT +--- +Part of the linkgit:git[1] suite diff --git a/Documentation/technical/pack-format.txt b/Documentation/gitformat-pack.txt index b520aa9c45..e06af02f21 100644 --- a/Documentation/technical/pack-format.txt +++ b/Documentation/gitformat-pack.txt @@ -1,5 +1,30 @@ -Git pack format -=============== +gitformat-pack(5) +================= + +NAME +---- +gitformat-pack - Git pack format + + +SYNOPSIS +-------- +[verse] +$GIT_DIR/objects/pack/pack-*.{pack,idx} +$GIT_DIR/objects/pack/pack-*.rev +$GIT_DIR/objects/pack/pack-*.mtimes +$GIT_DIR/objects/pack/multi-pack-index + +DESCRIPTION +----------- + +The Git pack format is how Git stores most of its primary repository +data. Over the lifetime of a repository, loose objects (if any) and +smaller packs are consolidated into larger pack(s). See +linkgit:git-gc[1] and linkgit:git-pack-objects[1]. + +The pack format is also used over-the-wire, see +e.g. linkgit:gitprotocol-v2[5], as well as being part of +other container formats in the case of linkgit:gitformat-bundle[5]. == Checksums and object IDs @@ -356,7 +381,7 @@ CHUNK LOOKUP: using the next chunk position if necessary.) The CHUNK LOOKUP matches the table of contents from - link:technical/chunk-format.html[the chunk-based file format]. + the chunk-based file format, see linkgit:gitformat-chunk[5]. The remaining data in the body is described one chunk at a time, and these chunks may be given in any order. Chunks are required unless @@ -482,3 +507,132 @@ packs arranged in MIDX order (with the preferred pack coming first). The MIDX's reverse index is stored in the optional 'RIDX' chunk within the MIDX itself.
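To illustrate the consolidation described in the DESCRIPTION above, the following sketch shows loose objects being collected into a pack; the resulting paths are the files named in the SYNOPSIS:

----
$ git count-objects -v                              # counts loose objects and existing packs
$ git gc                                            # consolidates loose objects and small packs
$ ls .git/objects/pack/                             # pack-*.pack, pack-*.idx, possibly *.rev and *.mtimes
$ git verify-pack -v .git/objects/pack/pack-*.idx   # lists objects, sizes and delta chains in a pack
----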
+ +== cruft packs + +The cruft packs feature offers an alternative to Git's traditional mechanism of +removing unreachable objects. This section provides an overview of Git's +pruning mechanism, and how a cruft pack can be used instead to accomplish the +same. + +=== Background + +To remove unreachable objects from your repository, Git offers `git repack -Ad` +(see linkgit:git-repack[1]). Quoting from the documentation: + +---- +[...] unreachable objects in a previous pack become loose, unpacked objects, +instead of being left in the old pack. [...] loose unreachable objects will be +pruned according to normal expiry rules with the next 'git gc' invocation. +---- + +Unreachable objects aren't removed immediately, since doing so could race with +an incoming push which may reference an object which is about to be deleted. +Instead, those unreachable objects are stored as loose objects and stay that way +until they are older than the expiration window, at which point they are removed +by linkgit:git-prune[1]. + +Git must store these unreachable objects loose in order to keep track of their +per-object mtimes. If these unreachable objects were written into one big pack, +then either freshening that pack (because an object contained within it was +re-written) or creating a new pack of unreachable objects would cause the pack's +mtime to get updated, and the objects within it would never leave the expiration +window. Instead, objects are stored loose in order to keep track of the +individual object mtimes and avoid a situation where all cruft objects are +freshened at once. + +This can lead to undesirable situations when a repository contains many +unreachable objects which have not yet left the grace period. Having large +directories in the shards of `.git/objects` can lead to decreased performance in +the repository. But given enough unreachable objects, this can lead to inode +starvation and degrade the performance of the whole system. Since we +can never pack those objects, these repositories often take up a large amount of +disk space: we can only zlib compress them, but not store them in delta +chains. + +=== Cruft packs + +A cruft pack eliminates the need for storing unreachable objects in a loose +state by including the per-object mtimes in a separate file alongside a single +pack containing all loose objects. + +A cruft pack is written by `git repack --cruft` when generating a new pack, +using linkgit:git-pack-objects[1]'s `--cruft` option. Note that `git repack --cruft` +is a classic all-into-one repack, meaning that everything in the resulting pack is +reachable, and everything else is unreachable. Once written, the `--cruft` +option instructs `git repack` to generate another pack containing only objects +not packed in the previous step (which equates to packing all unreachable +objects together). This progresses as follows: + + 1. Enumerate every object, marking any object which is (a) not contained in a + kept-pack, and (b) whose mtime is within the grace period as a traversal + tip. + + 2. Perform a reachability traversal based on the tips gathered in the previous + step, adding every object along the way to the pack. + + 3. Write the pack out, along with a `.mtimes` file that records the per-object + timestamps. + +This mode is invoked internally by linkgit:git-repack[1] when instructed to +write a cruft pack.
Crucially, the set of in-core kept packs is exactly the set +of packs which will not be deleted by the repack; in other words, they contain +all of the repository's reachable objects. + +When a repository already has a cruft pack, `git repack --cruft` typically only +adds objects to it. An exception to this is when `git repack` is given the +`--cruft-expiration` option, which allows the generated cruft pack to omit +expired objects instead of waiting for linkgit:git-gc[1] to expire those objects +later on. + +It is linkgit:git-gc[1] that is typically responsible for removing expired +unreachable objects. + +=== Caution for mixed-version environments + +Repositories that have cruft packs in them will continue to work with any older +version of Git. Note, however, that previous versions of Git which do not +understand the `.mtimes` file will use the cruft pack's mtime as the mtime for +all of the objects in it. In other words, do not expect older (pre-cruft pack) +versions of Git to interpret or even read the contents of the `.mtimes` file. + +Note that having mixed versions of Git GC-ing the same repository can lead to +unreachable objects never being completely pruned. This can happen under the +following circumstances: + + - An older version of Git running GC explodes the contents of an existing + cruft pack loose, using the cruft pack's mtime. + - A newer version running GC collects those loose objects into a cruft pack, + where the `.mtimes` file reflects the loose objects' actual mtimes, but the + cruft pack mtime is "now". + +Repeating this process will lead to unreachable objects not getting pruned as a +result of repeatedly resetting the objects' mtimes to the present time. + +If you are GC-ing repositories in a mixed version environment, consider omitting +the `--cruft` option when using linkgit:git-repack[1] and linkgit:git-gc[1], and +leaving the `gc.cruftPacks` configuration unset until all writers understand +cruft packs. + +=== Alternatives + +Notable alternatives to this design include: + + - The location of the per-object mtime data, and + - Storing unreachable objects in multiple cruft packs. + +On the location of mtime data, a new auxiliary file tied to the pack was chosen +to avoid complicating the `.idx` format. If the `.idx` format were ever to gain +support for optional chunks of data, it may make sense to consolidate the +`.mtimes` format into the `.idx` itself. + +Storing unreachable objects among multiple cruft packs (e.g., creating a new +cruft pack during each repacking operation including only unreachable objects +which aren't already stored in an earlier cruft pack) is significantly more +complicated to construct, and so is not pursued here. The obvious drawback to +the current implementation is that the entire cruft pack must be re-written from +scratch. + +GIT +--- +Part of the linkgit:git[1] suite diff --git a/Documentation/technical/signature-format.txt b/Documentation/gitformat-signature.txt index 166721be6f..a249869faf 100644 --- a/Documentation/technical/signature-format.txt +++ b/Documentation/gitformat-signature.txt @@ -1,7 +1,18 @@ -Git signature format -==================== +gitformat-signature(5) +====================== -== Overview +NAME +---- +gitformat-signature - Git cryptographic signature formats + +SYNOPSIS +-------- +[verse] +<[tag|commit] object header(s)> +<over-the-wire protocol> + +DESCRIPTION +----------- Git uses cryptographic signatures in various places, currently objects (tags, commits, mergetags) and transactions (pushes).
In every case, the command which @@ -200,3 +211,7 @@ Date: Wed Jun 15 09:13:29 2016 +0000 # gpg: There is no indication that the signature belongs to the owner. # Primary key fingerprint: D4BE 2231 1AD3 131E 5EDA 29A4 6109 2E85 B722 7189 ---- + +GIT +--- +Part of the linkgit:git[1] suite diff --git a/Documentation/technical/protocol-capabilities.txt b/Documentation/gitprotocol-capabilities.txt index 9dfade930d..c6dcc7d565 100644 --- a/Documentation/technical/protocol-capabilities.txt +++ b/Documentation/gitprotocol-capabilities.txt @@ -1,8 +1,20 @@ -Git Protocol Capabilities -========================= +gitprotocol-capabilities(5) +=========================== + +NAME +---- +gitprotocol-capabilities - Protocol v0 and v1 capabilities + +SYNOPSIS +-------- +[verse] +<over-the-wire-protocol> + +DESCRIPTION +----------- NOTE: this document describes capabilities for versions 0 and 1 of the pack -protocol. For version 2, please refer to the link:protocol-v2.html[protocol-v2] +protocol. For version 2, please refer to the linkgit:gitprotocol-v2[5] doc. Servers SHOULD support all capabilities defined in this document. @@ -77,7 +89,7 @@ interleaved with S-R-Q. multi_ack_detailed ------------------ This is an extension of multi_ack that permits client to better -understand the server's in-memory state. See pack-protocol.txt, +understand the server's in-memory state. See linkgit:gitprotocol-pack[5], section "Packfile Negotiation" for more information. no-done @@ -281,7 +293,7 @@ a packfile upload and reference update. If the pushing client requests this capability, after unpacking and updating references the server will respond with whether the packfile unpacked successfully and if each reference was updated successfully. If any of those were not -successful, it will send back an error message. See pack-protocol.txt +successful, it will send back an error message. See linkgit:gitprotocol-pack[5] for example messages. report-status-v2 @@ -292,7 +304,7 @@ adding new "option" directives in order to support reference rewritten by the "proc-receive" hook. The "proc-receive" hook may handle a command for a pseudo-reference which may create or update a reference with different name, new-oid, and old-oid. While the capability -'report-status' cannot report for such case. See pack-protocol.txt +'report-status' cannot report for such case. See linkgit:gitprotocol-pack[5] for details. delete-refs @@ -378,3 +390,7 @@ packet-line, and must not contain non-printable or whitespace characters. The current implementation uses trace2 session IDs (see link:api-trace2.html[api-trace2] for details), but this may change and users of the session ID should not rely on this fact. + +GIT +--- +Part of the linkgit:git[1] suite diff --git a/Documentation/technical/protocol-common.txt b/Documentation/gitprotocol-common.txt index ecedb34bba..1486651bd1 100644 --- a/Documentation/technical/protocol-common.txt +++ b/Documentation/gitprotocol-common.txt @@ -1,5 +1,20 @@ -Documentation Common to Pack and Http Protocols -=============================================== +gitprotocol-common(5) +===================== + +NAME +---- +gitprotocol-common - Things common to various protocols + +SYNOPSIS +-------- +[verse] +<over-the-wire-protocol> + +DESCRIPTION +----------- + +This document sets defines things common to various over-the-wire +protocols and file formats used in Git. 
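To observe a v0/v1 capability advertisement like the ones described above, the packet trace of any fetch-side command can be inspected; a small sketch, where the URL is only a placeholder:

----
$ GIT_TRACE_PACKET=1 git -c protocol.version=0 ls-remote https://example.com/repo.git
# in the trace, the first advertised ref line carries the capability list after a NUL byte
----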
ABNF Notation ------------- @@ -97,3 +112,7 @@ Examples (as C-style strings): "000bfoobar\n" "foobar\n" "0004" "" ---- + +GIT +--- +Part of the linkgit:git[1] suite diff --git a/Documentation/technical/http-protocol.txt b/Documentation/gitprotocol-http.txt index cc5126cfed..ccc13f0a40 100644 --- a/Documentation/technical/http-protocol.txt +++ b/Documentation/gitprotocol-http.txt @@ -1,5 +1,19 @@ -HTTP transfer protocols -======================= +gitprotocol-http(5) +=================== + +NAME +---- +gitprotocol-http - Git HTTP-based protocols + + +SYNOPSIS +-------- +[verse] +<over-the-wire-protocol> + + +DESCRIPTION +----------- Git supports two HTTP based transfer protocols. A "dumb" protocol which requires only a standard HTTP server on the server end of the @@ -222,7 +236,7 @@ smart server reply: S: 0000 The client may send Extra Parameters (see -Documentation/technical/pack-protocol.txt) as a colon-separated string +linkgit:gitprotocol-pack[5]) as a colon-separated string in the Git-Protocol HTTP header. Uses the `--http-backend-info-refs` option to @@ -512,11 +526,18 @@ the id obtained through ref discovery as old_id. TODO: Document this further. - -References +REFERENCES ---------- http://www.ietf.org/rfc/rfc1738.txt[RFC 1738: Uniform Resource Locators (URL)] http://www.ietf.org/rfc/rfc2616.txt[RFC 2616: Hypertext Transfer Protocol -- HTTP/1.1] -link:technical/pack-protocol.html -link:technical/protocol-capabilities.html + +SEE ALSO +-------- + +linkgit:gitprotocol-pack[5] +linkgit:gitprotocol-capabilities[5] + +GIT +--- +Part of the linkgit:git[1] suite diff --git a/Documentation/technical/pack-protocol.txt b/Documentation/gitprotocol-pack.txt index e13a2c064d..dd4108b7a3 100644 --- a/Documentation/technical/pack-protocol.txt +++ b/Documentation/gitprotocol-pack.txt @@ -1,11 +1,23 @@ -Packfile transfer protocols -=========================== +gitprotocol-pack(5) +=================== + +NAME +---- +gitprotocol-pack - How packs are transferred over-the-wire + +SYNOPSIS +-------- +[verse] +<over-the-wire-protocol> + +DESCRIPTION +----------- Git supports transferring data in packfiles over the ssh://, git://, http:// and file:// transports. There exist two sets of protocols, one for pushing data from a client to a server and another for fetching data from a server to a client. The three transports (ssh, git, file) use the same -protocol to transfer data. http is documented in http-protocol.txt. +protocol to transfer data. http is documented in linkgit:gitprotocol-http[5]. The processes invoked in the canonical Git implementation are 'upload-pack' on the server side and 'fetch-pack' on the client side for fetching data; @@ -18,7 +30,7 @@ pkt-line Format --------------- The descriptions below build on the pkt-line format described in -protocol-common.txt. When the grammar indicate `PKT-LINE(...)`, unless +linkgit:gitprotocol-common[5]. When the grammar indicate `PKT-LINE(...)`, unless otherwise noted the usual pkt-line LF rules apply: the sender SHOULD include a LF, but the receiver MUST NOT complain if it is not present. @@ -60,7 +72,7 @@ Each Extra Parameter takes the form of `<key>=<value>` or `<key>`. Servers that receive any such Extra Parameters MUST ignore all unrecognized keys. Currently, the only Extra Parameter recognized is -"version" with a value of '1' or '2'. See protocol-v2.txt for more +"version" with a value of '1' or '2'. See linkgit:gitprotocol-v2[5] for more information on protocol version 2. 
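Mirroring the pkt-line examples shown earlier in linkgit:gitprotocol-common[5], the framing can be reproduced by hand; a minimal sketch:

----
$ printf '%04x%s\n' 11 foobar   # "foobar" plus LF is 7 bytes; 7 + 4 length bytes = 11 = 0x000b
000bfoobar
$ printf '0000'                 # a flush-pkt carries no payload
----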
Git Transport @@ -455,7 +467,7 @@ Now that the client and server have finished negotiation about what the minimal amount of data that needs to be sent to the client is, the server will construct and send the required data in packfile format. -See pack-format.txt for what the packfile itself actually looks like. +See linkgit:gitformat-pack[5] for what the packfile itself actually looks like. If 'side-band' or 'side-band-64k' capabilities have been specified by the client, the server will send the packfile data multiplexed. @@ -707,3 +719,7 @@ An example client/server communication might look like this: S: 0018ok refs/heads/debug\n S: 002ang refs/heads/master non-fast-forward\n ---- + +GIT +--- +Part of the linkgit:git[1] suite diff --git a/Documentation/technical/protocol-v2.txt b/Documentation/gitprotocol-v2.txt index 8a877d27e2..c9c0f9160b 100644 --- a/Documentation/technical/protocol-v2.txt +++ b/Documentation/gitprotocol-v2.txt @@ -1,5 +1,17 @@ -Git Wire Protocol, Version 2 -============================ +gitprotocol-v2(5) +================= + +NAME +---- +gitprotocol-v2 - Git Wire Protocol, Version 2 + +SYNOPSIS +-------- +[verse] +<over-the-wire-protocol> + +DESCRIPTION +----------- This document presents a specification for a version 2 of Git's wire protocol. Protocol v2 will improve upon v1 in the following ways: @@ -26,8 +38,7 @@ Packet-Line Framing ------------------- All communication is done using packet-line framing, just as in v1. See -`Documentation/technical/pack-protocol.txt` and -`Documentation/technical/protocol-common.txt` for more information. +linkgit:gitprotocol-pack[5] and linkgit:gitprotocol-common[5] for more information. In protocol v2 these special packets will have the following semantics: @@ -42,7 +53,7 @@ Initial Client Request In general a client can request to speak protocol v2 by sending `version=2` through the respective side-channel for the transport being used which inevitably sets `GIT_PROTOCOL`. More information can be -found in `pack-protocol.txt` and `http-protocol.txt`, as well as the +found in linkgit:gitprotocol-pack[5] and linkgit:gitprotocol-http[5], as well as the `GIT_PROTOCOL` definition in `git.txt`. In all cases the response from the server is the capability advertisement. @@ -66,7 +77,7 @@ HTTP Transport ~~~~~~~~~~~~~~ When using the http:// or https:// transport a client makes a "smart" -info/refs request as described in `http-protocol.txt` and requests that +info/refs request as described in linkgit:gitprotocol-http[5] and requests that v2 be used by supplying "version=2" in the `Git-Protocol` header. C: GET $GIT_URL/info/refs?service=git-upload-pack HTTP/1.0 @@ -566,3 +577,7 @@ and associated requested information, each separated by a single space. attr = "size" obj-info = obj-id SP obj-size + +GIT +--- +Part of the linkgit:git[1] suite diff --git a/Documentation/howto/recover-corrupted-object-harder.txt b/Documentation/howto/recover-corrupted-object-harder.txt index 8994e2559e..5efb4fe81f 100644 --- a/Documentation/howto/recover-corrupted-object-harder.txt +++ b/Documentation/howto/recover-corrupted-object-harder.txt @@ -68,7 +68,7 @@ Note that the "object" file isn't fit for feeding straight to zlib; it has the git packed object header, which is variable-length. We want to strip that off so we can start playing with the zlib data directly. 
You can either work your way through it manually (the format is described in -link:../technical/pack-format.html[Documentation/technical/pack-format.txt]), +linkgit:gitformat-pack[5]), or you can walk through it in a debugger. I did the latter, creating a valid pack like: diff --git a/Documentation/lint-man-section-order.perl b/Documentation/lint-man-section-order.perl index 425377dfeb..02408a0062 100755 --- a/Documentation/lint-man-section-order.perl +++ b/Documentation/lint-man-section-order.perl @@ -32,6 +32,9 @@ my %SECTIONS; 'SEE ALSO' => { order => $order++, }, + 'FILE FORMAT' => { + order => $order++, + }, 'GIT' => { required => 1, order => $order++, diff --git a/Documentation/rerere-options.txt b/Documentation/rerere-options.txt new file mode 100644 index 0000000000..c3321ddea2 --- /dev/null +++ b/Documentation/rerere-options.txt @@ -0,0 +1,9 @@ +--rerere-autoupdate:: +--no-rerere-autoupdate:: + After the rerere mechanism reuses a recorded resolution on + the current conflict to update the files in the working + tree, allow it to also update the index with the result of + resolution. `--no-rerere-autoupdate` is a good way to + double-check what `rerere` did and catch potential + mismerges, before committing the result to the index with a + separate `git add`. diff --git a/Documentation/rev-list-options.txt b/Documentation/rev-list-options.txt index 195e74eec6..1837509566 100644 --- a/Documentation/rev-list-options.txt +++ b/Documentation/rev-list-options.txt @@ -242,6 +242,7 @@ ifdef::git-rev-list[] to `/dev/null` as the output does not have to be formatted. --disk-usage:: +--disk-usage=human:: Suppress normal output; instead, print the sum of the bytes used for on-disk storage by the selected commits or objects. This is equivalent to piping the output into `git cat-file @@ -249,6 +250,8 @@ ifdef::git-rev-list[] faster (especially with `--use-bitmap-index`). See the `CAVEATS` section in linkgit:git-cat-file[1] for the limitations of what "on-disk storage" means. + With the optional value `human`, on-disk storage size is shown + in human-readable string(e.g. 12.24 Kib, 3.50 Mib). endif::git-rev-list[] --cherry-mark:: @@ -389,12 +392,14 @@ Default mode:: merges from the resulting history, as there are no selected commits contributing to this merge. ---ancestry-path:: +--ancestry-path[=<commit>]:: When given a range of commits to display (e.g. 'commit1..commit2' - or 'commit2 {caret}commit1'), only display commits that exist - directly on the ancestry chain between the 'commit1' and - 'commit2', i.e. commits that are both descendants of 'commit1', - and ancestors of 'commit2'. + or 'commit2 {caret}commit1'), only display commits in that range + that are ancestors of <commit>, descendants of <commit>, or + <commit> itself. If no commit is specified, use 'commit1' (the + excluded part of the range) as <commit>. Can be passed multiple + times; if so, a commit is included if it is any of the commits + given or if it is an ancestor or descendant of one of them. A more detailed explanation follows. @@ -568,11 +573,10 @@ Note the major differences in `N`, `P`, and `Q` over `--full-history`: There is another simplification mode available: ---ancestry-path:: - Limit the displayed commits to those directly on the ancestry - chain between the ``from'' and ``to'' commits in the given commit - range. I.e. only display commits that are ancestor of the ``to'' - commit and descendants of the ``from'' commit. 
+--ancestry-path[=<commit>]:: + Limit the displayed commits to those which are an ancestor of + <commit>, or which are a descendant of <commit>, or are <commit> + itself. + As an example use case, consider the following commit history: + @@ -604,6 +608,29 @@ option does. Applied to the 'D..M' range, it results in: \ L--M ----------------------------------------------------------------------- ++ +We can also use `--ancestry-path=D` instead of `--ancestry-path` which +means the same thing when applied to the 'D..M' range but is just more +explicit. ++ +If we instead are interested in a given topic within this range, and all +commits affected by that topic, we may only want to view the subset of +`D..M` which contain that topic in their ancestry path. So, using +`--ancestry-path=H D..M` for example would result in: ++ +----------------------------------------------------------------------- + E + \ + G---H---I---J + \ + L--M +----------------------------------------------------------------------- ++ +Whereas `--ancestry-path=K D..M` would result in ++ +----------------------------------------------------------------------- + K---------------L--M +----------------------------------------------------------------------- Before discussing another option, `--show-pulls`, we need to create a new example history. @@ -659,7 +686,7 @@ Here, the merge commits `O` and `P` contribute extra noise, as they did not actually contribute a change to `file.txt`. They only merged a topic that was based on an older version of `file.txt`. This is a common issue in repositories using a workflow where many contributors work in -parallel and merge their topic branches along a single trunk: manu +parallel and merge their topic branches along a single trunk: many unrelated merges appear in the `--full-history` results. When using the `--simplify-merges` option, the commits `O` and `P` diff --git a/Documentation/technical/api-simple-ipc.txt b/Documentation/technical/api-simple-ipc.txt index d79ad323e6..d44ada98e7 100644 --- a/Documentation/technical/api-simple-ipc.txt +++ b/Documentation/technical/api-simple-ipc.txt @@ -78,7 +78,7 @@ client and an optional response message from the server. Both the client and server messages are unlimited in length and are terminated with a flush packet. -The pkt-line routines (Documentation/technical/protocol-common.txt) +The pkt-line routines (linkgit:gitprotocol-common[5]) are used to simplify buffer management during message generation, transmission, and reception. A flush packet is used to mark the end of the message. This allows the sender to incrementally generate and diff --git a/Documentation/technical/api-trace2.txt b/Documentation/technical/api-trace2.txt index 77a150b30e..2afa28bb5a 100644 --- a/Documentation/technical/api-trace2.txt +++ b/Documentation/technical/api-trace2.txt @@ -717,6 +717,7 @@ The "exec_id" field is a command-unique id and is only useful if the { "event":"def_param", ... + "scope":"global", "param":"core.abbrev", "value":"7" } @@ -1207,6 +1208,45 @@ at offset 508. This example also shows that thread names are assigned in a racy manner as each thread starts and allocates TLS storage. +Config (def param) Events:: + + Dump "interesting" config values to trace2 log. ++ +We can optionally emit configuration events, see +`trace2.configparams` in linkgit:git-config[1] for how to enable +it. 
++ +---------------- +$ git config --system color.ui never +$ git config --global color.ui always +$ git config --local color.ui auto +$ git config --list --show-scope | grep 'color.ui' +system color.ui=never +global color.ui=always +local color.ui=auto +---------------- ++ +Then, mark the config `color.ui` as "interesting" config with +`GIT_TRACE2_CONFIG_PARAMS`: ++ +---------------- +$ export GIT_TRACE2_PERF_BRIEF=1 +$ export GIT_TRACE2_PERF=~/log.perf +$ export GIT_TRACE2_CONFIG_PARAMS=color.ui +$ git version +... +$ cat ~/log.perf +d0 | main | version | | | | | ... +d0 | main | start | | 0.001642 | | | /usr/local/bin/git version +d0 | main | cmd_name | | | | | version (version) +d0 | main | def_param | | | | scope:system | color.ui:never +d0 | main | def_param | | | | scope:global | color.ui:always +d0 | main | def_param | | | | scope:local | color.ui:auto +d0 | main | data | r0 | 0.002100 | 0.002100 | fsync | fsync/writeout-only:0 +d0 | main | data | r0 | 0.002126 | 0.002126 | fsync | fsync/hardware-flush:0 +d0 | main | exit | | 0.000470 | | | code:0 +d0 | main | atexit | | 0.000477 | | | code:0 +---------------- == Future Work === Relationship to the Existing Trace Api (api-trace.txt) diff --git a/Documentation/technical/bundle-uri.txt b/Documentation/technical/bundle-uri.txt new file mode 100644 index 0000000000..c25c42378a --- /dev/null +++ b/Documentation/technical/bundle-uri.txt @@ -0,0 +1,573 @@ +Bundle URIs +=========== + +Git bundles are files that store a pack-file along with some extra metadata, +including a set of refs and a (possibly empty) set of necessary commits. See +linkgit:git-bundle[1] and link:bundle-format.txt[the bundle format] for more +information. + +Bundle URIs are locations where Git can download one or more bundles in +order to bootstrap the object database in advance of fetching the remaining +objects from a remote. + +One goal is to speed up clones and fetches for users with poor network +connectivity to the origin server. Another benefit is to allow heavy users, +such as CI build farms, to use local resources for the majority of Git data +and thereby reducing the load on the origin server. + +To enable the bundle URI feature, users can specify a bundle URI using +command-line options or the origin server can advertise one or more URIs +via a protocol v2 capability. + +Design Goals +------------ + +The bundle URI standard aims to be flexible enough to satisfy multiple +workloads. The bundle provider and the Git client have several choices in +how they create and consume bundle URIs. + +* Bundles can have whatever name the server desires. This name could refer + to immutable data by using a hash of the bundle contents. However, this + means that a new URI will be needed after every update of the content. + This might be acceptable if the server is advertising the URI (and the + server is aware of new bundles being generated) but would not be + ergonomic for users using the command line option. + +* The bundles could be organized specifically for bootstrapping full + clones, but could also be organized with the intention of bootstrapping + incremental fetches. The bundle provider must decide on one of several + organization schemes to minimize client downloads during incremental + fetches, but the Git client can also choose whether to use bundles for + either of these operations. + +* The bundle provider can choose to support full clones, partial clones, + or both. 
The client can detect which bundles are appropriate for the + repository's partial clone filter, if any. + +* The bundle provider can use a single bundle (for clones only), or a + list of bundles. When using a list of bundles, the provider can specify + whether or not the client needs _all_ of the bundle URIs for a full + clone, or if _any_ one of the bundle URIs is sufficient. This allows the + bundle provider to use different URIs for different geographies. + +* The bundle provider can organize the bundles using heuristics, such as + creation tokens, to help the client prevent downloading bundles it does + not need. When the bundle provider does not provide these heuristics, + the client can use optimizations to minimize how much of the data is + downloaded. + +* The bundle provider does not need to be associated with the Git server. + The client can choose to use the bundle provider without it being + advertised by the Git server. + +* The client can choose to discover bundle providers that are advertised + by the Git server. This could happen during `git clone`, during + `git fetch`, both, or neither. The user can choose which combination + works best for them. + +* The client can choose to configure a bundle provider manually at any + time. The client can also choose to specify a bundle provider manually + as a command-line option to `git clone`. + +Each repository is different and every Git server has different needs. +Hopefully the bundle URI feature is flexible enough to satisfy all needs. +If not, then the feature can be extended through its versioning mechanism. + +Server requirements +------------------- + +To provide a server-side implementation of bundle servers, no other parts +of the Git protocol are required. This allows server maintainers to use +static content solutions such as CDNs in order to serve the bundle files. + +At the current scope of the bundle URI feature, all URIs are expected to +be HTTP(S) URLs where content is downloaded to a local file using a `GET` +request to that URL. The server could include authentication requirements +to those requests with the aim of triggering the configured credential +helper for secure access. (Future extensions could use "file://" URIs or +SSH URIs.) + +Assuming a `200 OK` response from the server, the content at the URL is +inspected. First, Git attempts to parse the file as a bundle file of +version 2 or higher. If the file is not a bundle, then the file is parsed +as a plain-text file using Git's config parser. The key-value pairs in +that config file are expected to describe a list of bundle URIs. If +neither of these parse attempts succeed, then Git will report an error to +the user that the bundle URI provided erroneous data. + +Any other data provided by the server is considered erroneous. + +Bundle Lists +------------ + +The Git server can advertise bundle URIs using a set of `key=value` pairs. +A bundle URI can also serve a plain-text file in the Git config format +containing these same `key=value` pairs. In both cases, we consider this +to be a _bundle list_. The pairs specify information about the bundles +that the client can use to make decisions for which bundles to download +and which to ignore. + +A few keys focus on properties of the list itself. + +bundle.version:: + (Required) This value provides a version number for the bundle + list. If a future Git change enables a feature that needs the Git + client to react to a new key in the bundle list file, then this version + will increment. 
The only current version number is 1, and if any other + value is specified then Git will fail to use this file. + +bundle.mode:: + (Required) This key has one of two values: `all` and `any`. When `all` + is specified, then the client should expect to need all of the listed + bundle URIs that match their repository's requirements. When `any` is + specified, then the client should expect that any one of the bundle URIs + that match their repository's requirements will suffice. Typically, the + `any` option is used to list a number of different bundle servers + located in different geographies. + +bundle.heuristic:: + If this string-valued key exists, then the bundle list is designed to + work well with incremental `git fetch` commands. The heuristic signals + that there are additional keys available for each bundle that help + determine which subset of bundles the client should download. The only + heuristic currently planned is `creationToken`. + +The remaining keys include an `<id>` segment which is a server-designated +name for each available bundle. The `<id>` must contain only alphanumeric +and `-` characters. + +bundle.<id>.uri:: + (Required) This string value is the URI for downloading bundle `<id>`. + If the URI begins with a protocol (`http://` or `https://`) then the URI + is absolute. Otherwise, the URI is interpreted as relative to the URI + used for the bundle list. If the URI begins with `/`, then that relative + path is relative to the domain name used for the bundle list. (This use + of relative paths is intended to make it easier to distribute a set of + bundles across a large number of servers or CDNs with different domain + names.) + +bundle.<id>.filter:: + This string value represents an object filter that should also appear in + the header of this bundle. The server uses this value to differentiate + different kinds of bundles from which the client can choose those that + match their object filters. + +bundle.<id>.creationToken:: + This value is a nonnegative 64-bit integer used for sorting the bundles in + the list. This is used to download a subset of bundles during a fetch + when `bundle.heuristic=creationToken`. + +bundle.<id>.location:: + This string value advertises a real-world location from where the bundle + URI is served. This can be used to present the user with an option for + which bundle URI to use or simply as an informative indicator of which + bundle URI was selected by Git. This is only valuable when + `bundle.mode` is `any`. + +Here is an example bundle list using the Git config format: + + [bundle] + version = 1 + mode = all + heuristic = creationToken + + [bundle "2022-02-09-1644442601-daily"] + uri = https://bundles.example.com/git/git/2022-02-09-1644442601-daily.bundle + creationToken = 1644442601 + + [bundle "2022-02-02-1643842562"] + uri = https://bundles.example.com/git/git/2022-02-02-1643842562.bundle + creationToken = 1643842562 + + [bundle "2022-02-09-1644442631-daily-blobless"] + uri = 2022-02-09-1644442631-daily-blobless.bundle + creationToken = 1644442631 + filter = blob:none + + [bundle "2022-02-02-1643842568-blobless"] + uri = /git/git/2022-02-02-1643842568-blobless.bundle + creationToken = 1643842568 + filter = blob:none + +This example uses `bundle.mode=all` as well as the +`bundle.<id>.creationToken` heuristic. It also uses the `bundle.<id>.filter` +options to present two parallel sets of bundles: one for full clones and +another for blobless partial clones.
+ +Suppose that this bundle list was found at the URI +`https://bundles.example.com/git/git/` and so the two blobless bundles have +the following fully-expanded URIs: + +* `https://bundles.example.com/git/git/2022-02-09-1644442631-daily-blobless.bundle` +* `https://bundles.example.com/git/git/2022-02-02-1643842568-blobless.bundle` + +Advertising Bundle URIs +----------------------- + +If a user knows a bundle URI for the repository they are cloning, then +they can specify that URI manually through a command-line option. However, +a Git host may want to advertise bundle URIs during the clone operation, +helping users unaware of the feature. + +The only thing required for this feature is that the server can advertise +one or more bundle URIs. This advertisement takes the form of a new +protocol v2 capability specifically for discovering bundle URIs. + +The client could choose an arbitrary bundle URI as an option _or_ select +the URI with best performance by some exploratory checks. It is up to the +bundle provider to decide if having multiple URIs is preferable to a +single URI that is geodistributed through server-side infrastructure. + +Cloning with Bundle URIs +------------------------ + +The primary need for bundle URIs is to speed up clones. The Git client +will interact with bundle URIs according to the following flow: + +1. The user specifies a bundle URI with the `--bundle-uri` command-line + option _or_ the client discovers a bundle list advertised by the + Git server. + +2. If the downloaded data from a bundle URI is a bundle, then the client + inspects the bundle headers to check that the prerequisite commit OIDs + are present in the client repository. If some are missing, then the + client delays unbundling until other bundles have been unbundled, + making those OIDs present. When all required OIDs are present, the + client unbundles that data using a refspec. The default refspec is + `+refs/heads/*:refs/bundles/*`, but this can be configured. These refs + are stored so that later `git fetch` negotiations can communicate the + bundled refs as `have`s, reducing the size of the fetch over the Git + protocol. To allow pruning refs from this ref namespace, Git may + introduce a numbered namespace (such as `refs/bundles/<i>/*`) such that + stale bundle refs can be deleted. + +3. If the file is instead a bundle list, then the client inspects the + `bundle.mode` to see if the list is of the `all` or `any` form. + + a. If `bundle.mode=all`, then the client considers all bundle + URIs. The list is reduced based on the `bundle.<id>.filter` options + matching the client repository's partial clone filter. Then, all + bundle URIs are requested. If the `bundle.<id>.creationToken` + heuristic is provided, then the bundles are downloaded in decreasing + order by the creation token, stopping when a bundle has all required + OIDs. The bundles can then be unbundled in increasing creation token + order. The client stores the latest creation token as a heuristic + for avoiding future downloads if the bundle list does not advertise + bundles with larger creation tokens. + + b. If `bundle.mode=any`, then the client can choose any one of the + bundle URIs to inspect. The client can use a variety of ways to + choose among these URIs. The client can also fallback to another URI + if the initial choice fails to return a result. 
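Following the clone flow above, the unbundled refs can be inspected directly; a sketch assuming the default `+refs/heads/*:refs/bundles/*` refspec was used:

----
$ git for-each-ref refs/bundles          # refs recorded from the downloaded bundle(s)
$ git fetch origin                       # negotiation can now report those tips as "have"s
----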
+ +Note that during a clone we expect that all bundles will be required, and +heuristics such as `bundle.<uri>.creationToken` can be used to download +bundles in chronological order or in parallel. + +If a given bundle URI is a bundle list with a `bundle.heuristic` +value, then the client can choose to store that URI as its chosen bundle +URI. The client can then navigate directly to that URI during later `git +fetch` calls. + +When downloading bundle URIs, the client can choose to inspect the initial +content before committing to downloading the entire content. This may +provide enough information to determine if the URI is a bundle list or +a bundle. In the case of a bundle, the client may inspect the bundle +header to determine that all advertised tips are already in the client +repository and cancel the remaining download. + +Fetching with Bundle URIs +------------------------- + +When the client fetches new data, it can decide to fetch from bundle +servers before fetching from the origin remote. This could be done via a +command-line option, but it is more likely useful to use a config value +such as the one specified during the clone. + +The fetch operation follows the same procedure to download bundles from a +bundle list (although we do _not_ want to use parallel downloads here). We +expect that the process will end when all prerequisite commit OIDs in a +thin bundle are already in the object database. + +When using the `creationToken` heuristic, the client can avoid downloading +any bundles if their creation tokens are not larger than the stored +creation token. After fetching new bundles, Git updates this local +creation token. + +If the bundle provider does not provide a heuristic, then the client +should attempt to inspect the bundle headers before downloading the full +bundle data in case the bundle tips already exist in the client +repository. + +Error Conditions +---------------- + +If the Git client discovers something unexpected while downloading +information according to a bundle URI or the bundle list found at that +location, then Git can ignore that data and continue as if it was not +given a bundle URI. The remote Git server is the ultimate source of truth, +not the bundle URI. + +Here are a few example error conditions: + +* The client fails to connect with a server at the given URI or a connection + is lost without any chance to recover. + +* The client receives a 400-level response (such as `404 Not Found` or + `401 Unauthorized`). The client should use the credential helper to + find and provide a credential for the URI, but match the semantics of + Git's other HTTP protocols in terms of handling specific 400-level + errors. + +* The server reports any other failure response. + +* The client receives data that is not parsable as a bundle or bundle list. + +* A bundle includes a filter that does not match expectations. + +* The client cannot unbundle the bundles because the prerequisite commit OIDs + are not in the object database and there are no more bundles to download. + +There are also situations that could be seen as wasteful, but are not +error conditions: + +* The downloaded bundles contain more information than is requested by + the clone or fetch request. A primary example is if the user requests + a clone with `--single-branch` but downloads bundles that store every + reachable commit from all `refs/heads/*` references.
This might be + initially wasteful, but perhaps these objects will become reachable by + a later ref update that the client cares about. + +* A bundle download during a `git fetch` contains objects already in the + object database. This is probably unavoidable if we are using bundles + for fetches, since the client will almost always be slightly ahead of + the bundle servers after performing its "catch-up" fetch to the remote + server. This extra work is most wasteful when the client is fetching + much more frequently than the server is computing bundles, such as if + the client is using hourly prefetches with background maintenance, but + the server is computing bundles weekly. For this reason, the client + should not use bundle URIs for fetch unless the server has explicitly + recommended it through a `bundle.heuristic` value. + +Example Bundle Provider organization +------------------------------------ + +The bundle URI feature is intentionally designed to be flexible to +different ways a bundle provider wants to organize the object data. +However, it can be helpful to have a complete organization model described +here so providers can start from that base. + +This example organization is a simplified model of what is used by the +GVFS Cache Servers (see section near the end of this document) which have +been beneficial in speeding up clones and fetches for very large +repositories, although using extra software outside of Git. + +The bundle provider deploys servers across multiple geographies. Each +server manages its own bundle set. The server can track a number of Git +repositories, but provides a bundle list for each based on a pattern. For +example, when mirroring a repository at `https://<domain>/<org>/<repo>` +the bundle server could have its bundle list available at +`https://<server-url>/<domain>/<org>/<repo>`. The origin Git server can +list all of these servers under the "any" mode: + + [bundle] + version = 1 + mode = any + + [bundle "eastus"] + uri = https://eastus.example.com/<domain>/<org>/<repo> + + [bundle "europe"] + uri = https://europe.example.com/<domain>/<org>/<repo> + + [bundle "apac"] + uri = https://apac.example.com/<domain>/<org>/<repo> + +This "list of lists" is static and only changes if a bundle server is +added or removed. + +Each bundle server manages its own set of bundles. The initial bundle list +contains only a single bundle, containing all of the objects received from +cloning the repository from the origin server. The list uses the +`creationToken` heuristic and a `creationToken` is made for the bundle +based on the server's timestamp. + +The bundle server runs regularly-scheduled updates for the bundle list, +such as once a day. During this task, the server fetches the latest +contents from the origin server and generates a bundle containing the +objects reachable from the latest origin refs, but not contained in a +previously-computed bundle. This bundle is added to the list, with care +that the `creationToken` is strictly greater than the previous maximum +`creationToken`. + +When the bundle list grows too large, say more than 30 bundles, then the +oldest "_N_ minus 30" bundles are combined into a single bundle. This +bundle's `creationToken` is equal to the maximum `creationToken` among the +merged bundles. 
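The regularly-scheduled update described above might look roughly like the following sketch. The `last-bundled` tag, the `bundle-list` file name, and the `origin/main` branch are hypothetical bookkeeping choices for illustration, not part of any Git feature:

----
$ token=$(date +%s)
$ git fetch origin
$ git bundle create "daily-$token.bundle" last-bundled..origin/main   # objects new since the previous bundle
$ git tag -f last-bundled origin/main                                 # remember what has been bundled
$ git config -f bundle-list "bundle.daily-$token.uri" "daily-$token.bundle"
$ git config -f bundle-list "bundle.daily-$token.creationToken" "$token"
----

A real provider would cover all refs and handle the first run (when `last-bundled` does not yet exist); the point of the sketch is only that each new bundle gets a strictly larger `creationToken`.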
+
+An example bundle list is provided here, although it only has two daily
+bundles and not a full list of 30:
+
+	[bundle]
+		version = 1
+		mode = all
+		heuristic = creationToken
+
+	[bundle "2022-02-13-1644770820-daily"]
+		uri = https://eastus.example.com/<domain>/<org>/<repo>/2022-02-13-1644770820-daily.bundle
+		creationToken = 1644770820
+
+	[bundle "2022-02-09-1644442601-daily"]
+		uri = https://eastus.example.com/<domain>/<org>/<repo>/2022-02-09-1644442601-daily.bundle
+		creationToken = 1644442601
+
+	[bundle "2022-02-02-1643842562"]
+		uri = https://eastus.example.com/<domain>/<org>/<repo>/2022-02-02-1643842562.bundle
+		creationToken = 1643842562
+
+To avoid storing and serving object data in perpetuity despite becoming
+unreachable in the origin server, this bundle merge can be more careful.
+Instead of taking an absolute union of the old bundles, the bundle
+can be created by looking at the newer bundles and ensuring that their
+necessary commits are all available in this merged bundle (or in another
+one of the newer bundles). This allows "expiring" object data that is not
+being used by new commits in this window of time. That data could be
+reintroduced by a later push.
+
+The intention of this data organization has two main goals. First, initial
+clones of the repository become faster by downloading precomputed object
+data from a closer source. Second, `git fetch` commands can be faster,
+especially if the client has not fetched for a few days. However, if a
+client does not fetch for 30 days, then the bundle list organization would
+cause redownloading a large amount of object data.
+
+One way to make this organization more useful to users who fetch frequently
+is to have more frequent bundle creation. For example, bundles could be
+created every hour, and then once a day those "hourly" bundles could be
+merged into a "daily" bundle. The daily bundles are merged into the
+oldest bundle after 30 days.
+
+It is recommended that this bundle strategy is repeated with the `blob:none`
+filter if clients of this repository are expecting to use blobless partial
+clones. This list of blobless bundles stays in the same list as the full
+bundles, but uses the `bundle.<id>.filter` key to separate the two groups.
+For very large repositories, the bundle provider may want to _only_ provide
+blobless bundles.
+
+Implementation Plan
+-------------------
+
+This design document is being submitted on its own as an aspirational
+document, with the goal of implementing all of the mentioned client
+features over the course of several patch series. Here is a potential
+outline for submitting these features:
+
+1. Integrate bundle URIs into `git clone` with a `--bundle-uri` option.
+   This will include a new `git fetch --bundle-uri` mode for use as the
+   implementation underneath `git clone`. The initial version here will
+   expect a single bundle at the given URI.
+
+2. Implement the ability to parse a bundle list from a bundle URI and
+   update the `git fetch --bundle-uri` logic to properly distinguish
+   between `bundle.mode` options. Specifically design the feature so
+   that the config format parsing feeds a list of key-value pairs into the
+   bundle list logic.
+
+3. Create the `bundle-uri` protocol v2 command so Git servers can advertise
+   bundle URIs using the key-value pairs. Plug into the existing key-value
+   input to the bundle list logic. Allow `git clone` to discover these
+   bundle URIs and bootstrap the client repository from the bundle data.
+ (This choice is an opt-in via a config option and a command-line + option.) + +4. Allow the client to understand the `bundle.flag=forFetch` configuration + and the `bundle.<id>.creationToken` heuristic. When `git clone` + discovers a bundle URI with `bundle.flag=forFetch`, it configures the + client repository to check that bundle URI during later `git fetch <remote>` + commands. + +5. Allow clients to discover bundle URIs during `git fetch` and configure + a bundle URI for later fetches if `bundle.flag=forFetch`. + +6. Implement the "inspect headers" heuristic to reduce data downloads when + the `bundle.<id>.creationToken` heuristic is not available. + +As these features are reviewed, this plan might be updated. We also expect +that new designs will be discovered and implemented as this feature +matures and becomes used in real-world scenarios. + +Related Work: Packfile URIs +--------------------------- + +The Git protocol already has a capability where the Git server can list +a set of URLs along with the packfile response when serving a client +request. The client is then expected to download the packfiles at those +locations in order to have a complete understanding of the response. + +This mechanism is used by the Gerrit server (implemented with JGit) and +has been effective at reducing CPU load and improving user performance for +clones. + +A major downside to this mechanism is that the origin server needs to know +_exactly_ what is in those packfiles, and the packfiles need to be available +to the user for some time after the server has responded. This coupling +between the origin and the packfile data is difficult to manage. + +Further, this implementation is extremely hard to make work with fetches. + +Related Work: GVFS Cache Servers +-------------------------------- + +The GVFS Protocol [2] is a set of HTTP endpoints designed independently of +the Git project before Git's partial clone was created. One feature of this +protocol is the idea of a "cache server" which can be colocated with build +machines or developer offices to transfer Git data without overloading the +central server. + +The endpoint that VFS for Git is famous for is the `GET /gvfs/objects/{oid}` +endpoint, which allows downloading an object on-demand. This is a critical +piece of the filesystem virtualization of that product. + +However, a more subtle need is the `GET /gvfs/prefetch?lastPackTimestamp=<t>` +endpoint. Given an optional timestamp, the cache server responds with a list +of precomputed packfiles containing the commits and trees that were introduced +in those time intervals. + +The cache server computes these "prefetch" packfiles using the following +strategy: + +1. Every hour, an "hourly" pack is generated with a given timestamp. +2. Nightly, the previous 24 hourly packs are rolled up into a "daily" pack. +3. Nightly, all prefetch packs more than 30 days old are rolled up into + one pack. + +When a user runs `gvfs clone` or `scalar clone` against a repo with cache +servers, the client requests all prefetch packfiles, which is at most +`24 + 30 + 1` packfiles downloading only commits and trees. The client +then follows with a request to the origin server for the references, and +attempts to checkout that tip reference. (There is an extra endpoint that +helps get all reachable trees from a given commit, in case that commit +was not already in a prefetch packfile.) 
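+
+As a rough illustration of the request shape only (the cache-server host
+below is a placeholder, and the framing of the multi-packfile response is
+defined by the GVFS protocol document, not shown here):
+
+	# First prefetch: request all precomputed commit/tree packfiles.
+	curl -s "https://cache.example.com/gvfs/prefetch" -o prefetch-all.body
+
+	# Incremental prefetch: only packfiles newer than the given timestamp.
+	curl -s "https://cache.example.com/gvfs/prefetch?lastPackTimestamp=1644442601" \
+		-o prefetch-new.body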
+ +During a `git fetch`, a hook requests the prefetch endpoint using the +most-recent timestamp from a previously-downloaded prefetch packfile. +Only the list of packfiles with later timestamps are downloaded. Most +users fetch hourly, so they get at most one hourly prefetch pack. Users +whose machines have been off or otherwise have not fetched in over 30 days +might redownload all prefetch packfiles. This is rare. + +It is important to note that the clients always contact the origin server +for the refs advertisement, so the refs are frequently "ahead" of the +prefetched pack data. The missing objects are downloaded on-demand using +the `GET gvfs/objects/{oid}` requests, when needed by a command such as +`git checkout` or `git log`. Some Git optimizations disable checks that +would cause these on-demand downloads to be too aggressive. + +See Also +-------- + +[1] https://lore.kernel.org/git/RFC-cover-00.13-0000000000-20210805T150534Z-avarab@gmail.com/ + An earlier RFC for a bundle URI feature. + +[2] https://github.com/microsoft/VFSForGit/blob/master/Protocol.md + The GVFS Protocol diff --git a/Documentation/technical/cruft-packs.txt b/Documentation/technical/cruft-packs.txt deleted file mode 100644 index d81f3a8982..0000000000 --- a/Documentation/technical/cruft-packs.txt +++ /dev/null @@ -1,123 +0,0 @@ -= Cruft packs - -The cruft packs feature offer an alternative to Git's traditional mechanism of -removing unreachable objects. This document provides an overview of Git's -pruning mechanism, and how a cruft pack can be used instead to accomplish the -same. - -== Background - -To remove unreachable objects from your repository, Git offers `git repack -Ad` -(see linkgit:git-repack[1]). Quoting from the documentation: - -[quote] -[...] unreachable objects in a previous pack become loose, unpacked objects, -instead of being left in the old pack. [...] loose unreachable objects will be -pruned according to normal expiry rules with the next 'git gc' invocation. - -Unreachable objects aren't removed immediately, since doing so could race with -an incoming push which may reference an object which is about to be deleted. -Instead, those unreachable objects are stored as loose objects and stay that way -until they are older than the expiration window, at which point they are removed -by linkgit:git-prune[1]. - -Git must store these unreachable objects loose in order to keep track of their -per-object mtimes. If these unreachable objects were written into one big pack, -then either freshening that pack (because an object contained within it was -re-written) or creating a new pack of unreachable objects would cause the pack's -mtime to get updated, and the objects within it would never leave the expiration -window. Instead, objects are stored loose in order to keep track of the -individual object mtimes and avoid a situation where all cruft objects are -freshened at once. - -This can lead to undesirable situations when a repository contains many -unreachable objects which have not yet left the grace period. Having large -directories in the shards of `.git/objects` can lead to decreased performance in -the repository. But given enough unreachable objects, this can lead to inode -starvation and degrade the performance of the whole system. Since we -can never pack those objects, these repositories often take up a large amount of -disk space, since we can only zlib compress them, but not store them in delta -chains. 
- -== Cruft packs - -A cruft pack eliminates the need for storing unreachable objects in a loose -state by including the per-object mtimes in a separate file alongside a single -pack containing all loose objects. - -A cruft pack is written by `git repack --cruft` when generating a new pack. -linkgit:git-pack-objects[1]'s `--cruft` option. Note that `git repack --cruft` -is a classic all-into-one repack, meaning that everything in the resulting pack is -reachable, and everything else is unreachable. Once written, the `--cruft` -option instructs `git repack` to generate another pack containing only objects -not packed in the previous step (which equates to packing all unreachable -objects together). This progresses as follows: - - 1. Enumerate every object, marking any object which is (a) not contained in a - kept-pack, and (b) whose mtime is within the grace period as a traversal - tip. - - 2. Perform a reachability traversal based on the tips gathered in the previous - step, adding every object along the way to the pack. - - 3. Write the pack out, along with a `.mtimes` file that records the per-object - timestamps. - -This mode is invoked internally by linkgit:git-repack[1] when instructed to -write a cruft pack. Crucially, the set of in-core kept packs is exactly the set -of packs which will not be deleted by the repack; in other words, they contain -all of the repository's reachable objects. - -When a repository already has a cruft pack, `git repack --cruft` typically only -adds objects to it. An exception to this is when `git repack` is given the -`--cruft-expiration` option, which allows the generated cruft pack to omit -expired objects instead of waiting for linkgit:git-gc[1] to expire those objects -later on. - -It is linkgit:git-gc[1] that is typically responsible for removing expired -unreachable objects. - -== Caution for mixed-version environments - -Repositories that have cruft packs in them will continue to work with any older -version of Git. Note, however, that previous versions of Git which do not -understand the `.mtimes` file will use the cruft pack's mtime as the mtime for -all of the objects in it. In other words, do not expect older (pre-cruft pack) -versions of Git to interpret or even read the contents of the `.mtimes` file. - -Note that having mixed versions of Git GC-ing the same repository can lead to -unreachable objects never being completely pruned. This can happen under the -following circumstances: - - - An older version of Git running GC explodes the contents of an existing - cruft pack loose, using the cruft pack's mtime. - - A newer version running GC collects those loose objects into a cruft pack, - where the .mtime file reflects the loose object's actual mtimes, but the - cruft pack mtime is "now". - -Repeating this process will lead to unreachable objects not getting pruned as a -result of repeatedly resetting the objects' mtimes to the present time. - -If you are GC-ing repositories in a mixed version environment, consider omitting -the `--cruft` option when using linkgit:git-repack[1] and linkgit:git-gc[1], and -leaving the `gc.cruftPacks` configuration unset until all writers understand -cruft packs. - -== Alternatives - -Notable alternatives to this design include: - - - The location of the per-object mtime data, and - - Storing unreachable objects in multiple cruft packs. - -On the location of mtime data, a new auxiliary file tied to the pack was chosen -to avoid complicating the `.idx` format. 
If the `.idx` format were ever to gain -support for optional chunks of data, it may make sense to consolidate the -`.mtimes` format into the `.idx` itself. - -Storing unreachable objects among multiple cruft packs (e.g., creating a new -cruft pack during each repacking operation including only unreachable objects -which aren't already stored in an earlier cruft pack) is significantly more -complicated to construct, and so aren't pursued here. The obvious drawback to -the current implementation is that the entire cruft pack must be re-written from -scratch. diff --git a/Documentation/technical/hash-function-transition.txt b/Documentation/technical/hash-function-transition.txt index 260224b033..e2ac36dd21 100644 --- a/Documentation/technical/hash-function-transition.txt +++ b/Documentation/technical/hash-function-transition.txt @@ -205,7 +205,7 @@ SHA-1 content. Object storage ~~~~~~~~~~~~~~ Loose objects use zlib compression and packed objects use the packed -format described in Documentation/technical/pack-format.txt, just like +format described in linkgit:gitformat-pack[5], just like today. The content that is compressed and stored uses SHA-256 content instead of SHA-1 content. diff --git a/Documentation/technical/long-running-process-protocol.txt b/Documentation/technical/long-running-process-protocol.txt index aa0aa9af1c..6f33654b42 100644 --- a/Documentation/technical/long-running-process-protocol.txt +++ b/Documentation/technical/long-running-process-protocol.txt @@ -3,7 +3,7 @@ Long-running process protocol This protocol is used when Git needs to communicate with an external process throughout the entire life of a single Git command. All -communication is in pkt-line format (see technical/protocol-common.txt) +communication is in pkt-line format (see linkgit:gitprotocol-common[5]) over standard input and standard output. Handshake diff --git a/Documentation/technical/packfile-uri.txt b/Documentation/technical/packfile-uri.txt index 1eb525fe76..9d453d4765 100644 --- a/Documentation/technical/packfile-uri.txt +++ b/Documentation/technical/packfile-uri.txt @@ -18,7 +18,7 @@ a `packfile-uris` argument, the server MAY send a `packfile-uris` section directly before the `packfile` section (right after `wanted-refs` if it is sent) containing URIs of any of the given protocols. The URIs point to packfiles that use only features that the client has declared that it supports -(e.g. ofs-delta and thin-pack). See protocol-v2.txt for the documentation of +(e.g. ofs-delta and thin-pack). See linkgit:gitprotocol-v2[5] for the documentation of this section. Clients should then download and index all the given URIs (in addition to diff --git a/Documentation/technical/partial-clone.txt b/Documentation/technical/partial-clone.txt index 99f0eb3040..92fcee2bff 100644 --- a/Documentation/technical/partial-clone.txt +++ b/Documentation/technical/partial-clone.txt @@ -79,7 +79,7 @@ Design Details upload-pack negotiation. + This uses the existing capability discovery mechanism. -See "filter" in Documentation/technical/pack-protocol.txt. +See "filter" in linkgit:gitprotocol-pack[5]. - Clients pass a "filter-spec" to clone and fetch which is passed to the server to request filtering during packfile construction. 
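
For reference, the "filter-spec" mentioned in the partial-clone excerpt
above is the value given on the command line; a typical blobless setup
looks like this (the repository URL is a placeholder):

	# The client sends filter-spec "blob:none" during clone ...
	git clone --filter=blob:none https://example.com/repo.git

	# ... and later fetches reuse the filter recorded in
	# remote.origin.partialclonefilter.
	git -C repo fetch origin
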
diff --git a/Documentation/technical/scalar.txt b/Documentation/technical/scalar.txt new file mode 100644 index 0000000000..0600150b3a --- /dev/null +++ b/Documentation/technical/scalar.txt @@ -0,0 +1,127 @@ +Scalar +====== + +Scalar is a repository management tool that optimizes Git for use in large +repositories. It accomplishes this by helping users to take advantage of +advanced performance features in Git. Unlike most other Git built-in commands, +Scalar is not executed as a subcommand of 'git'; rather, it is built as a +separate executable containing its own series of subcommands. + +Background +---------- + +Scalar was originally designed as an add-on to Git and implemented as a .NET +Core application. It was created based on the learnings from the VFS for Git +project (another application aimed at improving the experience of working with +large repositories). As part of its initial implementation, Scalar relied on +custom features in the Microsoft fork of Git that have since been integrated +into core Git: + +* partial clone, +* commit graphs, +* multi-pack index, +* sparse checkout (cone mode), +* scheduled background maintenance, +* etc + +With the requisite Git functionality in place and a desire to bring the benefits +of Scalar to the larger Git community, the Scalar application itself was ported +from C# to C and integrated upstream. + +Features +-------- + +Scalar is comprised of two major pieces of functionality: automatically +configuring built-in Git performance features and managing repository +enlistments. + +The Git performance features configured by Scalar (see "Background" for +examples) confer substantial performance benefits to large repositories, but are +either too experimental to enable for all of Git yet, or only benefit large +repositories. As new features are introduced, Scalar should be updated +accordingly to incorporate them. This will prevent the tool from becoming stale +while also providing a path for more easily bringing features to the appropriate +users. + +Enlistments are how Scalar knows which repositories on a user's system should +utilize Scalar-configured features. This allows it to update performance +settings when new ones are added to the tool, as well as centrally manage +repository maintenance. The enlistment structure - a root directory with a +`src/` subdirectory containing the cloned repository itself - is designed to +encourage users to route build outputs outside of the repository to avoid the +performance-limiting overhead of ignoring those files in Git. + +Design +------ + +Scalar is implemented in C and interacts with Git via a mix of child process +invocations of Git and direct usage of `libgit.a`. Internally, it is structured +much like other built-ins with subcommands (e.g., `git stash`), containing a +`cmd_<subcommand>()` function for each subcommand, routed through a `cmd_main()` +function. Most options are unique to each subcommand, with `scalar` respecting +some "global" `git` options (e.g., `-c` and `-C`). + +Because `scalar` is not invoked as a Git subcommand (like `git scalar`), it is +built and installed as its own executable in the `bin/` directory, alongside +`git`, `git-gui`, etc. + +Roadmap +------- + +NOTE: this section will be removed once the remaining tasks outlined in this +roadmap are complete. + +Scalar is a large enough project that it is being upstreamed incrementally, +living in `contrib/` until it is feature-complete. 
So far, the following patch
+series have been accepted:
+
+- `scalar-the-beginning`: The initial patch series which sets up
+  `contrib/scalar/` and populates it with a minimal `scalar` command that
+  demonstrates the fundamental ideas.
+
+- `scalar-c-and-C`: The `scalar` command learns about two options that can be
+  specified before the command, `-c <key>=<value>` and `-C <directory>`.
+
+- `scalar-diagnose`: The `scalar` command is taught the `diagnose` subcommand.
+
+- `scalar-generalize-diagnose`: Move the functionality of `scalar diagnose`
+  into `git diagnose` and `git bugreport --diagnose`.
+
+- `scalar-add-fsmonitor`: Enable the built-in FSMonitor in Scalar
+  enlistments. At the end of this series, Scalar should be feature-complete
+  from the perspective of a user.
+
+Roughly speaking (and subject to change), the following series are needed to
+"finish" this initial version of Scalar:
+
+- Move Scalar to toplevel: Move Scalar out of `contrib/` and into the root of
+  `git`. This includes a variety of related updates, including:
+  - building & installing Scalar in the Git root-level 'make [install]'.
+  - building & testing Scalar as part of CI.
+  - moving and expanding test coverage of Scalar (including perf tests).
+  - implementing 'scalar help'/'git help scalar' to display scalar
+    documentation.
+
+Finally, there are two additional patch series that exist in Microsoft's fork of
+Git, but there is no current plan to upstream them. There are some interesting
+ideas there, but the implementation is too specific to Azure Repos and/or VFS
+for Git to be of much help in general.
+
+These still exist mainly because the GVFS protocol is what Azure Repos has
+instead of partial clone, while Git is focused on improving partial clone:
+
+- `scalar-with-gvfs`: The primary purpose of this patch series is to support
+  existing Scalar users whose repositories are hosted in Azure Repos (which does
+  not support Git's partial clones, but supports its predecessor, the GVFS
+  protocol, which is used by Scalar to emulate the partial clone).
+
+  Since the GVFS protocol will never be supported by core Git, this patch series
+  will remain in Microsoft's fork of Git.
+
+- `run-scalar-functional-tests`: The Scalar project developed a quite
+  comprehensive set of integration tests (or, "Functional Tests"). They are the
+  sole remaining part of the original C#-based Scalar project, and this patch
+  adds a GitHub workflow that runs them all.
+
+  Since the tests partially depend on features that are only provided in the
+  `scalar-with-gvfs` patch series, this patch cannot be upstreamed.
diff --git a/Documentation/user-manual.txt b/Documentation/user-manual.txt
index 865074bed4..ca9decdd95 100644
--- a/Documentation/user-manual.txt
+++ b/Documentation/user-manual.txt
@@ -3133,7 +3133,7 @@ those "loose" objects.
 You can save space and make Git faster by moving these loose objects in
 to a "pack file", which stores a group of objects in an efficient
 compressed format; the details of how pack files are formatted can be
-found in link:technical/pack-format.html[pack format].
+found in link:gitformat-pack[5].
 
 To put the loose objects into a pack, just run git repack:
diff --git a/GIT-VERSION-GEN b/GIT-VERSION-GEN
index fb16249620..6ec9e34282 100755
--- a/GIT-VERSION-GEN
+++ b/GIT-VERSION-GEN
@@ -1,7 +1,7 @@
 #!/bin/sh
 
 GVF=GIT-VERSION-FILE
-DEF_VER=v2.37.3
+DEF_VER=v2.37.GIT
 
 LF='
 '
@@ -182,6 +182,8 @@ include shared.mak
 #
 # Define BLK_SHA256 to use the built-in SHA-256 routines.
# +# Define NETTLE_SHA256 to use the SHA-256 routines in libnettle. +# # Define GCRYPT_SHA256 to use the SHA-256 routines in libgcrypt. # # Define OPENSSL_SHA256 to use the SHA-256 routines in OpenSSL. @@ -309,6 +311,11 @@ include shared.mak # distributions that want to use their packaged versions of Perl # modules, instead of the fallbacks shipped with Git. # +# Define NO_GITWEB if you do not want to build or install +# 'gitweb'. Note that defining NO_PERL currently has the same effect +# on not installing gitweb, but not on whether it's built in the +# gitweb/ directory. +# # Define PYTHON_PATH to the path of your Python binary (often /usr/bin/python # but /usr/bin/python2.7 or /usr/bin/python3 on some platforms). # @@ -544,6 +551,7 @@ gitexecdir = libexec/git-core mergetoolsdir = $(gitexecdir)/mergetools sharedir = $(prefix)/share gitwebdir = $(sharedir)/gitweb +gitwebstaticdir = $(gitwebdir)/static perllibdir = $(sharedir)/perl5 localedir = $(sharedir)/locale template_dir = share/git-core/templates @@ -562,7 +570,7 @@ localedir_relative = $(patsubst $(prefix)/%,%,$(localedir)) htmldir_relative = $(patsubst $(prefix)/%,%,$(htmldir)) perllibdir_relative = $(patsubst $(prefix)/%,%,$(perllibdir)) -export prefix bindir sharedir sysconfdir gitwebdir perllibdir localedir +export prefix bindir sharedir sysconfdir perllibdir localedir # Set our default programs CC = cc @@ -764,6 +772,7 @@ TEST_BUILTINS_OBJS += test-read-midx.o TEST_BUILTINS_OBJS += test-ref-store.o TEST_BUILTINS_OBJS += test-reftable.o TEST_BUILTINS_OBJS += test-regex.o +TEST_BUILTINS_OBJS += test-rot13-filter.o TEST_BUILTINS_OBJS += test-repository.o TEST_BUILTINS_OBJS += test-revision-walking.o TEST_BUILTINS_OBJS += test-run-command.o @@ -910,6 +919,7 @@ LIB_OBJS += combine-diff.o LIB_OBJS += commit-graph.o LIB_OBJS += commit-reach.o LIB_OBJS += commit.o +LIB_OBJS += compat/nonblock.o LIB_OBJS += compat/obstack.o LIB_OBJS += compat/terminal.o LIB_OBJS += compat/zlib-uncompress2.o @@ -924,6 +934,7 @@ LIB_OBJS += ctype.o LIB_OBJS += date.o LIB_OBJS += decorate.o LIB_OBJS += delta-islands.o +LIB_OBJS += diagnose.o LIB_OBJS += diff-delta.o LIB_OBJS += diff-merges.o LIB_OBJS += diff-lib.o @@ -984,7 +995,6 @@ LIB_OBJS += merge-ort.o LIB_OBJS += merge-ort-wrappers.o LIB_OBJS += merge-recursive.o LIB_OBJS += merge.o -LIB_OBJS += mergesort.o LIB_OBJS += midx.o LIB_OBJS += name-hash.o LIB_OBJS += negotiator/default.o @@ -1145,6 +1155,7 @@ BUILTIN_OBJS += builtin/credential-cache.o BUILTIN_OBJS += builtin/credential-store.o BUILTIN_OBJS += builtin/credential.o BUILTIN_OBJS += builtin/describe.o +BUILTIN_OBJS += builtin/diagnose.o BUILTIN_OBJS += builtin/diff-files.o BUILTIN_OBJS += builtin/diff-index.o BUILTIN_OBJS += builtin/diff-tree.o @@ -1286,7 +1297,7 @@ SANITIZE_ADDRESS = # For the 'coccicheck' target; setting SPATCH_BATCH_SIZE higher will # usually result in less CPU usage at the cost of higher peak memory. # Setting it to 0 will feed all files in a single spatch invocation. -SPATCH_FLAGS = --all-includes --patch . 
+SPATCH_FLAGS = --all-includes SPATCH_BATCH_SIZE = 1 include config.mak.uname @@ -1842,6 +1853,10 @@ ifdef OPENSSL_SHA256 EXTLIBS += $(LIB_4_CRYPTO) BASIC_CFLAGS += -DSHA256_OPENSSL else +ifdef NETTLE_SHA256 + BASIC_CFLAGS += -DSHA256_NETTLE + EXTLIBS += -lnettle +else ifdef GCRYPT_SHA256 BASIC_CFLAGS += -DSHA256_GCRYPT EXTLIBS += -lgcrypt @@ -1850,6 +1865,7 @@ else BASIC_CFLAGS += -DSHA256_BLK endif endif +endif ifdef SHA1_MAX_BLOCK_SIZE LIB_OBJS += compat/sha1-chunked.o @@ -2089,6 +2105,7 @@ htmldir_relative_SQ = $(subst ','\'',$(htmldir_relative)) prefix_SQ = $(subst ','\'',$(prefix)) perllibdir_relative_SQ = $(subst ','\'',$(perllibdir_relative)) gitwebdir_SQ = $(subst ','\'',$(gitwebdir)) +gitwebstaticdir_SQ = $(subst ','\'',$(gitwebstaticdir)) SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH)) TEST_SHELL_PATH_SQ = $(subst ','\'',$(TEST_SHELL_PATH)) @@ -2417,10 +2434,6 @@ GIT-PERL-HEADER: $(PERL_HEADER_TEMPLATE) GIT-PERL-DEFINES Makefile perllibdir: @echo '$(perllibdir_SQ)' -.PHONY: gitweb -gitweb: - $(QUIET_SUBDIR0)gitweb $(QUIET_SUBDIR1) all - git-instaweb: git-instaweb.sh GIT-SCRIPT-DEFINES $(QUIET_GEN)$(cmd_munge_script) && \ chmod +x $@+ && \ @@ -3091,6 +3104,9 @@ $(SP_OBJ): %.sp: %.c %.o sparse: $(SP_OBJ) EXCEPT_HDRS := $(GENERATED_H) unicode-width.h compat/% xdiff/% +ifndef NETTLE_SHA256 + EXCEPT_HDRS += sha256/nettle.h +endif ifndef GCRYPT_SHA256 EXCEPT_HDRS += sha256/gcrypt.h endif @@ -3123,6 +3139,8 @@ check: $(GENERATED_H) exit 1; \ fi +COCCI_TEST_RES = $(wildcard contrib/coccinelle/tests/*.res) + %.cocci.patch: %.cocci $(COCCI_SOURCES) $(QUIET_SPATCH) \ if test $(SPATCH_BATCH_SIZE) = 0; then \ @@ -3131,7 +3149,8 @@ check: $(GENERATED_H) limit='-n $(SPATCH_BATCH_SIZE)'; \ fi; \ if ! echo $(COCCI_SOURCES) | xargs $$limit \ - $(SPATCH) --sp-file $< $(SPATCH_FLAGS) \ + $(SPATCH) $(SPATCH_FLAGS) \ + --sp-file $< --patch . \ >$@+ 2>$@.log; \ then \ cat $@.log; \ @@ -3142,13 +3161,43 @@ check: $(GENERATED_H) then \ echo ' ' SPATCH result: $@; \ fi + +COCCI_TEST_RES_GEN = $(addprefix .build/,$(COCCI_TEST_RES)) +$(COCCI_TEST_RES_GEN): .build/%.res : %.c +$(COCCI_TEST_RES_GEN): .build/%.res : %.res +$(COCCI_TEST_RES_GEN): .build/contrib/coccinelle/tests/%.res : contrib/coccinelle/%.cocci + $(call mkdir_p_parent_template) + $(QUIET_SPATCH_T)$(SPATCH) $(SPATCH_FLAGS) \ + --very-quiet --no-show-diff \ + --sp-file $< -o $@ \ + $(@:.build/%.res=%.c) && \ + cmp $(@:.build/%=%) $@ || \ + git -P diff --no-index $(@:.build/%=%) $@ 2>/dev/null; \ + +.PHONY: coccicheck-test +coccicheck-test: $(COCCI_TEST_RES_GEN) + +coccicheck: coccicheck-test coccicheck: $(addsuffix .patch,$(filter-out %.pending.cocci,$(wildcard contrib/coccinelle/*.cocci))) # See contrib/coccinelle/README +coccicheck-pending: coccicheck-test coccicheck-pending: $(addsuffix .patch,$(wildcard contrib/coccinelle/*.pending.cocci)) .PHONY: coccicheck coccicheck-pending +# "Sub"-Makefiles, not really because they can't be run stand-alone, +# only there to contain directory-specific rules and variables +## gitweb/Makefile inclusion: +MAK_DIR_GITWEB = gitweb/ +include gitweb/Makefile + +.PHONY: gitweb +gitweb: $(MAK_DIR_GITWEB_ALL) +ifndef NO_GITWEB +all:: gitweb +endif + ### Installation rules ifneq ($(filter /%,$(firstword $(template_dir))),) @@ -3221,7 +3270,6 @@ ifndef NO_PERL $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perllibdir_SQ)' (cd perl/build/lib && $(TAR) cf - .) 
| \ (cd '$(DESTDIR_SQ)$(perllibdir_SQ)' && umask 022 && $(TAR) xof -) - $(MAKE) -C gitweb install endif ifndef NO_TCLTK $(MAKE) -C gitk-git install @@ -3276,10 +3324,8 @@ endif cp "$$execdir/git-remote-http$X" "$$execdir/$$p" || exit; } \ done -.PHONY: install-gitweb install-doc install-man install-man-perl install-html install-info install-pdf +.PHONY: install-doc install-man install-man-perl install-html install-info install-pdf .PHONY: quick-install-doc quick-install-man quick-install-html -install-gitweb: - $(MAKE) -C gitweb install install-doc: install-man-perl $(MAKE) -C Documentation install @@ -3403,12 +3449,13 @@ profile-clean: $(RM) $(addsuffix *.gcno,$(addprefix $(PROFILE_DIR)/, $(object_dirs))) cocciclean: + $(RM) -r .build/contrib/coccinelle $(RM) contrib/coccinelle/*.cocci.patch* clean: profile-clean coverage-clean cocciclean $(RM) -r .build $(RM) po/git.pot po/git-core.pot - $(RM) *.res + $(RM) git.res $(RM) $(OBJECTS) $(RM) $(LIB_FILE) $(XDIFF_LIB) $(REFTABLE_LIB) $(REFTABLE_TEST_LIB) $(RM) $(ALL_PROGRAMS) $(SCRIPT_LIB) $(BUILT_INS) git$X @@ -3425,7 +3472,6 @@ clean: profile-clean coverage-clean cocciclean $(MAKE) -C Documentation/ clean $(RM) Documentation/GIT-EXCLUDED-PROGRAMS ifndef NO_PERL - $(MAKE) -C gitweb clean $(RM) -r perl/build/ endif $(MAKE) -C templates/ clean @@ -3489,6 +3535,7 @@ check-docs:: sed -e '1,/^### command list/d' \ -e '/^#/d' \ -e '/guide$$/d' \ + -e '/interfaces$$/d' \ -e 's/[ ].*//' \ -e 's/^/listed /' command-list.txt; \ $(MAKE) -C Documentation print-man1 | \ @@ -1 +1 @@ -Documentation/RelNotes/2.37.3.txt
\ No newline at end of file +Documentation/RelNotes/2.38.0.txt
\ No newline at end of file diff --git a/archive-tar.c b/archive-tar.c index 042feb66d2..3d77e0f750 100644 --- a/archive-tar.c +++ b/archive-tar.c @@ -38,11 +38,18 @@ static int write_tar_filter_archive(const struct archiver *ar, #define USTAR_MAX_MTIME 077777777777ULL #endif +static void tar_write_block(const void *buf) +{ + write_or_die(1, buf, BLOCKSIZE); +} + +static void (*write_block)(const void *) = tar_write_block; + /* writes out the whole block, but only if it is full */ static void write_if_needed(void) { if (offset == BLOCKSIZE) { - write_or_die(1, block, BLOCKSIZE); + write_block(block); offset = 0; } } @@ -66,7 +73,7 @@ static void do_write_blocked(const void *data, unsigned long size) write_if_needed(); } while (size >= BLOCKSIZE) { - write_or_die(1, buf, BLOCKSIZE); + write_block(buf); size -= BLOCKSIZE; buf += BLOCKSIZE; } @@ -101,10 +108,10 @@ static void write_trailer(void) { int tail = BLOCKSIZE - offset; memset(block + offset, 0, tail); - write_or_die(1, block, BLOCKSIZE); + write_block(block); if (tail < 2 * RECORDSIZE) { memset(block, 0, offset); - write_or_die(1, block, BLOCKSIZE); + write_block(block); } } @@ -383,8 +390,8 @@ static int tar_filter_config(const char *var, const char *value, void *data) if (!strcmp(type, "command")) { if (!value) return config_error_nonbool(var); - free(ar->data); - ar->data = xstrdup(value); + free(ar->filter_command); + ar->filter_command = xstrdup(value); return 0; } if (!strcmp(type, "remote")) { @@ -425,17 +432,65 @@ static int write_tar_archive(const struct archiver *ar, return err; } +static git_zstream gzstream; +static unsigned char outbuf[16384]; + +static void tgz_deflate(int flush) +{ + while (gzstream.avail_in || flush == Z_FINISH) { + int status = git_deflate(&gzstream, flush); + if (!gzstream.avail_out || status == Z_STREAM_END) { + write_or_die(1, outbuf, gzstream.next_out - outbuf); + gzstream.next_out = outbuf; + gzstream.avail_out = sizeof(outbuf); + if (status == Z_STREAM_END) + break; + } + if (status != Z_OK && status != Z_BUF_ERROR) + die(_("deflate error (%d)"), status); + } +} + +static void tgz_write_block(const void *data) +{ + gzstream.next_in = (void *)data; + gzstream.avail_in = BLOCKSIZE; + tgz_deflate(Z_NO_FLUSH); +} + +static const char internal_gzip_command[] = "git archive gzip"; + static int write_tar_filter_archive(const struct archiver *ar, struct archiver_args *args) { +#if ZLIB_VERNUM >= 0x1221 + struct gz_header_s gzhead = { .os = 3 }; /* Unix, for reproducibility */ +#endif struct strbuf cmd = STRBUF_INIT; struct child_process filter = CHILD_PROCESS_INIT; int r; - if (!ar->data) + if (!ar->filter_command) BUG("tar-filter archiver called with no filter defined"); - strbuf_addstr(&cmd, ar->data); + if (!strcmp(ar->filter_command, internal_gzip_command)) { + write_block = tgz_write_block; + git_deflate_init_gzip(&gzstream, args->compression_level); +#if ZLIB_VERNUM >= 0x1221 + if (deflateSetHeader(&gzstream.z, &gzhead) != Z_OK) + BUG("deflateSetHeader() called too late"); +#endif + gzstream.next_out = outbuf; + gzstream.avail_out = sizeof(outbuf); + + r = write_tar_archive(ar, args); + + tgz_deflate(Z_FINISH); + git_deflate_end(&gzstream); + return r; + } + + strbuf_addstr(&cmd, ar->filter_command); if (args->compression_level >= 0) strbuf_addf(&cmd, " -%d", args->compression_level); @@ -471,14 +526,14 @@ void init_tar_archiver(void) int i; register_archiver(&tar_archiver); - tar_filter_config("tar.tgz.command", "gzip -cn", NULL); + tar_filter_config("tar.tgz.command", internal_gzip_command, 
NULL); tar_filter_config("tar.tgz.remote", "true", NULL); - tar_filter_config("tar.tar.gz.command", "gzip -cn", NULL); + tar_filter_config("tar.tar.gz.command", internal_gzip_command, NULL); tar_filter_config("tar.tar.gz.remote", "true", NULL); git_config(git_tar_config, NULL); for (i = 0; i < nr_tar_filters; i++) { /* omit any filters that never had a command configured */ - if (tar_filters[i]->data) + if (tar_filters[i]->filter_command) register_archiver(tar_filters[i]); } } @@ -43,7 +43,7 @@ struct archiver { const char *name; int (*write_archive)(const struct archiver *, struct archiver_args *); unsigned flags; - void *data; + char *filter_command; }; void register_archiver(struct archiver *); @@ -1023,7 +1023,7 @@ static int path_matches(const char *pathname, int pathlen, } return match_pathname(pathname, pathlen - isdir, base, baselen, - pattern, prefix, pat->patternlen, pat->flags); + pattern, prefix, pat->patternlen); } static int macroexpand_one(struct all_attrs_item *all_attrs, int nr, int rem); @@ -648,11 +648,14 @@ static struct commit_list *managed_skipped(struct commit_list *list, } static void bisect_rev_setup(struct repository *r, struct rev_info *revs, + struct strvec *rev_argv, const char *prefix, const char *bad_format, const char *good_format, int read_paths) { - struct strvec rev_argv = STRVEC_INIT; + struct setup_revision_opt opt = { + .free_removed_argv_elements = 1, + }; int i; repo_init_revisions(r, revs, prefix); @@ -660,17 +663,16 @@ static void bisect_rev_setup(struct repository *r, struct rev_info *revs, revs->commit_format = CMIT_FMT_UNSPECIFIED; /* rev_argv.argv[0] will be ignored by setup_revisions */ - strvec_push(&rev_argv, "bisect_rev_setup"); - strvec_pushf(&rev_argv, bad_format, oid_to_hex(current_bad_oid)); + strvec_push(rev_argv, "bisect_rev_setup"); + strvec_pushf(rev_argv, bad_format, oid_to_hex(current_bad_oid)); for (i = 0; i < good_revs.nr; i++) - strvec_pushf(&rev_argv, good_format, + strvec_pushf(rev_argv, good_format, oid_to_hex(good_revs.oid + i)); - strvec_push(&rev_argv, "--"); + strvec_push(rev_argv, "--"); if (read_paths) - read_bisect_paths(&rev_argv); + read_bisect_paths(rev_argv); - setup_revisions(rev_argv.nr, rev_argv.v, revs, NULL); - /* XXX leak rev_argv, as "revs" may still be pointing to it */ + setup_revisions(rev_argv->nr, rev_argv->v, revs, &opt); } static void bisect_common(struct rev_info *revs) @@ -873,10 +875,11 @@ static enum bisect_error check_merge_bases(int rev_nr, struct commit **rev, int static int check_ancestors(struct repository *r, int rev_nr, struct commit **rev, const char *prefix) { + struct strvec rev_argv = STRVEC_INIT; struct rev_info revs; int res; - bisect_rev_setup(r, &revs, prefix, "^%s", "%s", 0); + bisect_rev_setup(r, &revs, &rev_argv, prefix, "^%s", "%s", 0); bisect_common(&revs); res = (revs.commits != NULL); @@ -885,6 +888,7 @@ static int check_ancestors(struct repository *r, int rev_nr, clear_commit_marks_many(rev_nr, rev, ALL_REV_FLAGS); release_revisions(&revs); + strvec_clear(&rev_argv); return res; } @@ -1010,6 +1014,7 @@ void read_bisect_terms(const char **read_bad, const char **read_good) */ enum bisect_error bisect_next_all(struct repository *r, const char *prefix) { + struct strvec rev_argv = STRVEC_INIT; struct rev_info revs = REV_INFO_INIT; struct commit_list *tried; int reaches = 0, all = 0, nr, steps; @@ -1037,7 +1042,7 @@ enum bisect_error bisect_next_all(struct repository *r, const char *prefix) if (res) goto cleanup; - bisect_rev_setup(r, &revs, prefix, "%s", "^%s", 1); + 
bisect_rev_setup(r, &revs, &rev_argv, prefix, "%s", "^%s", 1); revs.first_parent_only = !!(bisect_flags & FIND_BISECTION_FIRST_PARENT_ONLY); revs.limited = 1; @@ -1054,7 +1059,7 @@ enum bisect_error bisect_next_all(struct repository *r, const char *prefix) */ res = error_if_skipped_commits(tried, NULL); if (res < 0) - return res; + goto cleanup; printf(_("%s was both %s and %s\n"), oid_to_hex(current_bad_oid), term_good, @@ -1112,6 +1117,7 @@ enum bisect_error bisect_next_all(struct repository *r, const char *prefix) res = bisect_checkout(bisect_rev, no_checkout); cleanup: release_revisions(&revs); + strvec_clear(&rev_argv); return res; } @@ -1098,30 +1098,22 @@ static struct blame_entry *blame_merge(struct blame_entry *list1, } } -static void *get_next_blame(const void *p) -{ - return ((struct blame_entry *)p)->next; -} - -static void set_next_blame(void *p1, void *p2) -{ - ((struct blame_entry *)p1)->next = p2; -} +DEFINE_LIST_SORT(static, sort_blame_entries, struct blame_entry, next); /* * Final image line numbers are all different, so we don't need a * three-way comparison here. */ -static int compare_blame_final(const void *p1, const void *p2) +static int compare_blame_final(const struct blame_entry *e1, + const struct blame_entry *e2) { - return ((struct blame_entry *)p1)->lno > ((struct blame_entry *)p2)->lno - ? 1 : -1; + return e1->lno > e2->lno ? 1 : -1; } -static int compare_blame_suspect(const void *p1, const void *p2) +static int compare_blame_suspect(const struct blame_entry *s1, + const struct blame_entry *s2) { - const struct blame_entry *s1 = p1, *s2 = p2; /* * to allow for collating suspects, we sort according to the * respective pointer value as the primary sorting criterion. @@ -1138,8 +1130,7 @@ static int compare_blame_suspect(const void *p1, const void *p2) void blame_sort_final(struct blame_scoreboard *sb) { - sb->ent = llist_mergesort(sb->ent, get_next_blame, set_next_blame, - compare_blame_final); + sort_blame_entries(&sb->ent, compare_blame_final); } static int compare_commits_by_reverse_commit_date(const void *a, @@ -1964,9 +1955,7 @@ static void pass_blame_to_parent(struct blame_scoreboard *sb, parent, target, 0); *d.dstq = NULL; if (ignore_diffs) - newdest = llist_mergesort(newdest, get_next_blame, - set_next_blame, - compare_blame_suspect); + sort_blame_entries(&newdest, compare_blame_suspect); queue_blames(sb, parent, newdest); return; @@ -2383,8 +2372,7 @@ static int num_scapegoats(struct rev_info *revs, struct commit *commit, int reve */ static void distribute_blame(struct blame_scoreboard *sb, struct blame_entry *blamed) { - blamed = llist_mergesort(blamed, get_next_blame, set_next_blame, - compare_blame_suspect); + sort_blame_entries(&blamed, compare_blame_suspect); while (blamed) { struct blame_origin *porigin = blamed->suspect; @@ -10,6 +10,7 @@ #include "worktree.h" #include "submodule-config.h" #include "run-command.h" +#include "strmap.h" struct tracking { struct refspec_item spec; @@ -369,6 +370,83 @@ int validate_branchname(const char *name, struct strbuf *ref) return ref_exists(ref->buf); } +static int initialized_checked_out_branches; +static struct strmap current_checked_out_branches = STRMAP_INIT; + +static void prepare_checked_out_branches(void) +{ + int i = 0; + struct worktree **worktrees; + + if (initialized_checked_out_branches) + return; + initialized_checked_out_branches = 1; + + worktrees = get_worktrees(); + + while (worktrees[i]) { + char *old; + struct wt_status_state state = { 0 }; + struct worktree *wt = worktrees[i++]; + struct 
string_list update_refs = STRING_LIST_INIT_DUP;
+
+		if (wt->is_bare)
+			continue;
+
+		if (wt->head_ref) {
+			old = strmap_put(&current_checked_out_branches,
+					 wt->head_ref,
+					 xstrdup(wt->path));
+			free(old);
+		}
+
+		if (wt_status_check_rebase(wt, &state) &&
+		    (state.rebase_in_progress || state.rebase_interactive_in_progress) &&
+		    state.branch) {
+			struct strbuf ref = STRBUF_INIT;
+			strbuf_addf(&ref, "refs/heads/%s", state.branch);
+			old = strmap_put(&current_checked_out_branches,
+					 ref.buf,
+					 xstrdup(wt->path));
+			free(old);
+			strbuf_release(&ref);
+		}
+		wt_status_state_free_buffers(&state);
+
+		if (wt_status_check_bisect(wt, &state) &&
+		    state.branch) {
+			struct strbuf ref = STRBUF_INIT;
+			strbuf_addf(&ref, "refs/heads/%s", state.branch);
+			old = strmap_put(&current_checked_out_branches,
+					 ref.buf,
+					 xstrdup(wt->path));
+			free(old);
+			strbuf_release(&ref);
+		}
+		wt_status_state_free_buffers(&state);
+
+		if (!sequencer_get_update_refs_state(get_worktree_git_dir(wt),
+						     &update_refs)) {
+			struct string_list_item *item;
+			for_each_string_list_item(item, &update_refs) {
+				old = strmap_put(&current_checked_out_branches,
+						 item->string,
+						 xstrdup(wt->path));
+				free(old);
+			}
+			string_list_clear(&update_refs, 1);
+		}
+	}
+
+	free_worktrees(worktrees);
+}
+
+const char *branch_checked_out(const char *refname)
+{
+	prepare_checked_out_branches();
+	return strmap_get(&current_checked_out_branches, refname);
+}
+
 /*
  * Check if a branch 'name' can be created as a new branch; die otherwise.
  * 'force' can be used when it is OK for the named branch already exists.
@@ -377,9 +455,7 @@ int validate_branchname(const char *name, struct strbuf *ref)
  */
 int validate_new_branchname(const char *name, struct strbuf *ref, int force)
 {
-	struct worktree **worktrees;
-	const struct worktree *wt;
-
+	const char *path;
 	if (!validate_branchname(name, ref))
 		return 0;
 
@@ -387,13 +463,10 @@ int validate_new_branchname(const char *name, struct strbuf *ref, int force)
 		die(_("a branch named '%s' already exists"),
 		    ref->buf + strlen("refs/heads/"));
 
-	worktrees = get_worktrees();
-	wt = find_shared_symref(worktrees, "HEAD", ref->buf);
-	if (wt && !wt->is_bare)
+	if ((path = branch_checked_out(ref->buf)))
 		die(_("cannot force update the branch '%s' "
 		      "checked out at '%s'"),
-		    ref->buf + strlen("refs/heads/"), wt->path);
-	free_worktrees(worktrees);
+		    ref->buf + strlen("refs/heads/"), path);
 	return 1;
 }
 
@@ -101,6 +101,13 @@ void create_branches_recursively(struct repository *r, const char *name,
 				 const char *tracking_name, int force,
 				 int reflog, int quiet, enum branch_track track,
 				 int dry_run);
+
+/*
+ * If the branch at 'refname' is currently checked out in a worktree,
+ * then return the path to that worktree.
+ */
+const char *branch_checked_out(const char *refname);
+
 /*
  * Check if 'name' can be a valid name for a branch; die otherwise.
  * Return 1 if the named branch already exists; return 0 otherwise.
@@ -144,6 +144,7 @@ int cmd_credential_cache(int argc, const char **argv, const char *prefix); int cmd_credential_cache_daemon(int argc, const char **argv, const char *prefix); int cmd_credential_store(int argc, const char **argv, const char *prefix); int cmd_describe(int argc, const char **argv, const char *prefix); +int cmd_diagnose(int argc, const char **argv, const char *prefix); int cmd_diff_files(int argc, const char **argv, const char *prefix); int cmd_diff_index(int argc, const char **argv, const char *prefix); int cmd_diff(int argc, const char **argv, const char *prefix); diff --git a/builtin/branch.c b/builtin/branch.c index 5d00d0b8d3..55cd9a6e99 100644 --- a/builtin/branch.c +++ b/builtin/branch.c @@ -204,7 +204,6 @@ static void delete_branch_config(const char *branchname) static int delete_branches(int argc, const char **argv, int force, int kinds, int quiet) { - struct worktree **worktrees; struct commit *head_rev = NULL; struct object_id oid; char *name = NULL; @@ -242,8 +241,6 @@ static int delete_branches(int argc, const char **argv, int force, int kinds, die(_("Couldn't look up commit object for HEAD")); } - worktrees = get_worktrees(); - for (i = 0; i < argc; i++, strbuf_reset(&bname)) { char *target = NULL; int flags = 0; @@ -253,12 +250,11 @@ static int delete_branches(int argc, const char **argv, int force, int kinds, name = mkpathdup(fmt, bname.buf); if (kinds == FILTER_REFS_BRANCHES) { - const struct worktree *wt = - find_shared_symref(worktrees, "HEAD", name); - if (wt) { + const char *path; + if ((path = branch_checked_out(name))) { error(_("Cannot delete branch '%s' " "checked out at '%s'"), - bname.buf, wt->path); + bname.buf, path); ret = 1; continue; } @@ -315,7 +311,6 @@ static int delete_branches(int argc, const char **argv, int force, int kinds, free(name); strbuf_release(&bname); - free_worktrees(worktrees); return ret; } diff --git a/builtin/bugreport.c b/builtin/bugreport.c index 9de32bc96e..530895be55 100644 --- a/builtin/bugreport.c +++ b/builtin/bugreport.c @@ -5,6 +5,7 @@ #include "compat/compiler.h" #include "hook.h" #include "hook-list.h" +#include "diagnose.h" static void get_system_info(struct strbuf *sys_info) @@ -59,7 +60,7 @@ static void get_populated_hooks(struct strbuf *hook_info, int nongit) } static const char * const bugreport_usage[] = { - N_("git bugreport [-o|--output-directory <file>] [-s|--suffix <format>]"), + N_("git bugreport [-o|--output-directory <file>] [-s|--suffix <format>] [--diagnose[=<mode>]"), NULL }; @@ -98,16 +99,21 @@ int cmd_bugreport(int argc, const char **argv, const char *prefix) int report = -1; time_t now = time(NULL); struct tm tm; + enum diagnose_mode diagnose = DIAGNOSE_NONE; char *option_output = NULL; char *option_suffix = "%Y-%m-%d-%H%M"; const char *user_relative_path = NULL; char *prefixed_filename; + size_t output_path_len; const struct option bugreport_options[] = { + OPT_CALLBACK_F(0, "diagnose", &diagnose, N_("mode"), + N_("create an additional zip archive of detailed diagnostics (default 'stats')"), + PARSE_OPT_OPTARG, option_parse_diagnose), OPT_STRING('o', "output-directory", &option_output, N_("path"), - N_("specify a destination for the bugreport file")), + N_("specify a destination for the bugreport file(s)")), OPT_STRING('s', "suffix", &option_suffix, N_("format"), - N_("specify a strftime format suffix for the filename")), + N_("specify a strftime format suffix for the filename(s)")), OPT_END() }; @@ -119,6 +125,7 @@ int cmd_bugreport(int argc, const char **argv, const char *prefix) 
option_output ? option_output : ""); strbuf_addstr(&report_path, prefixed_filename); strbuf_complete(&report_path, '/'); + output_path_len = report_path.len; strbuf_addstr(&report_path, "git-bugreport-"); strbuf_addftime(&report_path, option_suffix, localtime_r(&now, &tm), 0, 0); @@ -133,6 +140,20 @@ int cmd_bugreport(int argc, const char **argv, const char *prefix) report_path.buf); } + /* Prepare diagnostics, if requested */ + if (diagnose != DIAGNOSE_NONE) { + struct strbuf zip_path = STRBUF_INIT; + strbuf_add(&zip_path, report_path.buf, output_path_len); + strbuf_addstr(&zip_path, "git-diagnostics-"); + strbuf_addftime(&zip_path, option_suffix, localtime_r(&now, &tm), 0, 0); + strbuf_addstr(&zip_path, ".zip"); + + if (create_diagnostics_archive(&zip_path, diagnose)) + die_errno(_("unable to create diagnostics archive %s"), zip_path.buf); + + strbuf_release(&zip_path); + } + /* Prepare the report contents */ get_bug_template(&buffer); diff --git a/builtin/cat-file.c b/builtin/cat-file.c index 50cf38999d..989eee0bb4 100644 --- a/builtin/cat-file.c +++ b/builtin/cat-file.c @@ -16,6 +16,7 @@ #include "packfile.h" #include "object-store.h" #include "promisor-remote.h" +#include "mailmap.h" enum batch_mode { BATCH_MODE_CONTENTS, @@ -31,11 +32,28 @@ struct batch_options { int all_objects; int unordered; int transform_mode; /* may be 'w' or 'c' for --filters or --textconv */ + int nul_terminated; const char *format; }; static const char *force_path; +static struct string_list mailmap = STRING_LIST_INIT_NODUP; +static int use_mailmap; + +static char *replace_idents_using_mailmap(char *, size_t *); + +static char *replace_idents_using_mailmap(char *object_buf, size_t *size) +{ + struct strbuf sb = STRBUF_INIT; + const char *headers[] = { "author ", "committer ", "tagger ", NULL }; + + strbuf_attach(&sb, object_buf, *size, *size + 1); + apply_mailmap_to_header(&sb, headers, &mailmap); + *size = sb.len; + return strbuf_detach(&sb, NULL); +} + static int filter_object(const char *path, unsigned mode, const struct object_id *oid, char **buf, unsigned long *size) @@ -71,6 +89,7 @@ static int stream_blob(const struct object_id *oid) static int cat_one_file(int opt, const char *exp_type, const char *obj_name, int unknown_type) { + int ret; struct object_id oid; enum object_type type; char *buf; @@ -106,7 +125,8 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name, if (sb.len) { printf("%s\n", sb.buf); strbuf_release(&sb); - return 0; + ret = 0; + goto cleanup; } break; @@ -115,7 +135,8 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name, if (oid_object_info_extended(the_repository, &oid, &oi, flags) < 0) die("git cat-file: could not get object info"); printf("%"PRIuMAX"\n", (uintmax_t)size); - return 0; + ret = 0; + goto cleanup; case 'e': return !has_object_file(&oid); @@ -123,8 +144,10 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name, case 'w': if (filter_object(path, obj_context.mode, - &oid, &buf, &size)) - return -1; + &oid, &buf, &size)) { + ret = -1; + goto cleanup; + } break; case 'c': @@ -143,15 +166,24 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name, const char *ls_args[3] = { NULL }; ls_args[0] = "ls-tree"; ls_args[1] = obj_name; - return cmd_ls_tree(2, ls_args, NULL); + ret = cmd_ls_tree(2, ls_args, NULL); + goto cleanup; } - if (type == OBJ_BLOB) - return stream_blob(&oid); + if (type == OBJ_BLOB) { + ret = stream_blob(&oid); + goto cleanup; + } buf = read_object_file(&oid, 
&type, &size); if (!buf) die("Cannot read object %s", obj_name); + if (use_mailmap) { + size_t s = size; + buf = replace_idents_using_mailmap(buf, &s); + size = cast_size_t_to_ulong(s); + } + /* otherwise just spit out the data */ break; @@ -172,8 +204,10 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name, } else oidcpy(&blob_oid, &oid); - if (oid_object_info(the_repository, &blob_oid, NULL) == OBJ_BLOB) - return stream_blob(&blob_oid); + if (oid_object_info(the_repository, &blob_oid, NULL) == OBJ_BLOB) { + ret = stream_blob(&blob_oid); + goto cleanup; + } /* * we attempted to dereference a tag to a blob * and failed; there may be new dereference @@ -183,6 +217,12 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name, } buf = read_object_with_reference(the_repository, &oid, exp_type_id, &size, NULL); + + if (use_mailmap) { + size_t s = size; + buf = replace_idents_using_mailmap(buf, &s); + size = cast_size_t_to_ulong(s); + } break; } default: @@ -193,9 +233,11 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name, die("git cat-file %s: bad file", obj_name); write_or_die(1, buf, size); + ret = 0; +cleanup: free(buf); free(obj_context.path); - return 0; + return ret; } struct expand_data { @@ -348,11 +390,18 @@ static void print_object_or_die(struct batch_options *opt, struct expand_data *d void *contents; contents = read_object_file(oid, &type, &size); + + if (use_mailmap) { + size_t s = size; + contents = replace_idents_using_mailmap(contents, &s); + size = cast_size_t_to_ulong(s); + } + if (!contents) die("object %s disappeared", oid_to_hex(oid)); if (type != data->type) die("object %s changed type!?", oid_to_hex(oid)); - if (data->info.sizep && size != data->size) + if (data->info.sizep && size != data->size && !use_mailmap) die("object %s changed size!?", oid_to_hex(oid)); batch_write(opt, contents, size); @@ -602,12 +651,20 @@ static void batch_objects_command(struct batch_options *opt, struct queued_cmd *queued_cmd = NULL; size_t alloc = 0, nr = 0; - while (!strbuf_getline(&input, stdin)) { - int i; + while (1) { + int i, ret; const struct parse_cmd *cmd = NULL; const char *p = NULL, *cmd_end; struct queued_cmd call = {0}; + if (opt->nul_terminated) + ret = strbuf_getline_nul(&input, stdin); + else + ret = strbuf_getline(&input, stdin); + + if (ret) + break; + if (!input.len) die(_("empty command in input")); if (isspace(*input.buf)) @@ -655,6 +712,7 @@ static void batch_objects_command(struct batch_options *opt, free_cmds(queued_cmd, &nr); } + free_cmds(queued_cmd, &nr); free(queued_cmd); strbuf_release(&input); } @@ -750,7 +808,16 @@ static int batch_objects(struct batch_options *opt) goto cleanup; } - while (strbuf_getline(&input, stdin) != EOF) { + while (1) { + int ret; + if (opt->nul_terminated) + ret = strbuf_getline_nul(&input, stdin); + else + ret = strbuf_getline(&input, stdin); + + if (ret == EOF) + break; + if (data.split_on_whitespace) { /* * Split at first whitespace, tying off the beginning @@ -843,6 +910,8 @@ int cmd_cat_file(int argc, const char **argv, const char *prefix) OPT_CMDMODE('s', NULL, &opt, N_("show object size"), 's'), OPT_BOOL(0, "allow-unknown-type", &unknown_type, N_("allow -s and -t to work with broken/corrupt objects")), + OPT_BOOL(0, "use-mailmap", &use_mailmap, N_("use mail map file")), + OPT_ALIAS(0, "mailmap", "use-mailmap"), /* Batch mode */ OPT_GROUP(N_("Batch objects requested on stdin (or --batch-all-objects)")), OPT_CALLBACK_F(0, "batch", &batch, N_("format"), @@ 
-853,6 +922,7 @@ int cmd_cat_file(int argc, const char **argv, const char *prefix) N_("like --batch, but don't emit <contents>"), PARSE_OPT_OPTARG | PARSE_OPT_NONEG, batch_option_callback), + OPT_BOOL('z', NULL, &batch.nul_terminated, N_("stdin is NUL-terminated")), OPT_CALLBACK_F(0, "batch-command", &batch, N_("format"), N_("read commands from stdin"), PARSE_OPT_OPTARG | PARSE_OPT_NONEG, @@ -885,6 +955,9 @@ int cmd_cat_file(int argc, const char **argv, const char *prefix) opt_cw = (opt == 'c' || opt == 'w'); opt_epts = (opt == 'e' || opt == 'p' || opt == 't' || opt == 's'); + if (use_mailmap) + read_mailmap(&mailmap); + /* --batch-all-objects? */ if (opt == 'b') batch.all_objects = 1; @@ -908,6 +981,9 @@ int cmd_cat_file(int argc, const char **argv, const char *prefix) else if (batch.all_objects) usage_msg_optf(_("'%s' requires a batch mode"), usage, options, "--batch-all-objects"); + else if (batch.nul_terminated) + usage_msg_optf(_("'%s' requires a batch mode"), usage, options, + "-z"); /* Batch defaults */ if (batch.buffer_output < 0) diff --git a/builtin/check-ref-format.c b/builtin/check-ref-format.c index bc67d3f0a8..fd0e5f8683 100644 --- a/builtin/check-ref-format.c +++ b/builtin/check-ref-format.c @@ -57,6 +57,8 @@ int cmd_check_ref_format(int argc, const char **argv, const char *prefix) int normalize = 0; int flags = 0; const char *refname; + char *to_free = NULL; + int ret = 1; if (argc == 2 && !strcmp(argv[1], "-h")) usage(builtin_check_ref_format_usage); @@ -81,11 +83,14 @@ int cmd_check_ref_format(int argc, const char **argv, const char *prefix) refname = argv[i]; if (normalize) - refname = collapse_slashes(refname); + refname = to_free = collapse_slashes(refname); if (check_refname_format(refname, flags)) - return 1; + goto cleanup; if (normalize) printf("%s\n", refname); - return 0; + ret = 0; +cleanup: + free(to_free); + return ret; } diff --git a/builtin/checkout.c b/builtin/checkout.c index cdd96cd9c6..f9d63d80b9 100644 --- a/builtin/checkout.c +++ b/builtin/checkout.c @@ -711,6 +711,26 @@ static void setup_branch_path(struct branch_info *branch) branch->path = strbuf_detach(&buf, NULL); } +static void init_topts(struct unpack_trees_options *topts, int merge, + int show_progress, int overwrite_ignore, + struct commit *old_commit) +{ + memset(topts, 0, sizeof(*topts)); + topts->head_idx = -1; + topts->src_index = &the_index; + topts->dst_index = &the_index; + + setup_unpack_trees_porcelain(topts, "checkout"); + + topts->initial_checkout = is_cache_unborn(); + topts->update = 1; + topts->merge = 1; + topts->quiet = merge && old_commit; + topts->verbose_update = show_progress; + topts->fn = twoway_merge; + topts->preserve_ignored = !overwrite_ignore; +} + static int merge_working_tree(const struct checkout_opts *opts, struct branch_info *old_branch_info, struct branch_info *new_branch_info, @@ -741,13 +761,6 @@ static int merge_working_tree(const struct checkout_opts *opts, struct unpack_trees_options topts; const struct object_id *old_commit_oid; - memset(&topts, 0, sizeof(topts)); - topts.head_idx = -1; - topts.src_index = &the_index; - topts.dst_index = &the_index; - - setup_unpack_trees_porcelain(&topts, "checkout"); - refresh_cache(REFRESH_QUIET); if (unmerged_cache()) { @@ -756,17 +769,12 @@ static int merge_working_tree(const struct checkout_opts *opts, } /* 2-way merge to the new branch */ - topts.initial_checkout = is_cache_unborn(); - topts.update = 1; - topts.merge = 1; - topts.quiet = opts->merge && old_branch_info->commit; - topts.verbose_update = 
opts->show_progress; - topts.fn = twoway_merge; + init_topts(&topts, opts->merge, opts->show_progress, + opts->overwrite_ignore, old_branch_info->commit); init_checkout_metadata(&topts.meta, new_branch_info->refname, new_branch_info->commit ? &new_branch_info->commit->object.oid : &new_branch_info->oid, NULL); - topts.preserve_ignored = !opts->overwrite_ignore; old_commit_oid = old_branch_info->commit ? &old_branch_info->commit->object.oid : diff --git a/builtin/clone.c b/builtin/clone.c index 9e0b2b45ca..c4ff4643ec 100644 --- a/builtin/clone.c +++ b/builtin/clone.c @@ -494,6 +494,7 @@ static struct ref *wanted_peer_refs(const struct ref *refs, /* if --branch=tag, pull the requested tag explicitly */ get_fetch_map(remote_head, tag_refspec, &tail, 0); } + free_refs(remote_head); } else { int i; for (i = 0; i < refspec->nr; i++) diff --git a/builtin/diagnose.c b/builtin/diagnose.c new file mode 100644 index 0000000000..cd260c2015 --- /dev/null +++ b/builtin/diagnose.c @@ -0,0 +1,61 @@ +#include "builtin.h" +#include "parse-options.h" +#include "diagnose.h" + +static const char * const diagnose_usage[] = { + N_("git diagnose [-o|--output-directory <path>] [-s|--suffix <format>] [--mode=<mode>]"), + NULL +}; + +int cmd_diagnose(int argc, const char **argv, const char *prefix) +{ + struct strbuf zip_path = STRBUF_INIT; + time_t now = time(NULL); + struct tm tm; + enum diagnose_mode mode = DIAGNOSE_STATS; + char *option_output = NULL; + char *option_suffix = "%Y-%m-%d-%H%M"; + char *prefixed_filename; + + const struct option diagnose_options[] = { + OPT_STRING('o', "output-directory", &option_output, N_("path"), + N_("specify a destination for the diagnostics archive")), + OPT_STRING('s', "suffix", &option_suffix, N_("format"), + N_("specify a strftime format suffix for the filename")), + OPT_CALLBACK_F(0, "mode", &mode, N_("(stats|all)"), + N_("specify the content of the diagnostic archive"), + PARSE_OPT_NONEG, option_parse_diagnose), + OPT_END() + }; + + argc = parse_options(argc, argv, prefix, diagnose_options, + diagnose_usage, 0); + + /* Prepare the path to put the result */ + prefixed_filename = prefix_filename(prefix, + option_output ? 
option_output : ""); + strbuf_addstr(&zip_path, prefixed_filename); + strbuf_complete(&zip_path, '/'); + + strbuf_addstr(&zip_path, "git-diagnostics-"); + strbuf_addftime(&zip_path, option_suffix, localtime_r(&now, &tm), 0, 0); + strbuf_addstr(&zip_path, ".zip"); + + switch (safe_create_leading_directories(zip_path.buf)) { + case SCLD_OK: + case SCLD_EXISTS: + break; + default: + die_errno(_("could not create leading directories for '%s'"), + zip_path.buf); + } + + /* Prepare diagnostics */ + if (create_diagnostics_archive(&zip_path, mode)) + die_errno(_("unable to create diagnostics archive %s"), + zip_path.buf); + + free(prefixed_filename); + strbuf_release(&zip_path); + return 0; +} diff --git a/builtin/fetch.c b/builtin/fetch.c index ac29c2b1ae..368a0f5329 100644 --- a/builtin/fetch.c +++ b/builtin/fetch.c @@ -490,7 +490,9 @@ static void filter_prefetch_refspec(struct refspec *rs) continue; if (!rs->items[i].dst || (rs->items[i].src && - !strncmp(rs->items[i].src, "refs/tags/", 10))) { + !strncmp(rs->items[i].src, + ref_namespace[NAMESPACE_TAGS].ref, + strlen(ref_namespace[NAMESPACE_TAGS].ref)))) { int j; free(rs->items[i].src); @@ -506,7 +508,7 @@ static void filter_prefetch_refspec(struct refspec *rs) } old_dst = rs->items[i].dst; - strbuf_addstr(&new_dst, "refs/prefetch/"); + strbuf_addstr(&new_dst, ref_namespace[NAMESPACE_PREFETCH].ref); /* * If old_dst starts with "refs/", then place @@ -881,11 +883,9 @@ static void format_display(struct strbuf *display, char code, static int update_local_ref(struct ref *ref, struct ref_transaction *transaction, const char *remote, const struct ref *remote_ref, - struct strbuf *display, int summary_width, - struct worktree **worktrees) + struct strbuf *display, int summary_width) { struct commit *current = NULL, *updated; - const struct worktree *wt; const char *pretty_ref = prettify_refname(ref->name); int fast_forward = 0; @@ -900,16 +900,14 @@ static int update_local_ref(struct ref *ref, } if (!update_head_ok && - (wt = find_shared_symref(worktrees, "HEAD", ref->name)) && - !wt->is_bare && !is_null_oid(&ref->old_oid)) { + !is_null_oid(&ref->old_oid) && + branch_checked_out(ref->name)) { /* * If this is the head, and it's not okay to update * the head, and the old value of the head isn't empty... */ format_display(display, '!', _("[rejected]"), - wt->is_current ? 
- _("can't fetch in current branch") : - _("checked out in another worktree"), + _("can't fetch into checked-out branch"), remote, pretty_ref, summary_width); return 1; } @@ -1110,10 +1108,10 @@ N_("it took %.2f seconds to check forced updates; you can use\n" static int store_updated_refs(const char *raw_url, const char *remote_name, int connectivity_checked, struct ref_transaction *transaction, struct ref *ref_map, - struct fetch_head *fetch_head, struct worktree **worktrees) + struct fetch_head *fetch_head) { int url_len, i, rc = 0; - struct strbuf note = STRBUF_INIT, err = STRBUF_INIT; + struct strbuf note = STRBUF_INIT; const char *what, *kind; struct ref *rm; char *url; @@ -1240,8 +1238,7 @@ static int store_updated_refs(const char *raw_url, const char *remote_name, strbuf_reset(¬e); if (ref) { rc |= update_local_ref(ref, transaction, what, - rm, ¬e, summary_width, - worktrees); + rm, ¬e, summary_width); free(ref); } else if (write_fetch_head || dry_run) { /* @@ -1281,7 +1278,6 @@ static int store_updated_refs(const char *raw_url, const char *remote_name, abort: strbuf_release(¬e); - strbuf_release(&err); free(url); return rc; } @@ -1332,8 +1328,7 @@ static int check_exist_and_connected(struct ref *ref_map) static int fetch_and_consume_refs(struct transport *transport, struct ref_transaction *transaction, struct ref *ref_map, - struct fetch_head *fetch_head, - struct worktree **worktrees) + struct fetch_head *fetch_head) { int connectivity_checked = 1; int ret; @@ -1356,7 +1351,7 @@ static int fetch_and_consume_refs(struct transport *transport, trace2_region_enter("fetch", "consume_refs", the_repository); ret = store_updated_refs(transport->url, transport->remote->name, connectivity_checked, transaction, ref_map, - fetch_head, worktrees); + fetch_head); trace2_region_leave("fetch", "consume_refs", the_repository); out: @@ -1434,19 +1429,16 @@ cleanup: return result; } -static void check_not_current_branch(struct ref *ref_map, - struct worktree **worktrees) +static void check_not_current_branch(struct ref *ref_map) { - const struct worktree *wt; + const char *path; for (; ref_map; ref_map = ref_map->next) if (ref_map->peer_ref && starts_with(ref_map->peer_ref->name, "refs/heads/") && - (wt = find_shared_symref(worktrees, "HEAD", - ref_map->peer_ref->name)) && - !wt->is_bare) + (path = branch_checked_out(ref_map->peer_ref->name))) die(_("refusing to fetch into branch '%s' " "checked out at '%s'"), - ref_map->peer_ref->name, wt->path); + ref_map->peer_ref->name, path); } static int truncate_fetch_head(void) @@ -1549,8 +1541,7 @@ static struct transport *prepare_transport(struct remote *remote, int deepen) static int backfill_tags(struct transport *transport, struct ref_transaction *transaction, struct ref *ref_map, - struct fetch_head *fetch_head, - struct worktree **worktrees) + struct fetch_head *fetch_head) { int retcode, cannot_reuse; @@ -1571,7 +1562,7 @@ static int backfill_tags(struct transport *transport, transport_set_option(transport, TRANS_OPT_FOLLOWTAGS, NULL); transport_set_option(transport, TRANS_OPT_DEPTH, "0"); transport_set_option(transport, TRANS_OPT_DEEPEN_RELATIVE, NULL); - retcode = fetch_and_consume_refs(transport, transaction, ref_map, fetch_head, worktrees); + retcode = fetch_and_consume_refs(transport, transaction, ref_map, fetch_head); if (gsecondary) { transport_disconnect(gsecondary); @@ -1592,7 +1583,6 @@ static int do_fetch(struct transport *transport, struct transport_ls_refs_options transport_ls_refs_options = TRANSPORT_LS_REFS_OPTIONS_INIT; int 
must_list_refs = 1; - struct worktree **worktrees = get_worktrees(); struct fetch_head fetch_head = { 0 }; struct strbuf err = STRBUF_INIT; @@ -1650,7 +1640,7 @@ static int do_fetch(struct transport *transport, ref_map = get_ref_map(transport->remote, remote_refs, rs, tags, &autotags); if (!update_head_ok) - check_not_current_branch(ref_map, worktrees); + check_not_current_branch(ref_map); retcode = open_fetch_head(&fetch_head); if (retcode) @@ -1683,7 +1673,7 @@ static int do_fetch(struct transport *transport, retcode = 1; } - if (fetch_and_consume_refs(transport, transaction, ref_map, &fetch_head, worktrees)) { + if (fetch_and_consume_refs(transport, transaction, ref_map, &fetch_head)) { retcode = 1; goto cleanup; } @@ -1706,7 +1696,7 @@ static int do_fetch(struct transport *transport, * the transaction and don't commit anything. */ if (backfill_tags(transport, transaction, tags_ref_map, - &fetch_head, worktrees)) + &fetch_head)) retcode = 1; } @@ -1791,7 +1781,6 @@ cleanup: close_fetch_head(&fetch_head); strbuf_release(&err); free_refs(ref_map); - free_worktrees(worktrees); return retcode; } diff --git a/builtin/gc.c b/builtin/gc.c index 021e9256ae..6c22205217 100644 --- a/builtin/gc.c +++ b/builtin/gc.c @@ -168,9 +168,15 @@ struct maintenance_run_opts; static int maintenance_task_pack_refs(MAYBE_UNUSED struct maintenance_run_opts *opts) { struct strvec pack_refs_cmd = STRVEC_INIT; + int ret; + strvec_pushl(&pack_refs_cmd, "pack-refs", "--all", "--prune", NULL); - return run_command_v_opt(pack_refs_cmd.v, RUN_GIT_CMD); + ret = run_command_v_opt(pack_refs_cmd.v, RUN_GIT_CMD); + + strvec_clear(&pack_refs_cmd); + + return ret; } static int too_many_loose_objects(void) @@ -904,12 +910,6 @@ static int fetch_remote(struct remote *remote, void *cbdata) static int maintenance_task_prefetch(struct maintenance_run_opts *opts) { - git_config_set_multivar_gently("log.excludedecoration", - "refs/prefetch/", - "refs/prefetch/", - CONFIG_FLAGS_FIXED_VALUE | - CONFIG_FLAGS_MULTI_REPLACE); - if (for_each_remote(fetch_remote, opts)) { error(_("failed to prefetch remotes")); return 1; diff --git a/builtin/grep.c b/builtin/grep.c index bcb07ea7f7..e6bcdf860c 100644 --- a/builtin/grep.c +++ b/builtin/grep.c @@ -961,6 +961,8 @@ int cmd_grep(int argc, const char **argv, const char *prefix) OPT_BOOL_F(0, "ext-grep", &external_grep_allowed__ignored, N_("allow calling of grep(1) (ignored by this build)"), PARSE_OPT_NOCOMPLETE), + OPT_INTEGER('m', "max-count", &opt.max_count, + N_("maximum number of results per file")), OPT_END() }; grep_prefix = prefix; @@ -1101,6 +1103,13 @@ int cmd_grep(int argc, const char **argv, const char *prefix) if (recurse_submodules && untracked) die(_("--untracked not supported with --recurse-submodules")); + /* + * Optimize out the case where the amount of matches is limited to zero. + * We do this to keep results consistent with GNU grep(1). 
+ */ + if (opt.max_count == 0) + return 1; + if (show_in_pager) { if (num_threads > 1) warning(_("invalid option combination, ignoring --threads")); diff --git a/builtin/help.c b/builtin/help.c index 222f994f86..09ac4289f1 100644 --- a/builtin/help.c +++ b/builtin/help.c @@ -43,6 +43,8 @@ static enum help_action { HELP_ACTION_ALL = 1, HELP_ACTION_GUIDES, HELP_ACTION_CONFIG, + HELP_ACTION_USER_INTERFACES, + HELP_ACTION_DEVELOPER_INTERFACES, HELP_ACTION_CONFIG_FOR_COMPLETION, HELP_ACTION_CONFIG_SECTIONS_FOR_COMPLETION, } cmd_mode; @@ -69,6 +71,12 @@ static struct option builtin_help_options[] = { OPT_CMDMODE('g', "guides", &cmd_mode, N_("print list of useful guides"), HELP_ACTION_GUIDES), + OPT_CMDMODE(0, "user-interfaces", &cmd_mode, + N_("print list of user-facing repository, command and file interfaces"), + HELP_ACTION_USER_INTERFACES), + OPT_CMDMODE(0, "developer-interfaces", &cmd_mode, + N_("print list of file formats, protocols and other developer interfaces"), + HELP_ACTION_DEVELOPER_INTERFACES), OPT_CMDMODE('c', "config", &cmd_mode, N_("print all configuration variable names"), HELP_ACTION_CONFIG), OPT_CMDMODE_F(0, "config-for-completion", &cmd_mode, "", @@ -81,9 +89,11 @@ static struct option builtin_help_options[] = { static const char * const builtin_help_usage[] = { "git help [-a|--all] [--[no-]verbose]] [--[no-]external-commands] [--[no-]aliases]", - N_("git help [[-i|--info] [-m|--man] [-w|--web]] [<command>]"), + N_("git help [[-i|--info] [-m|--man] [-w|--web]] [<command>|<doc>]"), "git help [-g|--guides]", "git help [-c|--config]", + "git help [--user-interfaces]", + "git help [--developer-interfaces]", NULL }; @@ -654,6 +664,14 @@ int cmd_help(int argc, const char **argv, const char *prefix) opt_mode_usage(argc, "--config-for-completion", help_format); list_config_help(SHOW_CONFIG_VARS); return 0; + case HELP_ACTION_USER_INTERFACES: + opt_mode_usage(argc, "--user-interfaces", help_format); + list_user_interfaces_help(); + return 0; + case HELP_ACTION_DEVELOPER_INTERFACES: + opt_mode_usage(argc, "--developer-interfaces", help_format); + list_developer_interfaces_help(); + return 0; case HELP_ACTION_CONFIG_SECTIONS_FOR_COMPLETION: opt_mode_usage(argc, "--config-sections-for-completion", help_format); diff --git a/builtin/log.c b/builtin/log.c index 88a5e98875..fd209725e5 100644 --- a/builtin/log.c +++ b/builtin/log.c @@ -101,6 +101,20 @@ static int parse_decoration_style(const char *value) return -1; } +static int use_default_decoration_filter = 1; +static struct string_list decorate_refs_exclude = STRING_LIST_INIT_NODUP; +static struct string_list decorate_refs_exclude_config = STRING_LIST_INIT_NODUP; +static struct string_list decorate_refs_include = STRING_LIST_INIT_NODUP; + +static int clear_decorations_callback(const struct option *opt, + const char *arg, int unset) +{ + string_list_clear(&decorate_refs_include, 0); + string_list_clear(&decorate_refs_exclude, 0); + use_default_decoration_filter = 0; + return 0; +} + static int decorate_callback(const struct option *opt, const char *arg, int unset) { if (unset) @@ -162,18 +176,61 @@ static void cmd_log_init_defaults(struct rev_info *rev) parse_date_format(default_date_mode, &rev->date_mode); } +static void set_default_decoration_filter(struct decoration_filter *decoration_filter) +{ + int i; + char *value = NULL; + struct string_list *include = decoration_filter->include_ref_pattern; + const struct string_list *config_exclude = + git_config_get_value_multi("log.excludeDecoration"); + + if (config_exclude) { + struct 
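Two user-visible additions conclude just above: `git grep` gains `-m`/`--max-count` with a short-circuit so that a limit of zero prints nothing and exits non-zero (mirroring GNU grep, per the comment above), and `git help` learns to list the user-facing and developer-facing documentation sets. Illustrative calls using only the options defined in those hunks:

    git grep -m 1 -n TODO        # at most one hit per file; TODO is an arbitrary pattern
    git help --user-interfaces
    git help --developer-interfaces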
string_list_item *item; + for_each_string_list_item(item, config_exclude) + string_list_append(decoration_filter->exclude_ref_config_pattern, + item->string); + } + + /* + * By default, decorate_all is disabled. Enable it if + * log.initialDecorationSet=all. Don't ever disable it by config, + * since the command-line takes precedent. + */ + if (use_default_decoration_filter && + !git_config_get_string("log.initialdecorationset", &value) && + !strcmp("all", value)) + use_default_decoration_filter = 0; + free(value); + + if (!use_default_decoration_filter || + decoration_filter->exclude_ref_pattern->nr || + decoration_filter->include_ref_pattern->nr || + decoration_filter->exclude_ref_config_pattern->nr) + return; + + /* + * No command-line or config options were given, so + * populate with sensible defaults. + */ + for (i = 0; i < ARRAY_SIZE(ref_namespace); i++) { + if (!ref_namespace[i].decoration) + continue; + + string_list_append(include, ref_namespace[i].ref); + } +} + static void cmd_log_init_finish(int argc, const char **argv, const char *prefix, struct rev_info *rev, struct setup_revision_opt *opt) { struct userformat_want w; int quiet = 0, source = 0, mailmap; static struct line_opt_callback_data line_cb = {NULL, NULL, STRING_LIST_INIT_DUP}; - static struct string_list decorate_refs_exclude = STRING_LIST_INIT_NODUP; - static struct string_list decorate_refs_exclude_config = STRING_LIST_INIT_NODUP; - static struct string_list decorate_refs_include = STRING_LIST_INIT_NODUP; - struct decoration_filter decoration_filter = {&decorate_refs_include, - &decorate_refs_exclude, - &decorate_refs_exclude_config}; + struct decoration_filter decoration_filter = { + .exclude_ref_pattern = &decorate_refs_exclude, + .include_ref_pattern = &decorate_refs_include, + .exclude_ref_config_pattern = &decorate_refs_exclude_config, + }; static struct revision_sources revision_sources; const struct option builtin_log_options[] = { @@ -181,6 +238,10 @@ static void cmd_log_init_finish(int argc, const char **argv, const char *prefix, OPT_BOOL(0, "source", &source, N_("show source")), OPT_BOOL(0, "use-mailmap", &mailmap, N_("use mail map file")), OPT_ALIAS(0, "mailmap", "use-mailmap"), + OPT_CALLBACK_F(0, "clear-decorations", NULL, NULL, + N_("clear all previously-defined decoration filters"), + PARSE_OPT_NOARG | PARSE_OPT_NONEG, + clear_decorations_callback), OPT_STRING_LIST(0, "decorate-refs", &decorate_refs_include, N_("pattern"), N_("only decorate refs that match <pattern>")), OPT_STRING_LIST(0, "decorate-refs-exclude", &decorate_refs_exclude, @@ -265,16 +326,7 @@ static void cmd_log_init_finish(int argc, const char **argv, const char *prefix, } if (decoration_style || rev->simplify_by_decoration) { - const struct string_list *config_exclude = - repo_config_get_value_multi(the_repository, - "log.excludeDecoration"); - - if (config_exclude) { - struct string_list_item *item; - for_each_string_list_item(item, config_exclude) - string_list_append(&decorate_refs_exclude_config, - item->string); - } + set_default_decoration_filter(&decoration_filter); if (decoration_style) rev->show_decorations = 1; @@ -668,10 +720,10 @@ static void show_setup_revisions_tweak(struct rev_info *rev, int cmd_show(int argc, const char **argv, const char *prefix) { struct rev_info rev; - struct object_array_entry *objects; + unsigned int i; struct setup_revision_opt opt; struct pathspec match_all; - int i, count, ret = 0; + int ret = 0; init_log_defaults(); git_config(git_log_config, NULL); @@ -698,12 +750,10 @@ int cmd_show(int 
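The decoration-filter rework above gives `git log` a built-in default set of ref namespaces to decorate, plus two ways to opt back into decorating everything. A sketch of both knobs:

    git log --oneline --decorate --clear-decorations
    git config log.initialDecorationSet all

`--clear-decorations` wipes any `--decorate-refs`/`--decorate-refs-exclude` patterns given earlier on the command line as well as the built-in defaults, so every ref becomes eligible for decoration; setting the config value to "all" starts from the same full ref set unless command-line patterns take precedence.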
argc, const char **argv, const char *prefix) if (!rev.no_walk) return cmd_log_deinit(cmd_log_walk(&rev), &rev); - count = rev.pending.nr; - objects = rev.pending.objects; rev.diffopt.no_free = 1; - for (i = 0; i < count && !ret; i++) { - struct object *o = objects[i].item; - const char *name = objects[i].name; + for (i = 0; i < rev.pending.nr && !ret; i++) { + struct object *o = rev.pending.objects[i].item; + const char *name = rev.pending.objects[i].name; switch (o->type) { case OBJ_BLOB: ret = show_blob_object(&o->oid, &rev, name); @@ -726,7 +776,7 @@ int cmd_show(int argc, const char **argv, const char *prefix) if (!o) ret = error(_("could not read object %s"), oid_to_hex(oid)); - objects[i].item = o; + rev.pending.objects[i].item = o; i--; break; } @@ -743,11 +793,24 @@ int cmd_show(int argc, const char **argv, const char *prefix) rev.shown_one = 1; break; case OBJ_COMMIT: - rev.pending.nr = rev.pending.alloc = 0; - rev.pending.objects = NULL; + { + struct object_array old; + struct object_array blank = OBJECT_ARRAY_INIT; + + memcpy(&old, &rev.pending, sizeof(old)); + memcpy(&rev.pending, &blank, sizeof(rev.pending)); + add_object_array(o, name, &rev.pending); ret = cmd_log_walk_no_free(&rev); + + /* + * No need for + * object_array_clear(&pending). It was + * cleared already in prepare_revision_walk() + */ + memcpy(&rev.pending, &old, sizeof(rev.pending)); break; + } default: ret = error(_("unknown type: %d"), o->type); } diff --git a/builtin/ls-files.c b/builtin/ls-files.c index e791b65e7e..779dc18e59 100644 --- a/builtin/ls-files.c +++ b/builtin/ls-files.c @@ -11,6 +11,7 @@ #include "quote.h" #include "dir.h" #include "builtin.h" +#include "strbuf.h" #include "tree.h" #include "cache-tree.h" #include "parse-options.h" @@ -48,6 +49,7 @@ static char *ps_matched; static const char *with_tree; static int exc_given; static int exclude_args; +static const char *format; static const char *tag_cached = ""; static const char *tag_unmerged = ""; @@ -85,6 +87,16 @@ static void write_name(const char *name) stdout, line_terminator); } +static void write_name_to_buf(struct strbuf *sb, const char *name) +{ + const char *rel = relative_path(name, prefix_len ? prefix : NULL, sb); + + if (line_terminator) + quote_c_style(rel, sb, NULL, 0); + else + strbuf_addstr(sb, rel); +} + static const char *get_tag(const struct cache_entry *ce, const char *tag) { static char alttag[4]; @@ -222,6 +234,73 @@ static void show_submodule(struct repository *superproject, repo_clear(&subrepo); } +struct show_index_data { + const char *pathname; + struct index_state *istate; + const struct cache_entry *ce; +}; + +static size_t expand_show_index(struct strbuf *sb, const char *start, + void *context) +{ + struct show_index_data *data = context; + const char *end; + const char *p; + size_t len = strbuf_expand_literal_cb(sb, start, NULL); + struct stat st; + + if (len) + return len; + if (*start != '(') + die(_("bad ls-files format: element '%s' " + "does not start with '('"), start); + + end = strchr(start + 1, ')'); + if (!end) + die(_("bad ls-files format: element '%s'" + "does not end in ')'"), start); + + len = end - start + 1; + if (skip_prefix(start, "(objectmode)", &p)) + strbuf_addf(sb, "%06o", data->ce->ce_mode); + else if (skip_prefix(start, "(objectname)", &p)) + strbuf_add_unique_abbrev(sb, &data->ce->oid, abbrev); + else if (skip_prefix(start, "(stage)", &p)) + strbuf_addf(sb, "%d", ce_stage(data->ce)); + else if (skip_prefix(start, "(eolinfo:index)", &p)) + strbuf_addstr(sb, S_ISREG(data->ce->ce_mode) ? 
+ get_cached_convert_stats_ascii(data->istate, + data->ce->name) : ""); + else if (skip_prefix(start, "(eolinfo:worktree)", &p)) + strbuf_addstr(sb, !lstat(data->pathname, &st) && + S_ISREG(st.st_mode) ? + get_wt_convert_stats_ascii(data->pathname) : ""); + else if (skip_prefix(start, "(eolattr)", &p)) + strbuf_addstr(sb, get_convert_attr_ascii(data->istate, + data->pathname)); + else if (skip_prefix(start, "(path)", &p)) + write_name_to_buf(sb, data->pathname); + else + die(_("bad ls-files format: %%%.*s"), (int)len, start); + + return len; +} + +static void show_ce_fmt(struct repository *repo, const struct cache_entry *ce, + const char *format, const char *fullname) { + struct show_index_data data = { + .pathname = fullname, + .istate = repo->index, + .ce = ce, + }; + struct strbuf sb = STRBUF_INIT; + + strbuf_expand(&sb, format, expand_show_index, &data); + strbuf_addch(&sb, line_terminator); + fwrite(sb.buf, sb.len, 1, stdout); + strbuf_release(&sb); +} + static void show_ce(struct repository *repo, struct dir_struct *dir, const struct cache_entry *ce, const char *fullname, const char *tag) @@ -236,6 +315,12 @@ static void show_ce(struct repository *repo, struct dir_struct *dir, max_prefix_len, ps_matched, S_ISDIR(ce->ce_mode) || S_ISGITLINK(ce->ce_mode))) { + if (format) { + show_ce_fmt(repo, ce, format, fullname); + print_debug(ce); + return; + } + tag = get_tag(ce, tag); if (!show_stage) { @@ -675,6 +760,9 @@ int cmd_ls_files(int argc, const char **argv, const char *cmd_prefix) N_("suppress duplicate entries")), OPT_BOOL(0, "sparse", &show_sparse_dirs, N_("show sparse directories in the presence of a sparse index")), + OPT_STRING_F(0, "format", &format, N_("format"), + N_("format to use for the output"), + PARSE_OPT_NONEG), OPT_END() }; int ret = 0; @@ -699,6 +787,13 @@ int cmd_ls_files(int argc, const char **argv, const char *cmd_prefix) for (i = 0; i < exclude_list.nr; i++) { add_pattern(exclude_list.items[i].string, "", 0, pl, --exclude_args); } + + if (format && (show_stage || show_others || show_killed || + show_resolve_undo || skipping_duplicates || show_eol || show_tag)) + usage_msg_opt(_("--format cannot be used with -s, -o, -k, -t, " + "--resolve-undo, --deduplicate, --eol"), + ls_files_usage, builtin_ls_files_options); + if (show_tag || show_valid_bit || show_fsmonitor_bit) { tag_cached = "H "; tag_unmerged = "M "; diff --git a/builtin/merge-file.c b/builtin/merge-file.c index e695867ee5..c923bbf2ab 100644 --- a/builtin/merge-file.c +++ b/builtin/merge-file.c @@ -25,10 +25,10 @@ static int label_cb(const struct option *opt, const char *arg, int unset) int cmd_merge_file(int argc, const char **argv, const char *prefix) { - const char *names[3] = { NULL, NULL, NULL }; - mmfile_t mmfs[3]; - mmbuffer_t result = {NULL, 0}; - xmparam_t xmp = {{0}}; + const char *names[3] = { 0 }; + mmfile_t mmfs[3] = { 0 }; + mmbuffer_t result = { 0 }; + xmparam_t xmp = { 0 }; int ret = 0, i = 0, to_stdout = 0; int quiet = 0; struct option options[] = { @@ -71,21 +71,24 @@ int cmd_merge_file(int argc, const char **argv, const char *prefix) for (i = 0; i < 3; i++) { char *fname; - int ret; + mmfile_t *mmf = mmfs + i; if (!names[i]) names[i] = argv[i]; fname = prefix_filename(prefix, argv[i]); - ret = read_mmfile(mmfs + i, fname); + + if (read_mmfile(mmf, fname)) + ret = -1; + else if (mmf->size > MAX_XDIFF_SIZE || + buffer_is_binary(mmf->ptr, mmf->size)) + ret = error("Cannot merge binary files: %s", + argv[i]); + free(fname); if (ret) - return -1; + goto cleanup; - if (mmfs[i].size > 
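The ls-files changes above add a --format mode built on strbuf_expand(), understanding %(objectmode), %(objectname), %(stage), %(eolinfo:index), %(eolinfo:worktree), %(eolattr) and %(path). A sketch that approximates the classic `ls-files -s` layout using only those atoms:

    git ls-files --format='%(objectmode) %(objectname) %(stage) %(path)'

As the new check above spells out, --format refuses to be combined with -s, -o, -k, -t, --resolve-undo, --deduplicate and --eol.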
MAX_XDIFF_SIZE || - buffer_is_binary(mmfs[i].ptr, mmfs[i].size)) - return error("Cannot merge binary files: %s", - argv[i]); } xmp.ancestor = names[1]; @@ -93,9 +96,6 @@ int cmd_merge_file(int argc, const char **argv, const char *prefix) xmp.file2 = names[2]; ret = xdl_merge(mmfs + 1, mmfs + 0, mmfs + 2, &xmp, &result); - for (i = 0; i < 3; i++) - free(mmfs[i].ptr); - if (ret >= 0) { const char *filename = argv[0]; char *fpath = prefix_filename(prefix, argv[0]); @@ -116,5 +116,9 @@ int cmd_merge_file(int argc, const char **argv, const char *prefix) if (ret > 127) ret = 127; +cleanup: + for (i = 0; i < 3; i++) + free(mmfs[i].ptr); + return ret; } diff --git a/builtin/merge-tree.c b/builtin/merge-tree.c index 5dc94d6f88..ae5782917b 100644 --- a/builtin/merge-tree.c +++ b/builtin/merge-tree.c @@ -2,13 +2,18 @@ #include "builtin.h" #include "tree-walk.h" #include "xdiff-interface.h" +#include "help.h" +#include "commit-reach.h" +#include "merge-ort.h" #include "object-store.h" +#include "parse-options.h" #include "repository.h" #include "blob.h" #include "exec-cmd.h" #include "merge-blobs.h" +#include "quote.h" -static const char merge_tree_usage[] = "git merge-tree <base-tree> <branch1> <branch2>"; +static int line_termination = '\n'; struct merge_list { struct merge_list *next; @@ -28,7 +33,7 @@ static void add_merge_entry(struct merge_list *entry) merge_result_end = &entry->next; } -static void merge_trees(struct tree_desc t[3], const char *base); +static void trivial_merge_trees(struct tree_desc t[3], const char *base); static const char *explanation(struct merge_list *entry) { @@ -225,7 +230,7 @@ static void unresolved_directory(const struct traverse_info *info, buf2 = fill_tree_descriptor(r, t + 2, ENTRY_OID(n + 2)); #undef ENTRY_OID - merge_trees(t, newbase); + trivial_merge_trees(t, newbase); free(buf0); free(buf1); @@ -342,7 +347,7 @@ static int threeway_callback(int n, unsigned long mask, unsigned long dirmask, s return mask; } -static void merge_trees(struct tree_desc t[3], const char *base) +static void trivial_merge_trees(struct tree_desc t[3], const char *base) { struct traverse_info info; @@ -366,19 +371,18 @@ static void *get_tree_descriptor(struct repository *r, return buf; } -int cmd_merge_tree(int argc, const char **argv, const char *prefix) +static int trivial_merge(const char *base, + const char *branch1, + const char *branch2) { struct repository *r = the_repository; struct tree_desc t[3]; void *buf1, *buf2, *buf3; - if (argc != 4) - usage(merge_tree_usage); - - buf1 = get_tree_descriptor(r, t+0, argv[1]); - buf2 = get_tree_descriptor(r, t+1, argv[2]); - buf3 = get_tree_descriptor(r, t+2, argv[3]); - merge_trees(t, ""); + buf1 = get_tree_descriptor(r, t+0, base); + buf2 = get_tree_descriptor(r, t+1, branch1); + buf3 = get_tree_descriptor(r, t+2, branch2); + trivial_merge_trees(t, ""); free(buf1); free(buf2); free(buf3); @@ -386,3 +390,162 @@ int cmd_merge_tree(int argc, const char **argv, const char *prefix) show_result(); return 0; } + +enum mode { + MODE_UNKNOWN, + MODE_TRIVIAL, + MODE_REAL, +}; + +struct merge_tree_options { + int mode; + int allow_unrelated_histories; + int show_messages; + int name_only; +}; + +static int real_merge(struct merge_tree_options *o, + const char *branch1, const char *branch2, + const char *prefix) +{ + struct commit *parent1, *parent2; + struct commit_list *merge_bases = NULL; + struct merge_options opt; + struct merge_result result = { 0 }; + + parent1 = get_merge_parent(branch1); + if (!parent1) + help_unknown_ref(branch1, 
"merge-tree", + _("not something we can merge")); + + parent2 = get_merge_parent(branch2); + if (!parent2) + help_unknown_ref(branch2, "merge-tree", + _("not something we can merge")); + + init_merge_options(&opt, the_repository); + + opt.show_rename_progress = 0; + + opt.branch1 = branch1; + opt.branch2 = branch2; + + /* + * Get the merge bases, in reverse order; see comment above + * merge_incore_recursive in merge-ort.h + */ + merge_bases = get_merge_bases(parent1, parent2); + if (!merge_bases && !o->allow_unrelated_histories) + die(_("refusing to merge unrelated histories")); + merge_bases = reverse_commit_list(merge_bases); + + merge_incore_recursive(&opt, merge_bases, parent1, parent2, &result); + if (result.clean < 0) + die(_("failure to merge")); + + if (o->show_messages == -1) + o->show_messages = !result.clean; + + printf("%s%c", oid_to_hex(&result.tree->object.oid), line_termination); + if (!result.clean) { + struct string_list conflicted_files = STRING_LIST_INIT_NODUP; + const char *last = NULL; + int i; + + merge_get_conflicted_files(&result, &conflicted_files); + for (i = 0; i < conflicted_files.nr; i++) { + const char *name = conflicted_files.items[i].string; + struct stage_info *c = conflicted_files.items[i].util; + if (!o->name_only) + printf("%06o %s %d\t", + c->mode, oid_to_hex(&c->oid), c->stage); + else if (last && !strcmp(last, name)) + continue; + write_name_quoted_relative( + name, prefix, stdout, line_termination); + last = name; + } + string_list_clear(&conflicted_files, 1); + } + if (o->show_messages) { + putchar(line_termination); + merge_display_update_messages(&opt, line_termination == '\0', + &result); + } + merge_finalize(&opt, &result); + return !result.clean; /* result.clean < 0 handled above */ +} + +int cmd_merge_tree(int argc, const char **argv, const char *prefix) +{ + struct merge_tree_options o = { .show_messages = -1 }; + int expected_remaining_argc; + int original_argc; + + const char * const merge_tree_usage[] = { + N_("git merge-tree [--write-tree] [<options>] <branch1> <branch2>"), + N_("git merge-tree [--trivial-merge] <base-tree> <branch1> <branch2>"), + NULL + }; + struct option mt_options[] = { + OPT_CMDMODE(0, "write-tree", &o.mode, + N_("do a real merge instead of a trivial merge"), + MODE_REAL), + OPT_CMDMODE(0, "trivial-merge", &o.mode, + N_("do a trivial merge only"), MODE_TRIVIAL), + OPT_BOOL(0, "messages", &o.show_messages, + N_("also show informational/conflict messages")), + OPT_SET_INT('z', NULL, &line_termination, + N_("separate paths with the NUL character"), '\0'), + OPT_BOOL_F(0, "name-only", + &o.name_only, + N_("list filenames without modes/oids/stages"), + PARSE_OPT_NONEG), + OPT_BOOL_F(0, "allow-unrelated-histories", + &o.allow_unrelated_histories, + N_("allow merging unrelated histories"), + PARSE_OPT_NONEG), + OPT_END() + }; + + /* Parse arguments */ + original_argc = argc - 1; /* ignoring argv[0] */ + argc = parse_options(argc, argv, prefix, mt_options, + merge_tree_usage, PARSE_OPT_STOP_AT_NON_OPTION); + switch (o.mode) { + default: + BUG("unexpected command mode %d", o.mode); + case MODE_UNKNOWN: + switch (argc) { + default: + usage_with_options(merge_tree_usage, mt_options); + case 2: + o.mode = MODE_REAL; + break; + case 3: + o.mode = MODE_TRIVIAL; + break; + } + expected_remaining_argc = argc; + break; + case MODE_REAL: + expected_remaining_argc = 2; + break; + case MODE_TRIVIAL: + expected_remaining_argc = 3; + /* Removal of `--trivial-merge` is expected */ + original_argc--; + break; + } + if (o.mode == 
MODE_TRIVIAL && argc < original_argc) + die(_("--trivial-merge is incompatible with all other options")); + + if (argc != expected_remaining_argc) + usage_with_options(merge_tree_usage, mt_options); + + /* Do the relevant type of merge */ + if (o.mode == MODE_REAL) + return real_merge(&o, argv[0], argv[1], prefix); + else + return trivial_merge(argv[0], argv[1], argv[2]); +} diff --git a/builtin/merge.c b/builtin/merge.c index d9784d4891..f7c92c0e64 100644 --- a/builtin/merge.c +++ b/builtin/merge.c @@ -313,8 +313,16 @@ static int save_state(struct object_id *stash) int len; struct child_process cp = CHILD_PROCESS_INIT; struct strbuf buffer = STRBUF_INIT; + struct lock_file lock_file = LOCK_INIT; + int fd; int rc = -1; + fd = repo_hold_locked_index(the_repository, &lock_file, 0); + refresh_cache(REFRESH_QUIET); + if (0 <= fd) + repo_update_index_if_able(the_repository, &lock_file); + rollback_lock_file(&lock_file); + strvec_pushl(&cp.args, "stash", "create", NULL); cp.out = -1; cp.git_cmd = 1; @@ -375,24 +383,26 @@ static void reset_hard(const struct object_id *oid, int verbose) static void restore_state(const struct object_id *head, const struct object_id *stash) { - struct strbuf sb = STRBUF_INIT; - const char *args[] = { "stash", "apply", NULL, NULL }; - - if (is_null_oid(stash)) - return; + struct strvec args = STRVEC_INIT; reset_hard(head, 1); - args[2] = oid_to_hex(stash); + if (is_null_oid(stash)) + goto refresh_cache; + + strvec_pushl(&args, "stash", "apply", "--index", "--quiet", NULL); + strvec_push(&args, oid_to_hex(stash)); /* * It is OK to ignore error here, for example when there was * nothing to restore. */ - run_command_v_opt(args, RUN_GIT_CMD); + run_command_v_opt(args.v, RUN_GIT_CMD); + strvec_clear(&args); - strbuf_release(&sb); - refresh_cache(REFRESH_QUIET); +refresh_cache: + if (discard_cache() < 0 || read_cache() < 0) + die(_("could not read index")); } /* This is called when no merge was necessary. */ @@ -502,7 +512,6 @@ static void merge_name(const char *remote, struct strbuf *msg) { struct commit *remote_head; struct object_id branch_head; - struct strbuf buf = STRBUF_INIT; struct strbuf bname = STRBUF_INIT; struct merge_remote_desc *desc; const char *ptr; @@ -590,7 +599,6 @@ static void merge_name(const char *remote, struct strbuf *msg) oid_to_hex(&remote_head->object.oid), remote); cleanup: free(found_ref); - strbuf_release(&buf); strbuf_release(&bname); } @@ -758,8 +766,10 @@ static int try_merge_strategy(const char *strategy, struct commit_list *common, else clean = merge_recursive(&o, head, remoteheads->item, reversed, &result); - if (clean < 0) - exit(128); + if (clean < 0) { + rollback_lock_file(&lock); + return 2; + } if (write_locked_index(&the_index, &lock, COMMIT_LOCK | SKIP_IF_UNCHANGED)) die(_("unable to write %s"), get_index_file()); @@ -1603,6 +1613,21 @@ int cmd_merge(int argc, const char **argv, const char *prefix) */ refresh_cache(REFRESH_QUIET); if (allow_trivial && fast_forward != FF_ONLY) { + /* + * Must first ensure that index matches HEAD before + * attempting a trivial merge. + */ + struct tree *head_tree = get_commit_tree(head_commit); + struct strbuf sb = STRBUF_INIT; + + if (repo_index_has_changes(the_repository, head_tree, + &sb)) { + error(_("Your local changes to the following files would be overwritten by merge:\n %s"), + sb.buf); + strbuf_release(&sb); + return 2; + } + /* See if it is really trivial. 
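cmd_merge_tree() above now dispatches between the historical trivial mode and a new "real" merge built on merge-ort. A sketch of the new mode, with placeholder branch names:

    git merge-tree --write-tree --messages side1 side2   # side1/side2 are hypothetical branches

On a clean merge it prints the resulting tree OID and exits 0; on conflicts it follows the OID with "<mode> <oid> <stage><TAB><path>" lines (or bare paths with --name-only), then the informational messages, and exits 1. The old positional form remains reachable as `git merge-tree --trivial-merge <base-tree> <branch1> <branch2>`.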
*/ git_committer_info(IDENT_STRICT); printf(_("Trying really trivial in-index merge...\n")); @@ -1659,12 +1684,12 @@ int cmd_merge(int argc, const char **argv, const char *prefix) * tree in the index -- this means that the index must be in * sync with the head commit. The strategies are responsible * to ensure this. + * + * Stash away the local changes so that we can try more than one + * and/or recover from merge strategies bailing while leaving the + * index and working tree polluted. */ - if (use_strategies_nr == 1 || - /* - * Stash away the local changes so that we can try more than one. - */ - save_state(&stash)) + if (save_state(&stash)) oidclr(&stash); for (i = 0; !merge_was_ok && i < use_strategies_nr; i++) { diff --git a/builtin/multi-pack-index.c b/builtin/multi-pack-index.c index 5edbb7fe86..8f24d59a75 100644 --- a/builtin/multi-pack-index.c +++ b/builtin/multi-pack-index.c @@ -134,7 +134,7 @@ static int cmd_multi_pack_index_write(int argc, const char **argv) opts.flags |= MIDX_PROGRESS; argc = parse_options(argc, argv, NULL, options, builtin_multi_pack_index_write_usage, - PARSE_OPT_KEEP_UNKNOWN); + 0); if (argc) usage_with_options(builtin_multi_pack_index_write_usage, options); @@ -176,7 +176,7 @@ static int cmd_multi_pack_index_verify(int argc, const char **argv) opts.flags |= MIDX_PROGRESS; argc = parse_options(argc, argv, NULL, options, builtin_multi_pack_index_verify_usage, - PARSE_OPT_KEEP_UNKNOWN); + 0); if (argc) usage_with_options(builtin_multi_pack_index_verify_usage, options); @@ -202,7 +202,7 @@ static int cmd_multi_pack_index_expire(int argc, const char **argv) opts.flags |= MIDX_PROGRESS; argc = parse_options(argc, argv, NULL, options, builtin_multi_pack_index_expire_usage, - PARSE_OPT_KEEP_UNKNOWN); + 0); if (argc) usage_with_options(builtin_multi_pack_index_expire_usage, options); @@ -232,7 +232,7 @@ static int cmd_multi_pack_index_repack(int argc, const char **argv) argc = parse_options(argc, argv, NULL, options, builtin_multi_pack_index_repack_usage, - PARSE_OPT_KEEP_UNKNOWN); + 0); if (argc) usage_with_options(builtin_multi_pack_index_repack_usage, options); diff --git a/builtin/mv.c b/builtin/mv.c index 83a465ba83..4729bb1a1a 100644 --- a/builtin/mv.c +++ b/builtin/mv.c @@ -13,12 +13,21 @@ #include "string-list.h" #include "parse-options.h" #include "submodule.h" +#include "entry.h" static const char * const builtin_mv_usage[] = { N_("git mv [<options>] <source>... <destination>"), NULL }; +enum update_mode { + BOTH = 0, + WORKING_DIRECTORY = (1 << 1), + INDEX = (1 << 2), + SPARSE = (1 << 3), + SKIP_WORKTREE_DIR = (1 << 4), +}; + #define DUP_BASENAME 1 #define KEEP_TRAILING_SLASH 2 @@ -115,6 +124,36 @@ static int index_range_of_same_dir(const char *src, int length, return last - first; } +/* + * Check if an out-of-cone directory should be in the index. Imagine this case + * that all the files under a directory are marked with 'CE_SKIP_WORKTREE' bit + * and thus the directory is sparsified. + * + * Return 0 if such directory exist (i.e. with any of its contained files not + * marked with CE_SKIP_WORKTREE, the directory would be present in working tree). + * Return 1 otherwise. 
+ */ +static int check_dir_in_index(const char *name) +{ + const char *with_slash = add_slash(name); + int length = strlen(with_slash); + + int pos = cache_name_pos(with_slash, length); + const struct cache_entry *ce; + + if (pos < 0) { + pos = -pos - 1; + if (pos >= the_index.cache_nr) + return 1; + ce = active_cache[pos]; + if (strncmp(with_slash, ce->name, length)) + return 1; + if (ce_skip_worktree(ce)) + return 0; + } + return 1; +} + int cmd_mv(int argc, const char **argv, const char *prefix) { int i, flags, gitmodules_modified = 0; @@ -129,7 +168,7 @@ int cmd_mv(int argc, const char **argv, const char *prefix) OPT_END(), }; const char **source, **destination, **dest_path, **submodule_gitfile; - enum update_mode { BOTH = 0, WORKING_DIRECTORY, INDEX, SPARSE } *modes; + enum update_mode *modes; struct stat st; struct string_list src_for_dst = STRING_LIST_INIT_NODUP; struct lock_file lock_file = LOCK_INIT; @@ -148,7 +187,8 @@ int cmd_mv(int argc, const char **argv, const char *prefix) die(_("index file corrupt")); source = internal_prefix_pathspec(prefix, argv, argc, 0); - modes = xcalloc(argc, sizeof(enum update_mode)); + CALLOC_ARRAY(modes, argc); + /* * Keep trailing slash, needed to let * "git mv file no-such-dir/" error out, except in the case @@ -176,7 +216,7 @@ int cmd_mv(int argc, const char **argv, const char *prefix) /* Checking */ for (i = 0; i < argc; i++) { const char *src = source[i], *dst = destination[i]; - int length, src_is_dir; + int length; const char *bad = NULL; int skip_sparse = 0; @@ -185,54 +225,103 @@ int cmd_mv(int argc, const char **argv, const char *prefix) length = strlen(src); if (lstat(src, &st) < 0) { - /* only error if existence is expected. */ - if (modes[i] != SPARSE) + int pos; + const struct cache_entry *ce; + + pos = cache_name_pos(src, length); + if (pos < 0) { + const char *src_w_slash = add_slash(src); + if (!path_in_sparse_checkout(src_w_slash, &the_index) && + !check_dir_in_index(src)) { + modes[i] |= SKIP_WORKTREE_DIR; + goto dir_check; + } + /* only error if existence is expected. 
*/ + if (!(modes[i] & SPARSE)) + bad = _("bad source"); + goto act_on_entry; + } + ce = active_cache[pos]; + if (!ce_skip_worktree(ce)) { bad = _("bad source"); - } else if (!strncmp(src, dst, length) && - (dst[length] == 0 || dst[length] == '/')) { + goto act_on_entry; + } + if (!ignore_sparse) { + string_list_append(&only_match_skip_worktree, src); + goto act_on_entry; + } + /* Check if dst exists in index */ + if (cache_name_pos(dst, strlen(dst)) < 0) { + modes[i] |= SPARSE; + goto act_on_entry; + } + if (!force) { + bad = _("destination exists"); + goto act_on_entry; + } + modes[i] |= SPARSE; + goto act_on_entry; + } + if (!strncmp(src, dst, length) && + (dst[length] == 0 || dst[length] == '/')) { bad = _("can not move directory into itself"); - } else if ((src_is_dir = S_ISDIR(st.st_mode)) - && lstat(dst, &st) == 0) + goto act_on_entry; + } + if (S_ISDIR(st.st_mode) + && lstat(dst, &st) == 0) { bad = _("cannot move directory over file"); - else if (src_is_dir) { + goto act_on_entry; + } + +dir_check: + if (S_ISDIR(st.st_mode)) { + int j, dst_len, n; int first = cache_name_pos(src, length), last; - if (first >= 0) + if (first >= 0) { prepare_move_submodule(src, first, submodule_gitfile + i); - else if (index_range_of_same_dir(src, length, - &first, &last) < 1) + goto act_on_entry; + } else if (index_range_of_same_dir(src, length, + &first, &last) < 1) { bad = _("source directory is empty"); - else { /* last - first >= 1 */ - int j, dst_len, n; - - modes[i] = WORKING_DIRECTORY; - n = argc + last - first; - REALLOC_ARRAY(source, n); - REALLOC_ARRAY(destination, n); - REALLOC_ARRAY(modes, n); - REALLOC_ARRAY(submodule_gitfile, n); - - dst = add_slash(dst); - dst_len = strlen(dst); - - for (j = 0; j < last - first; j++) { - const struct cache_entry *ce = active_cache[first + j]; - const char *path = ce->name; - source[argc + j] = path; - destination[argc + j] = - prefix_path(dst, dst_len, path + length + 1); - modes[argc + j] = ce_skip_worktree(ce) ? SPARSE : INDEX; - submodule_gitfile[argc + j] = NULL; - } - argc += last - first; + goto act_on_entry; } - } else if (!(ce = cache_file_exists(src, length, 0))) { + + /* last - first >= 1 */ + modes[i] |= WORKING_DIRECTORY; + n = argc + last - first; + REALLOC_ARRAY(source, n); + REALLOC_ARRAY(destination, n); + REALLOC_ARRAY(modes, n); + REALLOC_ARRAY(submodule_gitfile, n); + + dst = add_slash(dst); + dst_len = strlen(dst); + + for (j = 0; j < last - first; j++) { + const struct cache_entry *ce = active_cache[first + j]; + const char *path = ce->name; + source[argc + j] = path; + destination[argc + j] = + prefix_path(dst, dst_len, path + length + 1); + memset(modes + argc + j, 0, sizeof(enum update_mode)); + modes[argc + j] |= ce_skip_worktree(ce) ? 
SPARSE : INDEX; + submodule_gitfile[argc + j] = NULL; + } + argc += last - first; + goto act_on_entry; + } + if (!(ce = cache_file_exists(src, length, 0))) { bad = _("not under version control"); - } else if (ce_stage(ce)) { + goto act_on_entry; + } + if (ce_stage(ce)) { bad = _("conflicted"); - } else if (lstat(dst, &st) == 0 && - (!ignore_case || strcasecmp(src, dst))) { + goto act_on_entry; + } + if (lstat(dst, &st) == 0 && + (!ignore_case || strcasecmp(src, dst))) { bad = _("destination exists"); if (force) { /* @@ -246,34 +335,40 @@ int cmd_mv(int argc, const char **argv, const char *prefix) } else bad = _("Cannot overwrite"); } - } else if (string_list_has_string(&src_for_dst, dst)) + goto act_on_entry; + } + if (string_list_has_string(&src_for_dst, dst)) { bad = _("multiple sources for the same target"); - else if (is_dir_sep(dst[strlen(dst) - 1])) + goto act_on_entry; + } + if (is_dir_sep(dst[strlen(dst) - 1])) { bad = _("destination directory does not exist"); - else { - /* - * We check if the paths are in the sparse-checkout - * definition as a very final check, since that - * allows us to point the user to the --sparse - * option as a way to have a successful run. - */ - if (!ignore_sparse && - !path_in_sparse_checkout(src, &the_index)) { - string_list_append(&only_match_skip_worktree, src); - skip_sparse = 1; - } - if (!ignore_sparse && - !path_in_sparse_checkout(dst, &the_index)) { - string_list_append(&only_match_skip_worktree, dst); - skip_sparse = 1; - } - - if (skip_sparse) - goto remove_entry; + goto act_on_entry; + } - string_list_insert(&src_for_dst, dst); + /* + * We check if the paths are in the sparse-checkout + * definition as a very final check, since that + * allows us to point the user to the --sparse + * option as a way to have a successful run. 
+ */ + if (!ignore_sparse && + !path_in_sparse_checkout(src, &the_index)) { + string_list_append(&only_match_skip_worktree, src); + skip_sparse = 1; } + if (!ignore_sparse && + !path_in_sparse_checkout(dst, &the_index)) { + string_list_append(&only_match_skip_worktree, dst); + skip_sparse = 1; + } + + if (skip_sparse) + goto remove_entry; + + string_list_insert(&src_for_dst, dst); +act_on_entry: if (!bad) continue; if (!ignore_errors) @@ -282,14 +377,11 @@ int cmd_mv(int argc, const char **argv, const char *prefix) remove_entry: if (--argc > 0) { int n = argc - i; - memmove(source + i, source + i + 1, - n * sizeof(char *)); - memmove(destination + i, destination + i + 1, - n * sizeof(char *)); - memmove(modes + i, modes + i + 1, - n * sizeof(enum update_mode)); - memmove(submodule_gitfile + i, submodule_gitfile + i + 1, - n * sizeof(char *)); + MOVE_ARRAY(source + i, source + i + 1, n); + MOVE_ARRAY(destination + i, destination + i + 1, n); + MOVE_ARRAY(modes + i, modes + i + 1, n); + MOVE_ARRAY(submodule_gitfile + i, + submodule_gitfile + i + 1, n); i--; } } @@ -304,11 +396,17 @@ remove_entry: const char *src = source[i], *dst = destination[i]; enum update_mode mode = modes[i]; int pos; + struct checkout state = CHECKOUT_INIT; + state.istate = &the_index; + + if (force) + state.force = 1; if (show_only || verbose) printf(_("Renaming %s to %s\n"), src, dst); if (show_only) continue; - if (mode != INDEX && mode != SPARSE && rename(src, dst) < 0) { + if (!(mode & (INDEX | SPARSE | SKIP_WORKTREE_DIR)) && + rename(src, dst) < 0) { if (ignore_errors) continue; die_errno(_("renaming '%s' failed"), src); @@ -322,12 +420,23 @@ remove_entry: 1); } - if (mode == WORKING_DIRECTORY) + if (mode & (WORKING_DIRECTORY | SKIP_WORKTREE_DIR)) continue; pos = cache_name_pos(src, strlen(src)); assert(pos >= 0); rename_cache_entry_at(pos, dst); + + if ((mode & SPARSE) && + (path_in_sparse_checkout(dst, &the_index))) { + int dst_pos; + + dst_pos = cache_name_pos(dst, strlen(dst)); + active_cache[dst_pos]->ce_flags &= ~CE_SKIP_WORKTREE; + + if (checkout_entry(active_cache[dst_pos], &state, NULL, NULL)) + die(_("cannot checkout %s"), active_cache[dst_pos]->name); + } } if (gitmodules_modified) diff --git a/builtin/pull.c b/builtin/pull.c index 01155ba67b..403a24d7ca 100644 --- a/builtin/pull.c +++ b/builtin/pull.c @@ -990,6 +990,7 @@ int cmd_pull(int argc, const char **argv, const char *prefix) int rebase_unspecified = 0; int can_ff; int divergent; + int ret; if (!getenv("GIT_REFLOG_ACTION")) set_reflog_message(argc, argv); @@ -1100,7 +1101,8 @@ int cmd_pull(int argc, const char **argv, const char *prefix) if (is_null_oid(&orig_head)) { if (merge_heads.nr > 1) die(_("Cannot merge multiple branches into empty head.")); - return pull_into_void(merge_heads.oid, &curr_head); + ret = pull_into_void(merge_heads.oid, &curr_head); + goto cleanup; } if (merge_heads.nr > 1) { if (opt_rebase) @@ -1125,8 +1127,6 @@ int cmd_pull(int argc, const char **argv, const char *prefix) } if (opt_rebase) { - int ret = 0; - struct object_id newbase; struct object_id upstream; get_rebase_newbase_and_upstream(&newbase, &upstream, &curr_head, @@ -1149,12 +1149,16 @@ int cmd_pull(int argc, const char **argv, const char *prefix) recurse_submodules == RECURSE_SUBMODULES_ON_DEMAND)) ret = rebase_submodules(); - return ret; + goto cleanup; } else { - int ret = run_merge(); + ret = run_merge(); if (!ret && (recurse_submodules == RECURSE_SUBMODULES_ON || recurse_submodules == RECURSE_SUBMODULES_ON_DEMAND)) ret = update_submodules(); - return ret; 
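The git-mv rework above turns the update mode into a bit mask and teaches the command about sources and destinations outside the sparse-checkout cone (skip-worktree entries and sparsified directories). Roughly, and with hypothetical paths, a move out of a sparse directory into the cone is now expected to work when the user opts in:

    git mv --sparse sparse-dir/file.c in-cone-dir/   # both paths are placeholders

with the destination checked out into the working tree once its CE_SKIP_WORKTREE bit is cleared; without --sparse such paths are still collected into the sparse-only list rather than moved.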
+ goto cleanup; } + +cleanup: + oid_array_clear(&merge_heads); + return ret; } diff --git a/builtin/rebase.c b/builtin/rebase.c index 70aa7c842f..56e4214b44 100644 --- a/builtin/rebase.c +++ b/builtin/rebase.c @@ -102,6 +102,7 @@ struct rebase_options { int reschedule_failed_exec; int reapply_cherry_picks; int fork_point; + int update_refs; }; #define REBASE_OPTIONS_INIT { \ @@ -298,6 +299,7 @@ static int do_interactive_rebase(struct rebase_options *opts, unsigned flags) ret = complete_action(the_repository, &replay, flags, shortrevisions, opts->onto_name, opts->onto, &opts->orig_head, &commands, opts->autosquash, + opts->update_refs, &todo_list); } @@ -800,6 +802,11 @@ static int rebase_config(const char *var, const char *value, void *data) return 0; } + if (!strcmp(var, "rebase.updaterefs")) { + opts->update_refs = git_config_bool(var, value); + return 0; + } + if (!strcmp(var, "rebase.reschedulefailedexec")) { opts->reschedule_failed_exec = git_config_bool(var, value); return 0; @@ -1124,6 +1131,9 @@ int cmd_rebase(int argc, const char **argv, const char *prefix) OPT_BOOL(0, "autosquash", &options.autosquash, N_("move commits that begin with " "squash!/fixup! under -i")), + OPT_BOOL(0, "update-refs", &options.update_refs, + N_("update branches that point to commits " + "that are being rebased")), { OPTION_STRING, 'S', "gpg-sign", &gpg_sign, N_("key-id"), N_("GPG-sign commits"), PARSE_OPT_OPTARG, NULL, (intptr_t) "" }, diff --git a/builtin/reflog.c b/builtin/reflog.c index 4dd297dce8..8123956847 100644 --- a/builtin/reflog.c +++ b/builtin/reflog.c @@ -193,6 +193,8 @@ static int expire_unreachable_callback(const struct option *opt, { struct cmd_reflog_expire_cb *cmd = opt->value; + BUG_ON_OPT_NEG(unset); + if (parse_expiry_date(arg, &cmd->expire_unreachable)) die(_("invalid timestamp '%s' given to '--%s'"), arg, opt->long_name); @@ -207,6 +209,8 @@ static int expire_total_callback(const struct option *opt, { struct cmd_reflog_expire_cb *cmd = opt->value; + BUG_ON_OPT_NEG(unset); + if (parse_expiry_date(arg, &cmd->expire_total)) die(_("invalid timestamp '%s' given to '--%s'"), arg, opt->long_name); diff --git a/builtin/remote.c b/builtin/remote.c index a3a0c27d7a..c713463d89 100644 --- a/builtin/remote.c +++ b/builtin/remote.c @@ -344,12 +344,13 @@ static void read_branches(void) struct ref_states { struct remote *remote; - struct string_list new_refs, stale, tracked, heads, push; + struct string_list new_refs, skipped, stale, tracked, heads, push; int queried; }; #define REF_STATES_INIT { \ .new_refs = STRING_LIST_INIT_DUP, \ + .skipped = STRING_LIST_INIT_DUP, \ .stale = STRING_LIST_INIT_DUP, \ .tracked = STRING_LIST_INIT_DUP, \ .heads = STRING_LIST_INIT_DUP, \ @@ -368,7 +369,9 @@ static int get_ref_states(const struct ref *remote_refs, struct ref_states *stat states->remote->fetch.raw[i]); for (ref = fetch_map; ref; ref = ref->next) { - if (!ref->peer_ref || !ref_exists(ref->peer_ref->name)) + if (omit_name_by_refspec(ref->name, &states->remote->fetch)) + string_list_append(&states->skipped, abbrev_branch(ref->name)); + else if (!ref->peer_ref || !ref_exists(ref->peer_ref->name)) string_list_append(&states->new_refs, abbrev_branch(ref->name)); else string_list_append(&states->tracked, abbrev_branch(ref->name)); @@ -383,6 +386,7 @@ static int get_ref_states(const struct ref *remote_refs, struct ref_states *stat free_refs(fetch_map); string_list_sort(&states->new_refs); + string_list_sort(&states->skipped); string_list_sort(&states->tracked); string_list_sort(&states->stale); @@ -941,6 
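The rebase hunks above add `--update-refs` and the corresponding `rebase.updateRefs` configuration, forwarding the flag into complete_action() for the interactive machinery. A sketch, with 'main' standing in for whatever the rebase is built on:

    git rebase -i --update-refs main       # main is a placeholder upstream
    git config rebase.updateRefs true

so that branches pointing at commits being rewritten are moved along with them, per the option's help text above.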
+945,7 @@ static void clear_push_info(void *util, const char *string) static void free_remote_ref_states(struct ref_states *states) { string_list_clear(&states->new_refs, 0); + string_list_clear(&states->skipped, 0); string_list_clear(&states->stale, 1); string_list_clear(&states->tracked, 0); string_list_clear(&states->heads, 0); @@ -1035,6 +1040,8 @@ static int show_remote_info_item(struct string_list_item *item, void *cb_data) arg = states->remote->name; } else if (string_list_has_string(&states->tracked, name)) arg = _(" tracked"); + else if (string_list_has_string(&states->skipped, name)) + arg = _(" skipped"); else if (string_list_has_string(&states->stale, name)) arg = _(" stale (use 'git remote prune' to remove)"); else @@ -1307,6 +1314,7 @@ static int show(int argc, const char **argv) /* remote branch info */ info.width = 0; for_each_string_list(&info.states.new_refs, add_remote_to_show_info, &info); + for_each_string_list(&info.states.skipped, add_remote_to_show_info, &info); for_each_string_list(&info.states.tracked, add_remote_to_show_info, &info); for_each_string_list(&info.states.stale, add_remote_to_show_info, &info); if (info.list.nr) diff --git a/builtin/repack.c b/builtin/repack.c index 4a7ae4cf48..482b66f57d 100644 --- a/builtin/repack.c +++ b/builtin/repack.c @@ -727,7 +727,6 @@ int cmd_repack(int argc, const char **argv, const char *prefix) struct child_process cmd = CHILD_PROCESS_INIT; struct string_list_item *item; struct string_list names = STRING_LIST_INIT_DUP; - struct string_list rollback = STRING_LIST_INIT_NODUP; struct string_list existing_nonkept_packs = STRING_LIST_INIT_DUP; struct string_list existing_kept_packs = STRING_LIST_INIT_DUP; struct pack_geometry *geometry = NULL; @@ -1117,7 +1116,6 @@ int cmd_repack(int argc, const char **argv, const char *prefix) } string_list_clear(&names, 0); - string_list_clear(&rollback, 0); string_list_clear(&existing_nonkept_packs, 0); string_list_clear(&existing_kept_packs, 0); clear_pack_geometry(geometry); diff --git a/builtin/replace.c b/builtin/replace.c index 583702a098..a29e911d30 100644 --- a/builtin/replace.c +++ b/builtin/replace.c @@ -106,6 +106,7 @@ static int for_each_replace_name(const char **argv, each_replace_name_fn fn) size_t base_len; int had_error = 0; struct object_id oid; + const char *git_replace_ref_base = ref_namespace[NAMESPACE_REPLACE].ref; strbuf_addstr(&ref, git_replace_ref_base); base_len = ref.len; @@ -147,6 +148,8 @@ static int check_ref_valid(struct object_id *object, struct strbuf *ref, int force) { + const char *git_replace_ref_base = ref_namespace[NAMESPACE_REPLACE].ref; + strbuf_reset(ref); strbuf_addf(ref, "%s%s", git_replace_ref_base, oid_to_hex(object)); if (check_refname_format(ref->buf, 0)) diff --git a/builtin/reset.c b/builtin/reset.c index 344fff8f3a..fdce6f8c85 100644 --- a/builtin/reset.c +++ b/builtin/reset.c @@ -174,88 +174,6 @@ static void update_index_from_diff(struct diff_queue_struct *q, } } -static int pathspec_needs_expanded_index(const struct pathspec *pathspec) -{ - unsigned int i, pos; - int res = 0; - char *skip_worktree_seen = NULL; - - /* - * When using a magic pathspec, assume for the sake of simplicity that - * the index needs to be expanded to match all matchable files. 
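The remote.c changes above track refs that the fetch refspecs deliberately omit and report them in `git remote show` as " skipped" instead of as new or stale. Assuming a negative refspec is configured, for example:

    git config --add remote.origin.fetch '^refs/heads/wip/*'   # 'wip/*' is an arbitrary pattern
    git remote show origin

the matching remote branches should now appear in the branch list with the skipped annotation.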
- */ - if (pathspec->magic) - return 1; - - for (i = 0; i < pathspec->nr; i++) { - struct pathspec_item item = pathspec->items[i]; - - /* - * If the pathspec item has a wildcard, the index should be expanded - * if the pathspec has the possibility of matching a subset of entries inside - * of a sparse directory (but not the entire directory). - * - * If the pathspec item is a literal path, the index only needs to be expanded - * if a) the pathspec isn't in the sparse checkout cone (to make sure we don't - * expand for in-cone files) and b) it doesn't match any sparse directories - * (since we can reset whole sparse directories without expanding them). - */ - if (item.nowildcard_len < item.len) { - /* - * Special case: if the pattern is a path inside the cone - * followed by only wildcards, the pattern cannot match - * partial sparse directories, so we know we don't need to - * expand the index. - * - * Examples: - * - in-cone/foo***: doesn't need expanded index - * - not-in-cone/bar*: may need expanded index - * - **.c: may need expanded index - */ - if (strspn(item.original + item.nowildcard_len, "*") == item.len - item.nowildcard_len && - path_in_cone_mode_sparse_checkout(item.original, &the_index)) - continue; - - for (pos = 0; pos < active_nr; pos++) { - struct cache_entry *ce = active_cache[pos]; - - if (!S_ISSPARSEDIR(ce->ce_mode)) - continue; - - /* - * If the pre-wildcard length is longer than the sparse - * directory name and the sparse directory is the first - * component of the pathspec, need to expand the index. - */ - if (item.nowildcard_len > ce_namelen(ce) && - !strncmp(item.original, ce->name, ce_namelen(ce))) { - res = 1; - break; - } - - /* - * If the pre-wildcard length is shorter than the sparse - * directory and the pathspec does not match the whole - * directory, need to expand the index. 
- */ - if (!strncmp(item.original, ce->name, item.nowildcard_len) && - wildmatch(item.original, ce->name, 0)) { - res = 1; - break; - } - } - } else if (!path_in_cone_mode_sparse_checkout(item.original, &the_index) && - !matches_skip_worktree(pathspec, i, &skip_worktree_seen)) - res = 1; - - if (res > 0) - break; - } - - free(skip_worktree_seen); - return res; -} - static int read_from_tree(const struct pathspec *pathspec, struct object_id *tree_oid, int intent_to_add) @@ -273,7 +191,7 @@ static int read_from_tree(const struct pathspec *pathspec, opt.change = diff_change; opt.add_remove = diff_addremove; - if (pathspec->nr && the_index.sparse_index && pathspec_needs_expanded_index(pathspec)) + if (pathspec->nr && pathspec_needs_expanded_index(&the_index, pathspec)) ensure_full_index(&the_index); if (do_diff_cache(tree_oid, &opt)) diff --git a/builtin/rev-list.c b/builtin/rev-list.c index 30fd8e83ea..fba6f5d51f 100644 --- a/builtin/rev-list.c +++ b/builtin/rev-list.c @@ -46,6 +46,7 @@ static const char rev_list_usage[] = " --parents\n" " --children\n" " --objects | --objects-edge\n" +" --disk-usage[=human]\n" " --unpacked\n" " --header | --pretty\n" " --[no-]object-names\n" @@ -81,6 +82,7 @@ static int arg_show_object_names = 1; static int show_disk_usage; static off_t total_disk_usage; +static int human_readable; static off_t get_object_disk_usage(struct object *obj) { @@ -368,6 +370,17 @@ static int show_object_fast( return 1; } +static void print_disk_usage(off_t size) +{ + struct strbuf sb = STRBUF_INIT; + if (human_readable) + strbuf_humanise_bytes(&sb, size); + else + strbuf_addf(&sb, "%"PRIuMAX, (uintmax_t)size); + puts(sb.buf); + strbuf_release(&sb); +} + static inline int parse_missing_action_value(const char *value) { if (!strcmp(value, "error")) { @@ -473,6 +486,7 @@ static int try_bitmap_disk_usage(struct rev_info *revs, int filter_provided_objects) { struct bitmap_index *bitmap_git; + off_t size_from_bitmap; if (!show_disk_usage) return -1; @@ -481,8 +495,8 @@ static int try_bitmap_disk_usage(struct rev_info *revs, if (!bitmap_git) return -1; - printf("%"PRIuMAX"\n", - (uintmax_t)get_disk_usage_from_bitmap(bitmap_git, revs)); + size_from_bitmap = get_disk_usage_from_bitmap(bitmap_git, revs); + print_disk_usage(size_from_bitmap); return 0; } @@ -624,7 +638,21 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix) continue; } - if (!strcmp(arg, "--disk-usage")) { + if (skip_prefix(arg, "--disk-usage", &arg)) { + if (*arg == '=') { + if (!strcmp(++arg, "human")) { + human_readable = 1; + } else + die(_("invalid value for '%s': '%s', the only allowed format is '%s'"), + "--disk-usage=<format>", arg, "human"); + } else if (*arg) { + /* + * Arguably should goto a label to continue chain of ifs? + * Doesn't matter unless we try to add --disk-usage-foo + * afterwards. 
+ */ + usage(rev_list_usage); + } show_disk_usage = 1; info.flags |= REV_LIST_QUIET; continue; @@ -753,7 +781,7 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix) } if (show_disk_usage) - printf("%"PRIuMAX"\n", (uintmax_t)total_disk_usage); + print_disk_usage(total_disk_usage); cleanup: release_revisions(&revs); diff --git a/builtin/revert.c b/builtin/revert.c index f84c253f4c..2554f9099c 100644 --- a/builtin/revert.c +++ b/builtin/revert.c @@ -246,6 +246,9 @@ int cmd_revert(int argc, const char **argv, const char *prefix) res = run_sequencer(argc, argv, &opts); if (res < 0) die(_("revert failed")); + if (opts.revs) + release_revisions(opts.revs); + free(opts.revs); return res; } diff --git a/builtin/rm.c b/builtin/rm.c index 84a935a16e..b6ba859fe4 100644 --- a/builtin/rm.c +++ b/builtin/rm.c @@ -287,6 +287,8 @@ int cmd_rm(int argc, const char **argv, const char *prefix) if (!index_only) setup_work_tree(); + prepare_repo_settings(the_repository); + the_repository->settings.command_requires_full_index = 0; hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR); if (read_cache() < 0) @@ -296,8 +298,9 @@ int cmd_rm(int argc, const char **argv, const char *prefix) seen = xcalloc(pathspec.nr, 1); - /* TODO: audit for interaction with sparse-index. */ - ensure_full_index(&the_index); + if (pathspec_needs_expanded_index(&the_index, &pathspec)) + ensure_full_index(&the_index); + for (i = 0; i < active_nr; i++) { const struct cache_entry *ce = active_cache[i]; diff --git a/builtin/submodule--helper.c b/builtin/submodule--helper.c index c597df7528..b63f420ece 100644 --- a/builtin/submodule--helper.c +++ b/builtin/submodule--helper.c @@ -118,10 +118,11 @@ static int resolve_relative_url_test(int argc, const char **argv, const char *pr return 0; } -static char *do_get_submodule_displaypath(const char *path, - const char *prefix, - const char *super_prefix) +/* the result should be freed by the caller. */ +static char *get_submodule_displaypath(const char *path, const char *prefix) { + const char *super_prefix = get_super_prefix(); + if (prefix && super_prefix) { BUG("cannot have prefix '%s' and superprefix '%s'", prefix, super_prefix); @@ -137,13 +138,6 @@ static char *do_get_submodule_displaypath(const char *path, } } -/* the result should be freed by the caller. 
*/ -static char *get_submodule_displaypath(const char *path, const char *prefix) -{ - const char *super_prefix = get_super_prefix(); - return do_get_submodule_displaypath(path, prefix, super_prefix); -} - static char *compute_rev_name(const char *sub_path, const char* object_id) { struct strbuf sb = STRBUF_INIT; @@ -444,7 +438,7 @@ static int module_foreach(int argc, const char **argv, const char *prefix) }; const char *const git_submodule_helper_usage[] = { - N_("git submodule--helper foreach [--quiet] [--recursive] [--] <command>"), + N_("git submodule foreach [--quiet] [--recursive] [--] <command>"), NULL }; @@ -477,22 +471,18 @@ static int starts_with_dot_dot_slash(const char *const path) struct init_cb { const char *prefix; - const char *superprefix; unsigned int flags; }; #define INIT_CB_INIT { 0 } static void init_submodule(const char *path, const char *prefix, - const char *superprefix, unsigned int flags) + unsigned int flags) { const struct submodule *sub; struct strbuf sb = STRBUF_INIT; char *upd = NULL, *url = NULL, *displaypath; - /* try superprefix from the environment, if it is not passed explicitly */ - if (!superprefix) - superprefix = get_super_prefix(); - displaypath = do_get_submodule_displaypath(path, prefix, superprefix); + displaypath = get_submodule_displaypath(path, prefix); sub = submodule_from_path(the_repository, null_oid(), path); @@ -566,7 +556,7 @@ static void init_submodule(const char *path, const char *prefix, static void init_submodule_cb(const struct cache_entry *list_item, void *cb_data) { struct init_cb *info = cb_data; - init_submodule(list_item->name, info->prefix, info->superprefix, info->flags); + init_submodule(list_item->name, info->prefix, info->flags); } static int module_init(int argc, const char **argv, const char *prefix) @@ -582,7 +572,7 @@ static int module_init(int argc, const char **argv, const char *prefix) }; const char *const git_submodule_helper_usage[] = { - N_("git submodule--helper init [<options>] [<path>]"), + N_("git submodule init [<options>] [<path>]"), NULL }; @@ -1114,6 +1104,9 @@ static int compute_summary_module_list(struct object_id *head_oid, { struct strvec diff_args = STRVEC_INIT; struct rev_info rev; + struct setup_revision_opt opt = { + .free_removed_argv_elements = 1, + }; struct module_cb_list list = MODULE_CB_LIST_INIT; int ret = 0; @@ -1131,7 +1124,7 @@ static int compute_summary_module_list(struct object_id *head_oid, init_revisions(&rev, info->prefix); rev.abbrev = 0; precompose_argv_prefix(diff_args.nr, diff_args.v, NULL); - setup_revisions(diff_args.nr, diff_args.v, &rev, NULL); + setup_revisions(diff_args.nr, diff_args.v, &rev, &opt); rev.diffopt.output_format = DIFF_FORMAT_NO_OUTPUT | DIFF_FORMAT_CALLBACK; rev.diffopt.format_callback = submodule_summary_callback; rev.diffopt.format_callback_data = &list; @@ -1185,7 +1178,7 @@ static int module_summary(int argc, const char **argv, const char *prefix) }; const char *const git_submodule_helper_usage[] = { - N_("git submodule--helper summary [<options>] [<commit>] [--] [<path>]"), + N_("git submodule summary [<options>] [<commit>] [--] [<path>]"), NULL }; @@ -1349,7 +1342,7 @@ static int module_sync(int argc, const char **argv, const char *prefix) }; const char *const git_submodule_helper_usage[] = { - N_("git submodule--helper sync [--quiet] [--recursive] [<path>]"), + N_("git submodule sync [--quiet] [--recursive] [<path>]"), NULL }; @@ -1818,7 +1811,7 @@ static int module_clone(int argc, const char **argv, const char *prefix) static void 
determine_submodule_update_strategy(struct repository *r, int just_cloned, const char *path, - const char *update, + enum submodule_update_type update, struct submodule_update_strategy *out) { const struct submodule *sub = submodule_from_path(r, null_oid(), path); @@ -1828,9 +1821,7 @@ static void determine_submodule_update_strategy(struct repository *r, key = xstrfmt("submodule.%s.update", sub->name); if (update) { - if (parse_submodule_update_strategy(update, out) < 0) - die(_("Invalid update mode '%s' for submodule path '%s'"), - update, path); + out->type = update; } else if (!repo_config_get_string_tmp(r, key, &val)) { if (parse_submodule_update_strategy(val, out) < 0) die(_("Invalid update mode '%s' configured for submodule path '%s'"), @@ -1880,9 +1871,8 @@ struct submodule_update_clone { struct update_data { const char *prefix; - const char *recursive_prefix; const char *displaypath; - const char *update_default; + enum submodule_update_type update_default; struct object_id suboid; struct string_list references; struct submodule_update_strategy update_strategy; @@ -1949,30 +1939,20 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce, const char *update_string; enum submodule_update_type update_type; char *key; - struct strbuf displaypath_sb = STRBUF_INIT; + struct update_data *ud = suc->update_data; + char *displaypath = get_submodule_displaypath(ce->name, ud->prefix); struct strbuf sb = STRBUF_INIT; - const char *displaypath = NULL; int needs_cloning = 0; int need_free_url = 0; if (ce_stage(ce)) { - if (suc->update_data->recursive_prefix) - strbuf_addf(&sb, "%s/%s", suc->update_data->recursive_prefix, ce->name); - else - strbuf_addstr(&sb, ce->name); - strbuf_addf(out, _("Skipping unmerged submodule %s"), sb.buf); + strbuf_addf(out, _("Skipping unmerged submodule %s"), displaypath); strbuf_addch(out, '\n'); goto cleanup; } sub = submodule_from_path(the_repository, null_oid(), ce->name); - if (suc->update_data->recursive_prefix) - displaypath = relative_path(suc->update_data->recursive_prefix, - ce->name, &displaypath_sb); - else - displaypath = ce->name; - if (!sub) { next_submodule_warn_missing(suc, out, displaypath); goto cleanup; @@ -2062,7 +2042,7 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce, "--no-single-branch"); cleanup: - strbuf_release(&displaypath_sb); + free(displaypath); strbuf_release(&sb); if (need_free_url) free((void*)url); @@ -2405,13 +2385,33 @@ static void ensure_core_worktree(const char *path) } } +static const char *submodule_update_type_to_label(enum submodule_update_type type) +{ + switch (type) { + case SM_UPDATE_CHECKOUT: + return "checkout"; + case SM_UPDATE_MERGE: + return "merge"; + case SM_UPDATE_REBASE: + return "rebase"; + case SM_UPDATE_UNSPECIFIED: + case SM_UPDATE_NONE: + case SM_UPDATE_COMMAND: + break; + } + BUG("unreachable with type %d", type); +} + static void update_data_to_args(struct update_data *update_data, struct strvec *args) { + enum submodule_update_type update_type = update_data->update_default; + + if (update_data->displaypath) { + strvec_push(args, "--super-prefix"); + strvec_pushf(args, "%s/", update_data->displaypath); + } strvec_pushl(args, "submodule--helper", "update", "--recursive", NULL); strvec_pushf(args, "--jobs=%d", update_data->max_jobs); - if (update_data->recursive_prefix) - strvec_pushl(args, "--recursive-prefix", - update_data->recursive_prefix, NULL); if (update_data->quiet) strvec_push(args, "--quiet"); if (update_data->force) @@ -2430,8 +2430,10 @@ static void 
update_data_to_args(struct update_data *update_data, struct strvec * strvec_push(args, "--require-init"); if (update_data->depth) strvec_pushf(args, "--depth=%d", update_data->depth); - if (update_data->update_default) - strvec_pushl(args, "--update", update_data->update_default, NULL); + if (update_type != SM_UPDATE_UNSPECIFIED) + strvec_pushf(args, "--%s", + submodule_update_type_to_label(update_type)); + if (update_data->references.nr) { struct string_list_item *item; for_each_string_list_item(item, &update_data->references) @@ -2453,19 +2455,10 @@ static void update_data_to_args(struct update_data *update_data, struct strvec * static int update_submodule(struct update_data *update_data) { - char *prefixed_path; - ensure_core_worktree(update_data->sm_path); - if (update_data->recursive_prefix) - prefixed_path = xstrfmt("%s%s", update_data->recursive_prefix, - update_data->sm_path); - else - prefixed_path = xstrdup(update_data->sm_path); - - update_data->displaypath = get_submodule_displaypath(prefixed_path, - update_data->prefix); - free(prefixed_path); + update_data->displaypath = get_submodule_displaypath( + update_data->sm_path, update_data->prefix); determine_submodule_update_strategy(the_repository, update_data->just_cloned, update_data->sm_path, update_data->update_default, @@ -2505,14 +2498,6 @@ static int update_submodule(struct update_data *update_data) struct update_data next = *update_data; int res; - if (update_data->recursive_prefix) - prefixed_path = xstrfmt("%s%s/", update_data->recursive_prefix, - update_data->sm_path); - else - prefixed_path = xstrfmt("%s/", update_data->sm_path); - - next.recursive_prefix = get_submodule_displaypath(prefixed_path, - update_data->prefix); next.prefix = NULL; oidcpy(&next.oid, null_oid()); oidcpy(&next.suboid, null_oid()); @@ -2597,13 +2582,15 @@ static int module_update(int argc, const char **argv, const char *prefix) OPT_STRING(0, "prefix", &opt.prefix, N_("path"), N_("path into the working tree")), - OPT_STRING(0, "recursive-prefix", &opt.recursive_prefix, - N_("path"), - N_("path into the working tree, across nested " - "submodule boundaries")), - OPT_STRING(0, "update", &opt.update_default, - N_("string"), - N_("rebase, merge, checkout or none")), + OPT_SET_INT(0, "checkout", &opt.update_default, + N_("use the 'checkout' update strategy (default)"), + SM_UPDATE_CHECKOUT), + OPT_SET_INT('m', "merge", &opt.update_default, + N_("use the 'merge' update strategy"), + SM_UPDATE_MERGE), + OPT_SET_INT('r', "rebase", &opt.update_default, + N_("use the 'rebase' update strategy"), + SM_UPDATE_REBASE), OPT_STRING_LIST(0, "reference", &opt.references, N_("repo"), N_("reference repository")), OPT_BOOL(0, "dissociate", &opt.dissociate, @@ -2619,7 +2606,7 @@ static int module_update(int argc, const char **argv, const char *prefix) OPT_BOOL(0, "progress", &opt.progress, N_("force cloning progress")), OPT_BOOL(0, "require-init", &opt.require_init, - N_("disallow cloning into non-empty directory")), + N_("disallow cloning into non-empty directory, implies --init")), OPT_BOOL(0, "single-branch", &opt.single_branch, N_("clone only one branch, HEAD or --branch")), OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options), @@ -2643,6 +2630,9 @@ static int module_update(int argc, const char **argv, const char *prefix) argc = parse_options(argc, argv, prefix, module_update_options, git_submodule_helper_usage, 0); + if (opt.require_init) + opt.init = 1; + if (filter_options.choice && !opt.init) { usage_with_options(git_submodule_helper_usage, module_update_options); @@ 
-2651,9 +2641,7 @@ static int module_update(int argc, const char **argv, const char *prefix) opt.filter_options = &filter_options; if (opt.update_default) - if (parse_submodule_update_strategy(opt.update_default, - &opt.update_strategy) < 0) - die(_("bad value for update parameter")); + opt.update_strategy.type = opt.update_default; if (module_list_compute(argc, argv, prefix, &pathspec, &opt.list) < 0) { list_objects_filter_release(&filter_options); @@ -2679,7 +2667,6 @@ static int module_update(int argc, const char **argv, const char *prefix) module_list_active(&list); info.prefix = opt.prefix; - info.superprefix = opt.recursive_prefix; if (opt.quiet) info.flags |= OPT_QUIET; @@ -2785,7 +2772,7 @@ static int absorb_git_dirs(int argc, const char **argv, const char *prefix) }; const char *const git_submodule_helper_usage[] = { - N_("git submodule--helper absorb-git-dirs [<options>] [<path>...]"), + N_("git submodule absorbgitdirs [<options>] [<path>...]"), NULL }; @@ -2890,7 +2877,7 @@ static int module_set_url(int argc, const char **argv, const char *prefix) OPT_END() }; const char *const usage[] = { - N_("git submodule--helper set-url [--quiet] <path> <newurl>"), + N_("git submodule set-url [--quiet] <path> <newurl>"), NULL }; @@ -2929,8 +2916,8 @@ static int module_set_branch(int argc, const char **argv, const char *prefix) OPT_END() }; const char *const usage[] = { - N_("git submodule--helper set-branch [-q|--quiet] (-d|--default) <path>"), - N_("git submodule--helper set-branch [-q|--quiet] (-b|--branch) <branch> <path>"), + N_("git submodule set-branch [-q|--quiet] (-d|--default) <path>"), + N_("git submodule set-branch [-q|--quiet] (-b|--branch) <branch> <path>"), NULL }; @@ -3274,7 +3261,7 @@ static int module_add(int argc, const char **argv, const char *prefix) }; const char *const usage[] = { - N_("git submodule--helper add [<options>] [--] <repository> [<path>]"), + N_("git submodule add [<options>] [--] <repository> [<path>]"), NULL }; @@ -3376,18 +3363,18 @@ struct cmd_struct { static struct cmd_struct commands[] = { {"list", module_list, 0}, {"name", module_name, 0}, - {"clone", module_clone, 0}, - {"add", module_add, SUPPORT_SUPER_PREFIX}, - {"update", module_update, 0}, + {"clone", module_clone, SUPPORT_SUPER_PREFIX}, + {"add", module_add, 0}, + {"update", module_update, SUPPORT_SUPER_PREFIX}, {"resolve-relative-url-test", resolve_relative_url_test, 0}, {"foreach", module_foreach, SUPPORT_SUPER_PREFIX}, - {"init", module_init, SUPPORT_SUPER_PREFIX}, + {"init", module_init, 0}, {"status", module_status, SUPPORT_SUPER_PREFIX}, {"sync", module_sync, SUPPORT_SUPER_PREFIX}, {"deinit", module_deinit, 0}, - {"summary", module_summary, SUPPORT_SUPER_PREFIX}, + {"summary", module_summary, 0}, {"push-check", push_check, 0}, - {"absorb-git-dirs", absorb_git_dirs, SUPPORT_SUPER_PREFIX}, + {"absorbgitdirs", absorb_git_dirs, SUPPORT_SUPER_PREFIX}, {"is-active", is_active, 0}, {"check-name", check_name, 0}, {"config", module_config, 0}, diff --git a/builtin/symbolic-ref.c b/builtin/symbolic-ref.c index e547a08d6c..1b0f10225f 100644 --- a/builtin/symbolic-ref.c +++ b/builtin/symbolic-ref.c @@ -71,6 +71,8 @@ int cmd_symbolic_ref(int argc, const char **argv, const char *prefix) if (!strcmp(argv[0], "HEAD") && !starts_with(argv[1], "refs/")) die("Refusing to point HEAD outside of refs/"); + if (check_refname_format(argv[1], REFNAME_ALLOW_ONELEVEL) < 0) + die("Refusing to set '%s' to invalid ref '%s'", argv[0], argv[1]); ret = !!create_symref(argv[0], argv[1], msg); break; default: diff --git 
a/builtin/unpack-objects.c b/builtin/unpack-objects.c index 56d05e2725..43789b8ef2 100644 --- a/builtin/unpack-objects.c +++ b/builtin/unpack-objects.c @@ -97,15 +97,27 @@ static void use(int bytes) display_throughput(progress, consumed_bytes); } +/* + * Decompress zstream from the standard input into a newly + * allocated buffer of specified size and return the buffer. + * The caller is responsible to free the returned buffer. + * + * But for dry_run mode, "get_data()" is only used to check the + * integrity of data, and the returned buffer is not used at all. + * Therefore, in dry_run mode, "get_data()" will release the small + * allocated buffer which is reused to hold temporary zstream output + * and return NULL instead of returning garbage data. + */ static void *get_data(unsigned long size) { git_zstream stream; - void *buf = xmallocz(size); + unsigned long bufsize = dry_run && size > 8192 ? 8192 : size; + void *buf = xmallocz(bufsize); memset(&stream, 0, sizeof(stream)); stream.next_out = buf; - stream.avail_out = size; + stream.avail_out = bufsize; stream.next_in = fill(1); stream.avail_in = len; git_inflate_init(&stream); @@ -125,8 +137,17 @@ static void *get_data(unsigned long size) } stream.next_in = fill(1); stream.avail_in = len; + if (dry_run) { + /* reuse the buffer in dry_run mode */ + stream.next_out = buf; + stream.avail_out = bufsize > size - stream.total_out ? + size - stream.total_out : + bufsize; + } } git_inflate_end(&stream); + if (dry_run) + FREE_AND_NULL(buf); return buf; } @@ -326,10 +347,70 @@ static void unpack_non_delta_entry(enum object_type type, unsigned long size, { void *buf = get_data(size); - if (!dry_run && buf) + if (buf) write_object(nr, type, buf, size); - else - free(buf); +} + +struct input_zstream_data { + git_zstream *zstream; + unsigned char buf[8192]; + int status; +}; + +static const void *feed_input_zstream(struct input_stream *in_stream, + unsigned long *readlen) +{ + struct input_zstream_data *data = in_stream->data; + git_zstream *zstream = data->zstream; + void *in = fill(1); + + if (in_stream->is_finished) { + *readlen = 0; + return NULL; + } + + zstream->next_out = data->buf; + zstream->avail_out = sizeof(data->buf); + zstream->next_in = in; + zstream->avail_in = len; + + data->status = git_inflate(zstream, 0); + + in_stream->is_finished = data->status != Z_OK; + use(len - zstream->avail_in); + *readlen = sizeof(data->buf) - zstream->avail_out; + + return data->buf; +} + +static void stream_blob(unsigned long size, unsigned nr) +{ + git_zstream zstream = { 0 }; + struct input_zstream_data data = { 0 }; + struct input_stream in_stream = { + .read = feed_input_zstream, + .data = &data, + }; + struct obj_info *info = &obj_list[nr]; + + data.zstream = &zstream; + git_inflate_init(&zstream); + + if (stream_loose_object(&in_stream, size, &info->oid)) + die(_("failed to write object in stream")); + + if (data.status != Z_STREAM_END) + die(_("inflate returned (%d)"), data.status); + git_inflate_end(&zstream); + + if (strict) { + struct blob *blob = lookup_blob(the_repository, &info->oid); + + if (!blob) + die(_("invalid blob object from stream")); + blob->object.flags |= FLAG_WRITTEN; + } + info->obj = NULL; } static int resolve_against_held(unsigned nr, const struct object_id *base, @@ -359,10 +440,8 @@ static void unpack_delta_entry(enum object_type type, unsigned long delta_size, oidread(&base_oid, fill(the_hash_algo->rawsz)); use(the_hash_algo->rawsz); delta_data = get_data(delta_size); - if (dry_run || !delta_data) { - free(delta_data); + 
if (!delta_data) return; - } if (has_object_file(&base_oid)) ; /* Ok we have this one */ else if (resolve_against_held(nr, &base_oid, @@ -398,10 +477,8 @@ static void unpack_delta_entry(enum object_type type, unsigned long delta_size, die("offset value out of bound for delta base object"); delta_data = get_data(delta_size); - if (dry_run || !delta_data) { - free(delta_data); + if (!delta_data) return; - } lo = 0; hi = nr; while (lo < hi) { @@ -468,9 +545,14 @@ static void unpack_one(unsigned nr) } switch (type) { + case OBJ_BLOB: + if (!dry_run && size > big_file_threshold) { + stream_blob(size, nr); + return; + } + /* fallthrough */ case OBJ_COMMIT: case OBJ_TREE: - case OBJ_BLOB: case OBJ_TAG: unpack_non_delta_entry(type, size, nr); return; diff --git a/bulk-checkin.c b/bulk-checkin.c index 98ec893842..855b68ec23 100644 --- a/bulk-checkin.c +++ b/bulk-checkin.c @@ -340,6 +340,8 @@ void fsync_loose_object_bulk_checkin(int fd, const char *filename) */ if (!bulk_fsync_objdir || git_fsync(fd, FSYNC_WRITEOUT_ONLY) < 0) { + if (errno == ENOSYS) + warning(_("core.fsyncMethod = batch is unsupported on this platform")); fsync_or_die(fd, filename); } } diff --git a/cache-tree.c b/cache-tree.c index 56db0b5026..c97111cccf 100644 --- a/cache-tree.c +++ b/cache-tree.c @@ -857,9 +857,7 @@ int cache_tree_matches_traversal(struct cache_tree *root, return 0; } -static void verify_one_sparse(struct repository *r, - struct index_state *istate, - struct cache_tree *it, +static void verify_one_sparse(struct index_state *istate, struct strbuf *path, int pos) { @@ -910,7 +908,7 @@ static int verify_one(struct repository *r, return 1; if (pos >= 0) { - verify_one_sparse(r, istate, it, path, pos); + verify_one_sparse(istate, path, pos); return 0; } @@ -475,8 +475,7 @@ extern struct index_state the_index; /* * Values in this enum (except those outside the 3 bit range) are part - * of pack file format. See Documentation/technical/pack-format.txt - * for more information. + * of pack file format. See gitformat-pack(5) for more information. */ enum object_type { OBJ_BAD = -1, @@ -1017,7 +1016,6 @@ void reset_shared_repository(void); * commands that do not want replace references to be active. */ extern int read_replace_refs; -extern char *git_replace_ref_base; /* * These values are used to help identify parts of a repository to fsync. @@ -1698,6 +1696,12 @@ struct ident_split { int split_ident_line(struct ident_split *, const char *, int); /* + * Given a commit or tag object buffer and the commit or tag headers, replaces + * the idents in the headers with their canonical versions using the mailmap mechanism. + */ +void apply_mailmap_to_header(struct strbuf *, const char **, struct string_list *); + +/* * Compare split idents for equality or strict ordering. Note that we * compare only the ident part of the line, ignoring any timestamp. * @@ -276,6 +276,7 @@ linux-musl) linux-leaks) export SANITIZE=leak export GIT_TEST_PASSING_SANITIZE_LEAK=true + export GIT_TEST_SANITIZE_LEAK_LOG=true ;; esac diff --git a/command-list.txt b/command-list.txt index 9bd6f3c48f..f96bdabd7d 100644 --- a/command-list.txt +++ b/command-list.txt @@ -43,6 +43,15 @@ # specified here, which can only have "guide" attribute and nothing # else. # +# User-facing repository, command and file interfaces such as +# documentation for the .gitmodules, .mailmap etc. files lives in man +# sections 5 and 7. These entries can only have the "userinterfaces" +# attribute and nothing else. 
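The unpack-objects change above switches large blobs on the non-dry-run path from get_data(), which allocates the whole object, to a streaming path: feed_input_zstream() inflates into a fixed 8192-byte buffer and stream_loose_object() consumes one chunk at a time, so memory use no longer scales with blob size. A standalone sketch of that chunked-inflate pattern using plain zlib (rather than Git's git_zstream and input_stream wrappers) might look like this; the buffer size mirrors the patch, everything else is illustrative:

    #include <stdio.h>
    #include <string.h>
    #include <zlib.h>

    #define CHUNK 8192

    /*
     * Inflate zlib-compressed data from 'in' to 'out' through fixed-size
     * buffers, so memory use stays constant no matter how big the
     * decompressed result is.
     */
    static int inflate_in_chunks(FILE *in, FILE *out)
    {
        z_stream strm;
        unsigned char inbuf[CHUNK], outbuf[CHUNK];
        int status = Z_OK;

        memset(&strm, 0, sizeof(strm));
        if (inflateInit(&strm) != Z_OK)
            return -1;

        do {
            strm.avail_in = fread(inbuf, 1, sizeof(inbuf), in);
            if (ferror(in) || !strm.avail_in)
                break;
            strm.next_in = inbuf;

            /* Drain every output chunk this block of input produces. */
            do {
                size_t n;

                strm.avail_out = sizeof(outbuf);
                strm.next_out = outbuf;
                status = inflate(&strm, Z_NO_FLUSH);
                if (status == Z_NEED_DICT || status == Z_DATA_ERROR ||
                    status == Z_MEM_ERROR || status == Z_STREAM_ERROR)
                    goto done;
                n = sizeof(outbuf) - strm.avail_out;
                if (fwrite(outbuf, 1, n, out) != n)
                    goto done;
            } while (!strm.avail_out);
        } while (status != Z_STREAM_END);
    done:
        inflateEnd(&strm);
        return status == Z_STREAM_END ? 0 : -1;
    }

In the patch the consumer is stream_loose_object() rather than a FILE *, and the same fixed-buffer reuse is applied to get_data() in dry-run mode so integrity can be checked without materializing the object.
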
+# +# Git's file formats and protocols, such as documentation for the +# *.bundle format lives in man section 5. These entries can only have +# the "developerinterfaces" attribute and nothing else. +# ### command list (do not change this line) # command name category [category] [category] git-add mainporcelain worktree @@ -192,24 +201,35 @@ git-verify-tag ancillaryinterrogators git-whatchanged ancillaryinterrogators complete git-worktree mainporcelain git-write-tree plumbingmanipulators -gitattributes guide -gitcli guide +gitattributes userinterfaces +gitcli userinterfaces gitcore-tutorial guide gitcredentials guide gitcvs-migration guide gitdiffcore guide giteveryday guide gitfaq guide +gitformat-bundle developerinterfaces +gitformat-chunk developerinterfaces +gitformat-commit-graph developerinterfaces +gitformat-index developerinterfaces +gitformat-pack developerinterfaces +gitformat-signature developerinterfaces gitglossary guide -githooks guide -gitignore guide +githooks userinterfaces +gitignore userinterfaces gitk mainporcelain -gitmailmap guide -gitmodules guide +gitmailmap userinterfaces +gitmodules userinterfaces gitnamespaces guide +gitprotocol-capabilities developerinterfaces +gitprotocol-common developerinterfaces +gitprotocol-http developerinterfaces +gitprotocol-pack developerinterfaces +gitprotocol-v2 developerinterfaces gitremote-helpers guide -gitrepository-layout guide -gitrevisions guide +gitrepository-layout userinterfaces +gitrevisions userinterfaces gitsubmodules guide gittutorial guide gittutorial-2 guide diff --git a/commit-graph.c b/commit-graph.c index a487d49c3e..f2a36032f8 100644 --- a/commit-graph.c +++ b/commit-graph.c @@ -252,7 +252,8 @@ struct commit_graph *load_commit_graph_one_fd_st(struct repository *r, } graph_map = xmmap(NULL, graph_size, PROT_READ, MAP_PRIVATE, fd, 0); close(fd); - ret = parse_commit_graph(r, graph_map, graph_size); + prepare_repo_settings(r); + ret = parse_commit_graph(&r->settings, graph_map, graph_size); if (ret) ret->odb = odb; @@ -321,7 +322,7 @@ static int graph_read_bloom_data(const unsigned char *chunk_start, return 0; } -struct commit_graph *parse_commit_graph(struct repository *r, +struct commit_graph *parse_commit_graph(struct repo_settings *s, void *graph_map, size_t graph_size) { const unsigned char *data; @@ -359,8 +360,6 @@ struct commit_graph *parse_commit_graph(struct repository *r, return NULL; } - prepare_repo_settings(r); - graph = alloc_commit_graph(); graph->hash_len = the_hash_algo->rawsz; @@ -390,7 +389,7 @@ struct commit_graph *parse_commit_graph(struct repository *r, pair_chunk(cf, GRAPH_CHUNKID_EXTRAEDGES, &graph->chunk_extra_edges); pair_chunk(cf, GRAPH_CHUNKID_BASE, &graph->chunk_base_graphs); - if (get_configured_generation_version(r) >= 2) { + if (s->commit_graph_generation_version >= 2) { pair_chunk(cf, GRAPH_CHUNKID_GENERATION_DATA, &graph->chunk_generation_data); pair_chunk(cf, GRAPH_CHUNKID_GENERATION_DATA_OVERFLOW, @@ -400,7 +399,7 @@ struct commit_graph *parse_commit_graph(struct repository *r, graph->read_generation_data = 1; } - if (r->settings.commit_graph_read_changed_paths) { + if (s->commit_graph_read_changed_paths) { pair_chunk(cf, GRAPH_CHUNKID_BLOOMINDEXES, &graph->chunk_bloom_indexes); read_chunk(cf, GRAPH_CHUNKID_BLOOMDATA, diff --git a/commit-graph.h b/commit-graph.h index f23b9e9026..37faee6b66 100644 --- a/commit-graph.h +++ b/commit-graph.h @@ -108,7 +108,12 @@ struct commit_graph *load_commit_graph_one_fd_st(struct repository *r, struct object_directory *odb); struct commit_graph 
*read_commit_graph_one(struct repository *r, struct object_directory *odb); -struct commit_graph *parse_commit_graph(struct repository *r, + +/* + * Callers should initialize the repo_settings with prepare_repo_settings() + * prior to calling parse_commit_graph(). + */ +struct commit_graph *parse_commit_graph(struct repo_settings *s, void *graph_map, size_t graph_size); /* @@ -642,10 +642,11 @@ struct commit_list * commit_list_insert_by_date(struct commit *item, struct comm return commit_list_insert(item, pp); } -static int commit_list_compare_by_date(const void *a, const void *b) +static int commit_list_compare_by_date(const struct commit_list *a, + const struct commit_list *b) { - timestamp_t a_date = ((const struct commit_list *)a)->item->date; - timestamp_t b_date = ((const struct commit_list *)b)->item->date; + timestamp_t a_date = a->item->date; + timestamp_t b_date = b->item->date; if (a_date < b_date) return 1; if (a_date > b_date) @@ -653,20 +654,11 @@ static int commit_list_compare_by_date(const void *a, const void *b) return 0; } -static void *commit_list_get_next(const void *a) -{ - return ((const struct commit_list *)a)->next; -} - -static void commit_list_set_next(void *a, void *next) -{ - ((struct commit_list *)a)->next = next; -} +DEFINE_LIST_SORT(static, commit_list_sort, struct commit_list, next); void commit_list_sort_by_date(struct commit_list **list) { - *list = llist_mergesort(*list, commit_list_get_next, commit_list_set_next, - commit_list_compare_by_date); + commit_list_sort(list, commit_list_compare_by_date); } struct commit *pop_most_recent_commit(struct commit_list **list, diff --git a/compat/disk.h b/compat/disk.h new file mode 100644 index 0000000000..50a32e3d8a --- /dev/null +++ b/compat/disk.h @@ -0,0 +1,56 @@ +#ifndef COMPAT_DISK_H +#define COMPAT_DISK_H + +#include "git-compat-util.h" + +static int get_disk_info(struct strbuf *out) +{ + struct strbuf buf = STRBUF_INIT; + int res = 0; + +#ifdef GIT_WINDOWS_NATIVE + char volume_name[MAX_PATH], fs_name[MAX_PATH]; + DWORD serial_number, component_length, flags; + ULARGE_INTEGER avail2caller, total, avail; + + strbuf_realpath(&buf, ".", 1); + if (!GetDiskFreeSpaceExA(buf.buf, &avail2caller, &total, &avail)) { + error(_("could not determine free disk size for '%s'"), + buf.buf); + res = -1; + goto cleanup; + } + + strbuf_setlen(&buf, offset_1st_component(buf.buf)); + if (!GetVolumeInformationA(buf.buf, volume_name, sizeof(volume_name), + &serial_number, &component_length, &flags, + fs_name, sizeof(fs_name))) { + error(_("could not get info for '%s'"), buf.buf); + res = -1; + goto cleanup; + } + strbuf_addf(out, "Available space on '%s': ", buf.buf); + strbuf_humanise_bytes(out, avail2caller.QuadPart); + strbuf_addch(out, '\n'); +#else + struct statvfs stat; + + strbuf_realpath(&buf, ".", 1); + if (statvfs(buf.buf, &stat) < 0) { + error_errno(_("could not determine free disk size for '%s'"), + buf.buf); + res = -1; + goto cleanup; + } + + strbuf_addf(out, "Available space on '%s': ", buf.buf); + strbuf_humanise_bytes(out, (off_t)stat.f_bsize * (off_t)stat.f_bavail); + strbuf_addf(out, " (mount flags 0x%lx)\n", stat.f_flag); +#endif + +cleanup: + strbuf_release(&buf); + return res; +} + +#endif /* COMPAT_DISK_H */ diff --git a/compat/nonblock.c b/compat/nonblock.c new file mode 100644 index 0000000000..9694ebdb1d --- /dev/null +++ b/compat/nonblock.c @@ -0,0 +1,50 @@ +#include "git-compat-util.h" +#include "nonblock.h" + +#ifdef O_NONBLOCK + +int enable_pipe_nonblock(int fd) +{ + int flags = fcntl(fd, F_GETFL); + 
if (flags < 0) + return -1; + flags |= O_NONBLOCK; + return fcntl(fd, F_SETFL, flags); +} + +#elif defined(GIT_WINDOWS_NATIVE) + +#include "win32.h" + +int enable_pipe_nonblock(int fd) +{ + HANDLE h = (HANDLE)_get_osfhandle(fd); + DWORD mode; + DWORD type = GetFileType(h); + if (type == FILE_TYPE_UNKNOWN && GetLastError() != NO_ERROR) { + errno = EBADF; + return -1; + } + if (type != FILE_TYPE_PIPE) + BUG("unsupported file type: %lu", type); + if (!GetNamedPipeHandleState(h, &mode, NULL, NULL, NULL, NULL, 0)) { + errno = err_win_to_posix(GetLastError()); + return -1; + } + mode |= PIPE_NOWAIT; + if (!SetNamedPipeHandleState(h, &mode, NULL, NULL)) { + errno = err_win_to_posix(GetLastError()); + return -1; + } + return 0; +} + +#else + +int enable_pipe_nonblock(int fd) +{ + errno = ENOSYS; + return -1; +} + +#endif diff --git a/compat/nonblock.h b/compat/nonblock.h new file mode 100644 index 0000000000..af1a331301 --- /dev/null +++ b/compat/nonblock.h @@ -0,0 +1,9 @@ +#ifndef COMPAT_NONBLOCK_H +#define COMPAT_NONBLOCK_H + +/* + * Enable non-blocking I/O for the pipe specified by the passed-in descriptor. + */ +int enable_pipe_nonblock(int fd); + +#endif @@ -81,6 +81,17 @@ static enum config_scope current_parsing_scope; static int pack_compression_seen; static int zlib_compression_seen; +/* + * Config that comes from trusted scopes, namely: + * - CONFIG_SCOPE_SYSTEM (e.g. /etc/gitconfig) + * - CONFIG_SCOPE_GLOBAL (e.g. $HOME/.gitconfig, $XDG_CONFIG_HOME/git) + * - CONFIG_SCOPE_COMMAND (e.g. "-c" option, environment variables) + * + * This is declared here for code cleanliness, but unlike the other + * static variables, this does not hold config parser state. + */ +static struct config_set protected_config; + static int config_file_fgetc(struct config_source *conf) { return getc_unlocked(conf->u.file); @@ -1968,6 +1979,8 @@ int git_config_from_file_with_options(config_fn_t fn, const char *filename, int ret = -1; FILE *f; + if (!filename) + BUG("filename cannot be NULL"); f = fopen_or_warn(filename, "r"); if (f) { ret = do_config_from_file(fn, CONFIG_ORIGIN_FILE, filename, @@ -2378,6 +2391,11 @@ int git_configset_add_file(struct config_set *cs, const char *filename) return git_config_from_file(config_set_callback, filename, cs); } +int git_configset_add_parameters(struct config_set *cs) +{ + return git_config_from_parameters(config_set_callback, cs); +} + int git_configset_get_value(struct config_set *cs, const char *key, const char **value) { const struct string_list *values = NULL; @@ -2619,6 +2637,36 @@ int repo_config_get_pathname(struct repository *repo, return ret; } +/* Read values into protected_config. 
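The enable_pipe_nonblock() helper added in compat/nonblock.c above is the usual F_GETFL/F_SETFL toggle on POSIX, with a PIPE_NOWAIT path on Windows and ENOSYS elsewhere. A self-contained usage sketch of the POSIX path, showing that a read from an empty pipe now fails with EAGAIN instead of blocking (the main() harness here is purely illustrative):

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Same shape as the O_NONBLOCK branch of enable_pipe_nonblock(). */
    static int enable_nonblock(int fd)
    {
        int flags = fcntl(fd, F_GETFL);

        if (flags < 0)
            return -1;
        return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
    }

    int main(void)
    {
        int fds[2];
        char c;

        if (pipe(fds) < 0 || enable_nonblock(fds[0]) < 0)
            return 1;

        /*
         * Nothing has been written yet, so a blocking read would hang;
         * with O_NONBLOCK it returns immediately with EAGAIN instead.
         */
        if (read(fds[0], &c, 1) < 0 &&
            (errno == EAGAIN || errno == EWOULDBLOCK))
            puts("read would block, as expected");

        close(fds[0]);
        close(fds[1]);
        return 0;
    }
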
*/ +static void read_protected_config(void) +{ + char *xdg_config = NULL, *user_config = NULL, *system_config = NULL; + + git_configset_init(&protected_config); + + system_config = git_system_config(); + git_global_config(&user_config, &xdg_config); + + if (system_config) + git_configset_add_file(&protected_config, system_config); + if (xdg_config) + git_configset_add_file(&protected_config, xdg_config); + if (user_config) + git_configset_add_file(&protected_config, user_config); + git_configset_add_parameters(&protected_config); + + free(system_config); + free(xdg_config); + free(user_config); +} + +void git_protected_config(config_fn_t fn, void *data) +{ + if (!protected_config.hash_initialized) + read_protected_config(); + configset_iter(&protected_config, fn, data); +} + /* Functions used historically to read configuration from 'the_repository' */ void git_config(config_fn_t fn, void *data) { @@ -447,6 +447,15 @@ void git_configset_init(struct config_set *cs); int git_configset_add_file(struct config_set *cs, const char *filename); /** + * Parses command line options and environment variables, and adds the + * variable-value pairs to the `config_set`. Returns 0 on success, or -1 + * if there is an error in parsing. The caller decides whether to free + * the incomplete configset or continue using it when the function + * returns -1. + */ +int git_configset_add_parameters(struct config_set *cs); + +/** * Finds and returns the value list, sorted in order of increasing priority * for the configuration variable `key` and config set `cs`. When the * configuration variable `key` is not found, returns NULL. The caller @@ -505,6 +514,13 @@ int repo_config_get_maybe_bool(struct repository *repo, int repo_config_get_pathname(struct repository *repo, const char *key, const char **dest); +/* + * Functions for reading protected config. By definition, protected + * config ignores repository config, so these do not take a `struct + * repository` parameter. + */ +void git_protected_config(config_fn_t fn, void *data); + /** * Querying For Specific Variables * ------------------------------- diff --git a/contrib/coccinelle/array.cocci b/contrib/coccinelle/array.cocci index 9a4f00cb1b..aa75937950 100644 --- a/contrib/coccinelle/array.cocci +++ b/contrib/coccinelle/array.cocci @@ -1,60 +1,58 @@ @@ -expression dst, src, n, E; +type T; +T *dst_ptr; +T *src_ptr; +expression n; @@ - memcpy(dst, src, n * sizeof( -- E[...] 
-+ *(E) - )) +- memcpy(dst_ptr, src_ptr, (n) * \( sizeof(T) +- \| sizeof(*(dst_ptr)) +- \| sizeof(*(src_ptr)) +- \| sizeof(dst_ptr[...]) +- \| sizeof(src_ptr[...]) +- \) ) ++ COPY_ARRAY(dst_ptr, src_ptr, n) @@ type T; -T *ptr; -T[] arr; -expression E, n; +T *dst_ptr; +T[] src_arr; +expression n; @@ -( - memcpy(ptr, E, -- n * sizeof(*(ptr)) -+ n * sizeof(T) - ) -| - memcpy(arr, E, -- n * sizeof(*(arr)) -+ n * sizeof(T) - ) -| - memcpy(E, ptr, -- n * sizeof(*(ptr)) -+ n * sizeof(T) - ) -| - memcpy(E, arr, -- n * sizeof(*(arr)) -+ n * sizeof(T) - ) -) +- memcpy(dst_ptr, src_arr, (n) * \( sizeof(T) +- \| sizeof(*(dst_ptr)) +- \| sizeof(*(src_arr)) +- \| sizeof(dst_ptr[...]) +- \| sizeof(src_arr[...]) +- \) ) ++ COPY_ARRAY(dst_ptr, src_arr, n) @@ type T; -T *dst_ptr; +T[] dst_arr; T *src_ptr; +expression n; +@@ +- memcpy(dst_arr, src_ptr, (n) * \( sizeof(T) +- \| sizeof(*(dst_arr)) +- \| sizeof(*(src_ptr)) +- \| sizeof(dst_arr[...]) +- \| sizeof(src_ptr[...]) +- \) ) ++ COPY_ARRAY(dst_arr, src_ptr, n) + +@@ +type T; T[] dst_arr; T[] src_arr; expression n; @@ -( -- memcpy(dst_ptr, src_ptr, (n) * sizeof(T)) -+ COPY_ARRAY(dst_ptr, src_ptr, n) -| -- memcpy(dst_ptr, src_arr, (n) * sizeof(T)) -+ COPY_ARRAY(dst_ptr, src_arr, n) -| -- memcpy(dst_arr, src_ptr, (n) * sizeof(T)) -+ COPY_ARRAY(dst_arr, src_ptr, n) -| -- memcpy(dst_arr, src_arr, (n) * sizeof(T)) +- memcpy(dst_arr, src_arr, (n) * \( sizeof(T) +- \| sizeof(*(dst_arr)) +- \| sizeof(*(src_arr)) +- \| sizeof(dst_arr[...]) +- \| sizeof(src_arr[...]) +- \) ) + COPY_ARRAY(dst_arr, src_arr, n) -) @@ type T; diff --git a/contrib/coccinelle/tests/free.c b/contrib/coccinelle/tests/free.c new file mode 100644 index 0000000000..96d4abc0c7 --- /dev/null +++ b/contrib/coccinelle/tests/free.c @@ -0,0 +1,11 @@ +int use_FREE_AND_NULL(int *v) +{ + free(*v); + *v = NULL; +} + +int need_no_if(int *v) +{ + if (v) + free(v); +} diff --git a/contrib/coccinelle/tests/free.res b/contrib/coccinelle/tests/free.res new file mode 100644 index 0000000000..f90fd9f48e --- /dev/null +++ b/contrib/coccinelle/tests/free.res @@ -0,0 +1,9 @@ +int use_FREE_AND_NULL(int *v) +{ + FREE_AND_NULL(*v); +} + +int need_no_if(int *v) +{ + free(v); +} diff --git a/contrib/coccinelle/tests/unused.c b/contrib/coccinelle/tests/unused.c new file mode 100644 index 0000000000..8294d734ba --- /dev/null +++ b/contrib/coccinelle/tests/unused.c @@ -0,0 +1,82 @@ +void test_strbuf(void) +{ + struct strbuf sb1 = STRBUF_INIT; + struct strbuf sb2 = STRBUF_INIT; + struct strbuf sb3 = STRBUF_INIT; + struct strbuf sb4 = STRBUF_INIT; + struct strbuf sb5; + struct strbuf sb6 = { 0 }; + struct strbuf sb7 = STRBUF_INIT; + struct strbuf sb8 = STRBUF_INIT; + struct strbuf *sp1; + struct strbuf *sp2; + struct strbuf *sp3; + struct strbuf *sp4 = xmalloc(sizeof(struct strbuf)); + struct strbuf *sp5 = xmalloc(sizeof(struct strbuf)); + struct strbuf *sp6 = xmalloc(sizeof(struct strbuf)); + struct strbuf *sp7; + + strbuf_init(&sb5, 0); + strbuf_init(sp1, 0); + strbuf_init(sp2, 0); + strbuf_init(sp3, 0); + strbuf_init(sp4, 0); + strbuf_init(sp5, 0); + strbuf_init(sp6, 0); + strbuf_init(sp7, 0); + sp7 = xmalloc(sizeof(struct strbuf)); + + use_before(&sb3); + use_as_str("%s", sb7.buf); + use_as_str("%s", sp1->buf); + use_as_str("%s", sp6->buf); + pass_pp(&sp3); + + strbuf_release(&sb1); + strbuf_reset(&sb2); + strbuf_release(&sb3); + strbuf_release(&sb4); + strbuf_release(&sb5); + strbuf_release(&sb6); + strbuf_release(&sb7); + strbuf_release(sp1); + strbuf_release(sp2); + strbuf_release(sp3); + strbuf_release(sp4); + 
strbuf_release(sp5); + strbuf_release(sp6); + strbuf_release(sp7); + + use_after(&sb4); + + if (when_strict()) + return; + strbuf_release(&sb8); +} + +void test_other(void) +{ + struct string_list l = STRING_LIST_INIT_DUP; + struct strbuf sb = STRBUF_INIT; + + string_list_clear(&l, 0); + string_list_clear(&sb, 0); +} + +void test_worktrees(void) +{ + struct worktree **w1 = get_worktrees(); + struct worktree **w2 = get_worktrees(); + struct worktree **w3; + struct worktree **w4; + + w3 = get_worktrees(); + w4 = get_worktrees(); + + use_it(w4); + + free_worktrees(w1); + free_worktrees(w2); + free_worktrees(w3); + free_worktrees(w4); +} diff --git a/contrib/coccinelle/tests/unused.res b/contrib/coccinelle/tests/unused.res new file mode 100644 index 0000000000..6d3e745683 --- /dev/null +++ b/contrib/coccinelle/tests/unused.res @@ -0,0 +1,45 @@ +void test_strbuf(void) +{ + struct strbuf sb3 = STRBUF_INIT; + struct strbuf sb4 = STRBUF_INIT; + struct strbuf sb7 = STRBUF_INIT; + struct strbuf *sp1; + struct strbuf *sp3; + struct strbuf *sp6 = xmalloc(sizeof(struct strbuf)); + strbuf_init(sp1, 0); + strbuf_init(sp3, 0); + strbuf_init(sp6, 0); + + use_before(&sb3); + use_as_str("%s", sb7.buf); + use_as_str("%s", sp1->buf); + use_as_str("%s", sp6->buf); + pass_pp(&sp3); + + strbuf_release(&sb3); + strbuf_release(&sb4); + strbuf_release(&sb7); + strbuf_release(sp1); + strbuf_release(sp3); + strbuf_release(sp6); + + use_after(&sb4); + + if (when_strict()) + return; +} + +void test_other(void) +{ +} + +void test_worktrees(void) +{ + struct worktree **w4; + + w4 = get_worktrees(); + + use_it(w4); + + free_worktrees(w4); +} diff --git a/contrib/coccinelle/unused.cocci b/contrib/coccinelle/unused.cocci new file mode 100644 index 0000000000..d84046f82e --- /dev/null +++ b/contrib/coccinelle/unused.cocci @@ -0,0 +1,43 @@ +// This rule finds sequences of "unused" declerations and uses of a +// variable, where "unused" is defined to include only calling the +// equivalent of alloc, init & free functions on the variable. +@@ +type T; +identifier I; +// STRBUF_INIT, but also e.g. STRING_LIST_INIT_DUP (so no anchoring) +constant INIT_MACRO =~ "_INIT"; +identifier MALLOC1 =~ "^x?[mc]alloc$"; +identifier INIT_ASSIGN1 =~ "^get_worktrees$"; +identifier INIT_CALL1 =~ "^[a-z_]*_init$"; +identifier REL1 =~ "^[a-z_]*_(release|reset|clear|free)$"; +identifier REL2 =~ "^(release|clear|free)_[a-z_]*$"; +@@ + +( +- T I; +| +- T I = { 0 }; +| +- T I = INIT_MACRO; +| +- T I = MALLOC1(...); +| +- T I = INIT_ASSIGN1(...); +) + +<... when != \( I \| &I \) +( +- \( INIT_CALL1 \)( \( I \| &I \), ...); +| +- I = \( INIT_ASSIGN1 \)(...); +| +- I = MALLOC1(...); +) +...> + +( +- \( REL1 \| REL2 \)( \( I \| &I \), ...); +| +- \( REL1 \| REL2 \)( \( &I \| I \) ); +) + ... when != \( I \| &I \) diff --git a/contrib/completion/git-prompt.sh b/contrib/completion/git-prompt.sh index 1435548e00..57972c2845 100644 --- a/contrib/completion/git-prompt.sh +++ b/contrib/completion/git-prompt.sh @@ -84,6 +84,10 @@ # single '?' character by setting GIT_PS1_COMPRESSSPARSESTATE, or omitted # by setting GIT_PS1_OMITSPARSESTATE. # +# If you would like to see a notification on the prompt when there are +# unresolved conflicts, set GIT_PS1_SHOWCONFLICTSTATE to "yes". The +# prompt will include "|CONFLICT". 
+# # If you would like to see more information about the identity of # commits checked out as a detached HEAD, set GIT_PS1_DESCRIBE_STYLE # to one of these values: @@ -508,6 +512,12 @@ __git_ps1 () r="$r $step/$total" fi + local conflict="" # state indicator for unresolved conflicts + if [[ "${GIT_PS1_SHOWCONFLICTSTATE}" == "yes" ]] && + [[ $(git ls-files --unmerged 2>/dev/null) ]]; then + conflict="|CONFLICT" + fi + local w="" local i="" local s="" @@ -572,7 +582,7 @@ __git_ps1 () fi local f="$h$w$i$s$u$p" - local gitstring="$c$b${f:+$z$f}${sparse}$r${upstream}" + local gitstring="$c$b${f:+$z$f}${sparse}$r${upstream}${conflict}" if [ $pcmode = yes ]; then if [ "${__git_printf_supports_v-}" != yes ]; then diff --git a/contrib/credential/netrc/t-git-credential-netrc.sh b/contrib/credential/netrc/t-git-credential-netrc.sh index 07227d0228..bf2777308a 100755 --- a/contrib/credential/netrc/t-git-credential-netrc.sh +++ b/contrib/credential/netrc/t-git-credential-netrc.sh @@ -3,16 +3,9 @@ cd ../../../t test_description='git-credential-netrc' . ./test-lib.sh + . "$TEST_DIRECTORY"/lib-perl.sh - if ! test_have_prereq PERL; then - skip_all='skipping perl interface tests, perl not available' - test_done - fi - - perl -MTest::More -e 0 2>/dev/null || { - skip_all="Perl Test::More unavailable, skipping test" - test_done - } + skip_all_if_no_Test_More # set up test repository @@ -20,13 +13,10 @@ 'set up test repository' \ 'git config --add gpg.program test.git-config-gpg' - # The external test will outputs its own plan - test_external_has_tap=1 - export PERL5LIB="$GITPERLLIB" - test_external \ - 'git-credential-netrc' \ + test_expect_success 'git-credential-netrc' ' perl "$GIT_BUILD_DIR"/contrib/credential/netrc/test.pl + ' test_done ) diff --git a/contrib/scalar/README.md b/contrib/scalar/README.md deleted file mode 100644 index 634b5771ed..0000000000 --- a/contrib/scalar/README.md +++ /dev/null @@ -1,82 +0,0 @@ -# Scalar - an opinionated repository management tool - -Scalar is an add-on to Git that helps users take advantage of advanced -performance features in Git. Originally implemented in C# using .NET Core, -based on the learnings from the VFS for Git project, most of the techniques -developed by the Scalar project have been integrated into core Git already: - -* partial clone, -* commit graphs, -* multi-pack index, -* sparse checkout (cone mode), -* scheduled background maintenance, -* etc - -This directory contains the remaining parts of Scalar that are not (yet) in -core Git. - -## Roadmap - -The idea is to populate this directory via incremental patch series and -eventually move to a top-level directory next to `gitk-git/` and to `git-gui/`. The -current plan involves the following patch series: - -- `scalar-the-beginning`: The initial patch series which sets up - `contrib/scalar/` and populates it with a minimal `scalar` command that - demonstrates the fundamental ideas. - -- `scalar-c-and-C`: The `scalar` command learns about two options that can be - specified before the command, `-c <key>=<value>` and `-C <directory>`. - -- `scalar-diagnose`: The `scalar` command is taught the `diagnose` subcommand. - -- `scalar-and-builtin-fsmonitor`: The built-in FSMonitor is enabled in `scalar - register` and in `scalar clone`, for an enormous performance boost when - working in large worktrees. This patch series necessarily depends on Jeff - Hostetler's FSMonitor patch series to be integrated into Git. - -- `scalar-gentler-config-locking`: Scalar enlistments are registered in the - user's Git config. 
This usually does not represent any problem because it is - rare for a user to register an enlistment. However, in Scalar's functional - tests, Scalar enlistments are created galore, and in parallel, which can lead - to lock contention. This patch series works around that problem by re-trying - to lock the config file in a gentle fashion. - -- `scalar-extra-docs`: Add some extensive documentation that has been written - in the original Scalar project (all subject to discussion, of course). - -- `optionally-install-scalar`: Now that Scalar is feature (and documentation) - complete and is verified in CI builds, let's offer to install it. - -- `move-scalar-to-toplevel`: Now that Scalar is complete, let's move it next to - `gitk-git/` and to `git-gui/`, making it a top-level command. - -The following two patch series exist in Microsoft's fork of Git and are -publicly available. There is no current plan to upstream them, not because I -want to withhold these patches, but because I don't think the Git community is -interested in these patches. - -There are some interesting ideas there, but the implementation is too specific -to Azure Repos and/or VFS for Git to be of much help in general (and also: my -colleagues tried to upstream some patches already and the enthusiasm for -integrating things related to Azure Repos and VFS for Git can be summarized in -very, very few words). - -These still exist mainly because the GVFS protocol is what Azure Repos has -instead of partial clone, while Git is focused on improving partial clone: - -- `scalar-with-gvfs`: The primary purpose of this patch series is to support - existing Scalar users whose repositories are hosted in Azure Repos (which - does not support Git's partial clones, but supports its predecessor, the GVFS - protocol, which is used by Scalar to emulate the partial clone). - - Since the GVFS protocol will never be supported by core Git, this patch - series will remain in Microsoft's fork of Git. - -- `run-scalar-functional-tests`: The Scalar project developed a quite - comprehensive set of integration tests (or, "Functional Tests"). They are the - sole remaining part of the original C#-based Scalar project, and this patch - adds a GitHub workflow that runs them all. - - Since the tests partially depend on features that are only provided in the - `scalar-with-gvfs` patch series, this patch cannot be upstreamed. diff --git a/contrib/scalar/scalar.c b/contrib/scalar/scalar.c index 28176914e5..642d16124e 100644 --- a/contrib/scalar/scalar.c +++ b/contrib/scalar/scalar.c @@ -7,27 +7,13 @@ #include "parse-options.h" #include "config.h" #include "run-command.h" +#include "simple-ipc.h" +#include "fsmonitor-ipc.h" +#include "fsmonitor-settings.h" #include "refs.h" #include "dir.h" #include "packfile.h" #include "help.h" -#include "archive.h" -#include "object-store.h" - -/* - * Remove the deepest subdirectory in the provided path string. Path must not - * include a trailing path separator. Returns 1 if parent directory found, - * otherwise 0. - */ -static int strbuf_parent_directory(struct strbuf *buf) -{ - size_t len = buf->len; - size_t offset = offset_1st_component(buf->buf); - char *path_sep = find_last_dir_sep(buf->buf + offset); - strbuf_setlen(buf, path_sep ? 
path_sep - buf->buf : offset); - - return buf->len < len; -} static void setup_enlistment_directory(int argc, const char **argv, const char * const *usagestr, @@ -35,8 +21,8 @@ static void setup_enlistment_directory(int argc, const char **argv, struct strbuf *enlistment_root) { struct strbuf path = STRBUF_INIT; - char *root; - int enlistment_found = 0; + int enlistment_is_repo_parent = 0; + size_t len; if (startup_info->have_repository) BUG("gitdir already set up?!?"); @@ -49,51 +35,36 @@ static void setup_enlistment_directory(int argc, const char **argv, strbuf_add_absolute_path(&path, argv[0]); if (!is_directory(path.buf)) die(_("'%s' does not exist"), path.buf); + if (chdir(path.buf) < 0) + die_errno(_("could not switch to '%s'"), path.buf); } else if (strbuf_getcwd(&path) < 0) die(_("need a working directory")); strbuf_trim_trailing_dir_sep(&path); - do { - const size_t len = path.len; - - /* check if currently in enlistment root with src/ workdir */ - strbuf_addstr(&path, "/src"); - if (is_nonbare_repository_dir(&path)) { - if (enlistment_root) - strbuf_add(enlistment_root, path.buf, len); - enlistment_found = 1; - break; - } - - /* reset to original path */ - strbuf_setlen(&path, len); - - /* check if currently in workdir */ - if (is_nonbare_repository_dir(&path)) { - if (enlistment_root) { - /* - * If the worktree's directory's name is `src`, the enlistment is the - * parent directory, otherwise it is identical to the worktree. - */ - root = strip_path_suffix(path.buf, "src"); - strbuf_addstr(enlistment_root, root ? root : path.buf); - free(root); - } + /* check if currently in enlistment root with src/ workdir */ + len = path.len; + strbuf_addstr(&path, "/src"); + if (is_nonbare_repository_dir(&path)) { + enlistment_is_repo_parent = 1; + if (chdir(path.buf) < 0) + die_errno(_("could not switch to '%s'"), path.buf); + } + strbuf_setlen(&path, len); - enlistment_found = 1; - break; - } - } while (strbuf_parent_directory(&path)); + setup_git_directory(); - if (!enlistment_found) - die(_("could not find enlistment root")); + if (!the_repository->worktree) + die(_("Scalar enlistments require a worktree")); - if (chdir(path.buf) < 0) - die_errno(_("could not switch to '%s'"), path.buf); + if (enlistment_root) { + if (enlistment_is_repo_parent) + strbuf_addbuf(enlistment_root, &path); + else + strbuf_addstr(enlistment_root, the_repository->worktree); + } strbuf_release(&path); - setup_git_directory(); } static int run_git(const char *arg, ...) @@ -115,13 +86,39 @@ static int run_git(const char *arg, ...) 
return res; } +struct scalar_config { + const char *key; + const char *value; + int overwrite_on_reconfigure; +}; + +static int set_scalar_config(const struct scalar_config *config, int reconfigure) +{ + char *value = NULL; + int res; + + if ((reconfigure && config->overwrite_on_reconfigure) || + git_config_get_string(config->key, &value)) { + trace2_data_string("scalar", the_repository, config->key, "created"); + res = git_config_set_gently(config->key, config->value); + } else { + trace2_data_string("scalar", the_repository, config->key, "exists"); + res = 0; + } + + free(value); + return res; +} + +static int have_fsmonitor_support(void) +{ + return fsmonitor_ipc__is_supported() && + fsm_settings__get_reason(the_repository) == FSMONITOR_REASON_OK; +} + static int set_recommended_config(int reconfigure) { - struct { - const char *key; - const char *value; - int overwrite_on_reconfigure; - } config[] = { + struct scalar_config config[] = { /* Required */ { "am.keepCR", "true", 1 }, { "core.FSCache", "true", 1 }, @@ -175,17 +172,16 @@ static int set_recommended_config(int reconfigure) char *value; for (i = 0; config[i].key; i++) { - if ((reconfigure && config[i].overwrite_on_reconfigure) || - git_config_get_string(config[i].key, &value)) { - trace2_data_string("scalar", the_repository, config[i].key, "created"); - if (git_config_set_gently(config[i].key, - config[i].value) < 0) - return error(_("could not configure %s=%s"), - config[i].key, config[i].value); - } else { - trace2_data_string("scalar", the_repository, config[i].key, "exists"); - free(value); - } + if (set_scalar_config(config + i, reconfigure)) + return error(_("could not configure %s=%s"), + config[i].key, config[i].value); + } + + if (have_fsmonitor_support()) { + struct scalar_config fsmonitor = { "core.fsmonitor", "true" }; + if (set_scalar_config(&fsmonitor, reconfigure)) + return error(_("could not configure %s=%s"), + fsmonitor.key, fsmonitor.value); } /* @@ -236,123 +232,55 @@ static int add_or_remove_enlistment(int add) "scalar.repo", the_repository->worktree, NULL); } -static int register_dir(void) +static int start_fsmonitor_daemon(void) { - int res = add_or_remove_enlistment(1); - - if (!res) - res = set_recommended_config(0); + assert(have_fsmonitor_support()); - if (!res) - res = toggle_maintenance(1); + if (fsmonitor_ipc__get_state() != IPC_STATE__LISTENING) + return run_git("fsmonitor--daemon", "start", NULL); - return res; + return 0; } -static int unregister_dir(void) +static int stop_fsmonitor_daemon(void) { - int res = 0; + assert(have_fsmonitor_support()); - if (toggle_maintenance(0) < 0) - res = -1; + if (fsmonitor_ipc__get_state() == IPC_STATE__LISTENING) + return run_git("fsmonitor--daemon", "stop", NULL); - if (add_or_remove_enlistment(0) < 0) - res = -1; - - return res; + return 0; } -static int add_directory_to_archiver(struct strvec *archiver_args, - const char *path, int recurse) +static int register_dir(void) { - int at_root = !*path; - DIR *dir = opendir(at_root ? "." 
: path); - struct dirent *e; - struct strbuf buf = STRBUF_INIT; - size_t len; - int res = 0; + if (add_or_remove_enlistment(1)) + return error(_("could not add enlistment")); - if (!dir) - return error_errno(_("could not open directory '%s'"), path); - - if (!at_root) - strbuf_addf(&buf, "%s/", path); - len = buf.len; - strvec_pushf(archiver_args, "--prefix=%s", buf.buf); - - while (!res && (e = readdir(dir))) { - if (!strcmp(".", e->d_name) || !strcmp("..", e->d_name)) - continue; - - strbuf_setlen(&buf, len); - strbuf_addstr(&buf, e->d_name); - - if (e->d_type == DT_REG) - strvec_pushf(archiver_args, "--add-file=%s", buf.buf); - else if (e->d_type != DT_DIR) - warning(_("skipping '%s', which is neither file nor " - "directory"), buf.buf); - else if (recurse && - add_directory_to_archiver(archiver_args, - buf.buf, recurse) < 0) - res = -1; + if (set_recommended_config(0)) + return error(_("could not set recommended config")); + + if (toggle_maintenance(1)) + return error(_("could not turn on maintenance")); + + if (have_fsmonitor_support() && start_fsmonitor_daemon()) { + return error(_("could not start the FSMonitor daemon")); } - closedir(dir); - strbuf_release(&buf); - return res; + return 0; } -#ifndef WIN32 -#include <sys/statvfs.h> -#endif - -static int get_disk_info(struct strbuf *out) +static int unregister_dir(void) { -#ifdef WIN32 - struct strbuf buf = STRBUF_INIT; - char volume_name[MAX_PATH], fs_name[MAX_PATH]; - DWORD serial_number, component_length, flags; - ULARGE_INTEGER avail2caller, total, avail; - - strbuf_realpath(&buf, ".", 1); - if (!GetDiskFreeSpaceExA(buf.buf, &avail2caller, &total, &avail)) { - error(_("could not determine free disk size for '%s'"), - buf.buf); - strbuf_release(&buf); - return -1; - } + int res = 0; - strbuf_setlen(&buf, offset_1st_component(buf.buf)); - if (!GetVolumeInformationA(buf.buf, volume_name, sizeof(volume_name), - &serial_number, &component_length, &flags, - fs_name, sizeof(fs_name))) { - error(_("could not get info for '%s'"), buf.buf); - strbuf_release(&buf); - return -1; - } - strbuf_addf(out, "Available space on '%s': ", buf.buf); - strbuf_humanise_bytes(out, avail2caller.QuadPart); - strbuf_addch(out, '\n'); - strbuf_release(&buf); -#else - struct strbuf buf = STRBUF_INIT; - struct statvfs stat; + if (toggle_maintenance(0)) + res = error(_("could not turn off maintenance")); - strbuf_realpath(&buf, ".", 1); - if (statvfs(buf.buf, &stat) < 0) { - error_errno(_("could not determine free disk size for '%s'"), - buf.buf); - strbuf_release(&buf); - return -1; - } + if (add_or_remove_enlistment(0)) + res = error(_("could not remove enlistment")); - strbuf_addf(out, "Available space on '%s': ", buf.buf); - strbuf_humanise_bytes(out, st_mult(stat.f_bsize, stat.f_bavail)); - strbuf_addf(out, " (mount flags 0x%lx)\n", stat.f_flag); - strbuf_release(&buf); -#endif - return 0; + return res; } /* printf-style interface, expects `<key>=<value>` argument */ @@ -431,25 +359,35 @@ static int delete_enlistment(struct strbuf *enlistment) { #ifdef WIN32 struct strbuf parent = STRBUF_INIT; + size_t offset; + char *path_sep; #endif if (unregister_dir()) - die(_("failed to unregister repository")); + return error(_("failed to unregister repository")); #ifdef WIN32 /* * Change the current directory to one outside of the enlistment so * that we may delete everything underneath it. 
*/ - strbuf_addbuf(&parent, enlistment); - strbuf_parent_directory(&parent); - if (chdir(parent.buf) < 0) - die_errno(_("could not switch to '%s'"), parent.buf); + offset = offset_1st_component(enlistment->buf); + path_sep = find_last_dir_sep(enlistment->buf + offset); + strbuf_add(&parent, enlistment->buf, + path_sep ? path_sep - enlistment->buf : offset); + if (chdir(parent.buf) < 0) { + int res = error_errno(_("could not switch to '%s'"), parent.buf); + strbuf_release(&parent); + return res; + } strbuf_release(&parent); #endif + if (have_fsmonitor_support() && stop_fsmonitor_daemon()) + return error(_("failed to stop the FSMonitor daemon")); + if (remove_dir_recursively(enlistment, 0)) - die(_("failed to delete enlistment directory")); + return error(_("failed to delete enlistment directory")); return 0; } @@ -595,83 +533,6 @@ cleanup: return res; } -static void dir_file_stats_objects(const char *full_path, size_t full_path_len, - const char *file_name, void *data) -{ - struct strbuf *buf = data; - struct stat st; - - if (!stat(full_path, &st)) - strbuf_addf(buf, "%-70s %16" PRIuMAX "\n", file_name, - (uintmax_t)st.st_size); -} - -static int dir_file_stats(struct object_directory *object_dir, void *data) -{ - struct strbuf *buf = data; - - strbuf_addf(buf, "Contents of %s:\n", object_dir->path); - - for_each_file_in_pack_dir(object_dir->path, dir_file_stats_objects, - data); - - return 0; -} - -static int count_files(char *path) -{ - DIR *dir = opendir(path); - struct dirent *e; - int count = 0; - - if (!dir) - return 0; - - while ((e = readdir(dir)) != NULL) - if (!is_dot_or_dotdot(e->d_name) && e->d_type == DT_REG) - count++; - - closedir(dir); - return count; -} - -static void loose_objs_stats(struct strbuf *buf, const char *path) -{ - DIR *dir = opendir(path); - struct dirent *e; - int count; - int total = 0; - unsigned char c; - struct strbuf count_path = STRBUF_INIT; - size_t base_path_len; - - if (!dir) - return; - - strbuf_addstr(buf, "Object directory stats for "); - strbuf_add_absolute_path(buf, path); - strbuf_addstr(buf, ":\n"); - - strbuf_add_absolute_path(&count_path, path); - strbuf_addch(&count_path, '/'); - base_path_len = count_path.len; - - while ((e = readdir(dir)) != NULL) - if (!is_dot_or_dotdot(e->d_name) && - e->d_type == DT_DIR && strlen(e->d_name) == 2 && - !hex_to_bytes(&c, e->d_name, 1)) { - strbuf_setlen(&count_path, base_path_len); - strbuf_addstr(&count_path, e->d_name); - total += (count = count_files(count_path.buf)); - strbuf_addf(buf, "%s : %7d files\n", e->d_name, count); - } - - strbuf_addf(buf, "Total: %d loose objects", total); - - strbuf_release(&count_path); - closedir(dir); -} - static int cmd_diagnose(int argc, const char **argv) { struct option options[] = { @@ -681,107 +542,19 @@ static int cmd_diagnose(int argc, const char **argv) N_("scalar diagnose [<enlistment>]"), NULL }; - struct strbuf zip_path = STRBUF_INIT; - struct strvec archiver_args = STRVEC_INIT; - char **argv_copy = NULL; - int stdout_fd = -1, archiver_fd = -1; - time_t now = time(NULL); - struct tm tm; - struct strbuf path = STRBUF_INIT, buf = STRBUF_INIT; + struct strbuf diagnostics_root = STRBUF_INIT; int res = 0; argc = parse_options(argc, argv, NULL, options, usage, 0); - setup_enlistment_directory(argc, argv, usage, options, &zip_path); - - strbuf_addstr(&zip_path, "/.scalarDiagnostics/scalar_"); - strbuf_addftime(&zip_path, - "%Y%m%d_%H%M%S", localtime_r(&now, &tm), 0, 0); - strbuf_addstr(&zip_path, ".zip"); - switch (safe_create_leading_directories(zip_path.buf)) { - 
case SCLD_EXISTS: - case SCLD_OK: - break; - default: - error_errno(_("could not create directory for '%s'"), - zip_path.buf); - goto diagnose_cleanup; - } - stdout_fd = dup(1); - if (stdout_fd < 0) { - res = error_errno(_("could not duplicate stdout")); - goto diagnose_cleanup; - } + setup_enlistment_directory(argc, argv, usage, options, &diagnostics_root); + strbuf_addstr(&diagnostics_root, "/.scalarDiagnostics"); - archiver_fd = xopen(zip_path.buf, O_CREAT | O_WRONLY | O_TRUNC, 0666); - if (archiver_fd < 0 || dup2(archiver_fd, 1) < 0) { - res = error_errno(_("could not redirect output")); - goto diagnose_cleanup; - } - - init_zip_archiver(); - strvec_pushl(&archiver_args, "scalar-diagnose", "--format=zip", NULL); - - strbuf_reset(&buf); - strbuf_addstr(&buf, "Collecting diagnostic info\n\n"); - get_version_info(&buf, 1); - - strbuf_addf(&buf, "Enlistment root: %s\n", the_repository->worktree); - get_disk_info(&buf); - write_or_die(stdout_fd, buf.buf, buf.len); - strvec_pushf(&archiver_args, - "--add-virtual-file=diagnostics.log:%.*s", - (int)buf.len, buf.buf); - - strbuf_reset(&buf); - strbuf_addstr(&buf, "--add-virtual-file=packs-local.txt:"); - dir_file_stats(the_repository->objects->odb, &buf); - foreach_alt_odb(dir_file_stats, &buf); - strvec_push(&archiver_args, buf.buf); - - strbuf_reset(&buf); - strbuf_addstr(&buf, "--add-virtual-file=objects-local.txt:"); - loose_objs_stats(&buf, ".git/objects"); - strvec_push(&archiver_args, buf.buf); - - if ((res = add_directory_to_archiver(&archiver_args, ".git", 0)) || - (res = add_directory_to_archiver(&archiver_args, ".git/hooks", 0)) || - (res = add_directory_to_archiver(&archiver_args, ".git/info", 0)) || - (res = add_directory_to_archiver(&archiver_args, ".git/logs", 1)) || - (res = add_directory_to_archiver(&archiver_args, ".git/objects/info", 0))) - goto diagnose_cleanup; - - strvec_pushl(&archiver_args, "--prefix=", - oid_to_hex(the_hash_algo->empty_tree), "--", NULL); - - /* `write_archive()` modifies the `argv` passed to it. Let it. */ - argv_copy = xmemdupz(archiver_args.v, - sizeof(char *) * archiver_args.nr); - res = write_archive(archiver_args.nr, (const char **)argv_copy, NULL, - the_repository, NULL, 0); - if (res) { - error(_("failed to write archive")); - goto diagnose_cleanup; - } - - if (!res) - fprintf(stderr, "\n" - "Diagnostics complete.\n" - "All of the gathered info is captured in '%s'\n", - zip_path.buf); - -diagnose_cleanup: - if (archiver_fd >= 0) { - close(1); - dup2(stdout_fd, 1); - } - free(argv_copy); - strvec_clear(&archiver_args); - strbuf_release(&zip_path); - strbuf_release(&path); - strbuf_release(&buf); + res = run_git("diagnose", "--mode=all", "-s", "%Y%m%d_%H%M%S", + "-o", diagnostics_root.buf, NULL); + strbuf_release(&diagnostics_root); return res; } diff --git a/contrib/scalar/scalar.txt b/contrib/scalar/scalar.txt index c0425e0653..1a12dc4507 100644 --- a/contrib/scalar/scalar.txt +++ b/contrib/scalar/scalar.txt @@ -3,7 +3,7 @@ scalar(1) NAME ---- -scalar - an opinionated repository management tool +scalar - A tool for managing large Git repositories SYNOPSIS -------- @@ -20,10 +20,9 @@ scalar delete <enlistment> DESCRIPTION ----------- -Scalar is an opinionated repository management tool. By creating new -repositories or registering existing repositories with Scalar, your Git -experience will speed up. 
Scalar sets advanced Git config settings, -maintains your repositories in the background, and helps reduce data sent +Scalar is a repository management tool that optimizes Git for use in large +repositories. Scalar improves performance by configuring advanced Git settings, +maintaining repositories in the background, and helping to reduce data sent across the network. An important Scalar concept is the enlistment: this is the top-level directory diff --git a/contrib/scalar/t/Makefile b/contrib/scalar/t/Makefile index 01e82e56d1..1ed174a8cf 100644 --- a/contrib/scalar/t/Makefile +++ b/contrib/scalar/t/Makefile @@ -42,7 +42,7 @@ $(T): @echo "*** $@ ***"; GIT_CONFIG=.git/config '$(SHELL_PATH_SQ)' $@ $(GIT_TEST_OPTS) clean-except-prove-cache: - $(RM) -r 'trash directory'.* '$(TEST_RESULTS_DIRECTORY_SQ)' + $(RM) -r 'trash directory'.* $(RM) -r valgrind/bin clean: clean-except-prove-cache diff --git a/contrib/scalar/t/t9099-scalar.sh b/contrib/scalar/t/t9099-scalar.sh index 10b1172a8a..dfb949f52e 100755 --- a/contrib/scalar/t/t9099-scalar.sh +++ b/contrib/scalar/t/t9099-scalar.sh @@ -17,6 +17,99 @@ test_expect_success 'scalar shows a usage' ' test_expect_code 129 scalar -h ' +test_expect_success 'scalar invoked on enlistment root' ' + test_when_finished rm -rf test src deeper && + + for enlistment_root in test src deeper/test + do + git init ${enlistment_root}/src && + + # Register + scalar register ${enlistment_root} && + scalar list >out && + grep "$(pwd)/${enlistment_root}/src\$" out && + + # Delete (including enlistment root) + scalar delete $enlistment_root && + test_path_is_missing $enlistment_root && + scalar list >out && + ! grep "^$(pwd)/${enlistment_root}/src\$" out || return 1 + done +' + +test_expect_success 'scalar invoked on enlistment src repo' ' + test_when_finished rm -rf test src deeper && + + for enlistment_root in test src deeper/test + do + git init ${enlistment_root}/src && + + # Register + scalar register ${enlistment_root}/src && + scalar list >out && + grep "$(pwd)/${enlistment_root}/src\$" out && + + # Delete (will not include enlistment root) + scalar delete ${enlistment_root}/src && + test_path_is_dir $enlistment_root && + scalar list >out && + ! grep "^$(pwd)/${enlistment_root}/src\$" out || return 1 + done +' + +test_expect_success 'scalar invoked when enlistment root and repo are the same' ' + test_when_finished rm -rf test src deeper && + + for enlistment_root in test src deeper/test + do + git init ${enlistment_root} && + + # Register + scalar register ${enlistment_root} && + scalar list >out && + grep "$(pwd)/${enlistment_root}\$" out && + + # Delete (will not include enlistment root) + scalar delete ${enlistment_root} && + test_path_is_missing $enlistment_root && + scalar list >out && + ! grep "^$(pwd)/${enlistment_root}\$" out && + + # Make sure we did not accidentally delete the trash dir + test_path_is_dir "$TRASH_DIRECTORY" || return 1 + done +' + +test_expect_success 'scalar repo search respects GIT_CEILING_DIRECTORIES' ' + test_when_finished rm -rf test && + + git init test/src && + mkdir -p test/src/deep && + GIT_CEILING_DIRECTORIES="$(pwd)/test/src" && + ! scalar register test/src/deep 2>err && + grep "not a git repository" err +' + +test_expect_success 'scalar enlistments need a worktree' ' + test_when_finished rm -rf bare test && + + git init --bare bare/src && + ! scalar register bare/src 2>err && + grep "Scalar enlistments require a worktree" err && + + git init test/src && + ! 
scalar register test/src/.git 2>err && + grep "Scalar enlistments require a worktree" err +' + +test_expect_success FSMONITOR_DAEMON 'scalar register starts fsmon daemon' ' + git init test/src && + test_must_fail git -C test/src fsmonitor--daemon status && + scalar register test/src && + git -C test/src fsmonitor--daemon status && + test_cmp_config -C test/src true core.fsmonitor +' + test_expect_success 'scalar unregister' ' git init vanish/src && scalar register vanish/src && @@ -109,14 +202,14 @@ test_expect_success UNZIP 'scalar diagnose' ' sed -n "s/.*$SQ\\(.*\\.zip\\)$SQ.*/\\1/p" <err >zip_path && zip_path=$(cat zip_path) && test -n "$zip_path" && - unzip -v "$zip_path" && + "$GIT_UNZIP" -v "$zip_path" && folder=${zip_path%.zip} && test_path_is_missing "$folder" && - unzip -p "$zip_path" diagnostics.log >out && + "$GIT_UNZIP" -p "$zip_path" diagnostics.log >out && test_file_not_empty out && - unzip -p "$zip_path" packs-local.txt >out && + "$GIT_UNZIP" -p "$zip_path" packs-local.txt >out && grep "$(pwd)/.git/objects" out && - unzip -p "$zip_path" objects-local.txt >out && + "$GIT_UNZIP" -p "$zip_path" objects-local.txt >out && grep "^Total: [1-9]" out ' diff --git a/contrib/subtree/git-subtree.sh b/contrib/subtree/git-subtree.sh index 1af1d9653e..7562a395c2 100755 --- a/contrib/subtree/git-subtree.sh +++ b/contrib/subtree/git-subtree.sh @@ -50,6 +50,14 @@ m,message= use the given message as the commit message for the merge commit indent=0 +# Usage: say [MSG...] +say () { + if test -z "$arg_quiet" + then + printf '%s\n' "$*" + fi +} + # Usage: debug [MSG...] debug () { if test -n "$arg_debug" @@ -60,7 +68,7 @@ debug () { # Usage: progress [MSG...] progress () { - if test -z "$GIT_QUIET" + if test -z "$arg_quiet" then if test -z "$arg_debug" then @@ -146,6 +154,7 @@ main () { eval "$set_args" # Begin "real" flag parsing. + arg_quiet= arg_debug= arg_prefix= arg_split_branch= @@ -161,7 +170,7 @@ main () { case "$opt" in -q) - GIT_QUIET=1 + arg_quiet=1 ;; -d) arg_debug=1 @@ -252,7 +261,7 @@ main () { dir="$(dirname "$arg_prefix/.")" debug "command: {$arg_command}" - debug "quiet: {$GIT_QUIET}" + debug "quiet: {$arg_quiet}" debug "dir: {$dir}" debug "opts: {$*}" debug diff --git a/contrib/subtree/t/Makefile b/contrib/subtree/t/Makefile index 276898eb6b..3d278bb0ed 100644 --- a/contrib/subtree/t/Makefile +++ b/contrib/subtree/t/Makefile @@ -47,7 +47,7 @@ pre-clean: $(RM) -r '$(TEST_RESULTS_DIRECTORY_SQ)' clean-except-prove-cache: - $(RM) -r 'trash directory'.* '$(TEST_RESULTS_DIRECTORY_SQ)' + $(RM) -r 'trash directory'.* $(RM) -r valgrind/bin clean: clean-except-prove-cache diff --git a/diagnose.c b/diagnose.c new file mode 100644 index 0000000000..beb0a8741b --- /dev/null +++ b/diagnose.c @@ -0,0 +1,269 @@ +#include "cache.h" +#include "diagnose.h" +#include "compat/disk.h" +#include "archive.h" +#include "dir.h" +#include "help.h" +#include "strvec.h" +#include "object-store.h" +#include "packfile.h" + +struct archive_dir { + const char *path; + int recursive; +}; + +struct diagnose_option { + enum diagnose_mode mode; + const char *option_name; +}; + +static struct diagnose_option diagnose_options[] = { + { DIAGNOSE_STATS, "stats" }, + { DIAGNOSE_ALL, "all" }, +}; + +int option_parse_diagnose(const struct option *opt, const char *arg, int unset) +{ + int i; + enum diagnose_mode *diagnose = opt->value; + + if (!arg) { + *diagnose = unset ? 
DIAGNOSE_NONE : DIAGNOSE_STATS; + return 0; + } + + for (i = 0; i < ARRAY_SIZE(diagnose_options); i++) { + if (!strcmp(arg, diagnose_options[i].option_name)) { + *diagnose = diagnose_options[i].mode; + return 0; + } + } + + return error(_("invalid --%s value '%s'"), opt->long_name, arg); +} + +static void dir_file_stats_objects(const char *full_path, size_t full_path_len, + const char *file_name, void *data) +{ + struct strbuf *buf = data; + struct stat st; + + if (!stat(full_path, &st)) + strbuf_addf(buf, "%-70s %16" PRIuMAX "\n", file_name, + (uintmax_t)st.st_size); +} + +static int dir_file_stats(struct object_directory *object_dir, void *data) +{ + struct strbuf *buf = data; + + strbuf_addf(buf, "Contents of %s:\n", object_dir->path); + + for_each_file_in_pack_dir(object_dir->path, dir_file_stats_objects, + data); + + return 0; +} + +static int count_files(char *path) +{ + DIR *dir = opendir(path); + struct dirent *e; + int count = 0; + + if (!dir) + return 0; + + while ((e = readdir(dir)) != NULL) + if (!is_dot_or_dotdot(e->d_name) && e->d_type == DT_REG) + count++; + + closedir(dir); + return count; +} + +static void loose_objs_stats(struct strbuf *buf, const char *path) +{ + DIR *dir = opendir(path); + struct dirent *e; + int count; + int total = 0; + unsigned char c; + struct strbuf count_path = STRBUF_INIT; + size_t base_path_len; + + if (!dir) + return; + + strbuf_addstr(buf, "Object directory stats for "); + strbuf_add_absolute_path(buf, path); + strbuf_addstr(buf, ":\n"); + + strbuf_add_absolute_path(&count_path, path); + strbuf_addch(&count_path, '/'); + base_path_len = count_path.len; + + while ((e = readdir(dir)) != NULL) + if (!is_dot_or_dotdot(e->d_name) && + e->d_type == DT_DIR && strlen(e->d_name) == 2 && + !hex_to_bytes(&c, e->d_name, 1)) { + strbuf_setlen(&count_path, base_path_len); + strbuf_addstr(&count_path, e->d_name); + total += (count = count_files(count_path.buf)); + strbuf_addf(buf, "%s : %7d files\n", e->d_name, count); + } + + strbuf_addf(buf, "Total: %d loose objects", total); + + strbuf_release(&count_path); + closedir(dir); +} + +static int add_directory_to_archiver(struct strvec *archiver_args, + const char *path, int recurse) +{ + int at_root = !*path; + DIR *dir; + struct dirent *e; + struct strbuf buf = STRBUF_INIT; + size_t len; + int res = 0; + + dir = opendir(at_root ? "." 
: path); + if (!dir) { + if (errno == ENOENT) { + warning(_("could not archive missing directory '%s'"), path); + return 0; + } + return error_errno(_("could not open directory '%s'"), path); + } + + if (!at_root) + strbuf_addf(&buf, "%s/", path); + len = buf.len; + strvec_pushf(archiver_args, "--prefix=%s", buf.buf); + + while (!res && (e = readdir(dir))) { + if (!strcmp(".", e->d_name) || !strcmp("..", e->d_name)) + continue; + + strbuf_setlen(&buf, len); + strbuf_addstr(&buf, e->d_name); + + if (e->d_type == DT_REG) + strvec_pushf(archiver_args, "--add-file=%s", buf.buf); + else if (e->d_type != DT_DIR) + warning(_("skipping '%s', which is neither file nor " + "directory"), buf.buf); + else if (recurse && + add_directory_to_archiver(archiver_args, + buf.buf, recurse) < 0) + res = -1; + } + + closedir(dir); + strbuf_release(&buf); + return res; +} + +int create_diagnostics_archive(struct strbuf *zip_path, enum diagnose_mode mode) +{ + struct strvec archiver_args = STRVEC_INIT; + char **argv_copy = NULL; + int stdout_fd = -1, archiver_fd = -1; + struct strbuf buf = STRBUF_INIT; + int res, i; + struct archive_dir archive_dirs[] = { + { ".git", 0 }, + { ".git/hooks", 0 }, + { ".git/info", 0 }, + { ".git/logs", 1 }, + { ".git/objects/info", 0 } + }; + + if (mode == DIAGNOSE_NONE) { + res = 0; + goto diagnose_cleanup; + } + + stdout_fd = dup(STDOUT_FILENO); + if (stdout_fd < 0) { + res = error_errno(_("could not duplicate stdout")); + goto diagnose_cleanup; + } + + archiver_fd = xopen(zip_path->buf, O_CREAT | O_WRONLY | O_TRUNC, 0666); + if (dup2(archiver_fd, STDOUT_FILENO) < 0) { + res = error_errno(_("could not redirect output")); + goto diagnose_cleanup; + } + + init_zip_archiver(); + strvec_pushl(&archiver_args, "git-diagnose", "--format=zip", NULL); + + strbuf_reset(&buf); + strbuf_addstr(&buf, "Collecting diagnostic info\n\n"); + get_version_info(&buf, 1); + + strbuf_addf(&buf, "Repository root: %s\n", the_repository->worktree); + get_disk_info(&buf); + write_or_die(stdout_fd, buf.buf, buf.len); + strvec_pushf(&archiver_args, + "--add-virtual-file=diagnostics.log:%.*s", + (int)buf.len, buf.buf); + + strbuf_reset(&buf); + strbuf_addstr(&buf, "--add-virtual-file=packs-local.txt:"); + dir_file_stats(the_repository->objects->odb, &buf); + foreach_alt_odb(dir_file_stats, &buf); + strvec_push(&archiver_args, buf.buf); + + strbuf_reset(&buf); + strbuf_addstr(&buf, "--add-virtual-file=objects-local.txt:"); + loose_objs_stats(&buf, ".git/objects"); + strvec_push(&archiver_args, buf.buf); + + /* Only include this if explicitly requested */ + if (mode == DIAGNOSE_ALL) { + for (i = 0; i < ARRAY_SIZE(archive_dirs); i++) { + if (add_directory_to_archiver(&archiver_args, + archive_dirs[i].path, + archive_dirs[i].recursive)) { + res = error_errno(_("could not add directory '%s' to archiver"), + archive_dirs[i].path); + goto diagnose_cleanup; + } + } + } + + strvec_pushl(&archiver_args, "--prefix=", + oid_to_hex(the_hash_algo->empty_tree), "--", NULL); + + /* `write_archive()` modifies the `argv` passed to it. Let it. 
*/ + argv_copy = xmemdupz(archiver_args.v, + sizeof(char *) * archiver_args.nr); + res = write_archive(archiver_args.nr, (const char **)argv_copy, NULL, + the_repository, NULL, 0); + if (res) { + error(_("failed to write archive")); + goto diagnose_cleanup; + } + + fprintf(stderr, "\n" + "Diagnostics complete.\n" + "All of the gathered info is captured in '%s'\n", + zip_path->buf); + +diagnose_cleanup: + if (archiver_fd >= 0) { + dup2(stdout_fd, STDOUT_FILENO); + close(stdout_fd); + close(archiver_fd); + } + free(argv_copy); + strvec_clear(&archiver_args); + strbuf_release(&buf); + + return res; +} diff --git a/diagnose.h b/diagnose.h new file mode 100644 index 0000000000..7a4951a786 --- /dev/null +++ b/diagnose.h @@ -0,0 +1,17 @@ +#ifndef DIAGNOSE_H +#define DIAGNOSE_H + +#include "strbuf.h" +#include "parse-options.h" + +enum diagnose_mode { + DIAGNOSE_NONE, + DIAGNOSE_STATS, + DIAGNOSE_ALL +}; + +int option_parse_diagnose(const struct option *opt, const char *arg, int unset); + +int create_diagnostics_archive(struct strbuf *zip_path, enum diagnose_mode mode); + +#endif /* DIAGNOSE_H */ @@ -1289,7 +1289,6 @@ static void emit_diff_symbol_from_struct(struct diff_options *o, { static const char *nneof = " No newline at end of file\n"; const char *context, *reset, *set, *set_sign, *meta, *fraginfo; - struct strbuf sb = STRBUF_INIT; enum diff_symbol s = eds->s; const char *line = eds->line; @@ -1521,7 +1520,6 @@ static void emit_diff_symbol_from_struct(struct diff_options *o, default: BUG("unknown diff symbol"); } - strbuf_release(&sb); } static void emit_diff_symbol(struct diff_options *o, enum diff_symbol s, @@ -3362,23 +3360,23 @@ struct userdiff_driver *get_textconv(struct repository *r, return userdiff_get_textconv(r, one->driver); } -static struct strbuf *additional_headers(struct diff_options *o, - const char *path) +static struct string_list *additional_headers(struct diff_options *o, + const char *path) { if (!o->additional_path_headers) return NULL; return strmap_get(o->additional_path_headers, path); } -static void add_formatted_headers(struct strbuf *msg, - struct strbuf *more_headers, +static void add_formatted_header(struct strbuf *msg, + const char *header, const char *line_prefix, const char *meta, const char *reset) { - char *next, *newline; + const char *next, *newline; - for (next = more_headers->buf; *next; next = newline) { + for (next = header; *next; next = newline) { newline = strchrnul(next, '\n'); strbuf_addf(msg, "%s%s%.*s%s\n", line_prefix, meta, (int)(newline - next), next, reset); @@ -3387,6 +3385,19 @@ static void add_formatted_headers(struct strbuf *msg, } } +static void add_formatted_headers(struct strbuf *msg, + struct string_list *more_headers, + const char *line_prefix, + const char *meta, + const char *reset) +{ + int i; + + for (i = 0; i < more_headers->nr; i++) + add_formatted_header(msg, more_headers->items[i].string, + line_prefix, meta, reset); +} + static void builtin_diff(const char *name_a, const char *name_b, struct diff_filespec *one, @@ -4314,7 +4325,7 @@ static void fill_metainfo(struct strbuf *msg, const char *set = diff_get_color(use_color, DIFF_METAINFO); const char *reset = diff_get_color(use_color, DIFF_RESET); const char *line_prefix = diff_line_prefix(o); - struct strbuf *more_headers = NULL; + struct string_list *more_headers = NULL; *must_show_header = 1; strbuf_init(msg, PATH_MAX * 2 + 300); @@ -1244,8 +1244,7 @@ int match_basename(const char *basename, int basenamelen, int match_pathname(const char *pathname, int pathlen, const char 
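
The diagnose.h interface introduced above (option_parse_diagnose() with its "stats" and "all" modes, and create_diagnostics_archive()) backs the new `git diagnose` builtin registered in the command table later in this diff, and is also what `scalar diagnose` now delegates to. A minimal usage sketch, not taken from this patch; the output directory is illustrative and the arguments simply mirror the ones scalar passes above:

    # Sketch: write a diagnostics zip for the current repository.
    # --mode=stats collects only the summary statistics produced by
    # create_diagnostics_archive(); --mode=all additionally archives
    # the .git metadata directories listed in archive_dirs[].
    git diagnose --mode=stats -o /tmp/git-diagnostics
    git diagnose --mode=all -s %Y%m%d_%H%M%S -o /tmp/git-diagnostics
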
*base, int baselen, - const char *pattern, int prefix, int patternlen, - unsigned flags) + const char *pattern, int prefix, int patternlen) { const char *name; int namelen; @@ -1347,8 +1346,7 @@ static struct path_pattern *last_matching_pattern_from_list(const char *pathname if (match_pathname(pathname, pathlen, pattern->base, pattern->baselen ? pattern->baselen - 1 : 0, - exclude, prefix, pattern->patternlen, - pattern->flags)) { + exclude, prefix, pattern->patternlen)) { res = pattern; break; } @@ -414,7 +414,7 @@ int match_basename(const char *, int, const char *, int, int, unsigned); int match_pathname(const char *, int, const char *, int, - const char *, int, int, unsigned); + const char *, int, int); struct path_pattern *last_matching_pattern(struct dir_struct *dir, struct index_state *istate, diff --git a/environment.c b/environment.c index b3296ce7d1..b2004437dc 100644 --- a/environment.c +++ b/environment.c @@ -56,7 +56,6 @@ const char *askpass_program; const char *excludes_file; enum auto_crlf auto_crlf = AUTO_CRLF_FALSE; int read_replace_refs = 1; -char *git_replace_ref_base; enum eol core_eol = EOL_UNSET; int global_conv_flags_eol = CONV_EOL_RNDTRP_WARN; char *check_roundtrip_encoding = "SHIFT-JIS"; @@ -162,6 +161,7 @@ const char *getenv_safe(struct strvec *argv, const char *name) void setup_git_env(const char *git_dir) { + char *git_replace_ref_base; const char *shallow_file; const char *replace_ref_base; struct set_gitdir_args args = { NULL }; @@ -182,9 +182,10 @@ void setup_git_env(const char *git_dir) if (getenv(NO_REPLACE_OBJECTS_ENVIRONMENT)) read_replace_refs = 0; replace_ref_base = getenv(GIT_REPLACE_REF_BASE_ENVIRONMENT); - free(git_replace_ref_base); git_replace_ref_base = xstrdup(replace_ref_base ? replace_ref_base : "refs/replace/"); + update_ref_namespace(NAMESPACE_REPLACE, git_replace_ref_base); + free(git_namespace); git_namespace = expand_namespace(getenv(GIT_NAMESPACE_ENVIRONMENT)); shallow_file = getenv(GIT_SHALLOW_FILE_ENVIRONMENT); diff --git a/fetch-pack.c b/fetch-pack.c index cb6647d657..a1a508ee72 100644 --- a/fetch-pack.c +++ b/fetch-pack.c @@ -26,6 +26,7 @@ #include "commit-reach.h" #include "commit-graph.h" #include "sigchain.h" +#include "mergesort.h" static int transfer_unpack_limit = -1; static int fetch_unpack_limit = -1; @@ -292,6 +293,29 @@ static void mark_tips(struct fetch_negotiator *negotiator, return; } +static void send_filter(struct fetch_pack_args *args, + struct strbuf *req_buf, + int server_supports_filter) +{ + if (args->filter_options.choice) { + const char *spec = + expand_list_objects_filter_spec(&args->filter_options); + if (server_supports_filter) { + print_verbose(args, _("Server supports filter")); + packet_buf_write(req_buf, "filter %s", spec); + trace2_data_string("fetch", the_repository, + "filter/effective", spec); + } else { + warning("filtering not recognized by server, ignoring"); + trace2_data_string("fetch", the_repository, + "filter/unsupported", spec); + } + } else { + trace2_data_string("fetch", the_repository, + "filter/none", ""); + } +} + static int find_common(struct fetch_negotiator *negotiator, struct fetch_pack_args *args, int fd[2], struct object_id *result_oid, @@ -299,6 +323,7 @@ static int find_common(struct fetch_negotiator *negotiator, { int fetching; int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval; + int negotiation_round = 0, haves = 0; const struct object_id *oid; unsigned in_vain = 0; int got_continue = 0; @@ -389,11 +414,7 @@ static int find_common(struct fetch_negotiator *negotiator, 
packet_buf_write(&req_buf, "deepen-not %s", s->string); } } - if (server_supports_filtering && args->filter_options.choice) { - const char *spec = - expand_list_objects_filter_spec(&args->filter_options); - packet_buf_write(&req_buf, "filter %s", spec); - } + send_filter(args, &req_buf, server_supports_filtering); packet_buf_flush(&req_buf); state_len = req_buf.len; @@ -441,9 +462,19 @@ static int find_common(struct fetch_negotiator *negotiator, packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid)); print_verbose(args, "have %s", oid_to_hex(oid)); in_vain++; + haves++; if (flush_at <= ++count) { int ack; + negotiation_round++; + trace2_region_enter_printf("negotiation_v0_v1", "round", + the_repository, "%d", + negotiation_round); + trace2_data_intmax("negotiation_v0_v1", the_repository, + "haves_added", haves); + trace2_data_intmax("negotiation_v0_v1", the_repository, + "in_vain", in_vain); + haves = 0; packet_buf_flush(&req_buf); send_request(args, fd[1], &req_buf); strbuf_setlen(&req_buf, state_len); @@ -465,6 +496,9 @@ static int find_common(struct fetch_negotiator *negotiator, ack, oid_to_hex(result_oid)); switch (ack) { case ACK: + trace2_region_leave_printf("negotiation_v0_v1", "round", + the_repository, "%d", + negotiation_round); flushes = 0; multi_ack = 0; retval = 0; @@ -490,6 +524,7 @@ static int find_common(struct fetch_negotiator *negotiator, const char *hex = oid_to_hex(result_oid); packet_buf_write(&req_buf, "have %s\n", hex); state_len = req_buf.len; + haves++; /* * Reset in_vain because an ack * for this commit has not been @@ -508,6 +543,9 @@ static int find_common(struct fetch_negotiator *negotiator, } } while (ack); flushes--; + trace2_region_leave_printf("negotiation_v0_v1", "round", + the_repository, "%d", + negotiation_round); if (got_continue && MAX_IN_VAIN < in_vain) { print_verbose(args, _("giving up")); break; /* give up */ @@ -518,6 +556,8 @@ static int find_common(struct fetch_negotiator *negotiator, } done: trace2_region_leave("fetch-pack", "negotiation_v0_v1", the_repository); + trace2_data_intmax("negotiation_v0_v1", the_repository, "total_rounds", + negotiation_round); if (!got_ready || !no_done) { packet_buf_write(&req_buf, "done\n"); send_request(args, fd[1], &req_buf); @@ -1025,6 +1065,13 @@ static int get_pack(struct fetch_pack_args *args, return 0; } +static int ref_compare_name(const struct ref *a, const struct ref *b) +{ + return strcmp(a->name, b->name); +} + +DEFINE_LIST_SORT(static, sort_ref_list, struct ref, next); + static int cmp_ref_by_name(const void *a_, const void *b_) { const struct ref *a = *((const struct ref **)a_); @@ -1323,15 +1370,8 @@ static int send_fetch_request(struct fetch_negotiator *negotiator, int fd_out, die(_("Server does not support shallow requests")); /* Add filter */ - if (server_supports_feature("fetch", "filter", 0) && - args->filter_options.choice) { - const char *spec = - expand_list_objects_filter_spec(&args->filter_options); - print_verbose(args, _("Server supports filter")); - packet_buf_write(&req_buf, "filter %s", spec); - } else if (args->filter_options.choice) { - warning("filtering not recognized by server, ignoring"); - } + send_filter(args, &req_buf, + server_supports_feature("fetch", "filter", 0)); if (server_supports_feature("fetch", "packfile-uris", 0)) { int i; @@ -1361,6 +1401,8 @@ static int send_fetch_request(struct fetch_negotiator *negotiator, int fd_out, haves_added = add_haves(negotiator, &req_buf, haves_to_send); *in_vain += haves_added; + trace2_data_intmax("negotiation_v2", the_repository, 
"haves_added", haves_added); + trace2_data_intmax("negotiation_v2", the_repository, "in_vain", *in_vain); if (!haves_added || (seen_ack && *in_vain >= MAX_IN_VAIN)) { /* Send Done */ packet_buf_write(&req_buf, "done\n"); @@ -1603,6 +1645,7 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args, struct oidset common = OIDSET_INIT; struct packet_reader reader; int in_vain = 0, negotiation_started = 0; + int negotiation_round = 0; int haves_to_send = INITIAL_FLUSH; struct fetch_negotiator negotiator_alloc; struct fetch_negotiator *negotiator; @@ -1659,12 +1702,20 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args, "negotiation_v2", the_repository); } + negotiation_round++; + trace2_region_enter_printf("negotiation_v2", "round", + the_repository, "%d", + negotiation_round); if (send_fetch_request(negotiator, fd[1], args, ref, &common, &haves_to_send, &in_vain, reader.use_sideband, - seen_ack)) + seen_ack)) { + trace2_region_leave_printf("negotiation_v2", "round", + the_repository, "%d", + negotiation_round); state = FETCH_GET_PACK; + } else state = FETCH_PROCESS_ACKS; break; @@ -1677,6 +1728,9 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args, seen_ack = 1; oidset_insert(&common, &common_oid); } + trace2_region_leave_printf("negotiation_v2", "round", + the_repository, "%d", + negotiation_round); if (received_ready) { /* * Don't check for response delimiter; get_pack() will @@ -1692,6 +1746,8 @@ static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args, trace2_region_leave("fetch-pack", "negotiation_v2", the_repository); + trace2_data_intmax("negotiation_v2", the_repository, + "total_rounds", negotiation_round); /* Check for shallow-info section */ if (process_section_header(&reader, "shallow-info", 1)) receive_shallow_info(args, &reader, shallows, si); @@ -2071,6 +2127,7 @@ void negotiate_using_fetch(const struct oid_array *negotiation_tips, int in_vain = 0; int seen_ack = 0; int last_iteration = 0; + int negotiation_round = 0; timestamp_t min_generation = GENERATION_NUMBER_INFINITY; fetch_negotiator_init(the_repository, &negotiator); @@ -2084,11 +2141,17 @@ void negotiate_using_fetch(const struct oid_array *negotiation_tips, add_to_object_array, &nt_object_array); + trace2_region_enter("fetch-pack", "negotiate_using_fetch", the_repository); while (!last_iteration) { int haves_added; struct object_id common_oid; int received_ready = 0; + negotiation_round++; + + trace2_region_enter_printf("negotiate_using_fetch", "round", + the_repository, "%d", + negotiation_round); strbuf_reset(&req_buf); write_fetch_command_and_capabilities(&req_buf, server_options); @@ -2099,6 +2162,11 @@ void negotiate_using_fetch(const struct oid_array *negotiation_tips, if (!haves_added || (seen_ack && in_vain >= MAX_IN_VAIN)) last_iteration = 1; + trace2_data_intmax("negotiate_using_fetch", the_repository, + "haves_added", haves_added); + trace2_data_intmax("negotiate_using_fetch", the_repository, + "in_vain", in_vain); + /* Send request */ packet_buf_flush(&req_buf); if (write_in_full(fd[1], req_buf.buf, req_buf.len) < 0) @@ -2131,7 +2199,13 @@ void negotiate_using_fetch(const struct oid_array *negotiation_tips, REACH_SCRATCH, 0, min_generation)) last_iteration = 1; + trace2_region_leave_printf("negotiation", "round", + the_repository, "%d", + negotiation_round); } + trace2_region_enter("fetch-pack", "negotiate_using_fetch", the_repository); + trace2_data_intmax("negotiate_using_fetch", the_repository, + "total_rounds", negotiation_round); 
clear_common_flag(acked_commits); strbuf_release(&req_buf); } diff --git a/fuzz-commit-graph.c b/fuzz-commit-graph.c index e7cf6d5b0f..914026f5d8 100644 --- a/fuzz-commit-graph.c +++ b/fuzz-commit-graph.c @@ -1,7 +1,7 @@ #include "commit-graph.h" #include "repository.h" -struct commit_graph *parse_commit_graph(struct repository *r, +struct commit_graph *parse_commit_graph(struct repo_settings *s, void *graph_map, size_t graph_size); int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size); @@ -11,7 +11,15 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) struct commit_graph *g; initialize_the_repository(); - g = parse_commit_graph(the_repository, (void *)data, size); + /* + * Initialize the_repository with commit-graph settings that would + * normally be read from the repository's gitdir. We want to avoid + * touching the disk to keep the individual fuzz-test cases as fast as + * possible. + */ + the_repository->settings.commit_graph_generation_version = 2; + the_repository->settings.commit_graph_read_changed_paths = 1; + g = parse_commit_graph(&the_repository->settings, (void *)data, size); repo_clear(the_repository); free_commit_graph(g); diff --git a/git-compat-util.h b/git-compat-util.h index 36a25ae252..4e51a1c48b 100644 --- a/git-compat-util.h +++ b/git-compat-util.h @@ -261,6 +261,7 @@ static inline int is_xplatform_dir_sep(int c) #include <sys/resource.h> #include <sys/socket.h> #include <sys/ioctl.h> +#include <sys/statvfs.h> #include <termios.h> #ifndef NO_SYS_SELECT_H #include <sys/select.h> @@ -998,6 +999,28 @@ static inline unsigned long cast_size_t_to_ulong(size_t a) return (unsigned long)a; } +/* + * Limit size of IO chunks, because huge chunks only cause pain. OS X + * 64-bit is buggy, returning EINVAL if len >= INT_MAX; and even in + * the absence of bugs, large chunks can result in bad latencies when + * you decide to kill the process. + * + * We pick 8 MiB as our default, but if the platform defines SSIZE_MAX + * that is smaller than that, clip it to SSIZE_MAX, as a call to + * read(2) or write(2) larger than that is allowed to fail. As the last + * resort, we allow a port to pass via CFLAGS e.g. "-DMAX_IO_SIZE=value" + * to override this, if the definition of SSIZE_MAX given by the platform + * is broken. + */ +#ifndef MAX_IO_SIZE +# define MAX_IO_SIZE_DEFAULT (8*1024*1024) +# if defined(SSIZE_MAX) && (SSIZE_MAX < MAX_IO_SIZE_DEFAULT) +# define MAX_IO_SIZE SSIZE_MAX +# else +# define MAX_IO_SIZE MAX_IO_SIZE_DEFAULT +# endif +#endif + #ifdef HAVE_ALLOCA_H # include <alloca.h> # define xalloca(size) (alloca(size)) diff --git a/git-instaweb.sh b/git-instaweb.sh index 4349566c89..c68f49454c 100755 --- a/git-instaweb.sh +++ b/git-instaweb.sh @@ -102,7 +102,7 @@ resolve_full_httpd () { start_httpd () { if test -f "$fqgitdir/pid"; then - say "Instance already running. Restarting..." + echo "Instance already running. Restarting..." stop_httpd fi diff --git a/git-merge-resolve.sh b/git-merge-resolve.sh index 343fe7bccd..77e93121bf 100755 --- a/git-merge-resolve.sh +++ b/git-merge-resolve.sh @@ -5,6 +5,16 @@ # # Resolve two trees, using enhanced multi-base read-tree. +. git-sh-setup + +# Abort if index does not match HEAD +if ! git diff-index --quiet --cached HEAD -- +then + gettextln "Error: Your local changes to the following files would be overwritten by merge" + git diff-index --cached --name-only HEAD -- | sed -e 's/^/ /' + exit 2 +fi + # The first parameters up to -- are merge bases; the rest are heads. 
bases= head= remotes= sep_seen= for arg @@ -2259,7 +2259,7 @@ class P4Submit(Command, P4UserMap): raw=True): if regexp.search(line): if verbose: - print("got keyword match on %s in %s in %s" % (regex.pattern, line, file)) + print("got keyword match on %s in %s in %s" % (regexp.pattern, line, file)) kwfiles[file] = regexp break @@ -4402,19 +4402,16 @@ class P4Unshelve(Command): def renameBranch(self, branch_name): """Rename the existing branch to branch_name.N .""" - found = True for i in range(0, 1000): backup_branch_name = "{0}.{1}".format(branch_name, i) if not gitBranchExists(backup_branch_name): # Copy ref to backup gitUpdateRef(backup_branch_name, branch_name) gitDeleteRef(branch_name) - found = True print("renamed old unshelve branch to {0}".format(backup_branch_name)) break - - if not found: - sys.exit("gave up trying to rename existing branch {0}".format(sync.branch)) + else: + sys.exit("gave up trying to rename existing branch {0}".format(branch_name)) def findLastP4Revision(self, starting_point): """Look back from starting_point for the first commit created by git-p4 diff --git a/git-sh-setup.sh b/git-sh-setup.sh index d92df37e99..ce273fe0e4 100644 --- a/git-sh-setup.sh +++ b/git-sh-setup.sh @@ -57,15 +57,6 @@ die_with_status () { exit "$status" } -GIT_QUIET= - -say () { - if test -z "$GIT_QUIET" - then - printf '%s\n' "$*" - fi -} - if test -n "$OPTIONS_SPEC"; then usage() { "$0" -h @@ -285,13 +276,6 @@ get_author_ident_from_commit () { parse_ident_from_commit author AUTHOR } -# Clear repo-local GIT_* environment variables. Useful when switching to -# another repository (e.g. when entering a submodule). See also the env -# list in git_connect() -clear_local_git_env() { - unset $(git rev-parse --local-env-vars) -} - # Generate a virtual base file for a two-file merge. Uses git apply to # remove lines from $1 that are not in $2, leaving only common lines. create_virtual_base() { diff --git a/git-submodule.sh b/git-submodule.sh index fd0b4a2c94..5e5d21c010 100755 --- a/git-submodule.sh +++ b/git-submodule.sh @@ -30,6 +30,7 @@ GIT_PROTOCOL_FROM_USER=0 export GIT_PROTOCOL_FROM_USER command= +quiet= branch= force= reference= @@ -40,8 +41,9 @@ require_init= files= remote= nofetch= -update= -prefix= +rebase= +merge= +checkout= custom_name= depth= progress= @@ -56,17 +58,6 @@ isnumber() n=$(($1 + 0)) 2>/dev/null && test "$n" = "$1" } -# Sanitize the local git environment for use within a submodule. We -# can't simply use clear_local_git_env since we want to preserve some -# of the settings from GIT_CONFIG_PARAMETERS. 
-sanitize_submodule_env() -{ - save_config=$GIT_CONFIG_PARAMETERS - clear_local_git_env - GIT_CONFIG_PARAMETERS=$save_config - export GIT_CONFIG_PARAMETERS -} - # # Add a new submodule to the working tree, .gitmodules and the index # @@ -90,7 +81,7 @@ cmd_add() force=$1 ;; -q|--quiet) - GIT_QUIET=1 + quiet=1 ;; --progress) progress=1 @@ -138,7 +129,7 @@ cmd_add() usage fi - git ${wt_prefix:+-C "$wt_prefix"} ${prefix:+--super-prefix "$prefix"} submodule--helper add ${GIT_QUIET:+--quiet} ${force:+--force} ${progress:+"--progress"} ${branch:+--branch "$branch"} ${reference_path:+--reference "$reference_path"} ${dissociate:+--dissociate} ${custom_name:+--name "$custom_name"} ${depth:+"$depth"} -- "$@" + git ${wt_prefix:+-C "$wt_prefix"} submodule--helper add ${quiet:+--quiet} ${force:+--force} ${progress:+"--progress"} ${branch:+--branch "$branch"} ${reference_path:+--reference "$reference_path"} ${dissociate:+--dissociate} ${custom_name:+--name "$custom_name"} ${depth:+"$depth"} -- "$@" } # @@ -154,7 +145,7 @@ cmd_foreach() do case "$1" in -q|--quiet) - GIT_QUIET=1 + quiet=1 ;; --recursive) recursive=1 @@ -169,7 +160,7 @@ cmd_foreach() shift done - git ${wt_prefix:+-C "$wt_prefix"} submodule--helper foreach ${GIT_QUIET:+--quiet} ${recursive:+--recursive} -- "$@" + git ${wt_prefix:+-C "$wt_prefix"} submodule--helper foreach ${quiet:+--quiet} ${recursive:+--recursive} -- "$@" } # @@ -184,7 +175,7 @@ cmd_init() do case "$1" in -q|--quiet) - GIT_QUIET=1 + quiet=1 ;; --) shift @@ -200,7 +191,7 @@ cmd_init() shift done - git ${wt_prefix:+-C "$wt_prefix"} ${prefix:+--super-prefix "$prefix"} submodule--helper init ${GIT_QUIET:+--quiet} -- "$@" + git ${wt_prefix:+-C "$wt_prefix"} submodule--helper init ${quiet:+--quiet} -- "$@" } # @@ -217,7 +208,7 @@ cmd_deinit() force=$1 ;; -q|--quiet) - GIT_QUIET=1 + quiet=1 ;; --all) deinit_all=t @@ -236,7 +227,7 @@ cmd_deinit() shift done - git ${wt_prefix:+-C "$wt_prefix"} submodule--helper deinit ${GIT_QUIET:+--quiet} ${force:+--force} ${deinit_all:+--all} -- "$@" + git ${wt_prefix:+-C "$wt_prefix"} submodule--helper deinit ${quiet:+--quiet} ${force:+--force} ${deinit_all:+--all} -- "$@" } # @@ -251,10 +242,7 @@ cmd_update() do case "$1" in -q|--quiet) - GIT_QUIET=1 - ;; - -v) - unset GIT_QUIET + quiet=1 ;; --progress) progress=1 @@ -263,7 +251,6 @@ cmd_update() init=1 ;; --require-init) - init=1 require_init=1 ;; --remote) @@ -276,7 +263,7 @@ cmd_update() force=$1 ;; -r|--rebase) - update="rebase" + rebase=1 ;; --reference) case "$2" in '') usage ;; esac @@ -290,13 +277,13 @@ cmd_update() dissociate=1 ;; -m|--merge) - update="merge" + merge=1 ;; --recursive) recursive=1 ;; --checkout) - update="checkout" + checkout=1 ;; --recommend-shallow) recommend_shallow="--recommend-shallow" @@ -349,7 +336,7 @@ cmd_update() done git ${wt_prefix:+-C "$wt_prefix"} submodule--helper update \ - ${GIT_QUIET:+--quiet} \ + ${quiet:+--quiet} \ ${force:+--force} \ ${progress:+"--progress"} \ ${remote:+--remote} \ @@ -357,8 +344,9 @@ cmd_update() ${init:+--init} \ ${nofetch:+--no-fetch} \ ${wt_prefix:+--prefix "$wt_prefix"} \ - ${prefix:+--recursive-prefix "$prefix"} \ - ${update:+--update "$update"} \ + ${rebase:+--rebase} \ + ${merge:+--merge} \ + ${checkout:+--checkout} \ ${reference:+"$reference"} \ ${dissociate:+"--dissociate"} \ ${depth:+"$depth"} \ @@ -409,7 +397,7 @@ cmd_set_branch() { shift done - git ${wt_prefix:+-C "$wt_prefix"} submodule--helper set-branch ${GIT_QUIET:+--quiet} ${branch:+--branch "$branch"} ${default:+--default} -- "$@" + git ${wt_prefix:+-C 
"$wt_prefix"} submodule--helper set-branch ${quiet:+--quiet} ${branch:+--branch "$branch"} ${default:+--default} -- "$@" } # @@ -422,7 +410,7 @@ cmd_set_url() { do case "$1" in -q|--quiet) - GIT_QUIET=1 + quiet=1 ;; --) shift @@ -438,7 +426,7 @@ cmd_set_url() { shift done - git ${wt_prefix:+-C "$wt_prefix"} submodule--helper set-url ${GIT_QUIET:+--quiet} -- "$@" + git ${wt_prefix:+-C "$wt_prefix"} submodule--helper set-url ${quiet:+--quiet} -- "$@" } # @@ -459,7 +447,7 @@ cmd_summary() { do case "$1" in --cached) - cached="$1" + cached=1 ;; --files) files="$1" @@ -509,7 +497,7 @@ cmd_status() do case "$1" in -q|--quiet) - GIT_QUIET=1 + quiet=1 ;; --cached) cached=1 @@ -531,7 +519,7 @@ cmd_status() shift done - git ${wt_prefix:+-C "$wt_prefix"} submodule--helper status ${GIT_QUIET:+--quiet} ${cached:+--cached} ${recursive:+--recursive} -- "$@" + git ${wt_prefix:+-C "$wt_prefix"} submodule--helper status ${quiet:+--quiet} ${cached:+--cached} ${recursive:+--recursive} -- "$@" } # # Sync remote urls for submodules @@ -544,7 +532,7 @@ cmd_sync() do case "$1" in -q|--quiet) - GIT_QUIET=1 + quiet=1 shift ;; --recursive) @@ -564,12 +552,12 @@ cmd_sync() esac done - git ${wt_prefix:+-C "$wt_prefix"} submodule--helper sync ${GIT_QUIET:+--quiet} ${recursive:+--recursive} -- "$@" + git ${wt_prefix:+-C "$wt_prefix"} submodule--helper sync ${quiet:+--quiet} ${recursive:+--recursive} -- "$@" } cmd_absorbgitdirs() { - git submodule--helper absorb-git-dirs --prefix "$wt_prefix" "$@" + git submodule--helper absorbgitdirs --prefix "$wt_prefix" "$@" } # This loop parses the command line arguments to find the @@ -585,18 +573,10 @@ do command=$1 ;; -q|--quiet) - GIT_QUIET=1 - ;; - -b|--branch) - case "$2" in - '') - usage - ;; - esac - branch="$2"; shift + quiet=1 ;; --cached) - cached="$1" + cached=1 ;; --) break @@ -622,12 +602,6 @@ then fi fi -# "-b branch" is accepted only by "add" and "set-branch" -if test -n "$branch" && (test "$command" != add || test "$command" != set-branch) -then - usage -fi - # "--cached" is accepted only by "status" and "summary" if test -n "$cached" && test "$command" != status && test "$command" != summary then @@ -522,6 +522,7 @@ static struct cmd_struct commands[] = { { "credential-cache--daemon", cmd_credential_cache_daemon }, { "credential-store", cmd_credential_store }, { "describe", cmd_describe, RUN_SETUP }, + { "diagnose", cmd_diagnose, RUN_SETUP_GENTLY }, { "diff", cmd_diff, NO_PARSEOPT }, { "diff-files", cmd_diff_files, RUN_SETUP | NEED_WORK_TREE | NO_PARSEOPT }, { "diff-index", cmd_diff_index, RUN_SETUP | NO_PARSEOPT }, @@ -565,7 +566,7 @@ static struct cmd_struct commands[] = { { "merge-recursive-ours", cmd_merge_recursive, RUN_SETUP | NEED_WORK_TREE | NO_PARSEOPT }, { "merge-recursive-theirs", cmd_merge_recursive, RUN_SETUP | NEED_WORK_TREE | NO_PARSEOPT }, { "merge-subtree", cmd_merge_recursive, RUN_SETUP | NEED_WORK_TREE | NO_PARSEOPT }, - { "merge-tree", cmd_merge_tree, RUN_SETUP | NO_PARSEOPT }, + { "merge-tree", cmd_merge_tree, RUN_SETUP }, { "mktag", cmd_mktag, RUN_SETUP | NO_PARSEOPT }, { "mktree", cmd_mktree, RUN_SETUP }, { "multi-pack-index", cmd_multi_pack_index, RUN_SETUP }, diff --git a/gitweb/Makefile b/gitweb/Makefile index f13e23c4de..3b68ab2d67 100644 --- a/gitweb/Makefile +++ b/gitweb/Makefile @@ -1,8 +1,8 @@ -# The default target of this Makefile is... -all:: +ifndef MAK_DIR_GITWEB +$(error do not run gitweb/Makefile stand-alone anymore. 
The "gitweb" and \ +"install-gitweb" targets now live in the top-level Makefile) +endif -# Define V=1 to have a more verbose compile. -# # Define JSMIN to point to JavaScript minifier that functions as # a filter to have static/gitweb.js minified. # @@ -10,13 +10,6 @@ all:: # version of static/gitweb.css # -prefix ?= $(HOME) -bindir ?= $(prefix)/bin -gitwebdir ?= /var/www/cgi-bin - -RM ?= rm -f -INSTALL ?= install - # default configuration for gitweb GITWEB_CONFIG = gitweb_config.perl GITWEB_CONFIG_SYSTEM = /etc/gitweb.conf @@ -30,89 +23,45 @@ GITWEB_STRICT_EXPORT = GITWEB_BASE_URL = GITWEB_LIST = GITWEB_HOMETEXT = indextext.html -GITWEB_CSS = static/gitweb.css +GITWEB_CSS_IN = static/gitweb.css +GITWEB_CSS = $(GITWEB_CSS_IN) GITWEB_LOGO = static/git-logo.png GITWEB_FAVICON = static/git-favicon.png -GITWEB_JS = static/gitweb.js +GITWEB_JS_IN = static/gitweb.js +GITWEB_JS = $(GITWEB_JS_IN) GITWEB_SITE_HTML_HEAD_STRING = GITWEB_SITE_HEADER = GITWEB_SITE_FOOTER = HIGHLIGHT_BIN = highlight -# include user config --include ../config.mak.autogen --include ../config.mak --include config.mak - -# determine version -../GIT-VERSION-FILE: .FORCE-GIT-VERSION-FILE - $(QUIET_SUBDIR0)../ $(QUIET_SUBDIR1) GIT-VERSION-FILE - -ifneq ($(MAKECMDGOALS),clean) --include ../GIT-VERSION-FILE -endif - -### Build rules - -SHELL_PATH ?= $(SHELL) -PERL_PATH ?= /usr/bin/perl +# What targets we'll add to 'all' for "make gitweb" +GITWEB_ALL = +GITWEB_ALL += gitweb.cgi +GITWEB_ALL += $(GITWEB_JS) -# Shell quote; -bindir_SQ = $(subst ','\'',$(bindir))#' -gitwebdir_SQ = $(subst ','\'',$(gitwebdir))#' -gitwebstaticdir_SQ = $(subst ','\'',$(gitwebdir)/static)#' -SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH))#' -PERL_PATH_SQ = $(subst ','\'',$(PERL_PATH))#' -DESTDIR_SQ = $(subst ','\'',$(DESTDIR))#' - -# Quiet generation (unless V=1) -QUIET_SUBDIR0 = +$(MAKE) -C # space to separate -C and subdir -QUIET_SUBDIR1 = - -ifneq ($(findstring $(MAKEFLAGS),w),w) -PRINT_DIR = --no-print-directory -else # "make -w" -NO_SUBDIR = : -endif - -ifneq ($(findstring $(MAKEFLAGS),s),s) -ifndef V - QUIET = @ - QUIET_GEN = $(QUIET)echo ' ' GEN $@; - QUIET_SUBDIR0 = +@subdir= - QUIET_SUBDIR1 = ;$(NO_SUBDIR) echo ' ' SUBDIR $$subdir; \ - $(MAKE) $(PRINT_DIR) -C $$subdir - export V - export QUIET - export QUIET_GEN - export QUIET_SUBDIR0 - export QUIET_SUBDIR1 -endif -endif - -all:: gitweb.cgi static/gitweb.js +MAK_DIR_GITWEB_ALL = $(addprefix $(MAK_DIR_GITWEB),$(GITWEB_ALL)) GITWEB_PROGRAMS = gitweb.cgi +GITWEB_JS_MIN = static/gitweb.min.js ifdef JSMIN -GITWEB_FILES += static/gitweb.min.js -GITWEB_JS = static/gitweb.min.js -all:: static/gitweb.min.js -static/gitweb.min.js: static/gitweb.js GITWEB-BUILD-OPTIONS +GITWEB_JS = $(GITWEB_JS_MIN) +GITWEB_ALL += $(MAK_DIR_GITWEB)$(GITWEB_JS_MIN) +$(MAK_DIR_GITWEB)$(GITWEB_JS_MIN): $(MAK_DIR_GITWEB)GITWEB-BUILD-OPTIONS +$(MAK_DIR_GITWEB)$(GITWEB_JS_MIN): $(MAK_DIR_GITWEB)$(GITWEB_JS_IN) $(QUIET_GEN)$(JSMIN) <$< >$@ -else -GITWEB_FILES += static/gitweb.js endif +GITWEB_FILES += $(GITWEB_JS) +GITWEB_CSS_MIN = static/gitweb.min.css ifdef CSSMIN -GITWEB_FILES += static/gitweb.min.css -GITWEB_CSS = static/gitweb.min.css -all:: static/gitweb.min.css -static/gitweb.min.css: static/gitweb.css GITWEB-BUILD-OPTIONS +GITWEB_CSS = $(GITWEB_CSS_MIN) +GITWEB_ALL += $(MAK_DIR_GITWEB)$(GITWEB_CSS_MIN) +$(MAK_DIR_GITWEB)$(GITWEB_CSS_MIN): $(MAK_DIR_GITWEB)GITWEB-BUILD-OPTIONS +$(MAK_DIR_GITWEB)$(GITWEB_CSS_MIN): $(MAK_DIR_GITWEB)$(GITWEB_CSS_IN) $(QUIET_GEN)$(CSSMIN) <$< >$@ -else -GITWEB_FILES += static/gitweb.css endif 
+GITWEB_FILES += $(GITWEB_CSS) GITWEB_FILES += static/git-logo.png static/git-favicon.png @@ -120,6 +69,7 @@ GITWEB_FILES += static/git-logo.png static/git-favicon.png # # js/lib/common-lib.js should be always first, then js/lib/*.js, # then the rest of files; js/gitweb.js should be last (if it exists) +GITWEB_JSLIB_FILES = GITWEB_JSLIB_FILES += static/js/lib/common-lib.js GITWEB_JSLIB_FILES += static/js/lib/datetime.js GITWEB_JSLIB_FILES += static/js/lib/cookies.js @@ -152,46 +102,45 @@ GITWEB_REPLACE = \ -e 's|++GITWEB_SITE_FOOTER++|$(GITWEB_SITE_FOOTER)|g' \ -e 's|++HIGHLIGHT_BIN++|$(HIGHLIGHT_BIN)|g' -GITWEB-BUILD-OPTIONS: FORCE +.PHONY: FORCE +$(MAK_DIR_GITWEB)GITWEB-BUILD-OPTIONS: FORCE @rm -f $@+ @echo "x" '$(PERL_PATH_SQ)' $(GITWEB_REPLACE) "$(JSMIN)|$(CSSMIN)" >$@+ @cmp -s $@+ $@ && rm -f $@+ || mv -f $@+ $@ -gitweb.cgi: gitweb.perl GITWEB-BUILD-OPTIONS +$(MAK_DIR_GITWEB)gitweb.cgi: $(MAK_DIR_GITWEB)GITWEB-BUILD-OPTIONS +$(MAK_DIR_GITWEB)gitweb.cgi: $(MAK_DIR_GITWEB)gitweb.perl $(QUIET_GEN)$(RM) $@ $@+ && \ sed -e '1s|#!.*perl|#!$(PERL_PATH_SQ)|' \ $(GITWEB_REPLACE) $< >$@+ && \ chmod +x $@+ && \ mv $@+ $@ -static/gitweb.js: $(GITWEB_JSLIB_FILES) +$(MAK_DIR_GITWEB)static/gitweb.js: $(addprefix $(MAK_DIR_GITWEB),$(GITWEB_JSLIB_FILES)) $(QUIET_GEN)$(RM) $@ $@+ && \ cat $^ >$@+ && \ mv $@+ $@ -### Testing rules - -test: - $(MAKE) -C ../t gitweb-test - -test-installed: - GITWEB_TEST_INSTALLED='$(DESTDIR_SQ)$(gitwebdir_SQ)' \ - $(MAKE) -C ../t gitweb-test - ### Installation rules -install: all +.PHONY: install-gitweb +install-gitweb: $(MAK_DIR_GITWEB_ALL) $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(gitwebdir_SQ)' - $(INSTALL) -m 755 $(GITWEB_PROGRAMS) '$(DESTDIR_SQ)$(gitwebdir_SQ)' + $(INSTALL) -m 755 $(addprefix $(MAK_DIR_GITWEB),$(GITWEB_PROGRAMS)) '$(DESTDIR_SQ)$(gitwebdir_SQ)' $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(gitwebstaticdir_SQ)' - $(INSTALL) -m 644 $(GITWEB_FILES) '$(DESTDIR_SQ)$(gitwebstaticdir_SQ)' + $(INSTALL) -m 644 $(addprefix $(MAK_DIR_GITWEB),$(GITWEB_FILES)) \ + '$(DESTDIR_SQ)$(gitwebstaticdir_SQ)' +ifndef NO_GITWEB +ifndef NO_PERL +install: install-gitweb +endif +endif ### Cleaning rules -clean: - $(RM) gitweb.cgi static/gitweb.js \ - static/gitweb.min.js static/gitweb.min.css \ - GITWEB-BUILD-OPTIONS - -.PHONY: all clean install test test-installed .FORCE-GIT-VERSION-FILE FORCE - +.PHONY: gitweb-clean +gitweb-clean: + $(RM) $(addprefix $(MAK_DIR_GITWEB),gitweb.cgi $(GITWEB_JS_IN) \ + $(GITWEB_JS_MIN) $(GITWEB_CSS_MIN) \ + GITWEB-BUILD-OPTIONS) +clean: gitweb-clean diff --git a/gitweb/gitweb.perl b/gitweb/gitweb.perl index 1835487ab2..e66eb3d9ba 100755 --- a/gitweb/gitweb.perl +++ b/gitweb/gitweb.perl @@ -3560,23 +3560,6 @@ sub parse_commit_text { $title =~ s/^ //; if ($title ne "") { $co{'title'} = chop_str($title, 80, 5); - # remove leading stuff of merges to make the interesting part visible - if (length($title) > 50) { - $title =~ s/^Automatic //; - $title =~ s/^merge (of|with) /Merge ... 
/i; - if (length($title) > 50) { - $title =~ s/(http|rsync):\/\///; - } - if (length($title) > 50) { - $title =~ s/(master|www|rsync)\.//; - } - if (length($title) > 50) { - $title =~ s/kernel.org:?//; - } - if (length($title) > 50) { - $title =~ s/\/pub\/scm//; - } - } $co{'title_short'} = chop_str($title, 50, 5); last; } diff --git a/gpg-interface.c b/gpg-interface.c index 947b58ad4d..6dff241460 100644 --- a/gpg-interface.c +++ b/gpg-interface.c @@ -165,15 +165,17 @@ static struct { { 0, "TRUST_", GPG_STATUS_TRUST_LEVEL }, }; -static struct { +/* Keep the order same as enum signature_trust_level */ +static struct sigcheck_gpg_trust_level { const char *key; + const char *display_key; enum signature_trust_level value; } sigcheck_gpg_trust_level[] = { - { "UNDEFINED", TRUST_UNDEFINED }, - { "NEVER", TRUST_NEVER }, - { "MARGINAL", TRUST_MARGINAL }, - { "FULLY", TRUST_FULLY }, - { "ULTIMATE", TRUST_ULTIMATE }, + { "UNDEFINED", "undefined", TRUST_UNDEFINED }, + { "NEVER", "never", TRUST_NEVER }, + { "MARGINAL", "marginal", TRUST_MARGINAL }, + { "FULLY", "fully", TRUST_FULLY }, + { "ULTIMATE", "ultimate", TRUST_ULTIMATE }, }; static void replace_cstring(char **field, const char *line, const char *next) @@ -905,6 +907,20 @@ const char *get_signing_key(void) return git_committer_info(IDENT_STRICT | IDENT_NO_DATE); } +const char *gpg_trust_level_to_str(enum signature_trust_level level) +{ + struct sigcheck_gpg_trust_level *trust; + + if (level < 0 || level >= ARRAY_SIZE(sigcheck_gpg_trust_level)) + BUG("invalid trust level requested %d", level); + + trust = &sigcheck_gpg_trust_level[level]; + if (trust->value != level) + BUG("sigcheck_gpg_trust_level[] unsorted"); + + return sigcheck_gpg_trust_level[level].display_key; +} + int sign_buffer(struct strbuf *buffer, struct strbuf *signature, const char *signing_key) { return use_format->sign_buffer(buffer, signature, signing_key); diff --git a/gpg-interface.h b/gpg-interface.h index b30cbdcd3d..8a9ef41779 100644 --- a/gpg-interface.h +++ b/gpg-interface.h @@ -71,6 +71,14 @@ size_t parse_signed_buffer(const char *buf, size_t size); int sign_buffer(struct strbuf *buffer, struct strbuf *signature, const char *signing_key); + +/* + * Returns corresponding string in lowercase for a given member of + * enum signature_trust_level. For example, `TRUST_ULTIMATE` will + * return "ultimate". 
+ */ +const char *gpg_trust_level_to_str(enum signature_trust_level level); + int git_gpg_config(const char *, const char *, void *); void set_signing_key(const char *); const char *get_signing_key(void); @@ -1615,7 +1615,7 @@ static int grep_source_1(struct grep_opt *opt, struct grep_source *gs, int colle return 0; goto next_line; } - if (hit) { + if (hit && (opt->max_count < 0 || count < opt->max_count)) { count++; if (opt->status_only) return 1; @@ -171,6 +171,7 @@ struct grep_opt { int show_hunk_mark; int file_break; int heading; + int max_count; void *priv; void (*output)(struct grep_opt *opt, const void *data, size_t size); @@ -181,6 +182,7 @@ struct grep_opt { .relative = 1, \ .pathname = 1, \ .max_depth = -1, \ + .max_count = -1, \ .pattern_type_option = GREP_PATTERN_TYPE_UNSPECIFIED, \ .colors = { \ [GREP_COLOR_CONTEXT] = "", \ @@ -16,7 +16,9 @@ #include "block-sha1/sha1.h" #endif -#if defined(SHA256_GCRYPT) +#if defined(SHA256_NETTLE) +#include "sha256/nettle.h" +#elif defined(SHA256_GCRYPT) #define SHA256_NEEDS_CLONE_HELPER #include "sha256/gcrypt.h" #elif defined(SHA256_OPENSSL) @@ -38,19 +38,30 @@ static struct category_description main_categories[] = { { CAT_plumbinginterrogators, N_("Low-level Commands / Interrogators") }, { CAT_synchingrepositories, N_("Low-level Commands / Syncing Repositories") }, { CAT_purehelpers, N_("Low-level Commands / Internal Helpers") }, + { CAT_userinterfaces, N_("User-facing repository, command and file interfaces") }, + { CAT_developerinterfaces, N_("Developer-facing file file formats, protocols and interfaces") }, { 0, NULL } }; static const char *drop_prefix(const char *name, uint32_t category) { const char *new_name; - - if (skip_prefix(name, "git-", &new_name)) - return new_name; - if (category == CAT_guide && skip_prefix(name, "git", &new_name)) + const char *prefix; + + switch (category) { + case CAT_guide: + case CAT_userinterfaces: + case CAT_developerinterfaces: + prefix = "git"; + break; + default: + prefix = "git-"; + break; + } + if (skip_prefix(name, prefix, &new_name)) return new_name; - return name; + return name; } static void extract_cmds(struct cmdname_help **p_cmds, uint32_t mask) @@ -426,6 +437,26 @@ void list_guides_help(void) putchar('\n'); } +void list_user_interfaces_help(void) +{ + struct category_description catdesc[] = { + { CAT_userinterfaces, N_("User-facing repository, command and file interfaces:") }, + { 0, NULL } + }; + print_cmd_by_category(catdesc, NULL); + putchar('\n'); +} + +void list_developer_interfaces_help(void) +{ + struct category_description catdesc[] = { + { CAT_developerinterfaces, N_("File formats, protocols and other developer interfaces:") }, + { 0, NULL } + }; + print_cmd_by_category(catdesc, NULL); + putchar('\n'); +} + static int get_alias(const char *var, const char *value, void *data) { struct string_list *list = data; @@ -22,6 +22,8 @@ static inline void mput_char(char c, unsigned int num) void list_common_cmds_help(void); void list_all_cmds_help(int show_external_commands, int show_aliases); void list_guides_help(void); +void list_user_interfaces_help(void); +void list_developer_interfaces_help(void); void list_all_main_cmds(struct string_list *list); void list_all_other_cmds(struct string_list *list); @@ -1775,7 +1775,7 @@ static void write_accept_language(struct strbuf *buf) * LANGUAGE= LANG=en_US.UTF-8 -> "Accept-Language: en-US, *; q=0.1" * LANGUAGE= LANG=C -> "" */ -static const char *get_accept_language(void) +const char *http_get_accept_language_header(void) { if 
(!cached_accept_language) { struct strbuf buf = STRBUF_INIT; @@ -1829,7 +1829,7 @@ static int http_request(const char *url, fwrite_buffer); } - accept_language = get_accept_language(); + accept_language = http_get_accept_language_header(); if (accept_language) headers = curl_slist_append(headers, accept_language); @@ -178,6 +178,9 @@ int http_fetch_ref(const char *base, struct ref *ref); int http_get_info_packs(const char *base_url, struct packed_git **packs_head); +/* Helper for getting Accept-Language header */ +const char *http_get_accept_language_header(void); + struct http_pack_request { char *url; @@ -8,6 +8,7 @@ #include "cache.h" #include "config.h" #include "date.h" +#include "mailmap.h" static struct strbuf git_default_name = STRBUF_INIT; static struct strbuf git_default_email = STRBUF_INIT; @@ -346,6 +347,79 @@ person_only: return 0; } +/* + * Returns the difference between the new and old length of the ident line. + */ +static ssize_t rewrite_ident_line(const char *person, size_t len, + struct strbuf *buf, + struct string_list *mailmap) +{ + size_t namelen, maillen; + const char *name; + const char *mail; + struct ident_split ident; + + if (split_ident_line(&ident, person, len)) + return 0; + + mail = ident.mail_begin; + maillen = ident.mail_end - ident.mail_begin; + name = ident.name_begin; + namelen = ident.name_end - ident.name_begin; + + if (map_user(mailmap, &mail, &maillen, &name, &namelen)) { + struct strbuf namemail = STRBUF_INIT; + size_t newlen; + + strbuf_addf(&namemail, "%.*s <%.*s>", + (int)namelen, name, (int)maillen, mail); + + strbuf_splice(buf, ident.name_begin - buf->buf, + ident.mail_end - ident.name_begin + 1, + namemail.buf, namemail.len); + newlen = namemail.len; + + strbuf_release(&namemail); + + return newlen - (ident.mail_end - ident.name_begin); + } + + return 0; +} + +void apply_mailmap_to_header(struct strbuf *buf, const char **header, + struct string_list *mailmap) +{ + size_t buf_offset = 0; + + if (!mailmap) + return; + + for (;;) { + const char *person, *line; + size_t i; + int found_header = 0; + + line = buf->buf + buf_offset; + if (!*line || *line == '\n') + return; /* End of headers */ + + for (i = 0; header[i]; i++) + if (skip_prefix(line, header[i], &person)) { + const char *endp = strchrnul(person, '\n'); + found_header = 1; + buf_offset += endp - line; + buf_offset += rewrite_ident_line(person, endp - person, buf, mailmap); + break; + } + + if (!found_header) { + buf_offset = strchrnul(line, '\n') - buf->buf; + if (buf->buf[buf_offset] == '\n') + buf_offset++; + } + } +} static void ident_env_hint(enum want_ident whose_ident) { diff --git a/log-tree.c b/log-tree.c index d0ac0a6327..3e8c70ddcf 100644 --- a/log-tree.c +++ b/log-tree.c @@ -137,10 +137,12 @@ static int ref_filter_match(const char *refname, static int add_ref_decoration(const char *refname, const struct object_id *oid, int flags, void *cb_data) { + int i; struct object *obj; enum object_type objtype; enum decoration_type deco_type = DECORATION_NONE; struct decoration_filter *filter = (struct decoration_filter *)cb_data; + const char *git_replace_ref_base = ref_namespace[NAMESPACE_REPLACE].ref; if (filter && !ref_filter_match(refname, filter)) return 0; @@ -165,16 +167,21 @@ static int add_ref_decoration(const char *refname, const struct object_id *oid, return 0; obj = lookup_object_by_type(the_repository, oid, objtype); - if (starts_with(refname, "refs/heads/")) - deco_type = DECORATION_REF_LOCAL; - else if (starts_with(refname, "refs/remotes/")) - deco_type = 
DECORATION_REF_REMOTE; - else if (starts_with(refname, "refs/tags/")) - deco_type = DECORATION_REF_TAG; - else if (!strcmp(refname, "refs/stash")) - deco_type = DECORATION_REF_STASH; - else if (!strcmp(refname, "HEAD")) - deco_type = DECORATION_REF_HEAD; + for (i = 0; i < ARRAY_SIZE(ref_namespace); i++) { + struct ref_namespace_info *info = &ref_namespace[i]; + + if (!info->decoration) + continue; + if (info->exact) { + if (!strcmp(refname, info->ref)) { + deco_type = info->decoration; + break; + } + } else if (starts_with(refname, info->ref)) { + deco_type = info->decoration; + break; + } + } add_name_decoration(deco_type, refname, obj); while (obj->type == OBJ_TAG) { @@ -956,8 +963,7 @@ static void cleanup_additional_headers(struct diff_options *o) static int do_remerge_diff(struct rev_info *opt, struct commit_list *parents, - struct object_id *oid, - struct commit *commit) + struct object_id *oid) { struct merge_options o; struct commit_list *bases; @@ -1052,7 +1058,7 @@ static int log_tree_diff(struct rev_info *opt, struct commit *commit, struct log "for octopus merges.\n"); return 1; } - return do_remerge_diff(opt, parents, oid, commit); + return do_remerge_diff(opt, parents, oid); } if (opt->combine_merges) return do_diff_combined(opt, commit); diff --git a/merge-ort-wrappers.c b/merge-ort-wrappers.c index ad04106169..748924a69b 100644 --- a/merge-ort-wrappers.c +++ b/merge-ort-wrappers.c @@ -10,8 +10,8 @@ static int unclean(struct merge_options *opt, struct tree *head) struct strbuf sb = STRBUF_INIT; if (head && repo_index_has_changes(opt->repo, head, &sb)) { - fprintf(stderr, _("Your local changes to the following files would be overwritten by merge:\n %s"), - sb.buf); + error(_("Your local changes to the following files would be overwritten by merge:\n %s"), + sb.buf); strbuf_release(&sb); return -1; } diff --git a/merge-ort.c b/merge-ort.c index ebb9a75425..99dcee2db8 100644 --- a/merge-ort.c +++ b/merge-ort.c @@ -349,13 +349,15 @@ struct merge_options_internal { struct mem_pool pool; /* - * output: special messages and conflict notices for various paths + * conflicts: logical conflicts and messages stored by _primary_ path * * This is a map of pathnames (a subset of the keys in "paths" above) - * to strbufs. It gathers various warning/conflict/notice messages - * for later processing. + * to struct string_list, with each item's `util` containing a + * `struct logical_conflict_info`. Note, though, that for each path, + * it only stores the logical conflicts for which that path is the + * primary path; the path might be part of additional conflicts. 
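A sketch of how a consumer could walk this map for a single path, using the conflicts member and struct logical_conflict_info introduced by this change; the surrounding function and the way the path is obtained are hypothetical, and type_short_descriptions is the lookup table added further down in this file.

/* Hypothetical: report every logical conflict recorded for one path. */
static void show_conflicts_for_path(struct merge_options *opt, const char *path)
{
        struct string_list *list = strmap_get(&opt->priv->conflicts, path);
        int i;

        if (!list)
                return;
        for (i = 0; i < list->nr; i++) {
                struct logical_conflict_info *info = list->items[i].util;

                printf("%s: %s\n", type_short_descriptions[info->type],
                       list->items[i].string);
        }
}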
*/ - struct strmap output; + struct strmap conflicts; /* * renames: various data relating to rename detection @@ -385,8 +387,24 @@ struct merge_options_internal { /* call_depth: recursion level counter for merging merge bases */ int call_depth; + + /* field that holds submodule conflict information */ + struct string_list conflicted_submodules; +}; + +struct conflicted_submodule_item { + char *abbrev; + int flag; }; +static void conflicted_submodule_item_free(void *util, const char *str) +{ + struct conflicted_submodule_item *item = util; + + free(item->abbrev); + free(item); +} + struct version_info { struct object_id oid; unsigned short mode; @@ -481,6 +499,100 @@ struct conflict_info { unsigned match_mask:3; }; +enum conflict_and_info_types { + /* "Simple" conflicts and informational messages */ + INFO_AUTO_MERGING = 0, + CONFLICT_CONTENTS, /* text file that failed to merge */ + CONFLICT_BINARY, + CONFLICT_FILE_DIRECTORY, + CONFLICT_DISTINCT_MODES, + CONFLICT_MODIFY_DELETE, + + /* Regular rename */ + CONFLICT_RENAME_RENAME, /* same file renamed differently */ + CONFLICT_RENAME_COLLIDES, /* rename/add or two files renamed to 1 */ + CONFLICT_RENAME_DELETE, + + /* Basic directory rename */ + CONFLICT_DIR_RENAME_SUGGESTED, + INFO_DIR_RENAME_APPLIED, + + /* Special directory rename cases */ + INFO_DIR_RENAME_SKIPPED_DUE_TO_RERENAME, + CONFLICT_DIR_RENAME_FILE_IN_WAY, + CONFLICT_DIR_RENAME_COLLISION, + CONFLICT_DIR_RENAME_SPLIT, + + /* Basic submodule */ + INFO_SUBMODULE_FAST_FORWARDING, + CONFLICT_SUBMODULE_FAILED_TO_MERGE, + + /* Special submodule cases broken out from FAILED_TO_MERGE */ + CONFLICT_SUBMODULE_FAILED_TO_MERGE_BUT_POSSIBLE_RESOLUTION, + CONFLICT_SUBMODULE_NOT_INITIALIZED, + CONFLICT_SUBMODULE_HISTORY_NOT_AVAILABLE, + CONFLICT_SUBMODULE_MAY_HAVE_REWINDS, + CONFLICT_SUBMODULE_NULL_MERGE_BASE, + + /* Keep this entry _last_ in the list */ + NB_CONFLICT_TYPES, +}; + +/* + * Short description of conflict type, relied upon by external tools. + * + * We can add more entries, but DO NOT change any of these strings. Also, + * Order MUST match conflict_info_and_types. 
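Since external tools key off these strings and the array is indexed directly by the enum, a guard along the following lines (hypothetical, not part of this change) would catch a new enum conflict_and_info_types value that was added without a matching description:

/* Hypothetical consistency check for the table defined just below. */
static void check_type_short_descriptions(void)
{
        if (ARRAY_SIZE(type_short_descriptions) != NB_CONFLICT_TYPES)
                BUG("type_short_descriptions[] out of sync with enum conflict_and_info_types");
}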
+ */ +static const char *type_short_descriptions[] = { + /*** "Simple" conflicts and informational messages ***/ + [INFO_AUTO_MERGING] = "Auto-merging", + [CONFLICT_CONTENTS] = "CONFLICT (contents)", + [CONFLICT_BINARY] = "CONFLICT (binary)", + [CONFLICT_FILE_DIRECTORY] = "CONFLICT (file/directory)", + [CONFLICT_DISTINCT_MODES] = "CONFLICT (distinct modes)", + [CONFLICT_MODIFY_DELETE] = "CONFLICT (modify/delete)", + + /*** Regular rename ***/ + [CONFLICT_RENAME_RENAME] = "CONFLICT (rename/rename)", + [CONFLICT_RENAME_COLLIDES] = "CONFLICT (rename involved in collision)", + [CONFLICT_RENAME_DELETE] = "CONFLICT (rename/delete)", + + /*** Basic directory rename ***/ + [CONFLICT_DIR_RENAME_SUGGESTED] = + "CONFLICT (directory rename suggested)", + [INFO_DIR_RENAME_APPLIED] = "Path updated due to directory rename", + + /*** Special directory rename cases ***/ + [INFO_DIR_RENAME_SKIPPED_DUE_TO_RERENAME] = + "Directory rename skipped since directory was renamed on both sides", + [CONFLICT_DIR_RENAME_FILE_IN_WAY] = + "CONFLICT (file in way of directory rename)", + [CONFLICT_DIR_RENAME_COLLISION] = "CONFLICT(directory rename collision)", + [CONFLICT_DIR_RENAME_SPLIT] = "CONFLICT(directory rename unclear split)", + + /*** Basic submodule ***/ + [INFO_SUBMODULE_FAST_FORWARDING] = "Fast forwarding submodule", + [CONFLICT_SUBMODULE_FAILED_TO_MERGE] = "CONFLICT (submodule)", + + /*** Special submodule cases broken out from FAILED_TO_MERGE ***/ + [CONFLICT_SUBMODULE_FAILED_TO_MERGE_BUT_POSSIBLE_RESOLUTION] = + "CONFLICT (submodule with possible resolution)", + [CONFLICT_SUBMODULE_NOT_INITIALIZED] = + "CONFLICT (submodule not initialized)", + [CONFLICT_SUBMODULE_HISTORY_NOT_AVAILABLE] = + "CONFLICT (submodule history not available)", + [CONFLICT_SUBMODULE_MAY_HAVE_REWINDS] = + "CONFLICT (submodule may have rewinds)", + [CONFLICT_SUBMODULE_NULL_MERGE_BASE] = + "CONFLICT (submodule lacks merge base)" +}; + +struct logical_conflict_info { + enum conflict_and_info_types type; + struct strvec paths; +}; + /*** Function Grouping: various utility functions ***/ /* @@ -567,24 +679,32 @@ static void clear_or_reinit_internal_opts(struct merge_options_internal *opti, struct strmap_entry *e; /* Release and free each strbuf found in output */ - strmap_for_each_entry(&opti->output, &iter, e) { - struct strbuf *sb = e->value; - strbuf_release(sb); + strmap_for_each_entry(&opti->conflicts, &iter, e) { + struct string_list *list = e->value; + for (int i = 0; i < list->nr; i++) { + struct logical_conflict_info *info = + list->items[i].util; + strvec_clear(&info->paths); + } /* - * While strictly speaking we don't need to free(sb) - * here because we could pass free_values=1 when - * calling strmap_clear() on opti->output, that would - * require strmap_clear to do another - * strmap_for_each_entry() loop, so we just free it - * while we're iterating anyway. + * While strictly speaking we don't need to + * free(conflicts) here because we could pass + * free_values=1 when calling strmap_clear() on + * opti->conflicts, that would require strmap_clear + * to do another strmap_for_each_entry() loop, so we + * just free it while we're iterating anyway. */ - free(sb); + string_list_clear(list, 1); + free(list); } - strmap_clear(&opti->output, 0); + strmap_clear(&opti->conflicts, 0); } mem_pool_discard(&opti->pool, 0); + string_list_clear_func(&opti->conflicted_submodules, + conflicted_submodule_item_free); + /* Clean out callback_data as well. 
*/ FREE_AND_NULL(renames->callback_data); renames->callback_data_nr = renames->callback_data_alloc = 0; @@ -627,29 +747,57 @@ static void format_commit(struct strbuf *sb, strbuf_addch(sb, '\n'); } -__attribute__((format (printf, 4, 5))) +__attribute__((format (printf, 8, 9))) static void path_msg(struct merge_options *opt, - const char *path, + enum conflict_and_info_types type, int omittable_hint, /* skippable under --remerge-diff */ + const char *primary_path, + const char *other_path_1, /* may be NULL */ + const char *other_path_2, /* may be NULL */ + struct string_list *other_paths, /* may be NULL */ const char *fmt, ...) { va_list ap; - struct strbuf *sb, *dest; + struct string_list *path_conflicts; + struct logical_conflict_info *info; + struct strbuf buf = STRBUF_INIT; + struct strbuf *dest; struct strbuf tmp = STRBUF_INIT; + /* Sanity checks */ + assert(omittable_hint == + !starts_with(type_short_descriptions[type], "CONFLICT") || + type == CONFLICT_DIR_RENAME_SUGGESTED); if (opt->record_conflict_msgs_as_headers && omittable_hint) return; /* Do not record mere hints in headers */ if (opt->priv->call_depth && opt->verbosity < 5) return; /* Ignore messages from inner merges */ - sb = strmap_get(&opt->priv->output, path); - if (!sb) { - sb = xmalloc(sizeof(*sb)); - strbuf_init(sb, 0); - strmap_put(&opt->priv->output, path, sb); + /* Ensure path_conflicts (ptr to array of logical_conflict) allocated */ + path_conflicts = strmap_get(&opt->priv->conflicts, primary_path); + if (!path_conflicts) { + path_conflicts = xmalloc(sizeof(*path_conflicts)); + string_list_init_dup(path_conflicts); + strmap_put(&opt->priv->conflicts, primary_path, path_conflicts); } - dest = (opt->record_conflict_msgs_as_headers ? &tmp : sb); + /* Add a logical_conflict at the end to store info from this call */ + info = xcalloc(1, sizeof(*info)); + info->type = type; + strvec_init(&info->paths); + + /* Handle the list of paths */ + strvec_push(&info->paths, primary_path); + if (other_path_1) + strvec_push(&info->paths, other_path_1); + if (other_path_2) + strvec_push(&info->paths, other_path_2); + if (other_paths) + for (int i = 0; i < other_paths->nr; i++) + strvec_push(&info->paths, other_paths->items[i].string); + + /* Handle message and its format, in normal case */ + dest = (opt->record_conflict_msgs_as_headers ? 
&tmp : &buf); va_start(ap, fmt); if (opt->priv->call_depth) { @@ -660,32 +808,32 @@ static void path_msg(struct merge_options *opt, strbuf_vaddf(dest, fmt, ap); va_end(ap); + /* Handle specialized formatting of message under --remerge-diff */ if (opt->record_conflict_msgs_as_headers) { int i_sb = 0, i_tmp = 0; /* Start with the specified prefix */ if (opt->msg_header_prefix) - strbuf_addf(sb, "%s ", opt->msg_header_prefix); + strbuf_addf(&buf, "%s ", opt->msg_header_prefix); /* Copy tmp to sb, adding spaces after newlines */ - strbuf_grow(sb, sb->len + 2*tmp.len); /* more than sufficient */ + strbuf_grow(&buf, buf.len + 2*tmp.len); /* more than sufficient */ for (; i_tmp < tmp.len; i_tmp++, i_sb++) { /* Copy next character from tmp to sb */ - sb->buf[sb->len + i_sb] = tmp.buf[i_tmp]; + buf.buf[buf.len + i_sb] = tmp.buf[i_tmp]; /* If we copied a newline, add a space */ if (tmp.buf[i_tmp] == '\n') - sb->buf[++i_sb] = ' '; + buf.buf[++i_sb] = ' '; } /* Update length and ensure it's NUL-terminated */ - sb->len += i_sb; - sb->buf[sb->len] = '\0'; + buf.len += i_sb; + buf.buf[buf.len] = '\0'; strbuf_release(&tmp); } - - /* Add final newline character to sb */ - strbuf_addch(sb, '\n'); + string_list_append_nodup(path_conflicts, strbuf_detach(&buf, NULL)) + ->util = info; } static struct diff_filespec *pool_alloc_filespec(struct mem_pool *pool, @@ -1614,38 +1762,50 @@ static int merge_submodule(struct merge_options *opt, int i; int search = !opt->priv->call_depth; + int sub_not_initialized = 1; + int sub_flag = CONFLICT_SUBMODULE_FAILED_TO_MERGE; /* store fallback answer in result in case we fail */ oidcpy(result, opt->priv->call_depth ? o : a); /* we can not handle deletion conflicts */ - if (is_null_oid(o)) - return 0; - if (is_null_oid(a)) - return 0; - if (is_null_oid(b)) - return 0; + if (is_null_oid(a) || is_null_oid(b)) + BUG("submodule deleted on one side; this should be handled outside of merge_submodule()"); + + if ((sub_not_initialized = repo_submodule_init(&subrepo, + opt->repo, path, null_oid()))) { + path_msg(opt, CONFLICT_SUBMODULE_NOT_INITIALIZED, 0, + path, NULL, NULL, NULL, + _("Failed to merge submodule %s (not checked out)"), + path); + sub_flag = CONFLICT_SUBMODULE_NOT_INITIALIZED; + goto cleanup; + } - if (repo_submodule_init(&subrepo, opt->repo, path, null_oid())) { - path_msg(opt, path, 0, - _("Failed to merge submodule %s (not checked out)"), - path); - return 0; + if (is_null_oid(o)) { + path_msg(opt, CONFLICT_SUBMODULE_NULL_MERGE_BASE, 0, + path, NULL, NULL, NULL, + _("Failed to merge submodule %s (no merge base)"), + path); + goto cleanup; } if (!(commit_o = lookup_commit_reference(&subrepo, o)) || !(commit_a = lookup_commit_reference(&subrepo, a)) || !(commit_b = lookup_commit_reference(&subrepo, b))) { - path_msg(opt, path, 0, + path_msg(opt, CONFLICT_SUBMODULE_HISTORY_NOT_AVAILABLE, 0, + path, NULL, NULL, NULL, _("Failed to merge submodule %s (commits not present)"), path); + sub_flag = CONFLICT_SUBMODULE_HISTORY_NOT_AVAILABLE; goto cleanup; } /* check whether both changes are forward */ if (!repo_in_merge_bases(&subrepo, commit_o, commit_a) || !repo_in_merge_bases(&subrepo, commit_o, commit_b)) { - path_msg(opt, path, 0, + path_msg(opt, CONFLICT_SUBMODULE_MAY_HAVE_REWINDS, 0, + path, NULL, NULL, NULL, _("Failed to merge submodule %s " "(commits don't follow merge-base)"), path); @@ -1655,7 +1815,8 @@ static int merge_submodule(struct merge_options *opt, /* Case #1: a is contained in b or vice versa */ if (repo_in_merge_bases(&subrepo, commit_a, commit_b)) { 
oidcpy(result, b); - path_msg(opt, path, 1, + path_msg(opt, INFO_SUBMODULE_FAST_FORWARDING, 1, + path, NULL, NULL, NULL, _("Note: Fast-forwarding submodule %s to %s"), path, oid_to_hex(b)); ret = 1; @@ -1663,7 +1824,8 @@ static int merge_submodule(struct merge_options *opt, } if (repo_in_merge_bases(&subrepo, commit_b, commit_a)) { oidcpy(result, a); - path_msg(opt, path, 1, + path_msg(opt, INFO_SUBMODULE_FAST_FORWARDING, 1, + path, NULL, NULL, NULL, _("Note: Fast-forwarding submodule %s to %s"), path, oid_to_hex(a)); ret = 1; @@ -1686,30 +1848,27 @@ static int merge_submodule(struct merge_options *opt, &merges); switch (parent_count) { case 0: - path_msg(opt, path, 0, _("Failed to merge submodule %s"), path); + path_msg(opt, CONFLICT_SUBMODULE_FAILED_TO_MERGE, 0, + path, NULL, NULL, NULL, + _("Failed to merge submodule %s"), path); break; case 1: format_commit(&sb, 4, &subrepo, (struct commit *)merges.objects[0].item); - path_msg(opt, path, 0, + path_msg(opt, CONFLICT_SUBMODULE_FAILED_TO_MERGE_BUT_POSSIBLE_RESOLUTION, 0, + path, NULL, NULL, NULL, _("Failed to merge submodule %s, but a possible merge " - "resolution exists:\n%s\n"), + "resolution exists: %s"), path, sb.buf); - path_msg(opt, path, 1, - _("If this is correct simply add it to the index " - "for example\n" - "by using:\n\n" - " git update-index --cacheinfo 160000 %s \"%s\"\n\n" - "which will accept this suggestion.\n"), - oid_to_hex(&merges.objects[0].item->oid), path); strbuf_release(&sb); break; default: for (i = 0; i < merges.nr; i++) format_commit(&sb, 4, &subrepo, (struct commit *)merges.objects[i].item); - path_msg(opt, path, 0, + path_msg(opt, CONFLICT_SUBMODULE_FAILED_TO_MERGE_BUT_POSSIBLE_RESOLUTION, 0, + path, NULL, NULL, NULL, _("Failed to merge submodule %s, but multiple " "possible merges exist:\n%s"), path, sb.buf); strbuf_release(&sb); @@ -1717,7 +1876,23 @@ static int merge_submodule(struct merge_options *opt, object_array_clear(&merges); cleanup: - repo_clear(&subrepo); + if (!opt->priv->call_depth && !ret) { + struct string_list *csub = &opt->priv->conflicted_submodules; + struct conflicted_submodule_item *util; + const char *abbrev; + + util = xmalloc(sizeof(*util)); + util->flag = sub_flag; + util->abbrev = NULL; + if (!sub_not_initialized) { + abbrev = repo_find_unique_abbrev(&subrepo, b, DEFAULT_ABBREV); + util->abbrev = xstrdup(abbrev); + } + string_list_append(csub, path)->util = util; + } + + if (!sub_not_initialized) + repo_clear(&subrepo); return ret; } @@ -1835,7 +2010,8 @@ static int merge_3way(struct merge_options *opt, &src1, name1, &src2, name2, &opt->priv->attr_index, &ll_opts); if (merge_status == LL_MERGE_BINARY_CONFLICT) - path_msg(opt, path, 0, + path_msg(opt, CONFLICT_BINARY, 0, + path, NULL, NULL, NULL, "warning: Cannot merge binary files: %s (%s vs. 
%s)", path, name1, name2); @@ -1947,7 +2123,8 @@ static int handle_content_merge(struct merge_options *opt, if (ret) return -1; clean &= (merge_status == 0); - path_msg(opt, path, 1, _("Auto-merging %s"), path); + path_msg(opt, INFO_AUTO_MERGING, 1, path, NULL, NULL, NULL, + _("Auto-merging %s"), path); } else if (S_ISGITLINK(a->mode)) { int two_way = ((S_IFMT & o->mode) != (S_IFMT & a->mode)); clean = merge_submodule(opt, pathnames[0], @@ -2085,21 +2262,24 @@ static char *handle_path_level_conflicts(struct merge_options *opt, c_info->reported_already = 1; strbuf_add_separated_string_list(&collision_paths, ", ", &c_info->source_files); - path_msg(opt, new_path, 0, - _("CONFLICT (implicit dir rename): Existing file/dir " - "at %s in the way of implicit directory rename(s) " - "putting the following path(s) there: %s."), - new_path, collision_paths.buf); + path_msg(opt, CONFLICT_DIR_RENAME_FILE_IN_WAY, 0, + new_path, NULL, NULL, &c_info->source_files, + _("CONFLICT (implicit dir rename): Existing " + "file/dir at %s in the way of implicit " + "directory rename(s) putting the following " + "path(s) there: %s."), + new_path, collision_paths.buf); clean = 0; } else if (c_info->source_files.nr > 1) { c_info->reported_already = 1; strbuf_add_separated_string_list(&collision_paths, ", ", &c_info->source_files); - path_msg(opt, new_path, 0, - _("CONFLICT (implicit dir rename): Cannot map more " - "than one path to %s; implicit directory renames " - "tried to put these paths there: %s"), - new_path, collision_paths.buf); + path_msg(opt, CONFLICT_DIR_RENAME_COLLISION, 0, + new_path, NULL, NULL, &c_info->source_files, + _("CONFLICT (implicit dir rename): Cannot map " + "more than one path to %s; implicit directory " + "renames tried to put these paths there: %s"), + new_path, collision_paths.buf); clean = 0; } @@ -2153,13 +2333,14 @@ static void get_provisional_directory_renames(struct merge_options *opt, continue; if (bad_max == max) { - path_msg(opt, source_dir, 0, - _("CONFLICT (directory rename split): " - "Unclear where to rename %s to; it was " - "renamed to multiple other directories, with " - "no destination getting a majority of the " - "files."), - source_dir); + path_msg(opt, CONFLICT_DIR_RENAME_SPLIT, 0, + source_dir, NULL, NULL, NULL, + _("CONFLICT (directory rename split): " + "Unclear where to rename %s to; it was " + "renamed to multiple other directories, " + "with no destination getting a majority of " + "the files."), + source_dir); *clean = 0; } else { strmap_put(&renames->dir_renames[side], @@ -2334,7 +2515,8 @@ static char *check_for_directory_rename(struct merge_options *opt, new_dir = rename_info->value; /* old_dir = rename_info->key; */ otherinfo = strmap_get_entry(dir_rename_exclusions, new_dir); if (otherinfo) { - path_msg(opt, rename_info->key, 1, + path_msg(opt, INFO_DIR_RENAME_SKIPPED_DUE_TO_RERENAME, 1, + rename_info->key, path, new_dir, NULL, _("WARNING: Avoiding applying %s -> %s rename " "to %s, because %s itself was renamed."), rename_info->key, new_dir, path, new_dir); @@ -2475,14 +2657,16 @@ static void apply_directory_rename_modifications(struct merge_options *opt, if (opt->detect_directory_renames == MERGE_DIRECTORY_RENAMES_TRUE) { /* Notify user of updated path */ if (pair->status == 'A') - path_msg(opt, new_path, 1, + path_msg(opt, INFO_DIR_RENAME_APPLIED, 1, + new_path, old_path, NULL, NULL, _("Path updated: %s added in %s inside a " "directory that was renamed in %s; moving " "it to %s."), old_path, branch_with_new_path, branch_with_dir_rename, new_path); 
else - path_msg(opt, new_path, 1, + path_msg(opt, INFO_DIR_RENAME_APPLIED, 1, + new_path, old_path, NULL, NULL, _("Path updated: %s renamed to %s in %s, " "inside a directory that was renamed in %s; " "moving it to %s."), @@ -2495,7 +2679,8 @@ static void apply_directory_rename_modifications(struct merge_options *opt, */ ci->path_conflict = 1; if (pair->status == 'A') - path_msg(opt, new_path, 1, + path_msg(opt, CONFLICT_DIR_RENAME_SUGGESTED, 1, + new_path, old_path, NULL, NULL, _("CONFLICT (file location): %s added in %s " "inside a directory that was renamed in %s, " "suggesting it should perhaps be moved to " @@ -2503,7 +2688,8 @@ static void apply_directory_rename_modifications(struct merge_options *opt, old_path, branch_with_new_path, branch_with_dir_rename, new_path); else - path_msg(opt, new_path, 1, + path_msg(opt, CONFLICT_DIR_RENAME_SUGGESTED, 1, + new_path, old_path, NULL, NULL, _("CONFLICT (file location): %s renamed to %s " "in %s, inside a directory that was renamed " "in %s, suggesting it should perhaps be " @@ -2659,7 +2845,8 @@ static int process_renames(struct merge_options *opt, * and remove the setting of base->path_conflict to 1. */ base->path_conflict = 1; - path_msg(opt, oldpath, 0, + path_msg(opt, CONFLICT_RENAME_RENAME, 0, + pathnames[0], pathnames[1], pathnames[2], NULL, _("CONFLICT (rename/rename): %s renamed to " "%s in %s and to %s in %s."), pathnames[0], @@ -2754,7 +2941,8 @@ static int process_renames(struct merge_options *opt, memcpy(&newinfo->stages[target_index], &merged, sizeof(merged)); if (!clean) { - path_msg(opt, newpath, 0, + path_msg(opt, CONFLICT_RENAME_COLLIDES, 0, + newpath, oldpath, NULL, NULL, _("CONFLICT (rename involved in " "collision): rename of %s -> %s has " "content conflicts AND collides " @@ -2773,7 +2961,8 @@ static int process_renames(struct merge_options *opt, */ newinfo->path_conflict = 1; - path_msg(opt, newpath, 0, + path_msg(opt, CONFLICT_RENAME_DELETE, 0, + newpath, oldpath, NULL, NULL, _("CONFLICT (rename/delete): %s renamed " "to %s in %s, but deleted in %s."), oldpath, newpath, rename_branch, delete_branch); @@ -2797,7 +2986,8 @@ static int process_renames(struct merge_options *opt, } else if (source_deleted) { /* rename/delete */ newinfo->path_conflict = 1; - path_msg(opt, newpath, 0, + path_msg(opt, CONFLICT_RENAME_DELETE, 0, + newpath, oldpath, NULL, NULL, _("CONFLICT (rename/delete): %s renamed" " to %s in %s, but deleted in %s."), oldpath, newpath, @@ -3712,7 +3902,8 @@ static void process_entry(struct merge_options *opt, path = unique_path(opt, path, branch); strmap_put(&opt->priv->paths, path, new_ci); - path_msg(opt, path, 0, + path_msg(opt, CONFLICT_FILE_DIRECTORY, 0, + path, old_path, NULL, NULL, _("CONFLICT (file/directory): directory in the way " "of %s from %s; moving it to %s instead."), old_path, branch, path); @@ -3788,15 +3979,23 @@ static void process_entry(struct merge_options *opt, rename_b = 1; } + if (rename_a) + a_path = unique_path(opt, path, opt->branch1); + if (rename_b) + b_path = unique_path(opt, path, opt->branch2); + if (rename_a && rename_b) { - path_msg(opt, path, 0, + path_msg(opt, CONFLICT_DISTINCT_MODES, 0, + path, a_path, b_path, NULL, _("CONFLICT (distinct types): %s had " "different types on each side; " "renamed both of them so each can " "be recorded somewhere."), path); } else { - path_msg(opt, path, 0, + path_msg(opt, CONFLICT_DISTINCT_MODES, 0, + path, rename_a ? 
a_path : b_path, + NULL, NULL, _("CONFLICT (distinct types): %s had " "different types on each side; " "renamed one of them so each can be " @@ -3833,14 +4032,10 @@ static void process_entry(struct merge_options *opt, /* Insert entries into opt->priv_paths */ assert(rename_a || rename_b); - if (rename_a) { - a_path = unique_path(opt, path, opt->branch1); + if (rename_a) strmap_put(&opt->priv->paths, a_path, ci); - } - if (rename_b) - b_path = unique_path(opt, path, opt->branch2); - else + if (!rename_b) b_path = path; strmap_put(&opt->priv->paths, b_path, new_ci); @@ -3891,7 +4086,8 @@ static void process_entry(struct merge_options *opt, reason = _("add/add"); if (S_ISGITLINK(merged_file.mode)) reason = _("submodule"); - path_msg(opt, path, 0, + path_msg(opt, CONFLICT_CONTENTS, 0, + path, NULL, NULL, NULL, _("CONFLICT (%s): Merge conflict in %s"), reason, path); } @@ -3935,7 +4131,8 @@ static void process_entry(struct merge_options *opt, * since the contents were not modified. */ } else { - path_msg(opt, path, 0, + path_msg(opt, CONFLICT_MODIFY_DELETE, 0, + path, NULL, NULL, NULL, _("CONFLICT (modify/delete): %s deleted in %s " "and modified in %s. Version %s of %s left " "in tree."), @@ -4223,21 +4420,8 @@ static int record_conflicted_index_entries(struct merge_options *opt) * the CE_SKIP_WORKTREE bit and manually write those * files to the working disk here. */ - if (ce_skip_worktree(ce)) { - struct stat st; - - if (!lstat(path, &st)) { - char *new_name = unique_path(opt, - path, - "cruft"); - - path_msg(opt, path, 1, - _("Note: %s not up to date and in way of checking out conflicted version; old copy renamed to %s"), - path, new_name); - errs |= rename(path, new_name); - } + if (ce_skip_worktree(ce)) errs |= checkout_entry(ce, &state, NULL, NULL); - } /* * Mark this cache entry for removal and instead add @@ -4279,6 +4463,152 @@ static int record_conflicted_index_entries(struct merge_options *opt) return errs; } +static void print_submodule_conflict_suggestion(struct string_list *csub) { + struct string_list_item *item; + struct strbuf msg = STRBUF_INIT; + struct strbuf tmp = STRBUF_INIT; + struct strbuf subs = STRBUF_INIT; + + if (!csub->nr) + return; + + strbuf_add_separated_string_list(&subs, " ", csub); + for_each_string_list_item(item, csub) { + struct conflicted_submodule_item *util = item->util; + + /* + * NEEDSWORK: The steps to resolve these errors deserve a more + * detailed explanation than what is currently printed below. + */ + if (util->flag == CONFLICT_SUBMODULE_NOT_INITIALIZED || + util->flag == CONFLICT_SUBMODULE_HISTORY_NOT_AVAILABLE) + continue; + + /* + * TRANSLATORS: This is a line of advice to resolve a merge + * conflict in a submodule. The first argument is the submodule + * name, and the second argument is the abbreviated id of the + * commit that needs to be merged. For example: + * - go to submodule (mysubmodule), and either merge commit abc1234" + */ + strbuf_addf(&tmp, _(" - go to submodule (%s), and either merge commit %s\n" + " or update to an existing commit which has merged those changes\n"), + item->string, util->abbrev); + } + + /* + * TRANSLATORS: This is a detailed message for resolving submodule + * conflicts. The first argument is string containing one step per + * submodule. The second is a space-separated list of submodule names. 
+ */ + strbuf_addf(&msg, + _("Recursive merging with submodules currently only supports trivial cases.\n" + "Please manually handle the merging of each conflicted submodule.\n" + "This can be accomplished with the following steps:\n" + "%s" + " - come back to superproject and run:\n\n" + " git add %s\n\n" + " to record the above merge or update\n" + " - resolve any other conflicts in the superproject\n" + " - commit the resulting index in the superproject\n"), + tmp.buf, subs.buf); + + printf("%s", msg.buf); + + strbuf_release(&subs); + strbuf_release(&tmp); + strbuf_release(&msg); +} + +void merge_display_update_messages(struct merge_options *opt, + int detailed, + struct merge_result *result) +{ + struct merge_options_internal *opti = result->priv; + struct hashmap_iter iter; + struct strmap_entry *e; + struct string_list olist = STRING_LIST_INIT_NODUP; + + if (opt->record_conflict_msgs_as_headers) + BUG("Either display conflict messages or record them as headers, not both"); + + trace2_region_enter("merge", "display messages", opt->repo); + + /* Hack to pre-allocate olist to the desired size */ + ALLOC_GROW(olist.items, strmap_get_size(&opti->conflicts), + olist.alloc); + + /* Put every entry from output into olist, then sort */ + strmap_for_each_entry(&opti->conflicts, &iter, e) { + string_list_append(&olist, e->key)->util = e->value; + } + string_list_sort(&olist); + + /* Iterate over the items, printing them */ + for (int path_nr = 0; path_nr < olist.nr; ++path_nr) { + struct string_list *conflicts = olist.items[path_nr].util; + for (int i = 0; i < conflicts->nr; i++) { + struct logical_conflict_info *info = + conflicts->items[i].util; + + if (detailed) { + printf("%lu", (unsigned long)info->paths.nr); + putchar('\0'); + for (int n = 0; n < info->paths.nr; n++) { + fputs(info->paths.v[n], stdout); + putchar('\0'); + } + fputs(type_short_descriptions[info->type], + stdout); + putchar('\0'); + } + puts(conflicts->items[i].string); + if (detailed) + putchar('\0'); + } + } + string_list_clear(&olist, 0); + + print_submodule_conflict_suggestion(&opti->conflicted_submodules); + + /* Also include needed rename limit adjustment now */ + diff_warn_rename_limit("merge.renamelimit", + opti->renames.needed_limit, 0); + + trace2_region_leave("merge", "display messages", opt->repo); +} + +void merge_get_conflicted_files(struct merge_result *result, + struct string_list *conflicted_files) +{ + struct hashmap_iter iter; + struct strmap_entry *e; + struct merge_options_internal *opti = result->priv; + + strmap_for_each_entry(&opti->conflicted, &iter, e) { + const char *path = e->key; + struct conflict_info *ci = e->value; + int i; + + VERIFY_CI(ci); + + for (i = MERGE_BASE; i <= MERGE_SIDE2; i++) { + struct stage_info *si; + + if (!(ci->filemask & (1ul << i))) + continue; + + si = xmalloc(sizeof(*si)); + si->stage = i+1; + si->mode = ci->stages[i].mode; + oidcpy(&si->oid, &ci->stages[i].oid); + string_list_append(conflicted_files, path)->util = si; + } + } + /* string_list_sort() uses a stable sort, so we're good */ + string_list_sort(conflicted_files); +} + void merge_switch_to_result(struct merge_options *opt, struct tree *head, struct merge_result *result, @@ -4321,43 +4651,8 @@ void merge_switch_to_result(struct merge_options *opt, fclose(fp); trace2_region_leave("merge", "write_auto_merge", opt->repo); } - - if (display_update_msgs) { - struct merge_options_internal *opti = result->priv; - struct hashmap_iter iter; - struct strmap_entry *e; - struct string_list olist = STRING_LIST_INIT_NODUP; - 
int i; - - if (opt->record_conflict_msgs_as_headers) - BUG("Either display conflict messages or record them as headers, not both"); - - trace2_region_enter("merge", "display messages", opt->repo); - - /* Hack to pre-allocate olist to the desired size */ - ALLOC_GROW(olist.items, strmap_get_size(&opti->output), - olist.alloc); - - /* Put every entry from output into olist, then sort */ - strmap_for_each_entry(&opti->output, &iter, e) { - string_list_append(&olist, e->key)->util = e->value; - } - string_list_sort(&olist); - - /* Iterate over the items, printing them */ - for (i = 0; i < olist.nr; ++i) { - struct strbuf *sb = olist.items[i].util; - - printf("%s", sb->buf); - } - string_list_clear(&olist, 0); - - /* Also include needed rename limit adjustment now */ - diff_warn_rename_limit("merge.renamelimit", - opti->renames.needed_limit, 0); - - trace2_region_leave("merge", "display messages", opt->repo); - } + if (display_update_msgs) + merge_display_update_messages(opt, /* detailed */ 0, result); merge_finalize(opt, result); } @@ -4477,6 +4772,7 @@ static void merge_start(struct merge_options *opt, struct merge_result *result) trace2_region_enter("merge", "allocate/init", opt->repo); if (opt->priv) { clear_or_reinit_internal_opts(opt->priv, 1); + string_list_init_nodup(&opt->priv->conflicted_submodules); trace2_region_leave("merge", "allocate/init", opt->repo); return; } @@ -4531,11 +4827,11 @@ static void merge_start(struct merge_options *opt, struct merge_result *result) strmap_init_with_options(&opt->priv->conflicted, pool, 0); /* - * keys & strbufs in output will sometimes need to outlive "paths", - * so it will have a copy of relevant keys. It's probably a small - * subset of the overall paths that have special output. + * keys & string_lists in conflicts will sometimes need to outlive + * "paths", so it will have a copy of relevant keys. It's probably + * a small subset of the overall paths that have special output. */ - strmap_init(&opt->priv->output); + strmap_init(&opt->priv->conflicts); trace2_region_leave("merge", "allocate/init", opt->repo); } @@ -4636,7 +4932,8 @@ redo: trace2_region_leave("merge", "process_entries", opt->repo); /* Set return values */ - result->path_messages = &opt->priv->output; + result->path_messages = &opt->priv->conflicts; + result->tree = parse_tree_indirect(&working_tree_oid); /* existence of conflicted entries implies unclean */ result->clean &= strmap_empty(&opt->priv->conflicted); diff --git a/merge-ort.h b/merge-ort.h index fe599b8786..a994c9a5fc 100644 --- a/merge-ort.h +++ b/merge-ort.h @@ -2,6 +2,7 @@ #define MERGE_ORT_H #include "merge-recursive.h" +#include "hash.h" struct commit; struct tree; @@ -27,7 +28,7 @@ struct merge_result { /* * Special messages and conflict notices for various paths * - * This is a map of pathnames to strbufs. It contains various + * This is a map of pathnames to a string_list. It contains various * warning/conflict/notice messages (possibly multiple per path) * that callers may want to use. */ @@ -80,6 +81,35 @@ void merge_switch_to_result(struct merge_options *opt, int update_worktree_and_index, int display_update_msgs); +/* + * Display messages about conflicts and which files were 3-way merged. + * Automatically called by merge_switch_to_result() with stream == stdout, + * so only call this when bypassing merge_switch_to_result(). 
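A sketch of such a bypassing caller, roughly what a plumbing command doing an in-core merge would do; it assumes opt was set up with init_merge_options() and that merge_base, side1 and side2 are trees that have already been looked up.

/* Hypothetical in-core merge that never touches the working tree. */
struct merge_result result = { 0 };

merge_incore_nonrecursive(&opt, merge_base, side1, side2, &result);
printf("%s\n", oid_to_hex(&result.tree->object.oid));
if (!result.clean)
        merge_display_update_messages(&opt, 0, &result); /* detailed = 0 */
merge_finalize(&opt, &result);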
+ */ +void merge_display_update_messages(struct merge_options *opt, + int detailed, + struct merge_result *result); + +struct stage_info { + struct object_id oid; + int mode; + int stage; +}; + +/* + * Provide a list of path -> {struct stage_info*} mappings for + * all conflicted files. Note that each path could appear up to three + * times in the list, corresponding to 3 different stage entries. In short, + * this basically provides the info that would be printed by `ls-files -u`. + * + * result should have been populated by a call to + * one of the merge_incore_[non]recursive() functions. + * + * conflicted_files should be empty before calling this function. + */ +void merge_get_conflicted_files(struct merge_result *result, + struct string_list *conflicted_files); + /* Do needed cleanup when not calling merge_switch_to_result() */ void merge_finalize(struct merge_options *opt, struct merge_result *result); diff --git a/mergesort.c b/mergesort.c deleted file mode 100644 index bd9c6ef8ee..0000000000 --- a/mergesort.c +++ /dev/null @@ -1,84 +0,0 @@ -#include "cache.h" -#include "mergesort.h" - -/* Combine two sorted lists. Take from `list` on equality. */ -static void *llist_merge(void *list, void *other, - void *(*get_next_fn)(const void *), - void (*set_next_fn)(void *, void *), - int (*compare_fn)(const void *, const void *)) -{ - void *result = list, *tail; - - if (compare_fn(list, other) > 0) { - result = other; - goto other; - } - for (;;) { - do { - tail = list; - list = get_next_fn(list); - if (!list) { - set_next_fn(tail, other); - return result; - } - } while (compare_fn(list, other) <= 0); - set_next_fn(tail, other); - other: - do { - tail = other; - other = get_next_fn(other); - if (!other) { - set_next_fn(tail, list); - return result; - } - } while (compare_fn(list, other) > 0); - set_next_fn(tail, list); - } -} - -/* - * Perform an iterative mergesort using an array of sublists. - * - * n is the number of items. - * ranks[i] is undefined if n & 2^i == 0, and assumed empty. - * ranks[i] contains a sublist of length 2^i otherwise. - * - * The number of bits in a void pointer limits the number of objects - * that can be created, and thus the number of array elements necessary - * to be able to sort any valid list. - * - * Adding an item to this array is like incrementing a binary number; - * positional values for set bits correspond to sublist lengths. - */ -void *llist_mergesort(void *list, - void *(*get_next_fn)(const void *), - void (*set_next_fn)(void *, void *), - int (*compare_fn)(const void *, const void *)) -{ - void *ranks[bitsizeof(void *)]; - size_t n = 0; - int i; - - while (list) { - void *next = get_next_fn(list); - if (next) - set_next_fn(list, NULL); - for (i = 0; n & ((size_t)1 << i); i++) - list = llist_merge(ranks[i], list, get_next_fn, - set_next_fn, compare_fn); - n++; - ranks[i] = list; - list = next; - } - - for (i = 0; n; i++, n >>= 1) { - if (!(n & 1)) - continue; - if (list) - list = llist_merge(ranks[i], list, get_next_fn, - set_next_fn, compare_fn); - else - list = ranks[i]; - } - return list; -} diff --git a/mergesort.h b/mergesort.h index 644cff1f96..7c36f08bd5 100644 --- a/mergesort.h +++ b/mergesort.h @@ -1,17 +1,105 @@ #ifndef MERGESORT_H #define MERGESORT_H +/* Combine two sorted lists. Take from `list` on equality. 
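A usage sketch for the typed sorter generated by the DEFINE_LIST_SORT macro defined just below; the list type, its next link field and the comparison function are hypothetical.

struct item {
        int value;
        struct item *next;
};

static int compare_items(const struct item *a, const struct item *b)
{
        return a->value - b->value;
}

/* Expands to a static "void sort_items(struct item **, ...)" sorter. */
DEFINE_LIST_SORT(static, sort_items, struct item, next);

static void sort_in_place(struct item **head)
{
        sort_items(head, compare_items);
}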
*/ +#define DEFINE_LIST_MERGE_INTERNAL(name, type) \ +static type *name##__merge(type *list, type *other, \ + int (*compare_fn)(const type *, const type *))\ +{ \ + type *result = list, *tail; \ + int prefer_list = compare_fn(list, other) <= 0; \ + \ + if (!prefer_list) { \ + result = other; \ + SWAP(list, other); \ + } \ + for (;;) { \ + do { \ + tail = list; \ + list = name##__get_next(list); \ + if (!list) { \ + name##__set_next(tail, other); \ + return result; \ + } \ + } while (compare_fn(list, other) < prefer_list); \ + name##__set_next(tail, other); \ + prefer_list ^= 1; \ + SWAP(list, other); \ + } \ +} + /* - * Sort linked list in place. - * - get_next_fn() returns the next element given an element of a linked list. - * - set_next_fn() takes two elements A and B, and makes B the "next" element - * of A on the list. - * - compare_fn() takes two elements A and B, and returns negative, 0, positive - * as the same sign as "subtracting" B from A. + * Perform an iterative mergesort using an array of sublists. + * + * n is the number of items. + * ranks[i] is undefined if n & 2^i == 0, and assumed empty. + * ranks[i] contains a sublist of length 2^i otherwise. + * + * The number of bits in a void pointer limits the number of objects + * that can be created, and thus the number of array elements necessary + * to be able to sort any valid list. + * + * Adding an item to this array is like incrementing a binary number; + * positional values for set bits correspond to sublist lengths. */ -void *llist_mergesort(void *list, - void *(*get_next_fn)(const void *), - void (*set_next_fn)(void *, void *), - int (*compare_fn)(const void *, const void *)); +#define DEFINE_LIST_SORT_INTERNAL(scope, name, type) \ +scope void name(type **listp, \ + int (*compare_fn)(const type *, const type *)) \ +{ \ + type *list = *listp; \ + type *ranks[bitsizeof(type *)]; \ + size_t n = 0; \ + \ + if (!list) \ + return; \ + \ + for (;;) { \ + int i; \ + size_t m; \ + type *next = name##__get_next(list); \ + if (next) \ + name##__set_next(list, NULL); \ + for (i = 0, m = n;; i++, m >>= 1) { \ + if (m & 1) { \ + list = name##__merge(ranks[i], list, \ + compare_fn); \ + } else if (next) { \ + break; \ + } else if (!m) { \ + *listp = list; \ + return; \ + } \ + } \ + n++; \ + ranks[i] = list; \ + list = next; \ + } \ +} + +#define DECLARE_LIST_SORT(scope, name, type) \ +scope void name(type **listp, \ + int (*compare_fn)(const type *, const type *)) + +#define DEFINE_LIST_SORT_DEBUG(scope, name, type, next_member, \ + on_get_next, on_set_next) \ + \ +static inline type *name##__get_next(const type *elem) \ +{ \ + on_get_next; \ + return elem->next_member; \ +} \ + \ +static inline void name##__set_next(type *elem, type *next) \ +{ \ + on_set_next; \ + elem->next_member = next; \ +} \ + \ +DEFINE_LIST_MERGE_INTERNAL(name, type) \ +DEFINE_LIST_SORT_INTERNAL(scope, name, type) \ +DECLARE_LIST_SORT(scope, name, type) + +#define DEFINE_LIST_SORT(scope, name, type, next_member) \ +DEFINE_LIST_SORT_DEBUG(scope, name, type, next_member, (void)0, (void)0) #endif @@ -1053,40 +1053,34 @@ static struct commit **find_commits_for_midx_bitmap(uint32_t *indexed_commits_nr return cb.commits; } -static int write_midx_bitmap(char *midx_name, unsigned char *midx_hash, - struct write_midx_context *ctx, - const char *refs_snapshot, +static int write_midx_bitmap(const char *midx_name, + const unsigned char *midx_hash, + struct packing_data *pdata, + struct commit **commits, + uint32_t commits_nr, + uint32_t *pack_order, unsigned flags) { - 
struct packing_data pdata; - struct pack_idx_entry **index; - struct commit **commits = NULL; - uint32_t i, commits_nr; + int ret, i; uint16_t options = 0; - char *bitmap_name = xstrfmt("%s-%s.bitmap", midx_name, hash_to_hex(midx_hash)); - int ret; - - if (!ctx->entries_nr) - BUG("cannot write a bitmap without any objects"); + struct pack_idx_entry **index; + char *bitmap_name = xstrfmt("%s-%s.bitmap", midx_name, + hash_to_hex(midx_hash)); if (flags & MIDX_WRITE_BITMAP_HASH_CACHE) options |= BITMAP_OPT_HASH_CACHE; - prepare_midx_packing_data(&pdata, ctx); - - commits = find_commits_for_midx_bitmap(&commits_nr, refs_snapshot, ctx); - /* * Build the MIDX-order index based on pdata.objects (which is already * in MIDX order; c.f., 'midx_pack_order_cmp()' for the definition of * this order). */ - ALLOC_ARRAY(index, pdata.nr_objects); - for (i = 0; i < pdata.nr_objects; i++) - index[i] = &pdata.objects[i].idx; + ALLOC_ARRAY(index, pdata->nr_objects); + for (i = 0; i < pdata->nr_objects; i++) + index[i] = &pdata->objects[i].idx; bitmap_writer_show_progress(flags & MIDX_PROGRESS); - bitmap_writer_build_type_index(&pdata, index, pdata.nr_objects); + bitmap_writer_build_type_index(pdata, index, pdata->nr_objects); /* * bitmap_writer_finish expects objects in lex order, but pack_order @@ -1101,16 +1095,16 @@ static int write_midx_bitmap(char *midx_name, unsigned char *midx_hash, * happens between bitmap_writer_build_type_index() and * bitmap_writer_finish(). */ - for (i = 0; i < pdata.nr_objects; i++) - index[ctx->pack_order[i]] = &pdata.objects[i].idx; + for (i = 0; i < pdata->nr_objects; i++) + index[pack_order[i]] = &pdata->objects[i].idx; bitmap_writer_select_commits(commits, commits_nr, -1); - ret = bitmap_writer_build(&pdata); + ret = bitmap_writer_build(pdata); if (ret < 0) goto cleanup; bitmap_writer_set_checksum(midx_hash); - bitmap_writer_finish(index, pdata.nr_objects, bitmap_name, options); + bitmap_writer_finish(index, pdata->nr_objects, bitmap_name, options); cleanup: free(index); @@ -1443,14 +1437,40 @@ static int write_midx_internal(const char *object_dir, if (flags & MIDX_WRITE_REV_INDEX && git_env_bool("GIT_TEST_MIDX_WRITE_REV", 0)) write_midx_reverse_index(midx_name.buf, midx_hash, &ctx); + if (flags & MIDX_WRITE_BITMAP) { - if (write_midx_bitmap(midx_name.buf, midx_hash, &ctx, - refs_snapshot, flags) < 0) { + struct packing_data pdata; + struct commit **commits; + uint32_t commits_nr; + + if (!ctx.entries_nr) + BUG("cannot write a bitmap without any objects"); + + prepare_midx_packing_data(&pdata, &ctx); + + commits = find_commits_for_midx_bitmap(&commits_nr, refs_snapshot, &ctx); + + /* + * The previous steps translated the information from + * 'entries' into information suitable for constructing + * bitmaps. We no longer need that array, so clear it to + * reduce memory pressure. + */ + FREE_AND_NULL(ctx.entries); + ctx.entries_nr = 0; + + if (write_midx_bitmap(midx_name.buf, midx_hash, &pdata, + commits, commits_nr, ctx.pack_order, + flags) < 0) { error(_("could not write multi-pack bitmap")); result = 1; goto cleanup; } } + /* + * NOTE: Do not use ctx.entries beyond this point, since it might + * have been freed in the previous if block. 
+ */ if (ctx.m) close_object_store(the_repository->objects); @@ -1005,6 +1005,7 @@ void init_notes(struct notes_tree *t, const char *notes_ref, if (!notes_ref) notes_ref = default_notes_ref(); + update_ref_namespace(NAMESPACE_NOTES, xstrdup(notes_ref)); if (!combine_notes) combine_notes = combine_notes_concatenate; diff --git a/object-file.c b/object-file.c index 6c8e3b1660..5b270f046d 100644 --- a/object-file.c +++ b/object-file.c @@ -1951,6 +1951,96 @@ static int create_tmpfile(struct strbuf *tmp, const char *filename) return fd; } +/** + * Common steps for loose object writers to start writing loose + * objects: + * + * - Create tmpfile for the loose object. + * - Setup zlib stream for compression. + * - Start to feed header to zlib stream. + * + * Returns a "fd", which should later be provided to + * end_loose_object_common(). + */ +static int start_loose_object_common(struct strbuf *tmp_file, + const char *filename, unsigned flags, + git_zstream *stream, + unsigned char *buf, size_t buflen, + git_hash_ctx *c, + char *hdr, int hdrlen) +{ + int fd; + + fd = create_tmpfile(tmp_file, filename); + if (fd < 0) { + if (flags & HASH_SILENT) + return -1; + else if (errno == EACCES) + return error(_("insufficient permission for adding " + "an object to repository database %s"), + get_object_directory()); + else + return error_errno( + _("unable to create temporary file")); + } + + /* Setup zlib stream for compression */ + git_deflate_init(stream, zlib_compression_level); + stream->next_out = buf; + stream->avail_out = buflen; + the_hash_algo->init_fn(c); + + /* Start to feed header to zlib stream */ + stream->next_in = (unsigned char *)hdr; + stream->avail_in = hdrlen; + while (git_deflate(stream, 0) == Z_OK) + ; /* nothing */ + the_hash_algo->update_fn(c, hdr, hdrlen); + + return fd; +} + +/** + * Common steps for the inner git_deflate() loop for writing loose + * objects. Returns what git_deflate() returns. + */ +static int write_loose_object_common(git_hash_ctx *c, + git_zstream *stream, const int flush, + unsigned char *in0, const int fd, + unsigned char *compressed, + const size_t compressed_len) +{ + int ret; + + ret = git_deflate(stream, flush ? Z_FINISH : 0); + the_hash_algo->update_fn(c, in0, stream->next_in - in0); + if (write_buffer(fd, compressed, stream->next_out - compressed) < 0) + die(_("unable to write loose object file")); + stream->next_out = compressed; + stream->avail_out = compressed_len; + + return ret; +} + +/** + * Common steps for loose object writers to end writing loose objects: + * + * - End the compression of zlib stream. + * - Get the calculated oid to "oid". 
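These common start/write/end helpers also back the new stream_loose_object() entry point added later in this file and declared in object-store.h together with struct input_stream. A hedged caller sketch follows; the buf_source provider and write_streamed_blob wrapper are hypothetical, feeding an in-memory buffer in fixed-size slices.

struct buf_source {
        const char *buf;
        size_t len, pos;
};

static const void *buf_read(struct input_stream *in, unsigned long *readlen)
{
        struct buf_source *src = in->data;
        size_t n = src->len - src->pos;

        if (n > 4096)
                n = 4096;
        src->pos += n;
        in->is_finished = src->pos == src->len;
        *readlen = n;
        return src->buf + src->pos - n;
}

/* Hypothetical: stream "len" bytes at "buf" into a loose blob. */
static int write_streamed_blob(const char *buf, size_t len, struct object_id *oid)
{
        struct buf_source src = { buf, len, 0 };
        struct input_stream in = { buf_read, &src, 0 };

        return stream_loose_object(&in, len, oid);
}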
+ */ +static int end_loose_object_common(git_hash_ctx *c, git_zstream *stream, + struct object_id *oid) +{ + int ret; + + ret = git_deflate_end_gently(stream); + if (ret != Z_OK) + return ret; + the_hash_algo->final_oid_fn(oid, c); + + return Z_OK; +} + static int write_loose_object(const struct object_id *oid, char *hdr, int hdrlen, const void *buf, unsigned long len, time_t mtime, unsigned flags) @@ -1968,50 +2058,29 @@ static int write_loose_object(const struct object_id *oid, char *hdr, loose_object_path(the_repository, &filename, oid); - fd = create_tmpfile(&tmp_file, filename.buf); - if (fd < 0) { - if (flags & HASH_SILENT) - return -1; - else if (errno == EACCES) - return error(_("insufficient permission for adding an object to repository database %s"), get_object_directory()); - else - return error_errno(_("unable to create temporary file")); - } - - /* Set it up */ - git_deflate_init(&stream, zlib_compression_level); - stream.next_out = compressed; - stream.avail_out = sizeof(compressed); - the_hash_algo->init_fn(&c); - - /* First header.. */ - stream.next_in = (unsigned char *)hdr; - stream.avail_in = hdrlen; - while (git_deflate(&stream, 0) == Z_OK) - ; /* nothing */ - the_hash_algo->update_fn(&c, hdr, hdrlen); + fd = start_loose_object_common(&tmp_file, filename.buf, flags, + &stream, compressed, sizeof(compressed), + &c, hdr, hdrlen); + if (fd < 0) + return -1; /* Then the data itself.. */ stream.next_in = (void *)buf; stream.avail_in = len; do { unsigned char *in0 = stream.next_in; - ret = git_deflate(&stream, Z_FINISH); - the_hash_algo->update_fn(&c, in0, stream.next_in - in0); - if (write_buffer(fd, compressed, stream.next_out - compressed) < 0) - die(_("unable to write loose object file")); - stream.next_out = compressed; - stream.avail_out = sizeof(compressed); + + ret = write_loose_object_common(&c, &stream, 1, in0, fd, + compressed, sizeof(compressed)); } while (ret == Z_OK); if (ret != Z_STREAM_END) die(_("unable to deflate new object %s (%d)"), oid_to_hex(oid), ret); - ret = git_deflate_end_gently(&stream); + ret = end_loose_object_common(&c, &stream, ¶no_oid); if (ret != Z_OK) die(_("deflateEnd on object %s failed (%d)"), oid_to_hex(oid), ret); - the_hash_algo->final_oid_fn(¶no_oid, &c); if (!oideq(oid, ¶no_oid)) die(_("confused by unstable object source data for %s"), oid_to_hex(oid)); @@ -2050,6 +2119,110 @@ static int freshen_packed_object(const struct object_id *oid) return 1; } +int stream_loose_object(struct input_stream *in_stream, size_t len, + struct object_id *oid) +{ + int fd, ret, err = 0, flush = 0; + unsigned char compressed[4096]; + git_zstream stream; + git_hash_ctx c; + struct strbuf tmp_file = STRBUF_INIT; + struct strbuf filename = STRBUF_INIT; + int dirlen; + char hdr[MAX_HEADER_LEN]; + int hdrlen; + + if (batch_fsync_enabled(FSYNC_COMPONENT_LOOSE_OBJECT)) + prepare_loose_object_bulk_checkin(); + + /* Since oid is not determined, save tmp file to odb path. */ + strbuf_addf(&filename, "%s/", get_object_directory()); + hdrlen = format_object_header(hdr, sizeof(hdr), OBJ_BLOB, len); + + /* + * Common steps for write_loose_object and stream_loose_object to + * start writing loose objects: + * + * - Create tmpfile for the loose object. + * - Setup zlib stream for compression. + * - Start to feed header to zlib stream. + */ + fd = start_loose_object_common(&tmp_file, filename.buf, 0, + &stream, compressed, sizeof(compressed), + &c, hdr, hdrlen); + if (fd < 0) { + err = -1; + goto cleanup; + } + + /* Then the data itself.. 
*/ + do { + unsigned char *in0 = stream.next_in; + + if (!stream.avail_in && !in_stream->is_finished) { + const void *in = in_stream->read(in_stream, &stream.avail_in); + stream.next_in = (void *)in; + in0 = (unsigned char *)in; + /* All data has been read. */ + if (in_stream->is_finished) + flush = 1; + } + ret = write_loose_object_common(&c, &stream, flush, in0, fd, + compressed, sizeof(compressed)); + /* + * Unlike write_loose_object(), we do not have the entire + * buffer. If we get Z_BUF_ERROR due to too few input bytes, + * then we'll replenish them in the next input_stream->read() + * call when we loop. + */ + } while (ret == Z_OK || ret == Z_BUF_ERROR); + + if (stream.total_in != len + hdrlen) + die(_("write stream object %ld != %"PRIuMAX), stream.total_in, + (uintmax_t)len + hdrlen); + + /* + * Common steps for write_loose_object and stream_loose_object to + * end writing loose oject: + * + * - End the compression of zlib stream. + * - Get the calculated oid. + */ + if (ret != Z_STREAM_END) + die(_("unable to stream deflate new object (%d)"), ret); + ret = end_loose_object_common(&c, &stream, oid); + if (ret != Z_OK) + die(_("deflateEnd on stream object failed (%d)"), ret); + close_loose_object(fd, tmp_file.buf); + + if (freshen_packed_object(oid) || freshen_loose_object(oid)) { + unlink_or_warn(tmp_file.buf); + goto cleanup; + } + + loose_object_path(the_repository, &filename, oid); + + /* We finally know the object path, and create the missing dir. */ + dirlen = directory_size(filename.buf); + if (dirlen) { + struct strbuf dir = STRBUF_INIT; + strbuf_add(&dir, filename.buf, dirlen); + + if (mkdir_in_gitdir(dir.buf) && errno != EEXIST) { + err = error_errno(_("unable to create directory %s"), dir.buf); + strbuf_release(&dir); + goto cleanup; + } + strbuf_release(&dir); + } + + err = finalize_object_file(tmp_file.buf, filename.buf); +cleanup: + strbuf_release(&tmp_file); + strbuf_release(&filename); + return err; +} + int write_object_file_flags(const void *buf, unsigned long len, enum object_type type, struct object_id *oid, unsigned flags) diff --git a/object-store.h b/object-store.h index 539ea43904..5222ee5460 100644 --- a/object-store.h +++ b/object-store.h @@ -46,6 +46,12 @@ struct object_directory { char *path; }; +struct input_stream { + const void *(*read)(struct input_stream *, unsigned long *len); + void *data; + int is_finished; +}; + KHASH_INIT(odb_path_map, const char * /* key: odb_path */, struct object_directory *, 1, fspathhash, fspatheq) @@ -269,6 +275,8 @@ static inline int write_object_file(const void *buf, unsigned long len, int write_object_file_literally(const void *buf, unsigned long len, const char *type, struct object_id *oid, unsigned flags); +int stream_loose_object(struct input_stream *in_stream, size_t len, + struct object_id *oid); /* * Add an object file to the in-memory object store, without writing it @@ -59,7 +59,7 @@ struct object_array { /* * object flag allocation: - * revision.h: 0---------10 15 23------26 + * revision.h: 0---------10 15 23------27 * fetch-pack.c: 01 67 * negotiator/default.c: 2--5 * walker.c: 0-2 diff --git a/pack-bitmap-write.c b/pack-bitmap-write.c index c43375bd34..4fcfaed428 100644 --- a/pack-bitmap-write.c +++ b/pack-bitmap-write.c @@ -683,7 +683,7 @@ static void write_hash_cache(struct hashfile *f, } } -void bitmap_writer_set_checksum(unsigned char *sha1) +void bitmap_writer_set_checksum(const unsigned char *sha1) { hashcpy(writer.pack_checksum, sha1); } diff --git a/pack-bitmap.c b/pack-bitmap.c index 
36134222d7..ef580be9e3 100644 --- a/pack-bitmap.c +++ b/pack-bitmap.c @@ -1,5 +1,6 @@ #include "cache.h" #include "commit.h" +#include "strbuf.h" #include "tag.h" #include "diff.h" #include "revision.h" @@ -138,7 +139,7 @@ static struct ewah_bitmap *read_bitmap_1(struct bitmap_index *index) index->map_size - index->map_pos); if (bitmap_size < 0) { - error("Failed to load bitmap index (corrupted?)"); + error(_("failed to load bitmap index (corrupted?)")); ewah_pool_free(b); return NULL; } @@ -160,14 +161,14 @@ static int load_bitmap_header(struct bitmap_index *index) size_t header_size = sizeof(*header) - GIT_MAX_RAWSZ + the_hash_algo->rawsz; if (index->map_size < header_size + the_hash_algo->rawsz) - return error("Corrupted bitmap index (too small)"); + return error(_("corrupted bitmap index (too small)")); if (memcmp(header->magic, BITMAP_IDX_SIGNATURE, sizeof(BITMAP_IDX_SIGNATURE)) != 0) - return error("Corrupted bitmap index file (wrong header)"); + return error(_("corrupted bitmap index file (wrong header)")); index->version = ntohs(header->version); if (index->version != 1) - return error("Unsupported version for bitmap index file (%d)", index->version); + return error(_("unsupported version '%d' for bitmap index file"), index->version); /* Parse known bitmap format options */ { @@ -176,12 +177,12 @@ static int load_bitmap_header(struct bitmap_index *index) unsigned char *index_end = index->map + index->map_size - the_hash_algo->rawsz; if ((flags & BITMAP_OPT_FULL_DAG) == 0) - return error("Unsupported options for bitmap index file " + BUG("unsupported options for bitmap index file " "(Git requires BITMAP_OPT_FULL_DAG)"); if (flags & BITMAP_OPT_HASH_CACHE) { if (cache_size > index_end - index->map - header_size) - return error("corrupted bitmap index file (too short to fit hash cache)"); + return error(_("corrupted bitmap index file (too short to fit hash cache)")); index->hashes = (void *)(index_end - cache_size); index_end -= cache_size; } @@ -215,7 +216,7 @@ static struct stored_bitmap *store_bitmap(struct bitmap_index *index, * because the SHA1 already existed on the map. 
this is bad, there * shouldn't be duplicated commits in the index */ if (ret == 0) { - error("Duplicate entry in bitmap index: %s", oid_to_hex(oid)); + error(_("duplicate entry in bitmap index: '%s'"), oid_to_hex(oid)); return NULL; } @@ -259,14 +260,14 @@ static int load_bitmap_entries_v1(struct bitmap_index *index) struct object_id oid; if (index->map_size - index->map_pos < 6) - return error("corrupt ewah bitmap: truncated header for entry %d", i); + return error(_("corrupt ewah bitmap: truncated header for entry %d"), i); commit_idx_pos = read_be32(index->map, &index->map_pos); xor_offset = read_u8(index->map, &index->map_pos); flags = read_u8(index->map, &index->map_pos); if (nth_bitmap_object_oid(index, &oid, commit_idx_pos) < 0) - return error("corrupt ewah bitmap: commit index %u out of range", + return error(_("corrupt ewah bitmap: commit index %u out of range"), (unsigned)commit_idx_pos); bitmap = read_bitmap_1(index); @@ -274,13 +275,13 @@ static int load_bitmap_entries_v1(struct bitmap_index *index) return -1; if (xor_offset > MAX_XOR_OFFSET || xor_offset > i) - return error("Corrupted bitmap pack index"); + return error(_("corrupted bitmap pack index")); if (xor_offset > 0) { xor_bitmap = recent_bitmaps[(i - xor_offset) % MAX_XOR_OFFSET]; if (!xor_bitmap) - return error("Invalid XOR offset in bitmap pack index"); + return error(_("invalid XOR offset in bitmap pack index")); } recent_bitmaps[i % MAX_XOR_OFFSET] = store_bitmap( @@ -313,17 +314,21 @@ static int open_midx_bitmap_1(struct bitmap_index *bitmap_git, struct multi_pack_index *midx) { struct stat st; - char *idx_name = midx_bitmap_filename(midx); - int fd = git_open(idx_name); + char *bitmap_name = midx_bitmap_filename(midx); + int fd = git_open(bitmap_name); uint32_t i; struct packed_git *preferred; - free(idx_name); - - if (fd < 0) + if (fd < 0) { + if (errno != ENOENT) + warning_errno("cannot open '%s'", bitmap_name); + free(bitmap_name); return -1; + } + free(bitmap_name); if (fstat(fd, &st)) { + error_errno(_("cannot fstat bitmap file")); close(fd); return -1; } @@ -332,7 +337,7 @@ static int open_midx_bitmap_1(struct bitmap_index *bitmap_git, struct strbuf buf = STRBUF_INIT; get_midx_filename(&buf, midx->object_dir); /* ignore extra bitmap file; we can only handle one */ - warning("ignoring extra bitmap file: %s", buf.buf); + warning(_("ignoring extra bitmap file: '%s'"), buf.buf); close(fd); strbuf_release(&buf); return -1; @@ -348,8 +353,10 @@ static int open_midx_bitmap_1(struct bitmap_index *bitmap_git, if (load_bitmap_header(bitmap_git) < 0) goto cleanup; - if (!hasheq(get_midx_checksum(bitmap_git->midx), bitmap_git->checksum)) + if (!hasheq(get_midx_checksum(bitmap_git->midx), bitmap_git->checksum)) { + error(_("checksum doesn't match in MIDX and bitmap")); goto cleanup; + } if (load_midx_revindex(bitmap_git->midx) < 0) { warning(_("multi-pack bitmap is missing required reverse index")); @@ -384,26 +391,31 @@ static int open_pack_bitmap_1(struct bitmap_index *bitmap_git, struct packed_git { int fd; struct stat st; - char *idx_name; + char *bitmap_name; if (open_pack_index(packfile)) return -1; - idx_name = pack_bitmap_filename(packfile); - fd = git_open(idx_name); - free(idx_name); + bitmap_name = pack_bitmap_filename(packfile); + fd = git_open(bitmap_name); - if (fd < 0) + if (fd < 0) { + if (errno != ENOENT) + warning_errno("cannot open '%s'", bitmap_name); + free(bitmap_name); return -1; + } + free(bitmap_name); if (fstat(fd, &st)) { + error_errno(_("cannot fstat bitmap file")); close(fd); return -1; } if 
(bitmap_git->pack || bitmap_git->midx) { /* ignore extra bitmap file; we can only handle one */ - warning("ignoring extra bitmap file: %s", packfile->pack_name); + warning(_("ignoring extra bitmap file: '%s'"), packfile->pack_name); close(fd); return -1; } @@ -508,15 +520,16 @@ static int open_pack_bitmap(struct repository *r, static int open_midx_bitmap(struct repository *r, struct bitmap_index *bitmap_git) { + int ret = -1; struct multi_pack_index *midx; assert(!bitmap_git->map); for (midx = get_multi_pack_index(r); midx; midx = midx->next) { if (!open_midx_bitmap_1(bitmap_git, midx)) - return 0; + ret = 0; } - return -1; + return ret; } static int open_bitmap(struct repository *r, @@ -831,7 +844,7 @@ static struct bitmap *find_objects(struct bitmap_index *bitmap_git, revs->include_check_data = &incdata; if (prepare_revision_walk(revs)) - die("revision walk setup failed"); + die(_("revision walk setup failed")); show_data.bitmap_git = bitmap_git; show_data.base = base; @@ -1640,15 +1653,15 @@ static void test_bitmap_type(struct bitmap_test_data *tdata, } if (bitmap_type == OBJ_NONE) - die("object %s not found in type bitmaps", + die(_("object '%s' not found in type bitmaps"), oid_to_hex(&obj->oid)); if (bitmaps_nr > 1) - die("object %s does not have a unique type", + die(_("object '%s' does not have a unique type"), oid_to_hex(&obj->oid)); if (bitmap_type != obj->type) - die("object %s: real type %s, expected: %s", + die(_("object '%s': real type '%s', expected: '%s'"), oid_to_hex(&obj->oid), type_name(obj->type), type_name(bitmap_type)); @@ -1662,7 +1675,7 @@ static void test_show_object(struct object *object, const char *name, bitmap_pos = bitmap_position(tdata->bitmap_git, &object->oid); if (bitmap_pos < 0) - die("Object not in bitmap: %s\n", oid_to_hex(&object->oid)); + die(_("object not in bitmap: '%s'"), oid_to_hex(&object->oid)); test_bitmap_type(tdata, object, bitmap_pos); bitmap_set(tdata->base, bitmap_pos); @@ -1677,7 +1690,7 @@ static void test_show_commit(struct commit *commit, void *data) bitmap_pos = bitmap_position(tdata->bitmap_git, &commit->object.oid); if (bitmap_pos < 0) - die("Object not in bitmap: %s\n", oid_to_hex(&commit->object.oid)); + die(_("object not in bitmap: '%s'"), oid_to_hex(&commit->object.oid)); test_bitmap_type(tdata, &commit->object, bitmap_pos); bitmap_set(tdata->base, bitmap_pos); @@ -1694,26 +1707,26 @@ void test_bitmap_walk(struct rev_info *revs) struct ewah_bitmap *bm; if (!(bitmap_git = prepare_bitmap_git(revs->repo))) - die("failed to load bitmap indexes"); + die(_("failed to load bitmap indexes")); if (revs->pending.nr != 1) - die("you must specify exactly one commit to test"); + die(_("you must specify exactly one commit to test")); - fprintf(stderr, "Bitmap v%d test (%d entries loaded)\n", + fprintf_ln(stderr, "Bitmap v%d test (%d entries loaded)", bitmap_git->version, bitmap_git->entry_count); root = revs->pending.objects[0].item; bm = bitmap_for_commit(bitmap_git, (struct commit *)root); if (bm) { - fprintf(stderr, "Found bitmap for %s. %d bits / %08x checksum\n", + fprintf_ln(stderr, "Found bitmap for '%s'. 
%d bits / %08x checksum", oid_to_hex(&root->oid), (int)bm->bit_size, ewah_checksum(bm)); result = ewah_to_bitmap(bm); } if (!result) - die("Commit %s doesn't have an indexed bitmap", oid_to_hex(&root->oid)); + die(_("commit '%s' doesn't have an indexed bitmap"), oid_to_hex(&root->oid)); revs->tag_objects = 1; revs->tree_objects = 1; @@ -1722,7 +1735,7 @@ void test_bitmap_walk(struct rev_info *revs) result_popcnt = bitmap_popcount(result); if (prepare_revision_walk(revs)) - die("revision walk setup failed"); + die(_("revision walk setup failed")); tdata.bitmap_git = bitmap_git; tdata.base = bitmap_new(); @@ -1738,9 +1751,9 @@ void test_bitmap_walk(struct rev_info *revs) stop_progress(&tdata.prg); if (bitmap_equals(result, tdata.base)) - fprintf(stderr, "OK!\n"); + fprintf_ln(stderr, "OK!"); else - die("mismatch in bitmap results"); + die(_("mismatch in bitmap results")); bitmap_free(result); bitmap_free(tdata.base); @@ -1758,10 +1771,10 @@ int test_bitmap_commits(struct repository *r) MAYBE_UNUSED void *value; if (!bitmap_git) - die("failed to load bitmap indexes"); + die(_("failed to load bitmap indexes")); kh_foreach(bitmap_git->bitmaps, oid, value, { - printf("%s\n", oid_to_hex(&oid)); + printf_ln("%s", oid_to_hex(&oid)); }); free_bitmap_index(bitmap_git); @@ -1786,7 +1799,7 @@ int test_bitmap_hashes(struct repository *r) nth_bitmap_object_oid(bitmap_git, &oid, index_pos); - printf("%s %"PRIu32"\n", + printf_ln("%s %"PRIu32"", oid_to_hex(&oid), get_be32(bitmap_git->hashes + index_pos)); } @@ -1948,7 +1961,7 @@ static off_t get_disk_usage_for_type(struct bitmap_index *bitmap_git, struct object_id oid; nth_midxed_object_oid(&oid, bitmap_git->midx, midx_pos); - die(_("could not find %s in pack %s at offset %"PRIuMAX), + die(_("could not find '%s' in pack '%s' at offset %"PRIuMAX), oid_to_hex(&oid), pack->pack_name, (uintmax_t)offset); @@ -1984,7 +1997,7 @@ static off_t get_disk_usage_for_extended(struct bitmap_index *bitmap_git) continue; if (oid_object_info_extended(the_repository, &obj->oid, &oi, 0) < 0) - die(_("unable to get disk usage of %s"), + die(_("unable to get disk usage of '%s'"), oid_to_hex(&obj->oid)); total += object_size; diff --git a/pack-bitmap.h b/pack-bitmap.h index 3d3ddd7734..f3a57ca065 100644 --- a/pack-bitmap.h +++ b/pack-bitmap.h @@ -75,7 +75,7 @@ int bitmap_has_oid_in_uninteresting(struct bitmap_index *, const struct object_i off_t get_disk_usage_from_bitmap(struct bitmap_index *, struct rev_info *); void bitmap_writer_show_progress(int show); -void bitmap_writer_set_checksum(unsigned char *sha1); +void bitmap_writer_set_checksum(const unsigned char *sha1); void bitmap_writer_build_type_index(struct packing_data *to_pack, struct pack_idx_entry **index, uint32_t index_nr); diff --git a/pack-revindex.h b/pack-revindex.h index 74f4eae668..4974e75eb4 100644 --- a/pack-revindex.h +++ b/pack-revindex.h @@ -22,7 +22,7 @@ * * - pack position refers to an object's position within a non-existent pack * described by the MIDX. The pack structure is described in - * Documentation/technical/pack-format.txt. + * gitformat-pack(5). 
* * It is effectively a concatanation of all packs in the MIDX (ordered by * their numeric ID within the MIDX) in their original order within each diff --git a/packfile.c b/packfile.c index e5b1d0ed76..a41887c944 100644 --- a/packfile.c +++ b/packfile.c @@ -941,20 +941,10 @@ unsigned long repo_approximate_object_count(struct repository *r) return r->objects->approximate_object_count; } -static void *get_next_packed_git(const void *p) -{ - return ((const struct packed_git *)p)->next; -} - -static void set_next_packed_git(void *p, void *next) -{ - ((struct packed_git *)p)->next = next; -} +DEFINE_LIST_SORT(static, sort_packs, struct packed_git, next); -static int sort_pack(const void *a_, const void *b_) +static int sort_pack(const struct packed_git *a, const struct packed_git *b) { - const struct packed_git *a = a_; - const struct packed_git *b = b_; int st; /* @@ -981,9 +971,7 @@ static int sort_pack(const void *a_, const void *b_) static void rearrange_packed_git(struct repository *r) { - r->objects->packed_git = llist_mergesort( - r->objects->packed_git, get_next_packed_git, - set_next_packed_git, sort_pack); + sort_packs(&r->objects->packed_git, sort_pack); } static void prepare_packed_git_mru(struct repository *r) @@ -2229,7 +2217,17 @@ static int add_promisor_object(const struct object_id *oid, void *set_) { struct oidset *set = set_; - struct object *obj = parse_object(the_repository, oid); + struct object *obj; + int we_parsed_object; + + obj = lookup_object(the_repository, oid); + if (obj && obj->parsed) { + we_parsed_object = 0; + } else { + we_parsed_object = 1; + obj = parse_object(the_repository, oid); + } + if (!obj) return 1; @@ -2251,7 +2249,8 @@ static int add_promisor_object(const struct object_id *oid, return 0; while (tree_entry_gently(&desc, &entry)) oidset_insert(set, &entry.oid); - free_tree_buffer(tree); + if (we_parsed_object) + free_tree_buffer(tree); } else if (obj->type == OBJ_COMMIT) { struct commit *commit = (struct commit *) obj; struct commit_list *parents = commit->parents; @@ -2275,7 +2274,8 @@ int is_promisor_object(const struct object_id *oid) if (has_promisor_remote()) { for_each_packed_object(add_promisor_object, &promisor_objects, - FOR_EACH_OBJECT_PROMISOR_ONLY); + FOR_EACH_OBJECT_PROMISOR_ONLY | + FOR_EACH_OBJECT_PACK_ORDER); } promisor_objects_prepared = 1; } diff --git a/pathspec.c b/pathspec.c index 84ad9c73cf..46e77a85fe 100644 --- a/pathspec.c +++ b/pathspec.c @@ -759,3 +759,92 @@ int match_pathspec_attrs(struct index_state *istate, return 1; } + +int pathspec_needs_expanded_index(struct index_state *istate, + const struct pathspec *pathspec) +{ + unsigned int i, pos; + int res = 0; + char *skip_worktree_seen = NULL; + + /* + * If index is not sparse, no index expansion is needed. + */ + if (!istate->sparse_index) + return 0; + + /* + * When using a magic pathspec, assume for the sake of simplicity that + * the index needs to be expanded to match all matchable files. + */ + if (pathspec->magic) + return 1; + + for (i = 0; i < pathspec->nr; i++) { + struct pathspec_item item = pathspec->items[i]; + + /* + * If the pathspec item has a wildcard, the index should be expanded + * if the pathspec has the possibility of matching a subset of entries inside + * of a sparse directory (but not the entire directory). 
+ * + * If the pathspec item is a literal path, the index only needs to be expanded + * if a) the pathspec isn't in the sparse checkout cone (to make sure we don't + * expand for in-cone files) and b) it doesn't match any sparse directories + * (since we can reset whole sparse directories without expanding them). + */ + if (item.nowildcard_len < item.len) { + /* + * Special case: if the pattern is a path inside the cone + * followed by only wildcards, the pattern cannot match + * partial sparse directories, so we know we don't need to + * expand the index. + * + * Examples: + * - in-cone/foo***: doesn't need expanded index + * - not-in-cone/bar*: may need expanded index + * - **.c: may need expanded index + */ + if (strspn(item.original + item.nowildcard_len, "*") == item.len - item.nowildcard_len && + path_in_cone_mode_sparse_checkout(item.original, istate)) + continue; + + for (pos = 0; pos < istate->cache_nr; pos++) { + struct cache_entry *ce = istate->cache[pos]; + + if (!S_ISSPARSEDIR(ce->ce_mode)) + continue; + + /* + * If the pre-wildcard length is longer than the sparse + * directory name and the sparse directory is the first + * component of the pathspec, need to expand the index. + */ + if (item.nowildcard_len > ce_namelen(ce) && + !strncmp(item.original, ce->name, ce_namelen(ce))) { + res = 1; + break; + } + + /* + * If the pre-wildcard length is shorter than the sparse + * directory and the pathspec does not match the whole + * directory, need to expand the index. + */ + if (!strncmp(item.original, ce->name, item.nowildcard_len) && + wildmatch(item.original, ce->name, 0)) { + res = 1; + break; + } + } + } else if (!path_in_cone_mode_sparse_checkout(item.original, istate) && + !matches_skip_worktree(pathspec, i, &skip_worktree_seen)) + res = 1; + + if (res > 0) + break; + } + + free(skip_worktree_seen); + return res; +} diff --git a/pathspec.h b/pathspec.h index 402ebb8080..41f6adfbb4 100644 --- a/pathspec.h +++ b/pathspec.h @@ -171,4 +171,16 @@ int match_pathspec_attrs(struct index_state *istate, const char *name, int namelen, const struct pathspec_item *item); +/* + * Determine whether a pathspec will match only entire index entries (non-sparse + * files and/or entire sparse directories). If the pathspec has the potential to + * match partial contents of a sparse directory, return 1 to indicate the index + * should be expanded to match the appropriate index entries. + * + * For the sake of simplicity, always return 1 if using a more complex "magic" + * pathspec. 
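A short sketch of the intended calling pattern, assuming the existing ensure_full_index() helper from sparse-index.h (the wrapper function name is illustrative): expand a sparse index only when the pathspec could match inside a sparse directory entry, and otherwise keep the sparse index and its performance benefit.

#include "cache.h"
#include "pathspec.h"
#include "sparse-index.h"

static void expand_index_if_needed(struct index_state *istate,
				   const struct pathspec *pathspec)
{
	/* Only fall back to a full index for partial sparse-directory matches. */
	if (pathspec_needs_expanded_index(istate, pathspec))
		ensure_full_index(istate);
}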
+ */ +int pathspec_needs_expanded_index(struct index_state *istate, + const struct pathspec *pathspec); + #endif /* PATHSPEC_H */ diff --git a/pkt-line.c b/pkt-line.c index 8e43c2def4..ce4e73b683 100644 --- a/pkt-line.c +++ b/pkt-line.c @@ -309,7 +309,8 @@ int write_packetized_from_fd_no_flush(int fd_in, int fd_out) return err; } -int write_packetized_from_buf_no_flush(const char *src_in, size_t len, int fd_out) +int write_packetized_from_buf_no_flush_count(const char *src_in, size_t len, + int fd_out, int *packet_counter) { int err = 0; size_t bytes_written = 0; @@ -324,6 +325,8 @@ int write_packetized_from_buf_no_flush(const char *src_in, size_t len, int fd_ou break; err = packet_write_gently(fd_out, src_in + bytes_written, bytes_to_write); bytes_written += bytes_to_write; + if (packet_counter) + (*packet_counter)++; } return err; } diff --git a/pkt-line.h b/pkt-line.h index 1f623de60a..79c538b99e 100644 --- a/pkt-line.h +++ b/pkt-line.h @@ -32,7 +32,13 @@ void packet_buf_write(struct strbuf *buf, const char *fmt, ...) __attribute__((f int packet_flush_gently(int fd); int packet_write_fmt_gently(int fd, const char *fmt, ...) __attribute__((format (printf, 2, 3))); int write_packetized_from_fd_no_flush(int fd_in, int fd_out); -int write_packetized_from_buf_no_flush(const char *src_in, size_t len, int fd_out); +int write_packetized_from_buf_no_flush_count(const char *src_in, size_t len, + int fd_out, int *packet_counter); +static inline int write_packetized_from_buf_no_flush(const char *src_in, + size_t len, int fd_out) +{ + return write_packetized_from_buf_no_flush_count(src_in, len, fd_out, NULL); +} /* * Stdio versions of packet_write functions. When mixing these with fd diff --git a/preload-index.c b/preload-index.c index e5529a5863..100f7a374d 100644 --- a/preload-index.c +++ b/preload-index.c @@ -151,6 +151,12 @@ void preload_index(struct index_state *index, } stop_progress(&pd.progress); + if (pathspec) { + /* earlier we made deep copies for each thread to work with */ + for (i = 0; i < threads; i++) + clear_pathspec(&data[i].pathspec); + } + trace_performance_leave("preload index"); trace2_data_intmax("index", NULL, "preload/sum_lstat", t2_sum_lstat); @@ -1575,23 +1575,7 @@ static size_t format_commit_one(struct strbuf *sb, /* in UTF-8 */ strbuf_addstr(sb, c->signature_check.primary_key_fingerprint); break; case 'T': - switch (c->signature_check.trust_level) { - case TRUST_UNDEFINED: - strbuf_addstr(sb, "undefined"); - break; - case TRUST_NEVER: - strbuf_addstr(sb, "never"); - break; - case TRUST_MARGINAL: - strbuf_addstr(sb, "marginal"); - break; - case TRUST_FULLY: - strbuf_addstr(sb, "fully"); - break; - case TRUST_ULTIMATE: - strbuf_addstr(sb, "ultimate"); - break; - } + strbuf_addstr(sb, gpg_trust_level_to_str(c->signature_check.trust_level)); break; default: return 0; diff --git a/promisor-remote.c b/promisor-remote.c index 5b33f88bca..68f46f5ec7 100644 --- a/promisor-remote.c +++ b/promisor-remote.c @@ -146,7 +146,7 @@ static void promisor_remote_init(struct repository *r) if (r->promisor_remote_config) return; config = r->promisor_remote_config = - xcalloc(sizeof(*r->promisor_remote_config), 1); + xcalloc(1, sizeof(*r->promisor_remote_config)); config->promisors_tail = &config->promisors; repo_config(r, promisor_remote_config, config); diff --git a/rebase-interactive.c b/rebase-interactive.c index 87649d0c01..7407c59319 100644 --- a/rebase-interactive.c +++ b/rebase-interactive.c @@ -54,9 +54,12 @@ void append_todo_help(int command_count, "l, label <label> = label current 
HEAD with a name\n" "t, reset <label> = reset HEAD to a label\n" "m, merge [-C <commit> | -c <commit>] <label> [# <oneline>]\n" -". create a merge commit using the original merge commit's\n" -". message (or the oneline, if no original merge commit was\n" -". specified); use -c <commit> to reword the commit message\n" +" create a merge commit using the original merge commit's\n" +" message (or the oneline, if no original merge commit was\n" +" specified); use -c <commit> to reword the commit message\n" +"u, update-ref <ref> = track a placeholder for the <ref> to be updated\n" +" to this position in the new commits. The <ref> is\n" +" updated at the end of the rebase\n" "\n" "These lines can be re-ordered; they are executed from top to bottom.\n"); unsigned edit_todo = !(shortrevisions && shortonto); @@ -143,6 +146,12 @@ int edit_todo_list(struct repository *r, struct todo_list *todo_list, return -4; } + /* + * See if branches need to be added or removed from the update-refs + * file based on the new todo list. + */ + todo_list_filter_update_refs(r, new_todo); + return 0; } diff --git a/ref-filter.c b/ref-filter.c index d3c90e5dbe..bdf39fa761 100644 --- a/ref-filter.c +++ b/ref-filter.c @@ -2405,6 +2405,7 @@ static void reach_filter(struct ref_array *array, int filter_refs(struct ref_array *array, struct ref_filter *filter, unsigned int type) { struct ref_filter_cbdata ref_cbdata; + int save_commit_buffer_orig; int ret = 0; ref_cbdata.array = array; @@ -2412,6 +2413,9 @@ int filter_refs(struct ref_array *array, struct ref_filter *filter, unsigned int filter->kind = type & FILTER_REFS_KIND_MASK; + save_commit_buffer_orig = save_commit_buffer; + save_commit_buffer = 0; + init_contains_cache(&ref_cbdata.contains_cache); init_contains_cache(&ref_cbdata.no_contains_cache); @@ -2444,6 +2448,7 @@ int filter_refs(struct ref_array *array, struct ref_filter *filter, unsigned int reach_filter(array, filter->reachable_from, INCLUDE_REACHED); reach_filter(array, filter->unreachable_from, EXCLUDE_REACHED); + save_commit_buffer = save_commit_buffer_orig; return ret; } @@ -20,6 +20,7 @@ #include "repository.h" #include "sigchain.h" #include "date.h" +#include "commit.h" /* * List of all available backends @@ -56,6 +57,88 @@ static unsigned char refname_disposition[256] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 4, 4 }; +struct ref_namespace_info ref_namespace[] = { + [NAMESPACE_HEAD] = { + .ref = "HEAD", + .decoration = DECORATION_REF_HEAD, + .exact = 1, + }, + [NAMESPACE_BRANCHES] = { + .ref = "refs/heads/", + .decoration = DECORATION_REF_LOCAL, + }, + [NAMESPACE_TAGS] = { + .ref = "refs/tags/", + .decoration = DECORATION_REF_TAG, + }, + [NAMESPACE_REMOTE_REFS] = { + /* + * The default refspec for new remotes copies refs from + * refs/heads/ on the remote into refs/remotes/<remote>/. + * As such, "refs/remotes/" has special handling. + */ + .ref = "refs/remotes/", + .decoration = DECORATION_REF_REMOTE, + }, + [NAMESPACE_STASH] = { + /* + * The single ref "refs/stash" stores the latest stash. + * Older stashes can be found in the reflog. + */ + .ref = "refs/stash", + .exact = 1, + .decoration = DECORATION_REF_STASH, + }, + [NAMESPACE_REPLACE] = { + /* + * This namespace allows Git to act as if one object ID + * points to the content of another. Unlike the other + * ref namespaces, this one can be changed by the + * GIT_REPLACE_REF_BASE environment variable. This + * .namespace value will be overwritten in setup_git_env(). 
+ */ + .ref = "refs/replace/", + .decoration = DECORATION_GRAFTED, + }, + [NAMESPACE_NOTES] = { + /* + * The refs/notes/commit ref points to the tip of a + * parallel commit history that adds metadata to commits + * in the normal history. This ref can be overwritten + * by the core.notesRef config variable or the + * GIT_NOTES_REFS environment variable. + */ + .ref = "refs/notes/commit", + .exact = 1, + }, + [NAMESPACE_PREFETCH] = { + /* + * Prefetch refs are written by the background 'fetch' + * maintenance task. It allows faster foreground fetches + * by advertising these previously-downloaded tips without + * updating refs/remotes/ without user intervention. + */ + .ref = "refs/prefetch/", + }, + [NAMESPACE_REWRITTEN] = { + /* + * Rewritten refs are used by the 'label' command in the + * sequencer. These are particularly useful during an + * interactive rebase that uses the 'merge' command. + */ + .ref = "refs/rewritten/", + }, +}; + +void update_ref_namespace(enum ref_namespace namespace, char *ref) +{ + struct ref_namespace_info *info = &ref_namespace[namespace]; + if (info->ref_updated) + free(info->ref); + info->ref = ref; + info->ref_updated = 1; +} + /* * Try to read one refname component from the front of refname. * Return the length of the component found, or -1 if the component is @@ -455,11 +538,16 @@ void normalize_glob_ref(struct string_list_item *item, const char *prefix, if (*pattern == '/') BUG("pattern must not start with '/'"); - if (prefix) { + if (prefix) strbuf_addstr(&normalized_pattern, prefix); - } - else if (!starts_with(pattern, "refs/")) + else if (!starts_with(pattern, "refs/") && + strcmp(pattern, "HEAD")) strbuf_addstr(&normalized_pattern, "refs/"); + /* + * NEEDSWORK: Special case other symrefs such as REBASE_HEAD, + * MERGE_HEAD, etc. + */ + strbuf_addstr(&normalized_pattern, pattern); strbuf_strip_suffix(&normalized_pattern, "/"); @@ -1524,6 +1612,7 @@ int refs_for_each_fullref_in(struct ref_store *refs, const char *prefix, int for_each_replace_ref(struct repository *r, each_repo_ref_fn fn, void *cb_data) { + const char *git_replace_ref_base = ref_namespace[NAMESPACE_REPLACE].ref; return do_for_each_repo_ref(r, git_replace_ref_base, fn, strlen(git_replace_ref_base), DO_FOR_EACH_INCLUDE_BROKEN, cb_data); @@ -2,6 +2,7 @@ #define REFS_H #include "cache.h" +#include "commit.h" struct object_id; struct ref_store; @@ -930,4 +931,49 @@ struct ref_store *get_main_ref_store(struct repository *r); struct ref_store *get_submodule_ref_store(const char *submodule); struct ref_store *get_worktree_ref_store(const struct worktree *wt); +/* + * Some of the names specified by refs have special meaning to Git. + * Organize these namespaces in a comon 'ref_namespace' array for + * reference from multiple places in the codebase. + */ + +struct ref_namespace_info { + char *ref; + enum decoration_type decoration; + + /* + * If 'exact' is true, then we must match the 'ref' exactly. + * Otherwise, use a prefix match. + * + * 'ref_updated' is for internal use. It represents whether the + * 'ref' value was replaced from its original literal version. + */ + unsigned exact:1, + ref_updated:1; +}; + +enum ref_namespace { + NAMESPACE_HEAD, + NAMESPACE_BRANCHES, + NAMESPACE_TAGS, + NAMESPACE_REMOTE_REFS, + NAMESPACE_STASH, + NAMESPACE_REPLACE, + NAMESPACE_NOTES, + NAMESPACE_PREFETCH, + NAMESPACE_REWRITTEN, + + /* Must be last */ + NAMESPACE__COUNT +}; + +/* See refs.c for the contents of this array. 
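The 'exact' bit distinguishes single refs such as "refs/stash" from prefix namespaces. A sketch, not taken from this patch, of how a consumer could classify a fully qualified refname against this table (it assumes the enum decoration_type values already defined in commit.h and a hypothetical helper name):

#include "cache.h"
#include "commit.h"
#include "refs.h"

static enum decoration_type classify_ref(const char *refname)
{
	int i;

	for (i = 0; i < NAMESPACE__COUNT; i++) {
		const struct ref_namespace_info *info = &ref_namespace[i];

		/* Exact entries must match whole; others are prefixes. */
		if (info->exact ? !strcmp(refname, info->ref)
				: starts_with(refname, info->ref))
			return info->decoration;
	}
	return DECORATION_NONE;
}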
*/ +extern struct ref_namespace_info ref_namespace[NAMESPACE__COUNT]; + +/* + * Some ref namespaces can be modified by config values or environment + * variables. Modify a namespace as specified by its ref_namespace key. + */ +void update_ref_namespace(enum ref_namespace namespace, char *ref); + #endif /* REFS_H */ @@ -69,7 +69,7 @@ int valid_remote_name(const char *name); struct strvec; /* * Determine what <prefix> values to pass to the peer in ref-prefix lines - * (see Documentation/technical/protocol-v2.txt). + * (see linkgit:gitprotocol-v2[5]). */ void refspec_ref_prefixes(const struct refspec *rs, struct strvec *ref_prefixes); diff --git a/reftable/reader.c b/reftable/reader.c index 54b4025105..b4db23ce18 100644 --- a/reftable/reader.c +++ b/reftable/reader.c @@ -443,7 +443,7 @@ static int reader_start(struct reftable_reader *r, struct table_iter *ti, return reader_table_iter_at(r, ti, off, typ); } -static int reader_seek_linear(struct reftable_reader *r, struct table_iter *ti, +static int reader_seek_linear(struct table_iter *ti, struct reftable_record *want) { struct reftable_record rec = @@ -510,7 +510,7 @@ static int reader_seek_indexed(struct reftable_reader *r, if (err < 0) goto done; - err = reader_seek_linear(r, &index_iter, &want_index); + err = reader_seek_linear(&index_iter, &want_index); while (1) { err = table_iter_next(&index_iter, &index_result); table_iter_block_done(&index_iter); @@ -570,7 +570,7 @@ static int reader_seek_internal(struct reftable_reader *r, err = reader_start(r, &ti, reftable_record_type(rec), 0); if (err < 0) return err; - err = reader_seek_linear(r, &ti, rec); + err = reader_seek_linear(&ti, rec); if (err < 0) return err; else { diff --git a/remote-curl.c b/remote-curl.c index 67f178b112..b8758757ec 100644 --- a/remote-curl.c +++ b/remote-curl.c @@ -580,6 +580,7 @@ struct rpc_state { char *service_url; char *hdr_content_type; char *hdr_accept; + char *hdr_accept_language; char *protocol_header; char *buf; size_t alloc; @@ -607,6 +608,8 @@ struct rpc_state { unsigned flush_read_but_not_sent : 1; }; +#define RPC_STATE_INIT { 0 } + /* * Appends the result of reading from rpc->out to the string represented by * rpc->buf and rpc->len if there is enough space. Returns 1 if there was @@ -932,6 +935,10 @@ static int post_rpc(struct rpc_state *rpc, int stateless_connect, int flush_rece headers = curl_slist_append(headers, needs_100_continue ? 
"Expect: 100-continue" : "Expect:"); + /* Add Accept-Language header */ + if (rpc->hdr_accept_language) + headers = curl_slist_append(headers, rpc->hdr_accept_language); + /* Add the extra Git-Protocol header */ if (rpc->protocol_header) headers = curl_slist_append(headers, rpc->protocol_header); @@ -1080,6 +1087,8 @@ static int rpc_service(struct rpc_state *rpc, struct discovery *heads, strbuf_addf(&buf, "%s%s", url.buf, svc); rpc->service_url = strbuf_detach(&buf, NULL); + rpc->hdr_accept_language = xstrdup_or_null(http_get_accept_language_header()); + strbuf_addf(&buf, "Content-Type: application/x-%s-request", svc); rpc->hdr_content_type = strbuf_detach(&buf, NULL); @@ -1118,6 +1127,7 @@ static int rpc_service(struct rpc_state *rpc, struct discovery *heads, free(rpc->service_url); free(rpc->hdr_content_type); free(rpc->hdr_accept); + free(rpc->hdr_accept_language); free(rpc->protocol_header); free(rpc->buf); strbuf_release(&buf); @@ -1153,7 +1163,7 @@ static int fetch_dumb(int nr_heads, struct ref **to_fetch) static int fetch_git(struct discovery *heads, int nr_heads, struct ref **to_fetch) { - struct rpc_state rpc; + struct rpc_state rpc = RPC_STATE_INIT; struct strbuf preamble = STRBUF_INIT; int i, err; struct strvec args = STRVEC_INIT; @@ -1299,7 +1309,7 @@ static int push_dav(int nr_spec, const char **specs) static int push_git(struct discovery *heads, int nr_spec, const char **specs) { - struct rpc_state rpc; + struct rpc_state rpc = RPC_STATE_INIT; int i, err; struct strvec args; struct string_list_item *cas_option; @@ -1398,8 +1408,9 @@ free_specs: static int stateless_connect(const char *service_name) { struct discovery *discover; - struct rpc_state rpc; + struct rpc_state rpc = RPC_STATE_INIT; struct strbuf buf = STRBUF_INIT; + const char *accept_language; /* * Run the info/refs request and see if the server supports protocol @@ -1418,6 +1429,9 @@ static int stateless_connect(const char *service_name) printf("\n"); fflush(stdout); } + accept_language = http_get_accept_language_header(); + if (accept_language) + rpc.hdr_accept_language = xstrfmt("%s", accept_language); rpc.service_name = service_name; rpc.service_url = xstrfmt("%s%s", url.buf, rpc.service_name); @@ -1467,6 +1481,7 @@ static int stateless_connect(const char *service_name) free(rpc.service_url); free(rpc.hdr_content_type); free(rpc.hdr_accept); + free(rpc.hdr_accept_language); free(rpc.protocol_header); free(rpc.buf); strbuf_release(&buf); @@ -11,7 +11,6 @@ #include "dir.h" #include "tag.h" #include "string-list.h" -#include "mergesort.h" #include "strvec.h" #include "commit-reach.h" #include "advice.h" @@ -850,7 +849,7 @@ static int refspec_match(const struct refspec_item *refspec, return !strcmp(refspec->src, name); } -static int omit_name_by_refspec(const char *name, struct refspec *rs) +int omit_name_by_refspec(const char *name, struct refspec *rs) { int i; @@ -1082,27 +1081,6 @@ void free_refs(struct ref *ref) } } -int ref_compare_name(const void *va, const void *vb) -{ - const struct ref *a = va, *b = vb; - return strcmp(a->name, b->name); -} - -static void *ref_list_get_next(const void *a) -{ - return ((const struct ref *)a)->next; -} - -static void ref_list_set_next(void *a, void *next) -{ - ((struct ref *)a)->next = next; -} - -void sort_ref_list(struct ref **l, int (*cmp)(const void *, const void *)) -{ - *l = llist_mergesort(*l, ref_list_get_next, ref_list_set_next, cmp); -} - int count_refspec_match(const char *pattern, struct ref *refs, struct ref **matched_ref) @@ -2169,6 +2147,9 @@ static int 
stat_branch_pair(const char *branch_name, const char *base, struct object_id oid; struct commit *ours, *theirs; struct rev_info revs; + struct setup_revision_opt opt = { + .free_removed_argv_elements = 1, + }; struct strvec argv = STRVEC_INIT; /* Cannot stat if what we used to build on no longer exists */ @@ -2203,7 +2184,7 @@ static int stat_branch_pair(const char *branch_name, const char *base, strvec_push(&argv, "--"); repo_init_revisions(the_repository, &revs, NULL); - setup_revisions(argv.nr, argv.v, &revs, NULL); + setup_revisions(argv.nr, argv.v, &revs, &opt); if (prepare_revision_walk(&revs)) die(_("revision walk setup failed")); @@ -207,9 +207,7 @@ struct ref *find_ref_by_name(const struct ref *list, const char *name); struct ref *alloc_ref(const char *name); struct ref *copy_ref(const struct ref *ref); struct ref *copy_ref_list(const struct ref *ref); -void sort_ref_list(struct ref **, int (*cmp)(const void *, const void *)); int count_refspec_match(const char *, struct ref *refs, struct ref **matched_ref); -int ref_compare_name(const void *, const void *); int check_ref_type(const struct ref *ref, int flags); @@ -248,6 +246,12 @@ int resolve_remote_symref(struct ref *ref, struct ref *list); struct ref *ref_remove_duplicates(struct ref *ref_map); /* + * Check whether a name matches any negative refspec in rs. Returns 1 if the + * name matches at least one negative refspec, and 0 otherwise. + */ +int omit_name_by_refspec(const char *name, struct refspec *rs); + +/* * Remove all entries in the input list which match any negative refspec in * the refspec list. */ diff --git a/repo-settings.c b/repo-settings.c index 2dfcb2b654..43bc881bfc 100644 --- a/repo-settings.c +++ b/repo-settings.c @@ -11,6 +11,13 @@ static void repo_cfg_bool(struct repository *r, const char *key, int *dest, *dest = def; } +static void repo_cfg_int(struct repository *r, const char *key, int *dest, + int def) +{ + if (repo_config_get_int(r, key, dest)) + *dest = def; +} + void prepare_repo_settings(struct repository *r) { int experimental; @@ -42,11 +49,14 @@ void prepare_repo_settings(struct repository *r) r->settings.core_untracked_cache = UNTRACKED_CACHE_WRITE; } - /* Boolean config or default, does not cascade (simple) */ + /* Commit graph config or default, does not cascade (simple) */ repo_cfg_bool(r, "core.commitgraph", &r->settings.core_commit_graph, 1); + repo_cfg_int(r, "commitgraph.generationversion", &r->settings.commit_graph_generation_version, 2); repo_cfg_bool(r, "commitgraph.readchangedpaths", &r->settings.commit_graph_read_changed_paths, 1); repo_cfg_bool(r, "gc.writecommitgraph", &r->settings.gc_write_commit_graph, 1); repo_cfg_bool(r, "fetch.writecommitgraph", &r->settings.fetch_write_commit_graph, 0); + + /* Boolean config or default, does not cascade (simple) */ repo_cfg_bool(r, "pack.usesparse", &r->settings.pack_use_sparse, 1); repo_cfg_bool(r, "core.multipackindex", &r->settings.core_multi_pack_index, 1); repo_cfg_bool(r, "index.sparse", &r->settings.sparse_index, 0); diff --git a/repository.h b/repository.h index 6cc661e5a4..797f471cce 100644 --- a/repository.h +++ b/repository.h @@ -30,6 +30,7 @@ struct repo_settings { int initialized; int core_commit_graph; + int commit_graph_generation_version; int commit_graph_read_changed_paths; int gc_write_commit_graph; int fetch_write_commit_graph; diff --git a/revision.c b/revision.c index 0c6e26cd9c..ee702e498a 100644 --- a/revision.c +++ b/revision.c @@ -1105,7 +1105,7 @@ static int process_parents(struct rev_info *revs, struct commit 
*commit, struct commit_list **list, struct prio_queue *queue) { struct commit_list *parent = commit->parents; - unsigned left_flag; + unsigned pass_flags; if (commit->object.flags & ADDED) return 0; @@ -1160,7 +1160,7 @@ static int process_parents(struct rev_info *revs, struct commit *commit, if (revs->no_walk) return 0; - left_flag = (commit->object.flags & SYMMETRIC_LEFT); + pass_flags = (commit->object.flags & (SYMMETRIC_LEFT | ANCESTRY_PATH)); for (parent = commit->parents; parent; parent = parent->next) { struct commit *p = parent->item; @@ -1181,7 +1181,7 @@ static int process_parents(struct rev_info *revs, struct commit *commit, if (!*slot) *slot = *revision_sources_at(revs->sources, commit); } - p->object.flags |= left_flag; + p->object.flags |= pass_flags; if (!(p->object.flags & SEEN)) { p->object.flags |= (SEEN | NOT_USER_GIVEN); if (list) @@ -1304,13 +1304,24 @@ static int still_interesting(struct commit_list *src, timestamp_t date, int slop } /* - * "rev-list --ancestry-path A..B" computes commits that are ancestors - * of B but not ancestors of A but further limits the result to those - * that are descendants of A. This takes the list of bottom commits and - * the result of "A..B" without --ancestry-path, and limits the latter - * further to the ones that can reach one of the commits in "bottom". + * "rev-list --ancestry-path=C_0 [--ancestry-path=C_1 ...] A..B" + * computes commits that are ancestors of B but not ancestors of A but + * further limits the result to those that have any of C in their + * ancestry path (i.e. are either ancestors of any of C, descendants + * of any of C, or are any of C). If --ancestry-path is specified with + * no commit, we use all bottom commits for C. + * + * Before this function is called, ancestors of C will have already + * been marked with ANCESTRY_PATH previously. + * + * This takes the list of bottom commits and the result of "A..B" + * without --ancestry-path, and limits the latter further to the ones + * that have any of C in their ancestry path. Since the ancestors of C + * have already been marked (a prerequisite of this function), we just + * need to mark the descendants, then exclude any commit that does not + * have any of these marks. 
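A small worked example of the difference between the two forms (commit names are made up):

        x---y
             \
  A---B---C---M---N

Here "A..N" is {B, C, x, y, M, N}:

  --ancestry-path A..N    keeps B, C, M, N   (descendants of the bottom commit A)
  --ancestry-path=y A..N  keeps x, y, M, N   (x is an ancestor of y, M and N are
                                              descendants of y; B and C are neither,
                                              so they are dropped)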
*/ -static void limit_to_ancestry(struct commit_list *bottom, struct commit_list *list) +static void limit_to_ancestry(struct commit_list *bottoms, struct commit_list *list) { struct commit_list *p; struct commit_list *rlist = NULL; @@ -1323,7 +1334,7 @@ static void limit_to_ancestry(struct commit_list *bottom, struct commit_list *li for (p = list; p; p = p->next) commit_list_insert(p->item, &rlist); - for (p = bottom; p; p = p->next) + for (p = bottoms; p; p = p->next) p->item->object.flags |= TMP_MARK; /* @@ -1356,38 +1367,39 @@ static void limit_to_ancestry(struct commit_list *bottom, struct commit_list *li */ /* - * The ones that are not marked with TMP_MARK are uninteresting + * The ones that are not marked with either TMP_MARK or + * ANCESTRY_PATH are uninteresting */ for (p = list; p; p = p->next) { struct commit *c = p->item; - if (c->object.flags & TMP_MARK) + if (c->object.flags & (TMP_MARK | ANCESTRY_PATH)) continue; c->object.flags |= UNINTERESTING; } - /* We are done with the TMP_MARK */ + /* We are done with TMP_MARK and ANCESTRY_PATH */ for (p = list; p; p = p->next) - p->item->object.flags &= ~TMP_MARK; - for (p = bottom; p; p = p->next) - p->item->object.flags &= ~TMP_MARK; + p->item->object.flags &= ~(TMP_MARK | ANCESTRY_PATH); + for (p = bottoms; p; p = p->next) + p->item->object.flags &= ~(TMP_MARK | ANCESTRY_PATH); free_commit_list(rlist); } /* - * Before walking the history, keep the set of "negative" refs the - * caller has asked to exclude. + * Before walking the history, add the set of "negative" refs the + * caller has asked to exclude to the bottom list. * * This is used to compute "rev-list --ancestry-path A..B", as we need * to filter the result of "A..B" further to the ones that can actually * reach A. */ -static struct commit_list *collect_bottom_commits(struct commit_list *list) +static void collect_bottom_commits(struct commit_list *list, + struct commit_list **bottom) { - struct commit_list *elem, *bottom = NULL; + struct commit_list *elem; for (elem = list; elem; elem = elem->next) if (elem->item->object.flags & BOTTOM) - commit_list_insert(elem->item, &bottom); - return bottom; + commit_list_insert(elem->item, bottom); } /* Assumes either left_only or right_only is set */ @@ -1414,12 +1426,12 @@ static int limit_list(struct rev_info *revs) struct commit_list *original_list = revs->commits; struct commit_list *newlist = NULL; struct commit_list **p = &newlist; - struct commit_list *bottom = NULL; struct commit *interesting_cache = NULL; - if (revs->ancestry_path) { - bottom = collect_bottom_commits(original_list); - if (!bottom) + if (revs->ancestry_path_implicit_bottoms) { + collect_bottom_commits(original_list, + &revs->ancestry_path_bottoms); + if (!revs->ancestry_path_bottoms) die("--ancestry-path given but there are no bottom commits"); } @@ -1464,9 +1476,8 @@ static int limit_list(struct rev_info *revs) if (revs->left_only || revs->right_only) limit_left_right(newlist, revs); - if (bottom) - limit_to_ancestry(bottom, newlist); - free_commit_list(bottom); + if (revs->ancestry_path) + limit_to_ancestry(revs->ancestry_path_bottoms, newlist); /* * Check if any commits have become TREESAME by some of their parents @@ -2213,7 +2224,7 @@ static int handle_revision_opt(struct rev_info *revs, int argc, const char **arg const struct setup_revision_opt* opt) { const char *arg = argv[0]; - const char *optarg; + const char *optarg = NULL; int argcount; const unsigned hexsz = the_hash_algo->hexsz; @@ -2284,6 +2295,23 @@ static int handle_revision_opt(struct 
rev_info *revs, int argc, const char **arg revs->ancestry_path = 1; revs->simplify_history = 0; revs->limited = 1; + revs->ancestry_path_implicit_bottoms = 1; + } else if (skip_prefix(arg, "--ancestry-path=", &optarg)) { + struct commit *c; + struct object_id oid; + const char *msg = _("could not get commit for ancestry-path argument %s"); + + revs->ancestry_path = 1; + revs->simplify_history = 0; + revs->limited = 1; + + if (repo_get_oid_committish(revs->repo, optarg, &oid)) + return error(msg, optarg); + get_reference(revs, optarg, &oid, ANCESTRY_PATH); + c = lookup_commit_reference(revs->repo, &oid); + if (!c) + return error(msg, optarg); + commit_list_insert(c, &revs->ancestry_path_bottoms); } else if (!strcmp(arg, "-g") || !strcmp(arg, "--walk-reflogs")) { init_reflog_walk(&revs->reflog_info); } else if (!strcmp(arg, "--default")) { @@ -2784,6 +2812,8 @@ int setup_revisions(int argc, const char **argv, struct rev_info *revs, struct s const char *arg = argv[i]; if (strcmp(arg, "--")) continue; + if (opt && opt->free_removed_argv_elements) + free((char *)argv[i]); argv[i] = NULL; argc = i; if (argv[i + 1]) @@ -2991,6 +3021,7 @@ static void release_revisions_topo_walk_info(struct topo_walk_info *info); void release_revisions(struct rev_info *revs) { free_commit_list(revs->commits); + free_commit_list(revs->ancestry_path_bottoms); object_array_clear(&revs->pending); object_array_clear(&revs->boundary_commits); release_revisions_cmdline(&revs->cmdline); @@ -3791,51 +3822,6 @@ int rewrite_parents(struct rev_info *revs, struct commit *commit, return 0; } -static int commit_rewrite_person(struct strbuf *buf, const char *what, struct string_list *mailmap) -{ - char *person, *endp; - size_t len, namelen, maillen; - const char *name; - const char *mail; - struct ident_split ident; - - person = strstr(buf->buf, what); - if (!person) - return 0; - - person += strlen(what); - endp = strchr(person, '\n'); - if (!endp) - return 0; - - len = endp - person; - - if (split_ident_line(&ident, person, len)) - return 0; - - mail = ident.mail_begin; - maillen = ident.mail_end - ident.mail_begin; - name = ident.name_begin; - namelen = ident.name_end - ident.name_begin; - - if (map_user(mailmap, &mail, &maillen, &name, &namelen)) { - struct strbuf namemail = STRBUF_INIT; - - strbuf_addf(&namemail, "%.*s <%.*s>", - (int)namelen, name, (int)maillen, mail); - - strbuf_splice(buf, ident.name_begin - buf->buf, - ident.mail_end - ident.name_begin + 1, - namemail.buf, namemail.len); - - strbuf_release(&namemail); - - return 1; - } - - return 0; -} - static int commit_match(struct commit *commit, struct rev_info *opt) { int retval; @@ -3868,11 +3854,12 @@ static int commit_match(struct commit *commit, struct rev_info *opt) strbuf_addstr(&buf, message); if (opt->grep_filter.header_list && opt->mailmap) { + const char *commit_headers[] = { "author ", "committer ", NULL }; + if (!buf.len) strbuf_addstr(&buf, message); - commit_rewrite_person(&buf, "\nauthor ", opt->mailmap); - commit_rewrite_person(&buf, "\ncommitter ", opt->mailmap); + apply_mailmap_to_header(&buf, commit_headers, opt->mailmap); } /* Append "fake" message parts as needed */ diff --git a/revision.h b/revision.h index e576845cdd..61a9b1316b 100644 --- a/revision.h +++ b/revision.h @@ -48,6 +48,7 @@ */ #define NOT_USER_GIVEN (1u<<25) #define TRACK_LINEAR (1u<<26) +#define ANCESTRY_PATH (1u<<27) #define ALL_REV_FLAGS (((1u<<11)-1) | NOT_USER_GIVEN | TRACK_LINEAR | PULL_MERGE) #define DECORATE_SHORT_REFS 1 @@ -164,6 +165,13 @@ struct rev_info { 
cherry_mark:1, bisect:1, ancestry_path:1, + + /* True if --ancestry-path was specified without an + * argument. The bottom revisions are implicitly + * the arguments in this case. + */ + ancestry_path_implicit_bottoms:1, + first_parent_only:1, exclude_first_parent_only:1, line_level_traverse:1, @@ -306,6 +314,7 @@ struct rev_info { struct saved_parents *saved_parents_slab; struct commit_list *previous_parents; + struct commit_list *ancestry_path_bottoms; const char *break_bar; struct revision_sources *sources; @@ -375,7 +384,8 @@ struct setup_revision_opt { const char *def; void (*tweak)(struct rev_info *, struct setup_revision_opt *); unsigned int assume_dashdash:1, - allow_exclude_promisor_objects:1; + allow_exclude_promisor_objects:1, + free_removed_argv_elements:1; unsigned revarg_opt; }; int setup_revisions(int argc, const char **argv, struct rev_info *revs, diff --git a/run-command.c b/run-command.c index 14f17830f5..5ec3a46dcc 100644 --- a/run-command.c +++ b/run-command.c @@ -10,6 +10,7 @@ #include "config.h" #include "packfile.h" #include "hook.h" +#include "compat/nonblock.h" void child_process_init(struct child_process *child) { @@ -1364,12 +1365,25 @@ static int pump_io_round(struct io_pump *slots, int nr, struct pollfd *pfd) continue; if (io->type == POLLOUT) { - ssize_t len = xwrite(io->fd, - io->u.out.buf, io->u.out.len); + ssize_t len; + + /* + * Don't use xwrite() here. It loops forever on EAGAIN, + * and we're in our own poll() loop here. + * + * Note that we lose xwrite()'s handling of MAX_IO_SIZE + * and EINTR, so we have to implement those ourselves. + */ + len = write(io->fd, io->u.out.buf, + io->u.out.len <= MAX_IO_SIZE ? + io->u.out.len : MAX_IO_SIZE); if (len < 0) { - io->error = errno; - close(io->fd); - io->fd = -1; + if (errno != EINTR && errno != EAGAIN && + errno != ENOSPC) { + io->error = errno; + close(io->fd); + io->fd = -1; + } } else { io->u.out.buf += len; io->u.out.len -= len; @@ -1438,6 +1452,15 @@ int pipe_command(struct child_process *cmd, return -1; if (in) { + if (enable_pipe_nonblock(cmd->in) < 0) { + error_errno("unable to make pipe non-blocking"); + close(cmd->in); + if (out) + close(cmd->out); + if (err) + close(cmd->err); + return -1; + } io[nr].fd = cmd->in; io[nr].type = POLLOUT; io[nr].u.out.buf = in; diff --git a/send-pack.c b/send-pack.c index bc0fcdbb00..662f7c2aeb 100644 --- a/send-pack.c +++ b/send-pack.c @@ -84,6 +84,8 @@ static int pack_objects(int fd, struct ref *refs, struct oid_array *advertised, strvec_push(&po.args, "--progress"); if (is_repository_shallow(the_repository)) strvec_push(&po.args, "--shallow"); + if (args->disable_bitmaps) + strvec_push(&po.args, "--no-use-bitmap-index"); po.in = -1; po.out = args->stateless_rpc ? -1 : fd; po.git_cmd = 1; @@ -487,6 +489,7 @@ int send_pack(struct send_pack_args *args, struct async demux; const char *push_cert_nonce = NULL; struct packet_reader reader; + int use_bitmaps; if (!remote_refs) { fprintf(stderr, "No refs in common and none specified; doing nothing.\n" @@ -498,6 +501,9 @@ int send_pack(struct send_pack_args *args, if (push_negotiate) get_commons_through_negotiation(args->url, remote_refs, &commons); + if (!git_config_get_bool("push.usebitmaps", &use_bitmaps)) + args->disable_bitmaps = !use_bitmaps; + git_config_get_bool("transfer.advertisesid", &advertise_sid); /* Does the other end support the reporting? 
*/ diff --git a/send-pack.h b/send-pack.h index e148fcd960..7edb80596c 100644 --- a/send-pack.h +++ b/send-pack.h @@ -26,7 +26,8 @@ struct send_pack_args { /* One of the SEND_PACK_PUSH_CERT_* constants. */ push_cert:2, stateless_rpc:1, - atomic:1; + atomic:1, + disable_bitmaps:1; const struct string_list *push_options; }; diff --git a/sequencer.c b/sequencer.c index 61a8e0020d..79dad522f5 100644 --- a/sequencer.c +++ b/sequencer.c @@ -35,6 +35,8 @@ #include "commit-reach.h" #include "rebase-interactive.h" #include "reset.h" +#include "branch.h" +#include "log-tree.h" #define GIT_REFLOG_ACTION "GIT_REFLOG_ACTION" @@ -148,6 +150,20 @@ static GIT_PATH_FUNC(rebase_path_squash_onto, "rebase-merge/squash-onto") static GIT_PATH_FUNC(rebase_path_refs_to_delete, "rebase-merge/refs-to-delete") /* + * The update-refs file stores a list of refs that will be updated at the end + * of the rebase sequence. The 'update-ref <ref>' commands in the todo file + * update the OIDs for the refs in this file, but the refs are not updated + * until the end of the rebase sequence. + * + * rebase_path_update_refs() returns the path to this file for a given + * worktree directory. For the current worktree, pass the_repository->gitdir. + */ +static char *rebase_path_update_refs(const char *wt_git_dir) +{ + return xstrfmt("%s/rebase-merge/update-refs", wt_git_dir); +} + +/* * The following files are written by git-rebase just after parsing the * command-line. */ @@ -169,6 +185,30 @@ static GIT_PATH_FUNC(rebase_path_no_reschedule_failed_exec, "rebase-merge/no-res static GIT_PATH_FUNC(rebase_path_drop_redundant_commits, "rebase-merge/drop_redundant_commits") static GIT_PATH_FUNC(rebase_path_keep_redundant_commits, "rebase-merge/keep_redundant_commits") +/** + * A 'struct update_refs_record' represents a value in the update-refs + * list. We use a string_list to map refs to these (before, after) pairs. + */ +struct update_ref_record { + struct object_id before; + struct object_id after; +}; + +static struct update_ref_record *init_update_ref_record(const char *ref) +{ + struct update_ref_record *rec; + + CALLOC_ARRAY(rec, 1); + + oidcpy(&rec->before, null_oid()); + oidcpy(&rec->after, null_oid()); + + /* This may fail, but that's fine, we will keep the null OID. 
*/ + read_ref(ref, &rec->before); + + return rec; +} + static int git_sequencer_config(const char *k, const char *v, void *cb) { struct replay_opts *opts = cb; @@ -497,7 +537,7 @@ static struct tree *empty_tree(struct repository *r) static int error_dirty_index(struct repository *repo, struct replay_opts *opts) { if (repo_read_index_unmerged(repo)) - return error_resolve_conflict(_(action_name(opts))); + return error_resolve_conflict(action_name(opts)); error(_("your local changes would be overwritten by %s."), _(action_name(opts))); @@ -535,7 +575,7 @@ static int fast_forward_to(struct repository *r, if (checkout_fast_forward(r, from, to, 1)) return -1; /* the callee should have complained already */ - strbuf_addf(&sb, _("%s: fast-forward"), _(action_name(opts))); + strbuf_addf(&sb, "%s: fast-forward", action_name(opts)); transaction = ref_transaction_begin(&err); if (!transaction || @@ -1689,20 +1729,21 @@ static struct { char c; const char *str; } todo_command_info[] = { - { 'p', "pick" }, - { 0, "revert" }, - { 'e', "edit" }, - { 'r', "reword" }, - { 'f', "fixup" }, - { 's', "squash" }, - { 'x', "exec" }, - { 'b', "break" }, - { 'l', "label" }, - { 't', "reset" }, - { 'm', "merge" }, - { 0, "noop" }, - { 'd', "drop" }, - { 0, NULL } + [TODO_PICK] = { 'p', "pick" }, + [TODO_REVERT] = { 0, "revert" }, + [TODO_EDIT] = { 'e', "edit" }, + [TODO_REWORD] = { 'r', "reword" }, + [TODO_FIXUP] = { 'f', "fixup" }, + [TODO_SQUASH] = { 's', "squash" }, + [TODO_EXEC] = { 'x', "exec" }, + [TODO_BREAK] = { 'b', "break" }, + [TODO_LABEL] = { 'l', "label" }, + [TODO_RESET] = { 't', "reset" }, + [TODO_MERGE] = { 'm', "merge" }, + [TODO_UPDATE_REF] = { 'u', "update-ref" }, + [TODO_NOOP] = { 0, "noop" }, + [TODO_DROP] = { 'd', "drop" }, + [TODO_COMMENT] = { 0, NULL }, }; static const char *command_to_string(const enum todo_command command) @@ -2381,7 +2422,7 @@ static int read_and_refresh_cache(struct repository *r, if (repo_read_index(r) < 0) { rollback_lock_file(&index_lock); return error(_("git %s: failed to read the index"), - _(action_name(opts))); + action_name(opts)); } refresh_index(r->index, REFRESH_QUIET|REFRESH_UNMERGED, NULL, NULL, NULL); @@ -2389,7 +2430,7 @@ static int read_and_refresh_cache(struct repository *r, if (write_locked_index(r->index, &index_lock, COMMIT_LOCK | SKIP_IF_UNCHANGED)) { return error(_("git %s: failed to refresh the index"), - _(action_name(opts))); + action_name(opts)); } } @@ -2481,7 +2522,7 @@ static int parse_insn_line(struct repository *r, struct todo_item *item, command_to_string(item->command)); if (item->command == TODO_EXEC || item->command == TODO_LABEL || - item->command == TODO_RESET) { + item->command == TODO_RESET || item->command == TODO_UPDATE_REF) { item->commit = NULL; item->arg_offset = bol - buf; item->arg_len = (int)(eol - bol); @@ -3712,7 +3753,7 @@ static int do_reset(struct repository *r, init_checkout_metadata(&unpack_tree_opts.meta, name, &oid, NULL); if (repo_read_index_unmerged(r)) { - ret = error_resolve_conflict(_(action_name(opts))); + ret = error_resolve_conflict(action_name(opts)); goto cleanup; } @@ -4081,6 +4122,221 @@ leave_merge: return ret; } +static int write_update_refs_state(struct string_list *refs_to_oids) +{ + int result = 0; + struct lock_file lock = LOCK_INIT; + FILE *fp = NULL; + struct string_list_item *item; + char *path; + + if (!refs_to_oids->nr) + return 0; + + path = rebase_path_update_refs(the_repository->gitdir); + + if (safe_create_leading_directories(path)) { + result = error(_("unable to create leading directories 
of %s"), + path); + goto cleanup; + } + + if (hold_lock_file_for_update(&lock, path, 0) < 0) { + result = error(_("another 'rebase' process appears to be running; " + "'%s.lock' already exists"), + path); + goto cleanup; + } + + fp = fdopen_lock_file(&lock, "w"); + if (!fp) { + result = error_errno(_("could not open '%s' for writing"), path); + rollback_lock_file(&lock); + goto cleanup; + } + + for_each_string_list_item(item, refs_to_oids) { + struct update_ref_record *rec = item->util; + fprintf(fp, "%s\n%s\n%s\n", item->string, + oid_to_hex(&rec->before), oid_to_hex(&rec->after)); + } + + result = commit_lock_file(&lock); + +cleanup: + free(path); + return result; +} + +/* + * Parse the update-refs file for the current rebase, then remove the + * refs that do not appear in the todo_list (and have not had updated + * values stored) and add refs that are in the todo_list but not + * represented in the update-refs file. + * + * If there are changes to the update-refs list, then write the new state + * to disk. + */ +void todo_list_filter_update_refs(struct repository *r, + struct todo_list *todo_list) +{ + int i; + int updated = 0; + struct string_list update_refs = STRING_LIST_INIT_DUP; + + sequencer_get_update_refs_state(r->gitdir, &update_refs); + + /* + * For each item in the update_refs list, if it has no updated + * value and does not appear in the todo_list, then remove it + * from the update_refs list. + */ + for (i = 0; i < update_refs.nr; i++) { + int j; + int found = 0; + const char *ref = update_refs.items[i].string; + size_t reflen = strlen(ref); + struct update_ref_record *rec = update_refs.items[i].util; + + /* OID already stored as updated. */ + if (!is_null_oid(&rec->after)) + continue; + + for (j = 0; !found && j < todo_list->total_nr; j++) { + struct todo_item *item = &todo_list->items[j]; + const char *arg = todo_list->buf.buf + item->arg_offset; + + if (item->command != TODO_UPDATE_REF) + continue; + + if (item->arg_len != reflen || + strncmp(arg, ref, reflen)) + continue; + + found = 1; + } + + if (!found) { + free(update_refs.items[i].string); + free(update_refs.items[i].util); + + update_refs.nr--; + MOVE_ARRAY(update_refs.items + i, update_refs.items + i + 1, update_refs.nr - i); + + updated = 1; + i--; + } + } + + /* + * For each todo_item, check if its ref is in the update_refs list. + * If not, then add it as an un-updated ref. 
+ */ + for (i = 0; i < todo_list->total_nr; i++) { + struct todo_item *item = &todo_list->items[i]; + const char *arg = todo_list->buf.buf + item->arg_offset; + int j, found = 0; + + if (item->command != TODO_UPDATE_REF) + continue; + + for (j = 0; !found && j < update_refs.nr; j++) { + const char *ref = update_refs.items[j].string; + + found = strlen(ref) == item->arg_len && + !strncmp(ref, arg, item->arg_len); + } + + if (!found) { + struct string_list_item *inserted; + struct strbuf argref = STRBUF_INIT; + + strbuf_add(&argref, arg, item->arg_len); + inserted = string_list_insert(&update_refs, argref.buf); + inserted->util = init_update_ref_record(argref.buf); + strbuf_release(&argref); + updated = 1; + } + } + + if (updated) + write_update_refs_state(&update_refs); + string_list_clear(&update_refs, 1); +} + +static int do_update_ref(struct repository *r, const char *refname) +{ + struct string_list_item *item; + struct string_list list = STRING_LIST_INIT_DUP; + + if (sequencer_get_update_refs_state(r->gitdir, &list)) + return -1; + + for_each_string_list_item(item, &list) { + if (!strcmp(item->string, refname)) { + struct update_ref_record *rec = item->util; + if (read_ref("HEAD", &rec->after)) + return -1; + break; + } + } + + write_update_refs_state(&list); + string_list_clear(&list, 1); + return 0; +} + +static int do_update_refs(struct repository *r, int quiet) +{ + int res = 0; + struct string_list_item *item; + struct string_list refs_to_oids = STRING_LIST_INIT_DUP; + struct ref_store *refs = get_main_ref_store(r); + struct strbuf update_msg = STRBUF_INIT; + struct strbuf error_msg = STRBUF_INIT; + + if ((res = sequencer_get_update_refs_state(r->gitdir, &refs_to_oids))) + return res; + + for_each_string_list_item(item, &refs_to_oids) { + struct update_ref_record *rec = item->util; + int loop_res; + + loop_res = refs_update_ref(refs, "rewritten during rebase", + item->string, + &rec->after, &rec->before, + 0, UPDATE_REFS_MSG_ON_ERR); + res |= loop_res; + + if (quiet) + continue; + + if (loop_res) + strbuf_addf(&error_msg, "\t%s\n", item->string); + else + strbuf_addf(&update_msg, "\t%s\n", item->string); + } + + if (!quiet && + (update_msg.len || error_msg.len)) { + fprintf(stderr, + _("Updated the following refs with %s:\n%s"), + "--update-refs", + update_msg.buf); + + if (res) + fprintf(stderr, + _("Failed to update the following refs with %s:\n%s"), + "--update-refs", + error_msg.buf); + } + + string_list_clear(&refs_to_oids, 1); + strbuf_release(&update_msg); + strbuf_release(&error_msg); + return res; +} + static int is_final_fixup(struct todo_list *todo_list) { int i = todo_list->current; @@ -4456,6 +4712,12 @@ static int pick_commits(struct repository *r, return error_with_patch(r, item->commit, arg, item->arg_len, opts, res, 0); + } else if (item->command == TODO_UPDATE_REF) { + struct strbuf ref = STRBUF_INIT; + strbuf_add(&ref, arg, item->arg_len); + if ((res = do_update_ref(r, ref.buf))) + reschedule = 1; + strbuf_release(&ref); } else if (!is_noop(item->command)) return error(_("unknown command %d"), item->command); @@ -4591,6 +4853,9 @@ cleanup_head_ref: strbuf_release(&buf); strbuf_release(&head_ref); + + if (do_update_refs(r, opts->quiet)) + return -1; } /* @@ -5638,10 +5903,135 @@ static int skip_unnecessary_picks(struct repository *r, return 0; } +struct todo_add_branch_context { + struct todo_item *items; + size_t items_nr; + size_t items_alloc; + struct strbuf *buf; + struct commit *commit; + struct string_list refs_to_oids; +}; + +static int 
add_decorations_to_list(const struct commit *commit, + struct todo_add_branch_context *ctx) +{ + const struct name_decoration *decoration = get_name_decoration(&commit->object); + const char *head_ref = resolve_ref_unsafe("HEAD", + RESOLVE_REF_READING, + NULL, + NULL); + + while (decoration) { + struct todo_item *item; + const char *path; + size_t base_offset = ctx->buf->len; + + /* + * If the branch is the current HEAD, then it will be + * updated by the default rebase behavior. + */ + if (head_ref && !strcmp(head_ref, decoration->name)) { + decoration = decoration->next; + continue; + } + + ALLOC_GROW(ctx->items, + ctx->items_nr + 1, + ctx->items_alloc); + item = &ctx->items[ctx->items_nr]; + memset(item, 0, sizeof(*item)); + + /* If the branch is checked out, then leave a comment instead. */ + if ((path = branch_checked_out(decoration->name))) { + item->command = TODO_COMMENT; + strbuf_addf(ctx->buf, "# Ref %s checked out at '%s'\n", + decoration->name, path); + } else { + struct string_list_item *sti; + item->command = TODO_UPDATE_REF; + strbuf_addf(ctx->buf, "%s\n", decoration->name); + + sti = string_list_insert(&ctx->refs_to_oids, + decoration->name); + sti->util = init_update_ref_record(decoration->name); + } + + item->offset_in_buf = base_offset; + item->arg_offset = base_offset; + item->arg_len = ctx->buf->len - base_offset; + ctx->items_nr++; + + decoration = decoration->next; + } + + return 0; +} + +/* + * For each 'pick' command, find out if the commit has a decoration in + * refs/heads/. If so, then add a 'label for-update-refs/' command. + */ +static int todo_list_add_update_ref_commands(struct todo_list *todo_list) +{ + int i, res; + static struct string_list decorate_refs_exclude = STRING_LIST_INIT_NODUP; + static struct string_list decorate_refs_exclude_config = STRING_LIST_INIT_NODUP; + static struct string_list decorate_refs_include = STRING_LIST_INIT_NODUP; + struct decoration_filter decoration_filter = { + .include_ref_pattern = &decorate_refs_include, + .exclude_ref_pattern = &decorate_refs_exclude, + .exclude_ref_config_pattern = &decorate_refs_exclude_config, + }; + struct todo_add_branch_context ctx = { + .buf = &todo_list->buf, + .refs_to_oids = STRING_LIST_INIT_DUP, + }; + + ctx.items_alloc = 2 * todo_list->nr + 1; + ALLOC_ARRAY(ctx.items, ctx.items_alloc); + + string_list_append(&decorate_refs_include, "refs/heads/"); + load_ref_decorations(&decoration_filter, 0); + + for (i = 0; i < todo_list->nr; ) { + struct todo_item *item = &todo_list->items[i]; + + /* insert ith item into new list */ + ALLOC_GROW(ctx.items, + ctx.items_nr + 1, + ctx.items_alloc); + + ctx.items[ctx.items_nr++] = todo_list->items[i++]; + + if (item->commit) { + ctx.commit = item->commit; + add_decorations_to_list(item->commit, &ctx); + } + } + + res = write_update_refs_state(&ctx.refs_to_oids); + + string_list_clear(&ctx.refs_to_oids, 1); + + if (res) { + /* we failed, so clean up the new list. 
*/ + free(ctx.items); + return res; + } + + free(todo_list->items); + todo_list->items = ctx.items; + todo_list->nr = ctx.items_nr; + todo_list->alloc = ctx.items_alloc; + + return 0; +} + int complete_action(struct repository *r, struct replay_opts *opts, unsigned flags, const char *shortrevisions, const char *onto_name, struct commit *onto, const struct object_id *orig_head, struct string_list *commands, unsigned autosquash, + unsigned update_refs, struct todo_list *todo_list) { char shortonto[GIT_MAX_HEXSZ + 1]; @@ -5660,6 +6050,9 @@ int complete_action(struct repository *r, struct replay_opts *opts, unsigned fla item->arg_len = item->arg_offset = item->flags = item->offset_in_buf = 0; } + if (update_refs && todo_list_add_update_ref_commands(todo_list)) + return -1; + if (autosquash && todo_list_rearrange_squash(todo_list)) return -1; @@ -5936,3 +6329,54 @@ int sequencer_determine_whence(struct repository *r, enum commit_whence *whence) return 0; } + +int sequencer_get_update_refs_state(const char *wt_dir, + struct string_list *refs) +{ + int result = 0; + FILE *fp = NULL; + struct strbuf ref = STRBUF_INIT; + struct strbuf hash = STRBUF_INIT; + struct update_ref_record *rec = NULL; + + char *path = rebase_path_update_refs(wt_dir); + + fp = fopen(path, "r"); + if (!fp) + goto cleanup; + + while (strbuf_getline(&ref, fp) != EOF) { + struct string_list_item *item; + + CALLOC_ARRAY(rec, 1); + + if (strbuf_getline(&hash, fp) == EOF || + get_oid_hex(hash.buf, &rec->before)) { + warning(_("update-refs file at '%s' is invalid"), + path); + result = -1; + goto cleanup; + } + + if (strbuf_getline(&hash, fp) == EOF || + get_oid_hex(hash.buf, &rec->after)) { + warning(_("update-refs file at '%s' is invalid"), + path); + result = -1; + goto cleanup; + } + + item = string_list_insert(refs, ref.buf); + item->util = rec; + rec = NULL; + } + +cleanup: + if (fp) + fclose(fp); + free(path); + free(rec); + strbuf_release(&ref); + strbuf_release(&hash); + return result; +} diff --git a/sequencer.h b/sequencer.h index 698599fe4e..563fe59933 100644 --- a/sequencer.h +++ b/sequencer.h @@ -96,6 +96,7 @@ enum todo_command { TODO_LABEL, TODO_RESET, TODO_MERGE, + TODO_UPDATE_REF, /* commands that do nothing but are counted for reporting progress */ TODO_NOOP, TODO_DROP, @@ -132,6 +133,18 @@ void todo_list_release(struct todo_list *todo_list); const char *todo_item_get_arg(struct todo_list *todo_list, struct todo_item *item); +/* + * Parse the update-refs file for the current rebase, then remove the + * refs that do not appear in the todo_list (and have not had updated + * values stored) and add refs that are in the todo_list but not + * represented in the update-refs file. + * + * If there are changes to the update-refs list, then write the new state + * to disk. 
+ */ +void todo_list_filter_update_refs(struct repository *r, + struct todo_list *todo_list); + /* Call this to setup defaults before parsing command line options */ void sequencer_init_config(struct replay_opts *opts); int sequencer_pick_revisions(struct repository *repo, @@ -167,6 +180,7 @@ int complete_action(struct repository *r, struct replay_opts *opts, unsigned fla const char *shortrevisions, const char *onto_name, struct commit *onto, const struct object_id *orig_head, struct string_list *commands, unsigned autosquash, + unsigned update_refs, struct todo_list *todo_list); int todo_list_rearrange_squash(struct todo_list *todo_list); @@ -233,4 +247,13 @@ void sequencer_post_commit_cleanup(struct repository *r, int verbose); int sequencer_get_last_command(struct repository* r, enum replay_action *action); int sequencer_determine_whence(struct repository *r, enum commit_whence *whence); + +/** + * Append the set of ref-OID pairs that are currently stored for the 'git + * rebase --update-refs' feature if such a rebase is currently happening. + * + * Localized to a worktree's git dir. + */ +int sequencer_get_update_refs_state(const char *wt_dir, struct string_list *refs); + #endif /* SEQUENCER_H */ @@ -10,6 +10,10 @@ static int inside_git_dir = -1; static int inside_work_tree = -1; static int work_tree_config_is_bogus; +enum allowed_bare_repo { + ALLOWED_BARE_REPO_EXPLICIT = 0, + ALLOWED_BARE_REPO_ALL, +}; static struct startup_info the_startup_info; struct startup_info *startup_info = &the_startup_info; @@ -1156,11 +1160,51 @@ static int ensure_valid_ownership(const char *gitfile, * constant regardless of what failed above. data.is_safe should be * initialized to false, and might be changed by the callback. */ - read_very_early_config(safe_directory_cb, &data); + git_protected_config(safe_directory_cb, &data); return data.is_safe; } +static int allowed_bare_repo_cb(const char *key, const char *value, void *d) +{ + enum allowed_bare_repo *allowed_bare_repo = d; + + if (strcasecmp(key, "safe.bareRepository")) + return 0; + + if (!strcmp(value, "explicit")) { + *allowed_bare_repo = ALLOWED_BARE_REPO_EXPLICIT; + return 0; + } + if (!strcmp(value, "all")) { + *allowed_bare_repo = ALLOWED_BARE_REPO_ALL; + return 0; + } + return -1; +} + +static enum allowed_bare_repo get_allowed_bare_repo(void) +{ + enum allowed_bare_repo result = ALLOWED_BARE_REPO_ALL; + git_protected_config(allowed_bare_repo_cb, &result); + return result; +} + +static const char *allowed_bare_repo_to_string( + enum allowed_bare_repo allowed_bare_repo) +{ + switch (allowed_bare_repo) { + case ALLOWED_BARE_REPO_EXPLICIT: + return "explicit"; + case ALLOWED_BARE_REPO_ALL: + return "all"; + default: + BUG("invalid allowed_bare_repo %d", + allowed_bare_repo); + } + return NULL; +} + enum discovery_result { GIT_DIR_NONE = 0, GIT_DIR_EXPLICIT, @@ -1170,7 +1214,8 @@ enum discovery_result { GIT_DIR_HIT_CEILING = -1, GIT_DIR_HIT_MOUNT_POINT = -2, GIT_DIR_INVALID_GITFILE = -3, - GIT_DIR_INVALID_OWNERSHIP = -4 + GIT_DIR_INVALID_OWNERSHIP = -4, + GIT_DIR_DISALLOWED_BARE = -5, }; /* @@ -1300,6 +1345,8 @@ static enum discovery_result setup_git_directory_gently_1(struct strbuf *dir, } if (is_git_directory(dir->buf)) { + if (get_allowed_bare_repo() == ALLOWED_BARE_REPO_EXPLICIT) + return GIT_DIR_DISALLOWED_BARE; if (!ensure_valid_ownership(NULL, NULL, dir->buf, report)) return GIT_DIR_INVALID_OWNERSHIP; strbuf_addstr(gitdir, "."); @@ -1448,6 +1495,14 @@ const char *setup_git_directory_gently(int *nongit_ok) } *nongit_ok = 1; break; + case 
GIT_DIR_DISALLOWED_BARE: + if (!nongit_ok) { + die(_("cannot use bare repository '%s' (safe.bareRepository is '%s')"), + dir.buf, + allowed_bare_repo_to_string(get_allowed_bare_repo())); + } + *nongit_ok = 1; + break; case GIT_DIR_NONE: /* * As a safeguard against setup_git_directory_gently_1 returning diff --git a/sha256/nettle.h b/sha256/nettle.h new file mode 100644 index 0000000000..b63e1c8190 --- /dev/null +++ b/sha256/nettle.h @@ -0,0 +1,31 @@ +#ifndef SHA256_NETTLE_H +#define SHA256_NETTLE_H + +#include <nettle/sha2.h> + +typedef struct sha256_ctx nettle_SHA256_CTX; + +static inline void nettle_SHA256_Init(nettle_SHA256_CTX *ctx) +{ + sha256_init(ctx); +} + +static inline void nettle_SHA256_Update(nettle_SHA256_CTX *ctx, + const void *data, + size_t len) +{ + sha256_update(ctx, len, data); +} + +static inline void nettle_SHA256_Final(unsigned char *digest, + nettle_SHA256_CTX *ctx) +{ + sha256_digest(ctx, SHA256_DIGEST_SIZE, digest); +} + +#define platform_SHA256_CTX nettle_SHA256_CTX +#define platform_SHA256_Init nettle_SHA256_Init +#define platform_SHA256_Update nettle_SHA256_Update +#define platform_SHA256_Final nettle_SHA256_Final + +#endif diff --git a/shared.mak b/shared.mak index 4330192e9c..33f43edbf9 100644 --- a/shared.mak +++ b/shared.mak @@ -70,6 +70,7 @@ ifndef V QUIET_HDR = @echo ' ' HDR $(<:hcc=h); QUIET_RC = @echo ' ' RC $@; QUIET_SPATCH = @echo ' ' SPATCH $<; + QUIET_SPATCH_T = @echo ' ' SPATCH TEST $(@:.build/%=%); ## Used in "Documentation/Makefile" QUIET_ASCIIDOC = @echo ' ' ASCIIDOC $@; diff --git a/submodule.c b/submodule.c index 4e299f578f..3fa5db3ecd 100644 --- a/submodule.c +++ b/submodule.c @@ -2374,7 +2374,7 @@ void absorb_git_dir_into_superproject(const char *path, cp.no_stdin = 1; strvec_pushl(&cp.args, "--super-prefix", sb.buf, "submodule--helper", - "absorb-git-dirs", NULL); + "absorbgitdirs", NULL); prepare_submodule_repo_env(&cp.env); if (run_command(&cp)) die(_("could not recurse into submodule '%s'"), path); @@ -2388,7 +2388,7 @@ int get_superproject_working_tree(struct strbuf *buf) struct child_process cp = CHILD_PROCESS_INIT; struct strbuf sb = STRBUF_INIT; struct strbuf one_up = STRBUF_INIT; - const char *cwd = xgetcwd(); + char *cwd = xgetcwd(); int ret = 0; const char *subpath; int code; @@ -2451,6 +2451,7 @@ int get_superproject_working_tree(struct strbuf *buf) ret = 1; free(super_wt); } + free(cwd); strbuf_release(&sb); code = finish_command(&cp); diff --git a/t/Makefile b/t/Makefile index 056ce55dcc..1c80c0c79a 100644 --- a/t/Makefile +++ b/t/Makefile @@ -35,7 +35,6 @@ TEST_RESULTS_DIRECTORY_SQ = $(subst ','\'',$(TEST_RESULTS_DIRECTORY)) CHAINLINTTMP_SQ = $(subst ','\'',$(CHAINLINTTMP)) T = $(sort $(wildcard t[0-9][0-9][0-9][0-9]-*.sh)) -TGITWEB = $(sort $(wildcard t95[0-9][0-9]-*.sh)) THELPERS = $(sort $(filter-out $(T),$(wildcard *.sh))) TPERF = $(sort $(wildcard perf/p[0-9][0-9][0-9][0-9]-*.sh)) CHAINLINTTESTS = $(sort $(patsubst chainlint/%.test,%,$(wildcard chainlint/*.test))) @@ -63,7 +62,7 @@ pre-clean: $(RM) -r '$(TEST_RESULTS_DIRECTORY_SQ)' clean-except-prove-cache: clean-chainlint - $(RM) -r 'trash directory'.* '$(TEST_RESULTS_DIRECTORY_SQ)' + $(RM) -r 'trash directory'.* $(RM) -r valgrind/bin clean: clean-except-prove-cache @@ -112,9 +111,6 @@ aggregate-results: echo "$$f"; \ done | '$(SHELL_PATH_SQ)' ./aggregate-results.sh -gitweb-test: - $(MAKE) $(TGITWEB) - valgrind: $(MAKE) GIT_TEST_OPTS="$(GIT_TEST_OPTS) --valgrind" @@ -366,12 +366,47 @@ excluded as so much relies on it, but this might change in the future. 
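A minimal sketch of how the safe.bareRepository setting introduced in the setup.c hunks above might be exercised from the command line (illustrative only, not part of the patch; "bare.git" is a throwaway repository). Because the value is read through git_protected_config(), only system, global and command-line configuration is consulted:

    git config --global safe.bareRepository explicit
    git init --bare bare.git
    cd bare.git
    git rev-parse --git-dir               # expected to fail: a bare repo found by discovery is refused
    git --git-dir=. rev-parse --git-dir   # fine: the git dir is given explicitly
    cd .. && git config --global safe.bareRepository all   # restore the default

With "explicit", a bare repository found by walking up from the current directory triggers the "cannot use bare repository" error shown above, while an explicit GIT_DIR or --git-dir continues to work.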
GIT_TEST_SPLIT_INDEX=<boolean> forces split-index mode on the whole test suite. Accept any boolean values that are accepted by git-config. -GIT_TEST_PASSING_SANITIZE_LEAK=<boolean> when compiled with -SANITIZE=leak will run only those tests that have whitelisted -themselves as passing with no memory leaks. Tests can be whitelisted -by setting "TEST_PASSES_SANITIZE_LEAK=true" before sourcing -"test-lib.sh" itself at the top of the test script. This test mode is -used by the "linux-leaks" CI target. +GIT_TEST_PASSING_SANITIZE_LEAK=true skips those tests that haven't +declared themselves as leak-free by setting +"TEST_PASSES_SANITIZE_LEAK=true" before sourcing "test-lib.sh". This +test mode is used by the "linux-leaks" CI target. + +GIT_TEST_PASSING_SANITIZE_LEAK=check checks that our +"TEST_PASSES_SANITIZE_LEAK=true" markings are current. Rather than +skipping those tests that haven't set "TEST_PASSES_SANITIZE_LEAK=true" +before sourcing "test-lib.sh" this mode runs them with +"--invert-exit-code". This is used to check that there's a one-to-one +mapping between "TEST_PASSES_SANITIZE_LEAK=true" and those tests that +pass under "SANITIZE=leak". This is especially useful when testing a +series that fixes various memory leaks with "git rebase -x". + +GIT_TEST_SANITIZE_LEAK_LOG=true will log memory leaks to +"test-results/$TEST_NAME.leak/trace.*" files. The logs include a +"dedup_token" (see +"ASAN_OPTIONS=help=1 ./git") and other options to +make logs +machine-readable. + +With GIT_TEST_SANITIZE_LEAK_LOG=true we'll look at the leak logs +before exiting and exit on failure if the logs showed that we had a +memory leak, even if the test itself would have otherwise passed. This +allows us to catch e.g. missing &&-chaining. This is especially useful +when combined with "GIT_TEST_PASSING_SANITIZE_LEAK", see below. + +GIT_TEST_PASSING_SANITIZE_LEAK=check when combined with "--immediate" +will run to completion faster, and result in the same failing +tests. The only practical reason to run +GIT_TEST_PASSING_SANITIZE_LEAK=check without "--immediate" is to +combine it with "GIT_TEST_SANITIZE_LEAK_LOG=true". If we stop at the +first failing test case our leak logs won't show subsequent leaks we +might have run into. + +GIT_TEST_PASSING_SANITIZE_LEAK=(true|check) will not catch all memory +leaks unless combined with GIT_TEST_SANITIZE_LEAK_LOG=true. Some tests +run "git" (or "test-tool" etc.) without properly checking the exit +code, or git will invoke itself and fail to ferry the abort() exit +code to the original caller. When the two modes are combined we'll +look at the "test-results/$TEST_NAME.leak/trace.*" files at the end of +the test run to see if had memory leaks which the test itself didn't +catch. GIT_TEST_PROTOCOL_VERSION=<n>, when set, makes 'protocol.version' default to n. @@ -935,32 +970,6 @@ see test-lib-functions.sh for the full list and their options. test_done fi - - test_external [<prereq>] <message> <external> <script> - - Execute a <script> with an <external> interpreter (like perl). This - was added for tests like t9700-perl-git.sh which do most of their - work in an external test script. - - test_external \ - 'GitwebCache::*FileCache*' \ - perl "$TEST_DIRECTORY"/t9503/test_cache_interface.pl - - If the test is outputting its own TAP you should set the - test_external_has_tap variable somewhere before calling the first - test_external* function. See t9700-perl-git.sh for an example. 
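The GIT_TEST_PASSING_SANITIZE_LEAK and GIT_TEST_SANITIZE_LEAK_LOG modes described above might be driven roughly like this (an illustrative sketch, not part of the patch; t0015-hash.sh is simply one of the tests marked TEST_PASSES_SANITIZE_LEAK=true elsewhere in this series):

    # Build with the leak sanitizer first.
    make SANITIZE=leak
    cd t

    # Run a single leak-free-marked test in the mode the "linux-leaks" CI job uses.
    GIT_TEST_PASSING_SANITIZE_LEAK=true ./t0015-hash.sh

    # Verify that the TEST_PASSES_SANITIZE_LEAK=true markings are still accurate,
    # keeping the per-test logs under test-results/*.leak/ for inspection.
    GIT_TEST_PASSING_SANITIZE_LEAK=check \
    GIT_TEST_SANITIZE_LEAK_LOG=true \
    make

As the paragraphs above note, combining "check" with GIT_TEST_SANITIZE_LEAK_LOG=true also catches leaks that the tests themselves would not have reported.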
- - # The external test will outputs its own plan - test_external_has_tap=1 - - - test_external_without_stderr [<prereq>] <message> <external> <script> - - Like test_external but fail if there's any output on stderr, - instead of checking the exit code. - - test_external_without_stderr \ - 'Perl API' \ - perl "$TEST_DIRECTORY"/t9700/test.pl - - test_expect_code <exit-code> <command> Run a command and ensure that it exits with the given exit code. diff --git a/t/annotate-tests.sh b/t/annotate-tests.sh index cc01d89150..f1b9a6ce4d 100644 --- a/t/annotate-tests.sh +++ b/t/annotate-tests.sh @@ -153,7 +153,7 @@ test_expect_success 'blame evil merge' ' test_expect_success 'blame huge graft' ' test_when_finished "git checkout branch2" && - test_when_finished "rm -f .git/info/grafts" && + test_when_finished "rm -rf .git/info" && graft= && for i in 0 1 2 do @@ -168,6 +168,7 @@ test_expect_success 'blame huge graft' ' graft="$graft$commit " || return 1 done done && + mkdir .git/info && printf "%s " $graft >.git/info/grafts && check_count -h 00 01 1 10 1 ' diff --git a/t/helper/test-bloom.c b/t/helper/test-bloom.c index ad3ef1cd77..6c900ca668 100644 --- a/t/helper/test-bloom.c +++ b/t/helper/test-bloom.c @@ -16,6 +16,7 @@ static void add_string_to_filter(const char *data, struct bloom_filter *filter) } printf("\n"); add_key_to_filter(&key, filter, &settings); + clear_bloom_key(&key); } static void print_bloom_filter(struct bloom_filter *filter) { @@ -80,6 +81,7 @@ int cmd__bloom(int argc, const char **argv) } print_bloom_filter(&filter); + free(filter.data); } if (!strcmp(argv[1], "get_filter_for_commit")) { diff --git a/t/helper/test-delta.c b/t/helper/test-delta.c index e749a49c88..b15481ea59 100644 --- a/t/helper/test-delta.c +++ b/t/helper/test-delta.c @@ -20,8 +20,9 @@ int cmd__delta(int argc, const char **argv) { int fd; struct stat st; - void *from_buf, *data_buf, *out_buf; + void *from_buf = NULL, *data_buf = NULL, *out_buf = NULL; unsigned long from_size, data_size, out_size; + int ret = 1; if (argc != 5 || (strcmp(argv[1], "-d") && strcmp(argv[1], "-p"))) { fprintf(stderr, "usage: %s\n", usage_str); @@ -38,21 +39,21 @@ int cmd__delta(int argc, const char **argv) if (read_in_full(fd, from_buf, from_size) < 0) { perror(argv[2]); close(fd); - return 1; + goto cleanup; } close(fd); fd = open(argv[3], O_RDONLY); if (fd < 0 || fstat(fd, &st)) { perror(argv[3]); - return 1; + goto cleanup; } data_size = st.st_size; data_buf = xmalloc(data_size); if (read_in_full(fd, data_buf, data_size) < 0) { perror(argv[3]); close(fd); - return 1; + goto cleanup; } close(fd); @@ -66,14 +67,20 @@ int cmd__delta(int argc, const char **argv) &out_size); if (!out_buf) { fprintf(stderr, "delta operation failed (returned NULL)\n"); - return 1; + goto cleanup; } fd = open (argv[4], O_WRONLY|O_CREAT|O_TRUNC, 0666); if (fd < 0 || write_in_full(fd, out_buf, out_size) < 0) { perror(argv[4]); - return 1; + goto cleanup; } - return 0; + ret = 0; +cleanup: + free(from_buf); + free(data_buf); + free(out_buf); + + return ret; } diff --git a/t/helper/test-dump-cache-tree.c b/t/helper/test-dump-cache-tree.c index 6a3f88f5f5..0d6d7f1ecb 100644 --- a/t/helper/test-dump-cache-tree.c +++ b/t/helper/test-dump-cache-tree.c @@ -59,11 +59,16 @@ int cmd__dump_cache_tree(int ac, const char **av) { struct index_state istate; struct cache_tree *another = cache_tree(); + int ret; + setup_git_directory(); if (read_cache() < 0) die("unable to read index file"); istate = the_index; istate.cache_tree = another; cache_tree_update(&istate, 
WRITE_TREE_DRY_RUN); - return dump_cache_tree(active_cache_tree, another, ""); + ret = dump_cache_tree(active_cache_tree, another, ""); + cache_tree_free(&another); + + return ret; } diff --git a/t/helper/test-fast-rebase.c b/t/helper/test-fast-rebase.c index 4e5553e202..45665ec19a 100644 --- a/t/helper/test-fast-rebase.c +++ b/t/helper/test-fast-rebase.c @@ -184,8 +184,6 @@ int cmd__fast_rebase(int argc, const char **argv) last_picked_commit = commit; last_commit = create_commit(result.tree, commit, last_commit); } - /* TODO: There should be some kind of rev_info_free(&revs) call... */ - memset(&revs, 0, sizeof(revs)); merge_switch_to_result(&merge_opt, head_tree, &result, 1, !result.clean); diff --git a/t/helper/test-hash.c b/t/helper/test-hash.c index 261c545b9d..5860dab0ff 100644 --- a/t/helper/test-hash.c +++ b/t/helper/test-hash.c @@ -54,5 +54,6 @@ int cmd_hash_impl(int ac, const char **av, int algo) fwrite(hash, 1, algop->rawsz, stdout); else puts(hash_to_hex_algop(hash, algop)); + free(buffer); return 0; } diff --git a/t/helper/test-json-writer.c b/t/helper/test-json-writer.c index 37c452535f..8c3edacc00 100644 --- a/t/helper/test-json-writer.c +++ b/t/helper/test-json-writer.c @@ -181,12 +181,18 @@ static struct json_writer nest1 = JSON_WRITER_INIT; static void make_nest1(int pretty) { + make_obj1(0); + make_arr1(0); + jw_object_begin(&nest1, pretty); { jw_object_sub_jw(&nest1, "obj1", &obj1); jw_object_sub_jw(&nest1, "arr1", &arr1); } jw_end(&nest1); + + jw_release(&obj1); + jw_release(&arr1); } static char *expect_inline1 = @@ -313,6 +319,9 @@ static void make_mixed1(int pretty) jw_object_sub_jw(&mixed1, "arr1", &arr1); } jw_end(&mixed1); + + jw_release(&obj1); + jw_release(&arr1); } static void cmp(const char *test, const struct json_writer *jw, const char *exp) @@ -325,8 +334,8 @@ static void cmp(const char *test, const struct json_writer *jw, const char *exp) exit(1); } -#define t(v) do { make_##v(0); cmp(#v, &v, expect_##v); } while (0) -#define p(v) do { make_##v(1); cmp(#v, &v, pretty_##v); } while (0) +#define t(v) do { make_##v(0); cmp(#v, &v, expect_##v); jw_release(&v); } while (0) +#define p(v) do { make_##v(1); cmp(#v, &v, pretty_##v); jw_release(&v); } while (0) /* * Run some basic regression tests with some known patterns. 
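The test-json-writer.c changes above pair every writer with a jw_release() once its output has been consumed. A minimal sketch of that lifecycle, using the json-writer API as it appears in this file (jw_object_string() is assumed from json-writer.h and is not part of this patch):

    #include "git-compat-util.h"
    #include "json-writer.h"

    static void emit_one_object(void)
    {
        struct json_writer jw = JSON_WRITER_INIT;

        jw_object_begin(&jw, 0);               /* 0 = compact, no pretty-printing */
        jw_object_string(&jw, "key", "value");
        jw_end(&jw);                           /* close the object */

        printf("%s\n", jw.json.buf);           /* serialized JSON lives in jw.json */
        jw_release(&jw);                       /* free it instead of leaking it */
    }

This is the same pattern the t()/p() macros above now encode: build, compare (or print), then jw_release().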
@@ -381,7 +390,6 @@ static int unit_tests(void) /* mixed forms */ t(mixed1); - jw_init(&mixed1); p(mixed1); return 0; @@ -544,7 +552,7 @@ static int scripted(void) printf("%s\n", jw.json.buf); - strbuf_release(&jw.json); + jw_release(&jw); return 0; } diff --git a/t/helper/test-mergesort.c b/t/helper/test-mergesort.c index ebf68f7de8..202e54a7ff 100644 --- a/t/helper/test-mergesort.c +++ b/t/helper/test-mergesort.c @@ -13,19 +13,10 @@ struct line { struct line *next; }; -static void *get_next(const void *a) -{ - return ((const struct line *)a)->next; -} - -static void set_next(void *a, void *b) -{ - ((struct line *)a)->next = b; -} +DEFINE_LIST_SORT(static, sort_lines, struct line, next); -static int compare_strings(const void *a, const void *b) +static int compare_strings(const struct line *x, const struct line *y) { - const struct line *x = a, *y = b; return strcmp(x->text, y->text); } @@ -47,7 +38,7 @@ static int sort_stdin(void) p = line; } - lines = llist_mergesort(lines, get_next, set_next, compare_strings); + sort_lines(&lines, compare_strings); while (lines) { puts(lines->text); @@ -273,21 +264,11 @@ struct number { struct number *next; }; -static void *get_next_number(const void *a) -{ - stats.get_next++; - return ((const struct number *)a)->next; -} - -static void set_next_number(void *a, void *b) -{ - stats.set_next++; - ((struct number *)a)->next = b; -} +DEFINE_LIST_SORT_DEBUG(static, sort_numbers, struct number, next, + stats.get_next++, stats.set_next++); -static int compare_numbers(const void *av, const void *bv) +static int compare_numbers(const struct number *an, const struct number *bn) { - const struct number *an = av, *bn = bv; int a = an->value, b = bn->value; stats.compare++; return (a > b) - (a < b); @@ -325,8 +306,7 @@ static int test(const struct dist *dist, const struct mode *mode, int n, int m) *tail = NULL; stats.get_next = stats.set_next = stats.compare = 0; - list = llist_mergesort(list, get_next_number, set_next_number, - compare_numbers); + sort_numbers(&list, compare_numbers); QSORT(arr, n, compare_ints); for (i = 0, curr = list; i < n && curr; i++, curr = curr->next) { diff --git a/t/helper/test-path-utils.c b/t/helper/test-path-utils.c index 229ed416b0..d20e1b7a18 100644 --- a/t/helper/test-path-utils.c +++ b/t/helper/test-path-utils.c @@ -296,9 +296,8 @@ int cmd__path_utils(int argc, const char **argv) if (argc == 3 && !strcmp(argv[1], "normalize_path_copy")) { char *buf = xmallocz(strlen(argv[2])); int rv = normalize_path_copy(buf, argv[2]); - if (rv) - buf = "++failed++"; - puts(buf); + puts(rv ? "++failed++" : buf); + free(buf); return 0; } @@ -356,7 +355,10 @@ int cmd__path_utils(int argc, const char **argv) int nongit_ok; setup_git_directory_gently(&nongit_ok); while (argc > 3) { - puts(prefix_path(prefix, prefix_len, argv[3])); + char *pfx = prefix_path(prefix, prefix_len, argv[3]); + + puts(pfx); + free(pfx); argc--; argv++; } @@ -366,6 +368,7 @@ int cmd__path_utils(int argc, const char **argv) if (argc == 4 && !strcmp(argv[1], "strip_path_suffix")) { char *prefix = strip_path_suffix(argv[2], argv[3]); printf("%s\n", prefix ? 
prefix : "(null)"); + free(prefix); return 0; } diff --git a/t/helper/test-ref-store.c b/t/helper/test-ref-store.c index 9646d85fc8..4d18bfb1ca 100644 --- a/t/helper/test-ref-store.c +++ b/t/helper/test-ref-store.c @@ -96,6 +96,7 @@ static const char **get_store(const char **argv, struct ref_store **refs) die("no such worktree: %s", gitdir); *refs = get_worktree_ref_store(*p); + free_worktrees(worktrees); } else die("unknown backend %s", argv[0]); diff --git a/t/helper/test-regex.c b/t/helper/test-regex.c index d6f28ca8d1..bd871a735b 100644 --- a/t/helper/test-regex.c +++ b/t/helper/test-regex.c @@ -34,6 +34,7 @@ static int test_regex_bug(void) if (m[0].rm_so == 3) /* matches '\n' when it should not */ die("regex bug confirmed: re-build git with NO_REGEX=1"); + regfree(&r); return 0; } @@ -94,18 +95,20 @@ int cmd__regex(int argc, const char **argv) die("failed regcomp() for pattern '%s' (%s)", pat, errbuf); } if (!str) - return 0; + goto cleanup; ret = regexec(&r, str, 1, m, 0); if (ret) { if (silent || ret == REG_NOMATCH) - return ret; + goto cleanup; regerror(ret, &r, errbuf, sizeof(errbuf)); die("failed regexec() for subject '%s' (%s)", str, errbuf); } - return 0; +cleanup: + regfree(&r); + return ret; usage: usage("\ttest-tool regex --bug\n" "\ttest-tool regex [--silent] <pattern>\n" diff --git a/t/helper/test-rot13-filter.c b/t/helper/test-rot13-filter.c new file mode 100644 index 0000000000..f8d564c622 --- /dev/null +++ b/t/helper/test-rot13-filter.c @@ -0,0 +1,382 @@ +/* + * Example implementation for the Git filter protocol version 2 + * See Documentation/gitattributes.txt, section "Filter Protocol" + * + * Usage: test-tool rot13-filter [--always-delay] --log=<path> <capabilities> + * + * Log path defines a debug log file that the script writes to. The + * subsequent arguments define a list of supported protocol capabilities + * ("clean", "smudge", etc). + * + * When --always-delay is given all pathnames with the "can-delay" flag + * that don't appear on the list bellow are delayed with a count of 1 + * (see more below). + * + * This implementation supports special test cases: + * (1) If data with the pathname "clean-write-fail.r" is processed with + * a "clean" operation then the write operation will die. + * (2) If data with the pathname "smudge-write-fail.r" is processed with + * a "smudge" operation then the write operation will die. + * (3) If data with the pathname "error.r" is processed with any + * operation then the filter signals that it cannot or does not want + * to process the file. + * (4) If data with the pathname "abort.r" is processed with any + * operation then the filter signals that it cannot or does not want + * to process the file and any file after that is processed with the + * same command. + * (5) If data with a pathname that is a key in the delay hash is + * requested (e.g. "test-delay10.a") then the filter responds with + * a "delay" status and sets the "requested" field in the delay hash. + * The filter will signal the availability of this object after + * "count" (field in delay hash) "list_available_blobs" commands. + * (6) If data with the pathname "missing-delay.a" is processed that the + * filter will drop the path from the "list_available_blobs" response. + * (7) If data with the pathname "invalid-delay.a" is processed that the + * filter will add the path "unfiltered" which was not delayed before + * to the "list_available_blobs" response. 
+ */ + +#include "test-tool.h" +#include "pkt-line.h" +#include "string-list.h" +#include "strmap.h" +#include "parse-options.h" + +static FILE *logfile; +static int always_delay, has_clean_cap, has_smudge_cap; +static struct strmap delay = STRMAP_INIT; + +static inline const char *str_or_null(const char *str) +{ + return str ? str : "(null)"; +} + +static char *rot13(char *str) +{ + char *c; + for (c = str; *c; c++) + if (isalpha(*c)) + *c += tolower(*c) < 'n' ? 13 : -13; + return str; +} + +static char *get_value(char *buf, const char *key) +{ + const char *orig_buf = buf; + if (!buf || + !skip_prefix((const char *)buf, key, (const char **)&buf) || + !skip_prefix((const char *)buf, "=", (const char **)&buf) || + !*buf) + die("expected key '%s', got '%s'", key, str_or_null(orig_buf)); + return buf; +} + +/* + * Read a text packet, expecting that it is in the form "key=value" for + * the given key. An EOF does not trigger any error and is reported + * back to the caller with NULL. Die if the "key" part of "key=value" does + * not match the given key, or the value part is empty. + */ +static char *packet_key_val_read(const char *key) +{ + char *buf; + if (packet_read_line_gently(0, NULL, &buf) < 0) + return NULL; + return xstrdup(get_value(buf, key)); +} + +static inline void assert_remote_capability(struct strset *caps, const char *cap) +{ + if (!strset_contains(caps, cap)) + die("required '%s' capability not available from remote", cap); +} + +static void read_capabilities(struct strset *remote_caps) +{ + for (;;) { + char *buf = packet_read_line(0, NULL); + if (!buf) + break; + strset_add(remote_caps, get_value(buf, "capability")); + } + + assert_remote_capability(remote_caps, "clean"); + assert_remote_capability(remote_caps, "smudge"); + assert_remote_capability(remote_caps, "delay"); +} + +static void check_and_write_capabilities(struct strset *remote_caps, + const char **caps, int nr_caps) +{ + int i; + for (i = 0; i < nr_caps; i++) { + if (!strset_contains(remote_caps, caps[i])) + die("our capability '%s' is not available from remote", + caps[i]); + packet_write_fmt(1, "capability=%s\n", caps[i]); + } + packet_flush(1); +} + +struct delay_entry { + int requested, count; + char *output; +}; + +static void free_delay_entries(void) +{ + struct hashmap_iter iter; + struct strmap_entry *ent; + + strmap_for_each_entry(&delay, &iter, ent) { + struct delay_entry *delay_entry = ent->value; + free(delay_entry->output); + free(delay_entry); + } + strmap_clear(&delay, 0); +} + +static void add_delay_entry(char *pathname, int count, int requested) +{ + struct delay_entry *entry = xcalloc(1, sizeof(*entry)); + entry->count = count; + entry->requested = requested; + if (strmap_put(&delay, pathname, entry)) + BUG("adding the same path twice to delay hash?"); +} + +static void reply_list_available_blobs_cmd(void) +{ + struct hashmap_iter iter; + struct strmap_entry *ent; + struct string_list_item *str_item; + struct string_list paths = STRING_LIST_INIT_NODUP; + + /* flush */ + if (packet_read_line(0, NULL)) + die("bad list_available_blobs end"); + + strmap_for_each_entry(&delay, &iter, ent) { + struct delay_entry *delay_entry = ent->value; + if (!delay_entry->requested) + continue; + delay_entry->count--; + if (!strcmp(ent->key, "invalid-delay.a")) { + /* Send Git a pathname that was not delayed earlier */ + packet_write_fmt(1, "pathname=unfiltered"); + } + if (!strcmp(ent->key, "missing-delay.a")) { + /* Do not signal Git that this file is available */ + } else if (!delay_entry->count) { + 
string_list_append(&paths, ent->key); + packet_write_fmt(1, "pathname=%s", ent->key); + } + } + + /* Print paths in sorted order. */ + string_list_sort(&paths); + for_each_string_list_item(str_item, &paths) + fprintf(logfile, " %s", str_item->string); + string_list_clear(&paths, 0); + + packet_flush(1); + + fprintf(logfile, " [OK]\n"); + packet_write_fmt(1, "status=success"); + packet_flush(1); +} + +static void command_loop(void) +{ + for (;;) { + char *buf, *output; + char *pathname; + struct delay_entry *entry; + struct strbuf input = STRBUF_INIT; + char *command = packet_key_val_read("command"); + + if (!command) { + fprintf(logfile, "STOP\n"); + break; + } + fprintf(logfile, "IN: %s", command); + + if (!strcmp(command, "list_available_blobs")) { + reply_list_available_blobs_cmd(); + free(command); + continue; + } + + pathname = packet_key_val_read("pathname"); + if (!pathname) + die("unexpected EOF while expecting pathname"); + fprintf(logfile, " %s", pathname); + + /* Read until flush */ + while ((buf = packet_read_line(0, NULL))) { + if (!strcmp(buf, "can-delay=1")) { + entry = strmap_get(&delay, pathname); + if (entry && !entry->requested) + entry->requested = 1; + else if (!entry && always_delay) + add_delay_entry(pathname, 1, 1); + } else if (starts_with(buf, "ref=") || + starts_with(buf, "treeish=") || + starts_with(buf, "blob=")) { + fprintf(logfile, " %s", buf); + } else { + /* + * In general, filters need to be graceful about + * new metadata, since it's documented that we + * can pass any key-value pairs, but for tests, + * let's be a little stricter. + */ + die("Unknown message '%s'", buf); + } + } + + read_packetized_to_strbuf(0, &input, 0); + fprintf(logfile, " %"PRIuMAX" [OK] -- ", (uintmax_t)input.len); + + entry = strmap_get(&delay, pathname); + if (entry && entry->output) { + output = entry->output; + } else if (!strcmp(pathname, "error.r") || !strcmp(pathname, "abort.r")) { + output = ""; + } else if (!strcmp(command, "clean") && has_clean_cap) { + output = rot13(input.buf); + } else if (!strcmp(command, "smudge") && has_smudge_cap) { + output = rot13(input.buf); + } else { + die("bad command '%s'", command); + } + + if (!strcmp(pathname, "error.r")) { + fprintf(logfile, "[ERROR]\n"); + packet_write_fmt(1, "status=error"); + packet_flush(1); + } else if (!strcmp(pathname, "abort.r")) { + fprintf(logfile, "[ABORT]\n"); + packet_write_fmt(1, "status=abort"); + packet_flush(1); + } else if (!strcmp(command, "smudge") && + (entry = strmap_get(&delay, pathname)) && + entry->requested == 1) { + fprintf(logfile, "[DELAYED]\n"); + packet_write_fmt(1, "status=delayed"); + packet_flush(1); + entry->requested = 2; + if (entry->output != output) { + free(entry->output); + entry->output = xstrdup(output); + } + } else { + int i, nr_packets = 0; + size_t output_len; + const char *p; + packet_write_fmt(1, "status=success"); + packet_flush(1); + + if (skip_prefix(pathname, command, &p) && + !strcmp(p, "-write-fail.r")) { + fprintf(logfile, "[WRITE FAIL]\n"); + die("%s write error", command); + } + + output_len = strlen(output); + fprintf(logfile, "OUT: %"PRIuMAX" ", (uintmax_t)output_len); + + if (write_packetized_from_buf_no_flush_count(output, + output_len, 1, &nr_packets)) + die("failed to write buffer to stdout"); + packet_flush(1); + + for (i = 0; i < nr_packets; i++) + fprintf(logfile, "."); + fprintf(logfile, " [OK]\n"); + + packet_flush(1); + } + free(pathname); + strbuf_release(&input); + free(command); + } +} + +static void packet_initialize(void) +{ + char *pkt_buf = 
packet_read_line(0, NULL); + + if (!pkt_buf || strcmp(pkt_buf, "git-filter-client")) + die("bad initialize: '%s'", str_or_null(pkt_buf)); + + pkt_buf = packet_read_line(0, NULL); + if (!pkt_buf || strcmp(pkt_buf, "version=2")) + die("bad version: '%s'", str_or_null(pkt_buf)); + + pkt_buf = packet_read_line(0, NULL); + if (pkt_buf) + die("bad version end: '%s'", pkt_buf); + + packet_write_fmt(1, "git-filter-server"); + packet_write_fmt(1, "version=2"); + packet_flush(1); +} + +static const char *rot13_usage[] = { + "test-tool rot13-filter [--always-delay] --log=<path> <capabilities>", + NULL +}; + +int cmd__rot13_filter(int argc, const char **argv) +{ + int i, nr_caps; + struct strset remote_caps = STRSET_INIT; + const char *log_path = NULL; + + struct option options[] = { + OPT_BOOL(0, "always-delay", &always_delay, + "delay all paths with the can-delay flag"), + OPT_STRING(0, "log", &log_path, "path", + "path to the debug log file"), + OPT_END() + }; + nr_caps = parse_options(argc, argv, NULL, options, rot13_usage, + PARSE_OPT_STOP_AT_NON_OPTION); + + if (!log_path || !nr_caps) + usage_with_options(rot13_usage, options); + + logfile = fopen(log_path, "a"); + if (!logfile) + die_errno("failed to open log file"); + + for (i = 0; i < nr_caps; i++) { + if (!strcmp(argv[i], "smudge")) + has_smudge_cap = 1; + if (!strcmp(argv[i], "clean")) + has_clean_cap = 1; + } + + add_delay_entry("test-delay10.a", 1, 0); + add_delay_entry("test-delay11.a", 1, 0); + add_delay_entry("test-delay20.a", 2, 0); + add_delay_entry("test-delay10.b", 1, 0); + add_delay_entry("missing-delay.a", 1, 0); + add_delay_entry("invalid-delay.a", 1, 0); + + fprintf(logfile, "START\n"); + packet_initialize(); + + read_capabilities(&remote_caps); + check_and_write_capabilities(&remote_caps, argv, nr_caps); + fprintf(logfile, "init handshake complete\n"); + strset_clear(&remote_caps); + + command_loop(); + + if (fclose(logfile)) + die_errno("error closing logfile"); + free_delay_entries(); + return 0; +} diff --git a/t/helper/test-scrap-cache-tree.c b/t/helper/test-scrap-cache-tree.c index 393f1604ff..026c802479 100644 --- a/t/helper/test-scrap-cache-tree.c +++ b/t/helper/test-scrap-cache-tree.c @@ -12,6 +12,7 @@ int cmd__scrap_cache_tree(int ac, const char **av) hold_locked_index(&index_lock, LOCK_DIE_ON_ERROR); if (read_cache() < 0) die("unable to read index file"); + cache_tree_free(&active_cache_tree); active_cache_tree = NULL; if (write_locked_index(&the_index, &index_lock, COMMIT_LOCK)) die("unable to write index file"); diff --git a/t/helper/test-tool.c b/t/helper/test-tool.c index 318fdbab0c..d6a560f832 100644 --- a/t/helper/test-tool.c +++ b/t/helper/test-tool.c @@ -65,6 +65,7 @@ static struct test_cmd cmds[] = { { "read-midx", cmd__read_midx }, { "ref-store", cmd__ref_store }, { "reftable", cmd__reftable }, + { "rot13-filter", cmd__rot13_filter }, { "dump-reftable", cmd__dump_reftable }, { "regex", cmd__regex }, { "repository", cmd__repository }, diff --git a/t/helper/test-tool.h b/t/helper/test-tool.h index bb79927163..21a91b1019 100644 --- a/t/helper/test-tool.h +++ b/t/helper/test-tool.h @@ -54,6 +54,7 @@ int cmd__read_cache(int argc, const char **argv); int cmd__read_graph(int argc, const char **argv); int cmd__read_midx(int argc, const char **argv); int cmd__ref_store(int argc, const char **argv); +int cmd__rot13_filter(int argc, const char **argv); int cmd__reftable(int argc, const char **argv); int cmd__regex(int argc, const char **argv); int cmd__repository(int argc, const char **argv); diff --git 
a/t/helper/test-urlmatch-normalization.c b/t/helper/test-urlmatch-normalization.c index 8f4d67e646..86edd454f5 100644 --- a/t/helper/test-urlmatch-normalization.c +++ b/t/helper/test-urlmatch-normalization.c @@ -5,8 +5,9 @@ int cmd__urlmatch_normalization(int argc, const char **argv) { const char usage[] = "test-tool urlmatch-normalization [-p | -l] <url1> | <url1> <url2>"; - char *url1, *url2; + char *url1 = NULL, *url2 = NULL; int opt_p = 0, opt_l = 0; + int ret = 0; /* * For one url, succeed if url_normalize succeeds on it, fail otherwise. @@ -39,7 +40,7 @@ int cmd__urlmatch_normalization(int argc, const char **argv) printf("%s\n", url1); if (opt_l) printf("%u\n", (unsigned)info.url_len); - return 0; + goto cleanup; } if (opt_p || opt_l) @@ -47,5 +48,9 @@ int cmd__urlmatch_normalization(int argc, const char **argv) url1 = url_normalize(argv[1], NULL); url2 = url_normalize(argv[2], NULL); - return (url1 && url2 && !strcmp(url1, url2)) ? 0 : 1; + ret = (url1 && url2 && !strcmp(url1, url2)) ? 0 : 1; +cleanup: + free(url1); + free(url2); + return ret; } diff --git a/t/lib-perl.sh b/t/lib-perl.sh new file mode 100644 index 0000000000..d0bf509a16 --- /dev/null +++ b/t/lib-perl.sh @@ -0,0 +1,19 @@ +# Copyright (c) 2022 Ævar Arnfjörð Bjarmason + +test_lazy_prereq PERL_TEST_MORE ' + perl -MTest::More -e 0 +' + +skip_all_if_no_Test_More () { + if ! test_have_prereq PERL + then + skip_all='skipping perl interface tests, perl not available' + test_done + fi + + if ! test_have_prereq PERL_TEST_MORE + then + skip_all="Perl Test::More unavailable, skipping test" + test_done + fi +} diff --git a/t/lib-rebase.sh b/t/lib-rebase.sh index ec6b9b107d..b57541356b 100644 --- a/t/lib-rebase.sh +++ b/t/lib-rebase.sh @@ -207,3 +207,18 @@ check_reworded_commits () { >reword-log && test_cmp reword-expected reword-log } + +# usage: set_replace_editor <file> +# +# Replace the todo file with the exact contents of the given file. +set_replace_editor () { + cat >script <<-\EOF && + cat FILENAME >"$1" + + echo 'rebase -i script after editing:' + cat "$1" + EOF + + sed -e "s/FILENAME/$1/g" <script | write_script fake-editor.sh && + test_set_editor "$(pwd)/fake-editor.sh" +} diff --git a/t/lib-submodule-update.sh b/t/lib-submodule-update.sh index f7c7df0ca4..03e0abbdb8 100644 --- a/t/lib-submodule-update.sh +++ b/t/lib-submodule-update.sh @@ -207,7 +207,7 @@ prolog () { # should be updated to an existing commit. reset_work_tree_to () { rm -rf submodule_update && - git clone submodule_update_repo submodule_update && + git clone --template= submodule_update_repo submodule_update && ( cd submodule_update && rm -rf sub1 && @@ -902,13 +902,14 @@ test_submodule_switch_recursing_with_args () { ' # ... but an ignored file is fine. 
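The set_replace_editor helper added to t/lib-rebase.sh above swaps the todo list generated by "git rebase -i" for the exact contents of a given file. A rough sketch of how a test might use it (illustrative only; it assumes the test library and lib-rebase.sh are already sourced, and "new-todo" is just a name chosen here):

    printf "%s\n" "pick $(git rev-parse HEAD)" >new-todo &&
    set_replace_editor new-todo &&
    git rebase -i HEAD~1

Because the fake editor also prints the todo list after replacing it, the test output shows exactly what the rebase was asked to run.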
test_expect_$RESULTOI "$command: added submodule removes an untracked ignored file" ' - test_when_finished "rm submodule_update/.git/info/exclude" && + test_when_finished "rm -rf submodule_update/.git/info" && prolog && reset_work_tree_to_interested no_submodule && ( cd submodule_update && git branch -t add_sub1 origin/add_sub1 && : >sub1 && + mkdir .git/info && echo sub1 >.git/info/exclude && $command add_sub1 && test_superproject_content origin/add_sub1 && @@ -951,7 +952,9 @@ test_submodule_switch_recursing_with_args () { reset_work_tree_to_interested add_sub1 && ( cd submodule_update && + rm -rf .git/modules/sub1/info && git branch -t replace_sub1_with_file origin/replace_sub1_with_file && + mkdir .git/modules/sub1/info && echo ignored >.git/modules/sub1/info/exclude && : >sub1/ignored && $command replace_sub1_with_file && diff --git a/t/perf/p0004-lazy-init-name-hash.sh b/t/perf/p0004-lazy-init-name-hash.sh index 1afc08fe7f..85be14e4dd 100755 --- a/t/perf/p0004-lazy-init-name-hash.sh +++ b/t/perf/p0004-lazy-init-name-hash.sh @@ -49,7 +49,7 @@ test_perf "single-threaded, $desc" " test-tool lazy-init-name-hash --single --count=$count " -test_perf REPO_BIG_ENOUGH_FOR_MULTI "multi-threaded, $desc" " +test_perf "multi-threaded, $desc" --prereq REPO_BIG_ENOUGH_FOR_MULTI " test-tool lazy-init-name-hash --multi --count=$count " diff --git a/t/perf/p0006-read-tree-checkout.sh b/t/perf/p0006-read-tree-checkout.sh index 900b385c4b..c481c012d2 100755 --- a/t/perf/p0006-read-tree-checkout.sh +++ b/t/perf/p0006-read-tree-checkout.sh @@ -46,7 +46,7 @@ test_expect_success "setup repo" ' ' test_perf "read-tree br_base br_ballast ($nr_files)" ' - git read-tree -m br_base br_ballast -n + git read-tree -n -m br_base br_ballast ' test_perf "switch between br_base br_ballast ($nr_files)" ' diff --git a/t/perf/p0071-sort.sh b/t/perf/p0071-sort.sh index ed366e2e12..ae4ddac864 100755 --- a/t/perf/p0071-sort.sh +++ b/t/perf/p0071-sort.sh @@ -40,11 +40,11 @@ done for file in unsorted sorted reversed do - test_perf "llist_mergesort() $file" " + test_perf "DEFINE_LIST_SORT $file" " test-tool mergesort sort <$file >actual " - test_expect_success "llist_mergesort() $file sorts like sort(1)" " + test_expect_success "DEFINE_LIST_SORT $file sorts like sort(1)" " test_cmp_bin sorted actual " done diff --git a/t/perf/p2000-sparse-operations.sh b/t/perf/p2000-sparse-operations.sh index c181110a43..fce8151d41 100755 --- a/t/perf/p2000-sparse-operations.sh +++ b/t/perf/p2000-sparse-operations.sh @@ -123,5 +123,6 @@ test_perf_on_all git blame $SPARSE_CONE/f3/a test_perf_on_all git read-tree -mu HEAD test_perf_on_all git checkout-index -f --all test_perf_on_all git update-index --add --remove $SPARSE_CONE/a +test_perf_on_all "git rm -f $SPARSE_CONE/a && git checkout HEAD -- $SPARSE_CONE/a" test_done diff --git a/t/perf/p7527-builtin-fsmonitor.sh b/t/perf/p7527-builtin-fsmonitor.sh index 9338b9ea00..c3f9a4caa4 100755 --- a/t/perf/p7527-builtin-fsmonitor.sh +++ b/t/perf/p7527-builtin-fsmonitor.sh @@ -249,7 +249,7 @@ test_expect_success "Cleanup temp and matrix branches" " do for fsm_val in $fsm_values do - cleanup $uc_val $fsm_val + cleanup $uc_val $fsm_val || return 1 done done " diff --git a/t/t0000-basic.sh b/t/t0000-basic.sh index 17a268ccd1..502b4bcf9e 100755 --- a/t/t0000-basic.sh +++ b/t/t0000-basic.sh @@ -578,6 +578,78 @@ test_expect_success 'subtest: --run invalid range end' ' EOF_ERR ' +test_expect_success 'subtest: --invert-exit-code without --immediate' ' + run_sub_test_lib_test_err full-pass \ + --invert-exit-code 
&& + check_sub_test_lib_test_err full-pass \ + <<-\EOF_OUT 3<<-EOF_ERR + ok 1 - passing test #1 + ok 2 - passing test #2 + ok 3 - passing test #3 + # passed all 3 test(s) + 1..3 + # faking up non-zero exit with --invert-exit-code + EOF_OUT + EOF_ERR +' + +test_expect_success 'subtest: --invert-exit-code with --immediate: all passed' ' + run_sub_test_lib_test_err full-pass \ + --invert-exit-code --immediate && + check_sub_test_lib_test_err full-pass \ + <<-\EOF_OUT 3<<-EOF_ERR + ok 1 - passing test #1 + ok 2 - passing test #2 + ok 3 - passing test #3 + # passed all 3 test(s) + 1..3 + # faking up non-zero exit with --invert-exit-code + EOF_OUT + EOF_ERR +' + +test_expect_success 'subtest: --invert-exit-code without --immediate: partial pass' ' + run_sub_test_lib_test partial-pass \ + --invert-exit-code && + check_sub_test_lib_test partial-pass <<-\EOF + ok 1 - passing test #1 + not ok 2 - # TODO induced breakage (--invert-exit-code): failing test #2 + # false + ok 3 - passing test #3 + # failed 1 among 3 test(s) + 1..3 + # faked up failures as TODO & now exiting with 0 due to --invert-exit-code + EOF +' + +test_expect_success 'subtest: --invert-exit-code with --immediate: partial pass' ' + run_sub_test_lib_test partial-pass \ + --invert-exit-code --immediate && + check_sub_test_lib_test partial-pass \ + <<-\EOF_OUT 3<<-EOF_ERR + ok 1 - passing test #1 + not ok 2 - # TODO induced breakage (--invert-exit-code): failing test #2 + # false + 1..2 + # faked up failures as TODO & now exiting with 0 due to --invert-exit-code + EOF_OUT + EOF_ERR +' + +test_expect_success 'subtest: --invert-exit-code --immediate: got a failure' ' + run_sub_test_lib_test partial-pass \ + --invert-exit-code --immediate && + check_sub_test_lib_test_err partial-pass \ + <<-\EOF_OUT 3<<-EOF_ERR + ok 1 - passing test #1 + not ok 2 - # TODO induced breakage (--invert-exit-code): failing test #2 + # false + 1..2 + # faked up failures as TODO & now exiting with 0 due to --invert-exit-code + EOF_OUT + EOF_ERR +' + test_expect_success 'subtest: tests respect prerequisites' ' write_and_run_sub_test_lib_test prereqs <<-\EOF && diff --git a/t/t0002-gitfile.sh b/t/t0002-gitfile.sh index f6356db183..26eaca095a 100755 --- a/t/t0002-gitfile.sh +++ b/t/t0002-gitfile.sh @@ -65,7 +65,7 @@ test_expect_success 'check commit-tree' ' test_path_is_file "$REAL/objects/$(objpath $SHA)" ' -test_expect_success !SANITIZE_LEAK 'check rev-list' ' +test_expect_success 'check rev-list' ' git update-ref "HEAD" "$SHA" && git rev-list HEAD >actual && echo $SHA >expected && diff --git a/t/t0003-attributes.sh b/t/t0003-attributes.sh index 143f100517..f7ee2f2ff0 100755 --- a/t/t0003-attributes.sh +++ b/t/t0003-attributes.sh @@ -3,6 +3,7 @@ test_description=gitattributes TEST_PASSES_SANITIZE_LEAK=true +TEST_CREATE_REPO_NO_TEMPLATE=1 . ./test-lib.sh attr_check_basic () { @@ -284,7 +285,7 @@ test_expect_success 'using --git-dir and --work-tree' ' ' test_expect_success 'setup bare' ' - git clone --bare . bare.git + git clone --template= --bare . 
bare.git ' test_expect_success 'bare repository: check that .gitattribute is ignored' ' @@ -315,6 +316,7 @@ test_expect_success 'bare repository: check that --cached honors index' ' test_expect_success 'bare repository: test info/attributes' ' ( cd bare.git && + mkdir info && ( echo "f test=f" && echo "a/i test=a/i" @@ -360,6 +362,7 @@ test_expect_success SYMLINKS 'symlinks respected in core.attributesFile' ' test_expect_success SYMLINKS 'symlinks respected in info/attributes' ' test_when_finished "rm .git/info/attributes" && + mkdir .git/info && ln -s ../../attr .git/info/attributes && attr_check file set ' diff --git a/t/t0004-unwritable.sh b/t/t0004-unwritable.sh index 2e9d652d82..8114fac73b 100755 --- a/t/t0004-unwritable.sh +++ b/t/t0004-unwritable.sh @@ -31,7 +31,7 @@ test_expect_success WRITE_TREE_OUT 'write-tree output on unwritable repository' test_cmp expect out.write-tree ' -test_expect_success POSIXPERM,SANITY,!SANITIZE_LEAK 'commit should notice unwritable repository' ' +test_expect_success POSIXPERM,SANITY 'commit should notice unwritable repository' ' test_when_finished "chmod 775 .git/objects .git/objects/??" && chmod a-w .git/objects .git/objects/?? && test_must_fail git commit -m second 2>out.commit diff --git a/t/t0008-ignores.sh b/t/t0008-ignores.sh index 5575dade8e..c70d11bc91 100755 --- a/t/t0008-ignores.sh +++ b/t/t0008-ignores.sh @@ -3,6 +3,7 @@ test_description=check-ignore TEST_PASSES_SANITIZE_LEAK=true +TEST_CREATE_REPO_NO_TEMPLATE=1 . ./test-lib.sh init_vars () { @@ -225,7 +226,8 @@ test_expect_success 'setup' ' !globaltwo globalthree EOF - cat <<-\EOF >>.git/info/exclude + mkdir .git/info && + cat <<-\EOF >.git/info/exclude per-repo EOF ' @@ -543,9 +545,9 @@ test_expect_success_multi 'submodule from subdirectory' '' ' test_expect_success 'global ignore not yet enabled' ' expect_from_stdin <<-\EOF && - .git/info/exclude:7:per-repo per-repo + .git/info/exclude:1:per-repo per-repo a/.gitignore:2:*three a/globalthree - .git/info/exclude:7:per-repo a/per-repo + .git/info/exclude:1:per-repo a/per-repo EOF test_check_ignore "-v globalone per-repo a/globalthree a/per-repo not-ignored a/globaltwo" ' @@ -566,10 +568,10 @@ test_expect_success 'global ignore with -v' ' enable_global_excludes && expect_from_stdin <<-EOF && $global_excludes:1:globalone globalone - .git/info/exclude:7:per-repo per-repo + .git/info/exclude:1:per-repo per-repo $global_excludes:3:globalthree globalthree a/.gitignore:2:*three a/globalthree - .git/info/exclude:7:per-repo a/per-repo + .git/info/exclude:1:per-repo a/per-repo $global_excludes:2:!globaltwo globaltwo EOF test_check_ignore "-v globalone per-repo globalthree a/globalthree a/per-repo not-ignored globaltwo" diff --git a/t/t0012-help.sh b/t/t0012-help.sh index 6c33a43690..4ed2f242eb 100755 --- a/t/t0012-help.sh +++ b/t/t0012-help.sh @@ -44,6 +44,8 @@ test_expect_success 'invalid usage' ' test_expect_code 129 git help -g add && test_expect_code 129 git help -a -g && + test_expect_code 129 git help --user-interfaces add && + test_expect_code 129 git help -g -c && test_expect_code 129 git help --config-for-completion add && test_expect_code 129 git help --config-sections-for-completion add @@ -104,9 +106,9 @@ test_expect_success 'git help' ' test_i18ngrep "^ commit " help.output && test_i18ngrep "^ fetch " help.output ' + test_expect_success 'git help -g' ' git help -g >help.output && - test_i18ngrep "^ attributes " help.output && test_i18ngrep "^ everyday " help.output && test_i18ngrep "^ tutorial " help.output ' @@ -127,6 +129,12 @@ 
test_expect_success 'git help succeeds without git.html' ' test_cmp expect test-browser.log ' +test_expect_success 'git help --user-interfaces' ' + git help --user-interfaces >help.output && + grep "^ attributes " help.output && + grep "^ mailmap " help.output +' + test_expect_success 'git help -c' ' git help -c >help.output && cat >expect <<-\EOF && @@ -220,6 +228,10 @@ test_expect_success "'git help -a' section spacing" ' Low-level Commands / Syncing Repositories Low-level Commands / Internal Helpers + + User-facing repository, command and file interfaces + + Developer-facing file file formats, protocols and interfaces EOF test_cmp expect actual ' diff --git a/t/t0015-hash.sh b/t/t0015-hash.sh index 086822fc45..0a087a1983 100755 --- a/t/t0015-hash.sh +++ b/t/t0015-hash.sh @@ -1,8 +1,9 @@ #!/bin/sh test_description='test basic hash implementation' -. ./test-lib.sh +TEST_PASSES_SANITIZE_LEAK=true +. ./test-lib.sh test_expect_success 'test basic SHA-1 hash values' ' test-tool sha1 </dev/null >actual && diff --git a/t/t0019-json-writer.sh b/t/t0019-json-writer.sh index 3b0c336b38..19a730c29e 100755 --- a/t/t0019-json-writer.sh +++ b/t/t0019-json-writer.sh @@ -1,6 +1,8 @@ #!/bin/sh test_description='test json-writer JSON generation' + +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh test_expect_success 'unit test of json-writer routines' ' diff --git a/t/t0021-conversion.sh b/t/t0021-conversion.sh index 1c840348bd..abecd75e4e 100755 --- a/t/t0021-conversion.sh +++ b/t/t0021-conversion.sh @@ -17,9 +17,6 @@ tr \ 'nopqrstuvwxyzabcdefghijklmNOPQRSTUVWXYZABCDEFGHIJKLM' EOF -write_script rot13-filter.pl "$PERL_PATH" \ - <"$TEST_DIRECTORY"/t0021/rot13-filter.pl - generate_random_characters () { LEN=$1 NAME=$2 @@ -365,8 +362,8 @@ test_expect_success 'diff does not reuse worktree files that need cleaning' ' test_line_count = 0 count ' -test_expect_success PERL 'required process filter should filter data' ' - test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" && +test_expect_success 'required process filter should filter data' ' + test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean smudge" && test_config_global filter.protocol.required true && rm -rf repo && mkdir repo && @@ -450,8 +447,8 @@ test_expect_success PERL 'required process filter should filter data' ' ) ' -test_expect_success PERL 'required process filter should filter data for various subcommands' ' - test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" && +test_expect_success 'required process filter should filter data for various subcommands' ' + test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean smudge" && test_config_global filter.protocol.required true && ( cd repo && @@ -561,9 +558,9 @@ test_expect_success PERL 'required process filter should filter data for various ) ' -test_expect_success PERL 'required process filter takes precedence' ' +test_expect_success 'required process filter takes precedence' ' test_config_global filter.protocol.clean false && - test_config_global filter.protocol.process "rot13-filter.pl debug.log clean" && + test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean" && test_config_global filter.protocol.required true && rm -rf repo && mkdir repo && @@ -587,8 +584,8 @@ test_expect_success PERL 'required process filter takes precedence' ' ) ' -test_expect_success PERL 'required process filter should be used only for "clean" operation only' ' - 
test_config_global filter.protocol.process "rot13-filter.pl debug.log clean" && +test_expect_success 'required process filter should be used only for "clean" operation only' ' + test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean" && rm -rf repo && mkdir repo && ( @@ -622,8 +619,8 @@ test_expect_success PERL 'required process filter should be used only for "clean ) ' -test_expect_success PERL 'required process filter should process multiple packets' ' - test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" && +test_expect_success 'required process filter should process multiple packets' ' + test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean smudge" && test_config_global filter.protocol.required true && rm -rf repo && @@ -687,8 +684,8 @@ test_expect_success PERL 'required process filter should process multiple packet ) ' -test_expect_success PERL 'required process filter with clean error should fail' ' - test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" && +test_expect_success 'required process filter with clean error should fail' ' + test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean smudge" && test_config_global filter.protocol.required true && rm -rf repo && mkdir repo && @@ -706,8 +703,8 @@ test_expect_success PERL 'required process filter with clean error should fail' ) ' -test_expect_success PERL 'process filter should restart after unexpected write failure' ' - test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" && +test_expect_success 'process filter should restart after unexpected write failure' ' + test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean smudge" && rm -rf repo && mkdir repo && ( @@ -735,7 +732,7 @@ test_expect_success PERL 'process filter should restart after unexpected write f rm -f debug.log && git checkout --quiet --no-progress . 
2>git-stderr.log && - grep "smudge write error at" git-stderr.log && + grep "smudge write error" git-stderr.log && test_i18ngrep "error: external filter" git-stderr.log && cat >expected.log <<-EOF && @@ -761,8 +758,8 @@ test_expect_success PERL 'process filter should restart after unexpected write f ) ' -test_expect_success PERL 'process filter should not be restarted if it signals an error' ' - test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" && +test_expect_success 'process filter should not be restarted if it signals an error' ' + test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean smudge" && rm -rf repo && mkdir repo && ( @@ -804,8 +801,8 @@ test_expect_success PERL 'process filter should not be restarted if it signals a ) ' -test_expect_success PERL 'process filter abort stops processing of all further files' ' - test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" && +test_expect_success 'process filter abort stops processing of all further files' ' + test_config_global filter.protocol.process "test-tool rot13-filter --log=debug.log clean smudge" && rm -rf repo && mkdir repo && ( @@ -861,10 +858,10 @@ test_expect_success PERL 'invalid process filter must fail (and not hang!)' ' ) ' -test_expect_success PERL 'delayed checkout in process filter' ' - test_config_global filter.a.process "rot13-filter.pl a.log clean smudge delay" && +test_expect_success 'delayed checkout in process filter' ' + test_config_global filter.a.process "test-tool rot13-filter --log=a.log clean smudge delay" && test_config_global filter.a.required true && - test_config_global filter.b.process "rot13-filter.pl b.log clean smudge delay" && + test_config_global filter.b.process "test-tool rot13-filter --log=b.log clean smudge delay" && test_config_global filter.b.required true && rm -rf repo && @@ -940,8 +937,8 @@ test_expect_success PERL 'delayed checkout in process filter' ' ) ' -test_expect_success PERL 'missing file in delayed checkout' ' - test_config_global filter.bug.process "rot13-filter.pl bug.log clean smudge delay" && +test_expect_success 'missing file in delayed checkout' ' + test_config_global filter.bug.process "test-tool rot13-filter --log=bug.log clean smudge delay" && test_config_global filter.bug.required true && rm -rf repo && @@ -960,8 +957,8 @@ test_expect_success PERL 'missing file in delayed checkout' ' grep "error: .missing-delay\.a. 
was not filtered properly" git-stderr.log ' -test_expect_success PERL 'invalid file in delayed checkout' ' - test_config_global filter.bug.process "rot13-filter.pl bug.log clean smudge delay" && +test_expect_success 'invalid file in delayed checkout' ' + test_config_global filter.bug.process "test-tool rot13-filter --log=bug.log clean smudge delay" && test_config_global filter.bug.required true && rm -rf repo && @@ -990,10 +987,10 @@ do mode_prereq='UTF8_NFD_TO_NFC' ;; esac - test_expect_success PERL,SYMLINKS,$mode_prereq \ + test_expect_success SYMLINKS,$mode_prereq \ "delayed checkout with $mode-collision don't write to the wrong place" ' test_config_global filter.delay.process \ - "\"$TEST_ROOT/rot13-filter.pl\" --always-delay delayed.log clean smudge delay" && + "test-tool rot13-filter --always-delay --log=delayed.log clean smudge delay" && test_config_global filter.delay.required true && git init $mode-collision && @@ -1026,12 +1023,12 @@ do ' done -test_expect_success PERL,SYMLINKS,CASE_INSENSITIVE_FS \ +test_expect_success SYMLINKS,CASE_INSENSITIVE_FS \ "delayed checkout with submodule collision don't write to the wrong place" ' git init collision-with-submodule && ( cd collision-with-submodule && - git config filter.delay.process "\"$TEST_ROOT/rot13-filter.pl\" --always-delay delayed.log clean smudge delay" && + git config filter.delay.process "test-tool rot13-filter --always-delay --log=delayed.log clean smudge delay" && git config filter.delay.required true && # We need Git to treat the submodule "a" and the @@ -1062,11 +1059,11 @@ test_expect_success PERL,SYMLINKS,CASE_INSENSITIVE_FS \ ) ' -test_expect_success PERL 'setup for progress tests' ' +test_expect_success 'setup for progress tests' ' git init progress && ( cd progress && - git config filter.delay.process "rot13-filter.pl delay-progress.log clean smudge delay" && + git config filter.delay.process "test-tool rot13-filter --log=delay-progress.log clean smudge delay" && git config filter.delay.required true && echo "*.a filter=delay" >.gitattributes && @@ -1132,12 +1129,12 @@ do ' done -test_expect_success PERL 'delayed checkout correctly reports the number of updated entries' ' +test_expect_success 'delayed checkout correctly reports the number of updated entries' ' rm -rf repo && git init repo && ( cd repo && - git config filter.delay.process "../rot13-filter.pl delayed.log clean smudge delay" && + git config filter.delay.process "test-tool rot13-filter --log=delayed.log clean smudge delay" && git config filter.delay.required true && echo "*.a filter=delay" >.gitattributes && diff --git a/t/t0021/rot13-filter.pl b/t/t0021/rot13-filter.pl deleted file mode 100644 index 7bb93768f3..0000000000 --- a/t/t0021/rot13-filter.pl +++ /dev/null @@ -1,247 +0,0 @@ -# -# Example implementation for the Git filter protocol version 2 -# See Documentation/gitattributes.txt, section "Filter Protocol" -# -# Usage: rot13-filter.pl [--always-delay] <log path> <capabilities> -# -# Log path defines a debug log file that the script writes to. The -# subsequent arguments define a list of supported protocol capabilities -# ("clean", "smudge", etc). -# -# When --always-delay is given all pathnames with the "can-delay" flag -# that don't appear on the list bellow are delayed with a count of 1 -# (see more below). -# -# This implementation supports special test cases: -# (1) If data with the pathname "clean-write-fail.r" is processed with -# a "clean" operation then the write operation will die. 
-# (2) If data with the pathname "smudge-write-fail.r" is processed with -# a "smudge" operation then the write operation will die. -# (3) If data with the pathname "error.r" is processed with any -# operation then the filter signals that it cannot or does not want -# to process the file. -# (4) If data with the pathname "abort.r" is processed with any -# operation then the filter signals that it cannot or does not want -# to process the file and any file after that is processed with the -# same command. -# (5) If data with a pathname that is a key in the DELAY hash is -# requested (e.g. "test-delay10.a") then the filter responds with -# a "delay" status and sets the "requested" field in the DELAY hash. -# The filter will signal the availability of this object after -# "count" (field in DELAY hash) "list_available_blobs" commands. -# (6) If data with the pathname "missing-delay.a" is processed that the -# filter will drop the path from the "list_available_blobs" response. -# (7) If data with the pathname "invalid-delay.a" is processed that the -# filter will add the path "unfiltered" which was not delayed before -# to the "list_available_blobs" response. -# - -use 5.008; -sub gitperllib { - # Git assumes that all path lists are Unix-y colon-separated ones. But - # when the Git for Windows executes the test suite, its MSYS2 Bash - # calls git.exe, and colon-separated path lists are converted into - # Windows-y semicolon-separated lists of *Windows* paths (which - # naturally contain a colon after the drive letter, so splitting by - # colons simply does not cut it). - # - # Detect semicolon-separated path list and handle them appropriately. - - if ($ENV{GITPERLLIB} =~ /;/) { - return split(/;/, $ENV{GITPERLLIB}); - } - return split(/:/, $ENV{GITPERLLIB}); -} -use lib (gitperllib()); -use strict; -use warnings; -use IO::File; -use Git::Packet; - -my $MAX_PACKET_CONTENT_SIZE = 65516; - -my $always_delay = 0; -if ( $ARGV[0] eq '--always-delay' ) { - $always_delay = 1; - shift @ARGV; -} - -my $log_file = shift @ARGV; -my @capabilities = @ARGV; - -open my $debug, ">>", $log_file or die "cannot open log file: $!"; - -my %DELAY = ( - 'test-delay10.a' => { "requested" => 0, "count" => 1 }, - 'test-delay11.a' => { "requested" => 0, "count" => 1 }, - 'test-delay20.a' => { "requested" => 0, "count" => 2 }, - 'test-delay10.b' => { "requested" => 0, "count" => 1 }, - 'missing-delay.a' => { "requested" => 0, "count" => 1 }, - 'invalid-delay.a' => { "requested" => 0, "count" => 1 }, -); - -sub rot13 { - my $str = shift; - $str =~ y/A-Za-z/N-ZA-Mn-za-m/; - return $str; -} - -print $debug "START\n"; -$debug->flush(); - -packet_initialize("git-filter", 2); - -my %remote_caps = packet_read_and_check_capabilities("clean", "smudge", "delay"); -packet_check_and_write_capabilities(\%remote_caps, @capabilities); - -print $debug "init handshake complete\n"; -$debug->flush(); - -while (1) { - my ( $res, $command ) = packet_key_val_read("command"); - if ( $res == -1 ) { - print $debug "STOP\n"; - exit(); - } - print $debug "IN: $command"; - $debug->flush(); - - if ( $command eq "list_available_blobs" ) { - # Flush - packet_compare_lists([1, ""], packet_bin_read()) || - die "bad list_available_blobs end"; - - foreach my $pathname ( sort keys %DELAY ) { - if ( $DELAY{$pathname}{"requested"} >= 1 ) { - $DELAY{$pathname}{"count"} = $DELAY{$pathname}{"count"} - 1; - if ( $pathname eq "invalid-delay.a" ) { - # Send Git a pathname that was not delayed earlier - packet_txt_write("pathname=unfiltered"); - } - if ( $pathname eq 
"missing-delay.a" ) { - # Do not signal Git that this file is available - } elsif ( $DELAY{$pathname}{"count"} == 0 ) { - print $debug " $pathname"; - packet_txt_write("pathname=$pathname"); - } - } - } - - packet_flush(); - - print $debug " [OK]\n"; - $debug->flush(); - packet_txt_write("status=success"); - packet_flush(); - } else { - my ( $res, $pathname ) = packet_key_val_read("pathname"); - if ( $res == -1 ) { - die "unexpected EOF while expecting pathname"; - } - print $debug " $pathname"; - $debug->flush(); - - # Read until flush - my ( $done, $buffer ) = packet_txt_read(); - while ( $buffer ne '' ) { - if ( $buffer eq "can-delay=1" ) { - if ( exists $DELAY{$pathname} and $DELAY{$pathname}{"requested"} == 0 ) { - $DELAY{$pathname}{"requested"} = 1; - } elsif ( !exists $DELAY{$pathname} and $always_delay ) { - $DELAY{$pathname} = { "requested" => 1, "count" => 1 }; - } - } elsif ($buffer =~ /^(ref|treeish|blob)=/) { - print $debug " $buffer"; - } else { - # In general, filters need to be graceful about - # new metadata, since it's documented that we - # can pass any key-value pairs, but for tests, - # let's be a little stricter. - die "Unknown message '$buffer'"; - } - - ( $done, $buffer ) = packet_txt_read(); - } - if ( $done == -1 ) { - die "unexpected EOF after pathname '$pathname'"; - } - - my $input = ""; - { - binmode(STDIN); - my $buffer; - my $done = 0; - while ( !$done ) { - ( $done, $buffer ) = packet_bin_read(); - $input .= $buffer; - } - if ( $done == -1 ) { - die "unexpected EOF while reading input for '$pathname'"; - } - print $debug " " . length($input) . " [OK] -- "; - $debug->flush(); - } - - my $output; - if ( exists $DELAY{$pathname} and exists $DELAY{$pathname}{"output"} ) { - $output = $DELAY{$pathname}{"output"} - } elsif ( $pathname eq "error.r" or $pathname eq "abort.r" ) { - $output = ""; - } elsif ( $command eq "clean" and grep( /^clean$/, @capabilities ) ) { - $output = rot13($input); - } elsif ( $command eq "smudge" and grep( /^smudge$/, @capabilities ) ) { - $output = rot13($input); - } else { - die "bad command '$command'"; - } - - if ( $pathname eq "error.r" ) { - print $debug "[ERROR]\n"; - $debug->flush(); - packet_txt_write("status=error"); - packet_flush(); - } elsif ( $pathname eq "abort.r" ) { - print $debug "[ABORT]\n"; - $debug->flush(); - packet_txt_write("status=abort"); - packet_flush(); - } elsif ( $command eq "smudge" and - exists $DELAY{$pathname} and - $DELAY{$pathname}{"requested"} == 1 ) { - print $debug "[DELAYED]\n"; - $debug->flush(); - packet_txt_write("status=delayed"); - packet_flush(); - $DELAY{$pathname}{"requested"} = 2; - $DELAY{$pathname}{"output"} = $output; - } else { - packet_txt_write("status=success"); - packet_flush(); - - if ( $pathname eq "${command}-write-fail.r" ) { - print $debug "[WRITE FAIL]\n"; - $debug->flush(); - die "${command} write error"; - } - - print $debug "OUT: " . length($output) . 
" "; - $debug->flush(); - - while ( length($output) > 0 ) { - my $packet = substr( $output, 0, $MAX_PACKET_CONTENT_SIZE ); - packet_bin_write($packet); - # dots represent the number of packets - print $debug "."; - if ( length($output) > $MAX_PACKET_CONTENT_SIZE ) { - $output = substr( $output, $MAX_PACKET_CONTENT_SIZE ); - } else { - $output = ""; - } - } - packet_flush(); - print $debug " [OK]\n"; - $debug->flush(); - packet_flush(); - } - } -} diff --git a/t/t0027-auto-crlf.sh b/t/t0027-auto-crlf.sh index 7f80f46393..a22e0e1382 100755 --- a/t/t0027-auto-crlf.sh +++ b/t/t0027-auto-crlf.sh @@ -2,6 +2,7 @@ test_description='CRLF conversion all combinations' +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh compare_files () { diff --git a/t/t0028-working-tree-encoding.sh b/t/t0028-working-tree-encoding.sh index 82905a2156..c196fdb0ee 100755 --- a/t/t0028-working-tree-encoding.sh +++ b/t/t0028-working-tree-encoding.sh @@ -5,6 +5,8 @@ test_description='working-tree-encoding conversion via gitattributes' GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME +TEST_PASSES_SANITIZE_LEAK=true +TEST_CREATE_REPO_NO_TEMPLATE=1 . ./test-lib.sh . "$TEST_DIRECTORY/lib-encoding.sh" @@ -69,6 +71,7 @@ test_expect_success 'check $GIT_DIR/info/attributes support' ' test_when_finished "rm -f test.utf32.git" && test_when_finished "git reset --hard HEAD" && + mkdir .git/info && echo "*.utf32 text working-tree-encoding=utf-32" >.git/info/attributes && git add test.utf32 && diff --git a/t/t0032-reftable-unittest.sh b/t/t0032-reftable-unittest.sh index 0ed14971a5..471cb37ac2 100755 --- a/t/t0032-reftable-unittest.sh +++ b/t/t0032-reftable-unittest.sh @@ -5,6 +5,7 @@ test_description='reftable unittests' +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh test_expect_success 'unittests' ' diff --git a/t/t0033-safe-directory.sh b/t/t0033-safe-directory.sh index 3908597d42..aecb308cf6 100755 --- a/t/t0033-safe-directory.sh +++ b/t/t0033-safe-directory.sh @@ -2,6 +2,7 @@ test_description='verify safe.directory checks' +TEST_PASSES_SANITIZE_LEAK=true . 
./test-lib.sh GIT_TEST_ASSUME_DIFFERENT_OWNER=1 @@ -16,24 +17,20 @@ test_expect_success 'safe.directory is not set' ' expect_rejected_dir ' -test_expect_success 'ignoring safe.directory on the command line' ' - test_must_fail git -c safe.directory="$(pwd)" status 2>err && - grep "dubious ownership" err +test_expect_success 'safe.directory on the command line' ' + git -c safe.directory="$(pwd)" status ' -test_expect_success 'ignoring safe.directory in the environment' ' - test_must_fail env GIT_CONFIG_COUNT=1 \ - GIT_CONFIG_KEY_0="safe.directory" \ - GIT_CONFIG_VALUE_0="$(pwd)" \ - git status 2>err && - grep "dubious ownership" err +test_expect_success 'safe.directory in the environment' ' + env GIT_CONFIG_COUNT=1 \ + GIT_CONFIG_KEY_0="safe.directory" \ + GIT_CONFIG_VALUE_0="$(pwd)" \ + git status ' -test_expect_success 'ignoring safe.directory in GIT_CONFIG_PARAMETERS' ' - test_must_fail env \ - GIT_CONFIG_PARAMETERS="${SQ}safe.directory${SQ}=${SQ}$(pwd)${SQ}" \ - git status 2>err && - grep "dubious ownership" err +test_expect_success 'safe.directory in GIT_CONFIG_PARAMETERS' ' + env GIT_CONFIG_PARAMETERS="${SQ}safe.directory${SQ}=${SQ}$(pwd)${SQ}" \ + git status ' test_expect_success 'ignoring safe.directory in repo config' ' diff --git a/t/t0035-safe-bare-repository.sh b/t/t0035-safe-bare-repository.sh new file mode 100755 index 0000000000..ecbdc8238d --- /dev/null +++ b/t/t0035-safe-bare-repository.sh @@ -0,0 +1,54 @@ +#!/bin/sh + +test_description='verify safe.bareRepository checks' + +TEST_PASSES_SANITIZE_LEAK=true +. ./test-lib.sh + +pwd="$(pwd)" + +expect_accepted () { + git "$@" rev-parse --git-dir +} + +expect_rejected () { + test_must_fail git "$@" rev-parse --git-dir 2>err && + grep -F "cannot use bare repository" err +} + +test_expect_success 'setup bare repo in worktree' ' + git init outer-repo && + git init --bare outer-repo/bare-repo +' + +test_expect_success 'safe.bareRepository unset' ' + expect_accepted -C outer-repo/bare-repo +' + +test_expect_success 'safe.bareRepository=all' ' + test_config_global safe.bareRepository all && + expect_accepted -C outer-repo/bare-repo +' + +test_expect_success 'safe.bareRepository=explicit' ' + test_config_global safe.bareRepository explicit && + expect_rejected -C outer-repo/bare-repo +' + +test_expect_success 'safe.bareRepository in the repository' ' + # safe.bareRepository must not be "explicit", otherwise + # git config fails with "fatal: not in a git directory" (like + # safe.directory) + test_config -C outer-repo/bare-repo safe.bareRepository \ + all && + test_config_global safe.bareRepository explicit && + expect_rejected -C outer-repo/bare-repo +' + +test_expect_success 'safe.bareRepository on the command line' ' + test_config_global safe.bareRepository explicit && + expect_accepted -C outer-repo/bare-repo \ + -c safe.bareRepository=all +' + +test_done diff --git a/t/t0050-filesystem.sh b/t/t0050-filesystem.sh index 5c9dc90d0b..325eb1c3cd 100755 --- a/t/t0050-filesystem.sh +++ b/t/t0050-filesystem.sh @@ -5,6 +5,7 @@ test_description='Various filesystem issues' GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh auml=$(printf '\303\244') diff --git a/t/t0060-path-utils.sh b/t/t0060-path-utils.sh index aa35350b6f..1f2007e62b 100755 --- a/t/t0060-path-utils.sh +++ b/t/t0060-path-utils.sh @@ -5,6 +5,7 @@ test_description='Test various path utilities' +TEST_PASSES_SANITIZE_LEAK=true . 
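A minimal sketch of the behavior the updated t0033 and new t0035 tests above exercise (illustrative only; the outer-repo/bare-repo layout mirrors the test fixture, and the commands assume a build containing these changes):

    # safe.directory is now honored when given on the command line or via
    # the environment, not only from a configuration file:
    git -c safe.directory="$(pwd)" status
    GIT_CONFIG_COUNT=1 GIT_CONFIG_KEY_0=safe.directory \
        GIT_CONFIG_VALUE_0="$(pwd)" git status

    # safe.bareRepository=explicit refuses a bare repository embedded in
    # another worktree unless overridden on the command line:
    git config --global safe.bareRepository explicit
    git -C outer-repo/bare-repo -c safe.bareRepository=all rev-parse --git-dir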
./test-lib.sh norm_path() { diff --git a/t/t0071-sort.sh b/t/t0071-sort.sh index 6f9a501c72..ba8ad1d1ca 100755 --- a/t/t0071-sort.sh +++ b/t/t0071-sort.sh @@ -5,7 +5,7 @@ test_description='verify sort functions' TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh -test_expect_success 'llist_mergesort()' ' +test_expect_success 'DEFINE_LIST_SORT_DEBUG' ' test-tool mergesort test ' diff --git a/t/t0090-cache-tree.sh b/t/t0090-cache-tree.sh index 9067572648..d8e2fc42e1 100755 --- a/t/t0090-cache-tree.sh +++ b/t/t0090-cache-tree.sh @@ -5,6 +5,8 @@ test_description="Test whether cache-tree is properly updated Tests whether various commands properly update and/or rewrite the cache-tree extension. " + +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh cmp_cache_tree () { diff --git a/t/t0091-bugreport.sh b/t/t0091-bugreport.sh index 08f5fe9cae..b6d2f591ac 100755 --- a/t/t0091-bugreport.sh +++ b/t/t0091-bugreport.sh @@ -78,4 +78,52 @@ test_expect_success 'indicates populated hooks' ' test_cmp expect actual ' +test_expect_success UNZIP '--diagnose creates diagnostics zip archive' ' + test_when_finished rm -rf report && + + git bugreport --diagnose -o report -s test >out && + + zip_path=report/git-diagnostics-test.zip && + grep "Available space" out && + test_path_is_file "$zip_path" && + + # Check zipped archive content + "$GIT_UNZIP" -p "$zip_path" diagnostics.log >out && + test_file_not_empty out && + + "$GIT_UNZIP" -p "$zip_path" packs-local.txt >out && + grep ".git/objects" out && + + "$GIT_UNZIP" -p "$zip_path" objects-local.txt >out && + grep "^Total: [0-9][0-9]*" out && + + # Should not include .git directory contents by default + ! "$GIT_UNZIP" -l "$zip_path" | grep ".git/" +' + +test_expect_success UNZIP '--diagnose=stats excludes .git dir contents' ' + test_when_finished rm -rf report && + + git bugreport --diagnose=stats -o report -s test >out && + + # Includes pack quantity/size info + "$GIT_UNZIP" -p "$zip_path" packs-local.txt >out && + grep ".git/objects" out && + + # Does not include .git directory contents + ! "$GIT_UNZIP" -l "$zip_path" | grep ".git/" +' + +test_expect_success UNZIP '--diagnose=all includes .git dir contents' ' + test_when_finished rm -rf report && + + git bugreport --diagnose=all -o report -s test >out && + + # Includes .git directory contents + "$GIT_UNZIP" -l "$zip_path" | grep ".git/" && + + "$GIT_UNZIP" -p "$zip_path" .git/HEAD >out && + test_file_not_empty out +' + test_done diff --git a/t/t0092-diagnose.sh b/t/t0092-diagnose.sh new file mode 100755 index 0000000000..fca9b58489 --- /dev/null +++ b/t/t0092-diagnose.sh @@ -0,0 +1,60 @@ +#!/bin/sh + +test_description='git diagnose' + +TEST_PASSES_SANITIZE_LEAK=true +. ./test-lib.sh + +test_expect_success UNZIP 'creates diagnostics zip archive' ' + test_when_finished rm -rf report && + + git diagnose -o report -s test >out && + grep "Available space" out && + + zip_path=report/git-diagnostics-test.zip && + test_path_is_file "$zip_path" && + + # Check zipped archive content + "$GIT_UNZIP" -p "$zip_path" diagnostics.log >out && + test_file_not_empty out && + + "$GIT_UNZIP" -p "$zip_path" packs-local.txt >out && + grep ".git/objects" out && + + "$GIT_UNZIP" -p "$zip_path" objects-local.txt >out && + grep "^Total: [0-9][0-9]*" out && + + # Should not include .git directory contents by default + ! 
"$GIT_UNZIP" -l "$zip_path" | grep ".git/" +' + +test_expect_success UNZIP '--mode=stats excludes .git dir contents' ' + test_when_finished rm -rf report && + + git diagnose -o report -s test --mode=stats >out && + + # Includes pack quantity/size info + "$GIT_UNZIP" -p "$zip_path" packs-local.txt >out && + grep ".git/objects" out && + + # Does not include .git directory contents + ! "$GIT_UNZIP" -l "$zip_path" | grep ".git/" +' + +test_expect_success UNZIP '--mode=all includes .git dir contents' ' + test_when_finished rm -rf report && + + git diagnose -o report -s test --mode=all >out && + + # Includes pack quantity/size info + "$GIT_UNZIP" -p "$zip_path" packs-local.txt >out && + grep ".git/objects" out && + + # Includes .git directory contents + "$GIT_UNZIP" -l "$zip_path" | grep ".git/" && + + "$GIT_UNZIP" -p "$zip_path" .git/HEAD >out && + test_file_not_empty out +' + +test_done diff --git a/t/t0095-bloom.sh b/t/t0095-bloom.sh index 5945973552..b567383eb8 100755 --- a/t/t0095-bloom.sh +++ b/t/t0095-bloom.sh @@ -1,6 +1,8 @@ #!/bin/sh test_description='Testing the various Bloom filter computations in bloom.c' + +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh test_expect_success 'compute unseeded murmur3 hash for empty string' ' @@ -67,7 +69,7 @@ test_expect_success 'compute bloom key for test string 2' ' test_cmp expect actual ' -test_expect_success 'get bloom filters for commit with no changes' ' +test_expect_success !SANITIZE_LEAK 'get bloom filters for commit with no changes' ' git init && git commit --allow-empty -m "c0" && cat >expect <<-\EOF && diff --git a/t/t0110-urlmatch-normalization.sh b/t/t0110-urlmatch-normalization.sh index 4dc9fecf72..12d817fbd3 100755 --- a/t/t0110-urlmatch-normalization.sh +++ b/t/t0110-urlmatch-normalization.sh @@ -1,6 +1,8 @@ #!/bin/sh test_description='urlmatch URL normalization' + +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh # The base name of the test url files diff --git a/t/t0202-gettext-perl.sh b/t/t0202-gettext-perl.sh index df2ea34932..5a6f28051b 100755 --- a/t/t0202-gettext-perl.sh +++ b/t/t0202-gettext-perl.sh @@ -7,22 +7,12 @@ test_description='Perl gettext interface (Git::I18N)' TEST_PASSES_SANITIZE_LEAK=true . ./lib-gettext.sh +. "$TEST_DIRECTORY"/lib-perl.sh +skip_all_if_no_Test_More -if ! test_have_prereq PERL; then - skip_all='skipping perl interface tests, perl not available' - test_done -fi - -perl -MTest::More -e 0 2>/dev/null || { - skip_all="Perl Test::More unavailable, skipping test" - test_done -} - -# The external test will outputs its own plan -test_external_has_tap=1 - -test_external_without_stderr \ - 'Perl Git::I18N API' \ - perl "$TEST_DIRECTORY"/t0202/test.pl +test_expect_success 'run t0202/test.pl to test Git::I18N.pm' ' + "$PERL_PATH" "$TEST_DIRECTORY"/t0202/test.pl 2>stderr && + test_must_be_empty stderr +' test_done diff --git a/t/t0203-gettext-setlocale-sanity.sh b/t/t0203-gettext-setlocale-sanity.sh index 0ce1f22eff..86cff324ff 100755 --- a/t/t0203-gettext-setlocale-sanity.sh +++ b/t/t0203-gettext-setlocale-sanity.sh @@ -5,6 +5,7 @@ test_description="The Git C functions aren't broken by setlocale(3)" +TEST_PASSES_SANITIZE_LEAK=true . 
./lib-gettext.sh test_expect_success 'git show a ISO-8859-1 commit under C locale' ' diff --git a/t/t0212/parse_events.perl b/t/t0212/parse_events.perl index b6408560c0..30a9f51e9f 100644 --- a/t/t0212/parse_events.perl +++ b/t/t0212/parse_events.perl @@ -216,12 +216,19 @@ while (<>) { elsif ($event eq 'data') { my $cat = $line->{'category'}; - if ($cat eq 'test_category') { - - my $key = $line->{'key'}; - my $value = $line->{'value'}; - $processes->{$sid}->{'data'}->{$cat}->{$key} = $value; - } + my $key = $line->{'key'}; + my $value = $line->{'value'}; + $processes->{$sid}->{'data'}->{$cat}->{$key} = $value; + } + + elsif ($event eq 'data_json') { + # NEEDSWORK: Ignore due to + # compat/win32/trace2_win32_process_info.c, which should log a + # "cmd_ancestry" event instead. + } + + else { + push @{$processes->{$sid}->{$event}} => $line->{value}; } # This trace2 target does not emit 'printf' events. diff --git a/t/t1006-cat-file.sh b/t/t1006-cat-file.sh index dadf3b1458..23b8942edb 100755 --- a/t/t1006-cat-file.sh +++ b/t/t1006-cat-file.sh @@ -88,7 +88,8 @@ done for opt in --buffer \ --follow-symlinks \ - --batch-all-objects + --batch-all-objects \ + -z do test_expect_success "usage: bad option combination: $opt without batch mode" ' test_incompatible_usage git cat-file $opt && @@ -100,6 +101,10 @@ echo_without_newline () { printf '%s' "$*" } +echo_without_newline_nul () { + echo_without_newline "$@" | tr '\n' '\0' +} + strlen () { echo_without_newline "$1" | wc -c | sed -e 's/^ *//' } @@ -398,6 +403,12 @@ test_expect_success '--batch with multiple sha1s gives correct format' ' test "$(maybe_remove_timestamp "$batch_output" 1)" = "$(maybe_remove_timestamp "$(echo_without_newline "$batch_input" | git cat-file --batch)" 1)" ' +test_expect_success '--batch, -z with multiple sha1s gives correct format' ' + echo_without_newline_nul "$batch_input" >in && + test "$(maybe_remove_timestamp "$batch_output" 1)" = \ + "$(maybe_remove_timestamp "$(git cat-file --batch -z <in)" 1)" +' + batch_check_input="$hello_sha1 $tree_sha1 $commit_sha1 @@ -418,6 +429,30 @@ test_expect_success "--batch-check with multiple sha1s gives correct format" ' "$(echo_without_newline "$batch_check_input" | git cat-file --batch-check)" ' +test_expect_success "--batch-check, -z with multiple sha1s gives correct format" ' + echo_without_newline_nul "$batch_check_input" >in && + test "$batch_check_output" = "$(git cat-file --batch-check -z <in)" +' + +test_expect_success FUNNYNAMES '--batch-check, -z with newline in input' ' + touch -- "newline${LF}embedded" && + git add -- "newline${LF}embedded" && + git commit -m "file with newline embedded" && + test_tick && + + printf "HEAD:newline${LF}embedded" >in && + git cat-file --batch-check -z <in >actual && + + echo "$(git rev-parse "HEAD:newline${LF}embedded") blob 0" >expect && + test_cmp expect actual +' + +batch_command_multiple_info="info $hello_sha1 +info $tree_sha1 +info $commit_sha1 +info $tag_sha1 +info deadbeef" + test_expect_success '--batch-command with multiple info calls gives correct format' ' cat >expect <<-EOF && $hello_sha1 blob $hello_size @@ -427,17 +462,23 @@ test_expect_success '--batch-command with multiple info calls gives correct form deadbeef missing EOF - git cat-file --batch-command --buffer >actual <<-EOF && - info $hello_sha1 - info $tree_sha1 - info $commit_sha1 - info $tag_sha1 - info deadbeef - EOF + echo "$batch_command_multiple_info" >in && + git cat-file --batch-command --buffer <in >actual && + + test_cmp expect actual && + + echo 
"$batch_command_multiple_info" | tr "\n" "\0" >in && + git cat-file --batch-command --buffer -z <in >actual && test_cmp expect actual ' +batch_command_multiple_contents="contents $hello_sha1 +contents $commit_sha1 +contents $tag_sha1 +contents deadbeef +flush" + test_expect_success '--batch-command with multiple command calls gives correct format' ' remove_timestamp >expect <<-EOF && $hello_sha1 blob $hello_size @@ -449,13 +490,14 @@ test_expect_success '--batch-command with multiple command calls gives correct f deadbeef missing EOF - git cat-file --batch-command --buffer >actual_raw <<-EOF && - contents $hello_sha1 - contents $commit_sha1 - contents $tag_sha1 - contents deadbeef - flush - EOF + echo "$batch_command_multiple_contents" >in && + git cat-file --batch-command --buffer <in >actual_raw && + + remove_timestamp <actual_raw >actual && + test_cmp expect actual && + + echo "$batch_command_multiple_contents" | tr "\n" "\0" >in && + git cat-file --batch-command --buffer -z <in >actual_raw && remove_timestamp <actual_raw >actual && test_cmp expect actual diff --git a/t/t1011-read-tree-sparse-checkout.sh b/t/t1011-read-tree-sparse-checkout.sh index 63a553d7b3..742f0fa909 100755 --- a/t/t1011-read-tree-sparse-checkout.sh +++ b/t/t1011-read-tree-sparse-checkout.sh @@ -11,6 +11,7 @@ test_description='sparse checkout tests A init.t ' +TEST_CREATE_REPO_NO_TEMPLATE=1 . ./test-lib.sh . "$TEST_DIRECTORY"/lib-read-tree.sh @@ -53,6 +54,7 @@ test_expect_success 'read-tree without .git/info/sparse-checkout' ' ' test_expect_success 'read-tree with .git/info/sparse-checkout but disabled' ' + mkdir .git/info && echo >.git/info/sparse-checkout && read_tree_u_must_succeed -m -u HEAD && git ls-files -t >result && diff --git a/t/t1020-subdirectory.sh b/t/t1020-subdirectory.sh index 9fdbb2af80..45eef9457f 100755 --- a/t/t1020-subdirectory.sh +++ b/t/t1020-subdirectory.sh @@ -6,6 +6,7 @@ test_description='Try various core-level commands in subdirectory. ' +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh . "$TEST_DIRECTORY"/lib-read-tree.sh diff --git a/t/t1051-large-conversion.sh b/t/t1051-large-conversion.sh index 042b0e4429..f6709c9f56 100755 --- a/t/t1051-large-conversion.sh +++ b/t/t1051-large-conversion.sh @@ -1,6 +1,8 @@ #!/bin/sh test_description='test conversion filters on large files' + +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh set_attr() { diff --git a/t/t1090-sparse-checkout-scope.sh b/t/t1090-sparse-checkout-scope.sh index d1833c0f31..3a14218b24 100755 --- a/t/t1090-sparse-checkout-scope.sh +++ b/t/t1090-sparse-checkout-scope.sh @@ -5,6 +5,7 @@ test_description='sparse checkout scope tests' GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME +TEST_CREATE_REPO_NO_TEMPLATE=1 . 
./test-lib.sh test_expect_success 'setup' ' @@ -25,6 +26,7 @@ test_expect_success 'create feature branch' ' test_expect_success 'perform sparse checkout of main' ' git config --local --bool core.sparsecheckout true && + mkdir .git/info && echo "!/*" >.git/info/sparse-checkout && echo "/a" >>.git/info/sparse-checkout && echo "/c" >>.git/info/sparse-checkout && @@ -73,7 +75,7 @@ test_expect_success 'skip-worktree on files outside sparse patterns' ' test_expect_success 'in partial clone, sparse checkout only fetches needed blobs' ' test_create_repo server && - git clone "file://$(pwd)/server" client && + git clone --template= "file://$(pwd)/server" client && test_config -C server uploadpack.allowfilter 1 && test_config -C server uploadpack.allowanysha1inwant 1 && @@ -85,6 +87,7 @@ test_expect_success 'in partial clone, sparse checkout only fetches needed blobs git -C server commit -m message && test_config -C client core.sparsecheckout 1 && + mkdir client/.git/info && echo "!/*" >client/.git/info/sparse-checkout && echo "/a" >>client/.git/info/sparse-checkout && git -C client fetch --filter=blob:none origin && diff --git a/t/t1092-sparse-checkout-compatibility.sh b/t/t1092-sparse-checkout-compatibility.sh index 5dced39889..0302e36fd6 100755 --- a/t/t1092-sparse-checkout-compatibility.sh +++ b/t/t1092-sparse-checkout-compatibility.sh @@ -556,7 +556,7 @@ test_expect_success 'blame with pathspec inside sparse definition' ' deep/deeper1/a \ deep/deeper1/deepest/a do - test_all_match git blame $file + test_all_match git blame $file || return 1 done ' @@ -567,7 +567,7 @@ test_expect_success 'blame with pathspec outside sparse definition' ' init_repos && test_sparse_match git sparse-checkout set && - for file in a \ + for file in \ deep/a \ deep/deeper1/a \ deep/deeper1/deepest/a @@ -579,7 +579,7 @@ test_expect_success 'blame with pathspec outside sparse definition' ' # We compare sparse-checkout-err and sparse-index-err in # `test_sparse_match`. Given we know they are the same, we # only check the content of sparse-index-err here. - test_cmp expect sparse-index-err + test_cmp expect sparse-index-err || return 1 done ' @@ -937,7 +937,7 @@ test_expect_success 'read-tree --prefix' ' test_all_match git read-tree --prefix=deep/deeper1/deepest -u deepest && test_all_match git status --porcelain=v2 && - test_all_match git rm -rf --sparse folder1/ && + run_on_all git rm -rf --sparse folder1/ && test_all_match git read-tree --prefix=folder1/ -u update-folder1 && test_all_match git status --porcelain=v2 && @@ -1365,10 +1365,14 @@ ensure_not_expanded () { shift && test_must_fail env \ GIT_TRACE2_EVENT="$(pwd)/trace2.txt" \ - git -C sparse-index "$@" || return 1 + git -C sparse-index "$@" \ + >sparse-index-out \ + 2>sparse-index-error || return 1 else GIT_TRACE2_EVENT="$(pwd)/trace2.txt" \ - git -C sparse-index "$@" || return 1 + git -C sparse-index "$@" \ + >sparse-index-out \ + 2>sparse-index-error || return 1 fi && test_region ! index ensure_full_index trace2.txt } @@ -1567,7 +1571,7 @@ test_expect_success 'sparse index is not expanded: blame' ' deep/deeper1/a \ deep/deeper1/deepest/a do - ensure_not_expanded blame $file + ensure_not_expanded blame $file || return 1 done ' @@ -1853,4 +1857,119 @@ test_expect_success 'checkout behaves oddly with df-conflict-2' ' test_cmp full-checkout-err sparse-index-err ' +test_expect_success 'mv directory from out-of-cone to in-cone' ' + init_repos && + + # <source> as a sparse directory (or SKIP_WORKTREE_DIR without enabling + # sparse index). 
+ test_all_match git mv --sparse folder1 deep && + test_all_match git status --porcelain=v2 && + test_sparse_match git ls-files -t && + git -C sparse-checkout ls-files -t >actual && + grep -e "H deep/folder1/0/0/0" actual && + grep -e "H deep/folder1/0/1" actual && + grep -e "H deep/folder1/a" actual && + + test_all_match git reset --hard && + + # <source> as a directory deeper than sparse index boundary (where + # sparse index will expand). + test_sparse_match git mv --sparse folder1/0 deep && + test_sparse_match git status --porcelain=v2 && + test_sparse_match git ls-files -t && + git -C sparse-checkout ls-files -t >actual && + grep -e "H deep/0/0/0" actual && + grep -e "H deep/0/1" actual +' + +test_expect_success 'rm pathspec inside sparse definition' ' + init_repos && + + test_all_match git rm deep/a && + test_all_match git status --porcelain=v2 && + + # test wildcard + run_on_all git reset --hard && + test_all_match git rm deep/* && + test_all_match git status --porcelain=v2 && + + # test recursive rm + run_on_all git reset --hard && + test_all_match git rm -r deep && + test_all_match git status --porcelain=v2 +' + +test_expect_success 'rm pathspec outside sparse definition' ' + init_repos && + + for file in folder1/a folder1/0/1 + do + test_sparse_match test_must_fail git rm $file && + test_sparse_match test_must_fail git rm --cached $file && + test_sparse_match git rm --sparse $file && + test_sparse_match git status --porcelain=v2 || return 1 + done && + + cat >folder1-full <<-EOF && + rm ${SQ}folder1/0/0/0${SQ} + rm ${SQ}folder1/0/1${SQ} + rm ${SQ}folder1/a${SQ} + EOF + + cat >folder1-sparse <<-EOF && + rm ${SQ}folder1/${SQ} + EOF + + # test wildcard + run_on_sparse git reset --hard && + run_on_sparse git sparse-checkout reapply && + test_sparse_match test_must_fail git rm folder1/* && + run_on_sparse git rm --sparse folder1/* && + test_cmp folder1-full sparse-checkout-out && + test_cmp folder1-sparse sparse-index-out && + test_sparse_match git status --porcelain=v2 && + + # test recursive rm + run_on_sparse git reset --hard && + run_on_sparse git sparse-checkout reapply && + test_sparse_match test_must_fail git rm --sparse folder1 && + run_on_sparse git rm --sparse -r folder1 && + test_cmp folder1-full sparse-checkout-out && + test_cmp folder1-sparse sparse-index-out && + test_sparse_match git status --porcelain=v2 +' + +test_expect_success 'rm pathspec expands index when necessary' ' + init_repos && + + # in-cone pathspec (do not expand) + ensure_not_expanded rm "deep/deep*" && + test_must_be_empty sparse-index-err && + + # out-of-cone pathspec (expand) + ! ensure_not_expanded rm --sparse "folder1/a*" && + test_must_be_empty sparse-index-err && + + # pathspec that should expand index + ! ensure_not_expanded rm "*/a" && + test_must_be_empty sparse-index-err && + + ! 
ensure_not_expanded rm "**a" && + test_must_be_empty sparse-index-err +' + +test_expect_success 'sparse index is not expanded: rm' ' + init_repos && + + ensure_not_expanded rm deep/a && + + # test in-cone wildcard + git -C sparse-index reset --hard && + ensure_not_expanded rm deep/* && + + # test recursive rm + git -C sparse-index reset --hard && + ensure_not_expanded rm -r deep +' + test_done diff --git a/t/t1301-shared-repo.sh b/t/t1301-shared-repo.sh index 84bf1970d8..93a2f91f8a 100755 --- a/t/t1301-shared-repo.sh +++ b/t/t1301-shared-repo.sh @@ -48,7 +48,7 @@ done test_expect_success 'shared=all' ' mkdir sub && cd sub && - git init --shared=all && + git init --template= --shared=all && test 2 = $(git config core.sharedrepository) ' @@ -57,6 +57,7 @@ test_expect_success POSIXPERM 'update-server-info honors core.sharedRepository' git add a1 && test_tick && git commit -m a1 && + mkdir .git/info && umask 0277 && git update-server-info && actual="$(ls -l .git/info/refs)" && diff --git a/t/t1401-symbolic-ref.sh b/t/t1401-symbolic-ref.sh index 9fb0b90f25..0c204089b8 100755 --- a/t/t1401-symbolic-ref.sh +++ b/t/t1401-symbolic-ref.sh @@ -165,4 +165,14 @@ test_expect_success 'symbolic-ref can resolve d/f name (ENOTDIR)' ' test_cmp expect actual ' +test_expect_success 'symbolic-ref refuses invalid target for non-HEAD' ' + test_must_fail git symbolic-ref refs/heads/invalid foo..bar +' + +test_expect_success 'symbolic-ref allows top-level target for non-HEAD' ' + git symbolic-ref refs/heads/top-level FETCH_HEAD && + git update-ref FETCH_HEAD HEAD && + test_cmp_rev top-level HEAD +' + test_done diff --git a/t/t1402-check-ref-format.sh b/t/t1402-check-ref-format.sh index cabc516ae9..5ed9d7318e 100755 --- a/t/t1402-check-ref-format.sh +++ b/t/t1402-check-ref-format.sh @@ -2,6 +2,7 @@ test_description='Test git check-ref-format' +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh valid_ref() { diff --git a/t/t1405-main-ref-store.sh b/t/t1405-main-ref-store.sh index 51f8291628..e4627cf1b6 100755 --- a/t/t1405-main-ref-store.sh +++ b/t/t1405-main-ref-store.sh @@ -5,6 +5,7 @@ test_description='test main ref store api' GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh RUN="test-tool ref-store main" diff --git a/t/t1407-worktree-ref-store.sh b/t/t1407-worktree-ref-store.sh index ad8006c813..05b1881c59 100755 --- a/t/t1407-worktree-ref-store.sh +++ b/t/t1407-worktree-ref-store.sh @@ -5,6 +5,7 @@ test_description='test worktree ref store api' GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh RWT="test-tool ref-store worktree:wt" diff --git a/t/t1418-reflog-exists.sh b/t/t1418-reflog-exists.sh index d51ecd5e92..2268bca3c1 100755 --- a/t/t1418-reflog-exists.sh +++ b/t/t1418-reflog-exists.sh @@ -4,6 +4,7 @@ test_description='Test reflog display routines' GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME +TEST_PASSES_SANITIZE_LEAK=true . 
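A compact sketch of the rm behavior the sparse-checkout tests above verify, reusing the fixture's deep/ (in-cone) and folder1/ (out-of-cone) paths:

    git rm deep/a              # in-cone: succeeds, sparse index stays collapsed
    git rm folder1/a           # out-of-cone: refused without --sparse
    git rm --sparse folder1/a  # --sparse allows removing the out-of-cone entry
    git rm --sparse -r folder1 # directories outside the cone still need -r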
./test-lib.sh test_expect_success 'setup' ' diff --git a/t/t1503-rev-parse-verify.sh b/t/t1503-rev-parse-verify.sh index ba43168d12..bc136833c1 100755 --- a/t/t1503-rev-parse-verify.sh +++ b/t/t1503-rev-parse-verify.sh @@ -132,7 +132,7 @@ test_expect_success 'use --default' ' test_must_fail git rev-parse --verify --default bar ' -test_expect_success !SANITIZE_LEAK 'main@{n} for various n' ' +test_expect_success 'main@{n} for various n' ' git reflog >out && N=$(wc -l <out) && Nm1=$(($N-1)) && diff --git a/t/t1701-racy-split-index.sh b/t/t1701-racy-split-index.sh index 5dc221ef38..d8fa489998 100755 --- a/t/t1701-racy-split-index.sh +++ b/t/t1701-racy-split-index.sh @@ -5,6 +5,7 @@ test_description='racy split index' +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh test_expect_success 'setup' ' diff --git a/t/t2006-checkout-index-basic.sh b/t/t2006-checkout-index-basic.sh index 7705e3a317..5d119871d4 100755 --- a/t/t2006-checkout-index-basic.sh +++ b/t/t2006-checkout-index-basic.sh @@ -3,6 +3,7 @@ test_description='basic checkout-index tests ' +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh test_expect_success 'checkout-index --gobbledegook' ' diff --git a/t/t2018-checkout-branch.sh b/t/t2018-checkout-branch.sh index 52e51b0726..771c3c3c50 100755 --- a/t/t2018-checkout-branch.sh +++ b/t/t2018-checkout-branch.sh @@ -2,6 +2,7 @@ test_description='checkout' +TEST_CREATE_REPO_NO_TEMPLATE=1 . ./test-lib.sh # Arguments: [!] <branch> <oid> [<checkout options>] @@ -257,11 +258,12 @@ test_expect_success 'checkout -b to a new branch preserves mergeable changes des git checkout branch1-scratch && test_might_fail git branch -D branch3 && git config core.sparseCheckout false && - rm .git/info/sparse-checkout" && + rm -rf .git/info" && test_commit file2 && echo stuff >>file1 && + mkdir .git/info && echo file2 >.git/info/sparse-checkout && git config core.sparseCheckout true && diff --git a/t/t2020-checkout-detach.sh b/t/t2020-checkout-detach.sh index bc46713a43..2eab6474f8 100755 --- a/t/t2020-checkout-detach.sh +++ b/t/t2020-checkout-detach.sh @@ -4,6 +4,7 @@ test_description='checkout into detached HEAD state' GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh check_detached () { diff --git a/t/t2023-checkout-m.sh b/t/t2023-checkout-m.sh index 7b327b7544..81e772fb4e 100755 --- a/t/t2023-checkout-m.sh +++ b/t/t2023-checkout-m.sh @@ -7,6 +7,7 @@ Ensures that checkout -m on a resolved file restores the conflicted file' GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh test_expect_success setup ' diff --git a/t/t2080-parallel-checkout-basics.sh b/t/t2080-parallel-checkout-basics.sh index c683e60007..00ce3033d3 100755 --- a/t/t2080-parallel-checkout-basics.sh +++ b/t/t2080-parallel-checkout-basics.sh @@ -230,12 +230,9 @@ test_expect_success SYMLINKS 'parallel checkout checks for symlinks in leading d # check the final report including sequential, parallel, and delayed entries # all at the same time. So we must have finer control of the parallel checkout # variables. -test_expect_success PERL '"git checkout ." report should not include failed entries' ' - write_script rot13-filter.pl "$PERL_PATH" \ - <"$TEST_DIRECTORY"/t0021/rot13-filter.pl && - +test_expect_success '"git checkout ." 
report should not include failed entries' ' test_config_global filter.delay.process \ - "\"$(pwd)/rot13-filter.pl\" --always-delay delayed.log clean smudge delay" && + "test-tool rot13-filter --always-delay --log=delayed.log clean smudge delay" && test_config_global filter.delay.required true && test_config_global filter.cat.clean cat && test_config_global filter.cat.smudge cat && diff --git a/t/t2082-parallel-checkout-attributes.sh b/t/t2082-parallel-checkout-attributes.sh index 2525457961..f3511cd43a 100755 --- a/t/t2082-parallel-checkout-attributes.sh +++ b/t/t2082-parallel-checkout-attributes.sh @@ -138,12 +138,9 @@ test_expect_success 'parallel-checkout and external filter' ' # The delayed queue is independent from the parallel queue, and they should be # able to work together in the same checkout process. # -test_expect_success PERL 'parallel-checkout and delayed checkout' ' - write_script rot13-filter.pl "$PERL_PATH" \ - <"$TEST_DIRECTORY"/t0021/rot13-filter.pl && - +test_expect_success 'parallel-checkout and delayed checkout' ' test_config_global filter.delay.process \ - "\"$(pwd)/rot13-filter.pl\" --always-delay \"$(pwd)/delayed.log\" clean smudge delay" && + "test-tool rot13-filter --always-delay --log=\"$(pwd)/delayed.log\" clean smudge delay" && test_config_global filter.delay.required true && echo "abcd" >original && diff --git a/t/t2205-add-worktree-config.sh b/t/t2205-add-worktree-config.sh index 43d950de64..98265ba1b4 100755 --- a/t/t2205-add-worktree-config.sh +++ b/t/t2205-add-worktree-config.sh @@ -17,6 +17,7 @@ outside the repository. Two instances for which this can occur are tested: repository can be added to the index. ' +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh test_expect_success '1a: setup--config worktree' ' diff --git a/t/t2400-worktree-add.sh b/t/t2400-worktree-add.sh index 2f564d533d..f3242fef6b 100755 --- a/t/t2400-worktree-add.sh +++ b/t/t2400-worktree-add.sh @@ -5,6 +5,7 @@ test_description='test git worktree add' GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME +TEST_CREATE_REPO_NO_TEMPLATE=1 . ./test-lib.sh . "$TEST_DIRECTORY"/lib-rebase.sh @@ -229,6 +230,7 @@ test_expect_success 'checkout with grafts' ' SHA1=$(git rev-parse HEAD) && test_commit def && test_commit xyz && + mkdir .git/info && echo "$(git rev-parse HEAD) $SHA1" >.git/info/grafts && cat >expected <<-\EOF && xyz @@ -559,6 +561,8 @@ test_expect_success 'git worktree --no-guess-remote option overrides config' ' ' post_checkout_hook () { + test_when_finished "rm -rf .git/hooks" && + mkdir .git/hooks && test_hook -C "$1" post-checkout <<-\EOF { echo $* diff --git a/t/t2407-worktree-heads.sh b/t/t2407-worktree-heads.sh new file mode 100755 index 0000000000..019a40df2c --- /dev/null +++ b/t/t2407-worktree-heads.sh @@ -0,0 +1,180 @@ +#!/bin/sh + +test_description='test operations trying to overwrite refs at worktree HEAD' + +TEST_PASSES_SANITIZE_LEAK=true +. 
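The PERL prerequisites disappear above because the long-running filter is now driven by the C test helper rather than t0021/rot13-filter.pl; the filter configuration keeps the same shape, as in this sketch taken from the updated tests (test-tool only exists inside the test suite, so this is illustrative rather than something to run in a normal repository):

    git config filter.delay.process \
        "test-tool rot13-filter --always-delay --log=delayed.log clean smudge delay"
    git config filter.delay.required true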
./test-lib.sh + +test_expect_success 'setup' ' + test_commit init && + + for i in 1 2 3 4 + do + git checkout -b conflict-$i && + echo "not I" >$i.t && + git add $i.t && + git commit -m "will conflict" && + + git checkout - && + test_commit $i && + git branch wt-$i && + git branch fake-$i && + git worktree add wt-$i wt-$i || return 1 + done && + + # Create a server that updates each branch by one commit + git init server && + test_commit -C server initial && + git remote add server ./server && + for i in 1 2 3 4 + do + git -C server checkout -b wt-$i && + test_commit -C server A-$i || return 1 + done && + for i in 1 2 + do + git -C server checkout -b fake-$i && + test_commit -C server f-$i || return 1 + done +' + +test_expect_success 'refuse to overwrite: checked out in worktree' ' + for i in 1 2 3 4 + do + test_must_fail git branch -f wt-$i HEAD 2>err && + grep "cannot force update the branch" err && + + test_must_fail git branch -D wt-$i 2>err && + grep "Cannot delete branch" err || return 1 + done +' + +test_expect_success !SANITIZE_LEAK 'refuse to overwrite: worktree in bisect' ' + test_when_finished git -C wt-4 bisect reset && + + # Set up a bisect so HEAD no longer points to wt-4. + git -C wt-4 bisect start && + git -C wt-4 bisect bad wt-4 && + git -C wt-4 bisect good wt-1 && + + test_must_fail git branch -f wt-4 HEAD 2>err && + grep "cannot force update the branch '\''wt-4'\'' checked out at.*wt-4" err +' + +test_expect_success !SANITIZE_LEAK 'refuse to overwrite: worktree in rebase (apply)' ' + test_when_finished git -C wt-2 rebase --abort && + + # This will fail part-way through due to a conflict. + test_must_fail git -C wt-2 rebase --apply conflict-2 && + + test_must_fail git branch -f wt-2 HEAD 2>err && + grep "cannot force update the branch '\''wt-2'\'' checked out at.*wt-2" err +' + +test_expect_success !SANITIZE_LEAK 'refuse to overwrite: worktree in rebase (merge)' ' + test_when_finished git -C wt-2 rebase --abort && + + # This will fail part-way through due to a conflict. + test_must_fail git -C wt-2 rebase conflict-2 && + + test_must_fail git branch -f wt-2 HEAD 2>err && + grep "cannot force update the branch '\''wt-2'\'' checked out at.*wt-2" err +' + +test_expect_success !SANITIZE_LEAK 'refuse to overwrite: worktree in rebase with --update-refs' ' + test_when_finished git -C wt-3 rebase --abort && + + git branch -f can-be-updated wt-3 && + test_must_fail git -C wt-3 rebase --update-refs conflict-3 && + + for i in 3 4 + do + test_must_fail git branch -f can-be-updated HEAD 2>err && + grep "cannot force update the branch '\''can-be-updated'\'' checked out at.*wt-3" err || + return 1 + done +' + +test_expect_success !SANITIZE_LEAK 'refuse to fetch over ref: checked out' ' + test_must_fail git fetch server +refs/heads/wt-3:refs/heads/wt-3 2>err && + grep "refusing to fetch into branch '\''refs/heads/wt-3'\''" err && + + # General fetch into refs/heads/ will fail on first ref, + # so use a generic error message check. + test_must_fail git fetch server +refs/heads/*:refs/heads/* 2>err && + grep "refusing to fetch into branch" err +' + +test_expect_success !SANITIZE_LEAK 'refuse to fetch over ref: worktree in bisect' ' + test_when_finished git -C wt-4 bisect reset && + + # Set up a bisect so HEAD no longer points to wt-4. 
+ git -C wt-4 bisect start && + git -C wt-4 bisect bad wt-4 && + git -C wt-4 bisect good wt-1 && + + test_must_fail git fetch server +refs/heads/wt-4:refs/heads/wt-4 2>err && + grep "refusing to fetch into branch" err +' + +test_expect_success !SANITIZE_LEAK 'refuse to fetch over ref: worktree in rebase' ' + test_when_finished git -C wt-3 rebase --abort && + + # This will fail part-way through due to a conflict. + test_must_fail git -C wt-3 rebase conflict-3 && + + test_must_fail git fetch server +refs/heads/wt-3:refs/heads/wt-3 2>err && + grep "refusing to fetch into branch" err +' + +test_expect_success 'refuse to overwrite when in error states' ' + test_when_finished rm -rf .git/worktrees/wt-*/rebase-merge && + test_when_finished rm -rf .git/worktrees/wt-*/BISECT_* && + + # Both branches are currently under rebase. + mkdir -p .git/worktrees/wt-3/rebase-merge && + touch .git/worktrees/wt-3/rebase-merge/interactive && + echo refs/heads/fake-1 >.git/worktrees/wt-3/rebase-merge/head-name && + echo refs/heads/fake-2 >.git/worktrees/wt-3/rebase-merge/onto && + mkdir -p .git/worktrees/wt-4/rebase-merge && + touch .git/worktrees/wt-4/rebase-merge/interactive && + echo refs/heads/fake-2 >.git/worktrees/wt-4/rebase-merge/head-name && + echo refs/heads/fake-1 >.git/worktrees/wt-4/rebase-merge/onto && + + # Both branches are currently under bisect. + touch .git/worktrees/wt-4/BISECT_LOG && + echo refs/heads/fake-2 >.git/worktrees/wt-4/BISECT_START && + touch .git/worktrees/wt-1/BISECT_LOG && + echo refs/heads/fake-1 >.git/worktrees/wt-1/BISECT_START && + + for i in 1 2 + do + test_must_fail git branch -f fake-$i HEAD 2>err && + grep "cannot force update the branch '\''fake-$i'\'' checked out at" err || + return 1 + done +' + +. "$TEST_DIRECTORY"/lib-rebase.sh + +test_expect_success !SANITIZE_LEAK 'refuse to overwrite during rebase with --update-refs' ' + git commit --fixup HEAD~2 --allow-empty && + ( + set_cat_todo_editor && + test_must_fail git rebase -i --update-refs HEAD~3 >todo && + ! grep "update-refs" todo + ) && + git branch -f allow-update HEAD~2 && + ( + set_cat_todo_editor && + test_must_fail git rebase -i --update-refs HEAD~3 >todo && + grep "update-ref refs/heads/allow-update" todo + ) +' + +# This must be the last test in this file +test_expect_success '$EDITOR and friends are unchanged' ' + test_editor_unchanged +' + +test_done diff --git a/t/t3001-ls-files-others-exclude.sh b/t/t3001-ls-files-others-exclude.sh index e07ac6c6dc..1ed0aa967e 100755 --- a/t/t3001-ls-files-others-exclude.sh +++ b/t/t3001-ls-files-others-exclude.sh @@ -103,7 +103,7 @@ test_expect_success 'git ls-files --others with various exclude options.' ' test_cmp expect output ' -test_expect_success !SANITIZE_LEAK 'restore gitignore' ' +test_expect_success 'restore gitignore' ' git checkout --ignore-skip-worktree-bits $allignores && rm .git/index ' @@ -126,7 +126,7 @@ cat > expect << EOF # three/ EOF -test_expect_success !SANITIZE_LEAK 'git status honors core.excludesfile' \ +test_expect_success 'git status honors core.excludesfile' \ 'test_cmp expect output' test_expect_success 'trailing slash in exclude allows directory match(1)' ' diff --git a/t/t3012-ls-files-dedup.sh b/t/t3012-ls-files-dedup.sh index 2682b1f43a..190e2f6eed 100755 --- a/t/t3012-ls-files-dedup.sh +++ b/t/t3012-ls-files-dedup.sh @@ -2,6 +2,7 @@ test_description='git ls-files --deduplicate test' +TEST_PASSES_SANITIZE_LEAK=true . 
./test-lib.sh test_expect_success 'setup' ' diff --git a/t/t3013-ls-files-format.sh b/t/t3013-ls-files-format.sh new file mode 100755 index 0000000000..efb7450bf1 --- /dev/null +++ b/t/t3013-ls-files-format.sh @@ -0,0 +1,95 @@ +#!/bin/sh + +test_description='git ls-files --format test' + +TEST_PASSES_SANITIZE_LEAK=true +. ./test-lib.sh + +for flag in -s -o -k -t --resolve-undo --deduplicate --eol +do + test_expect_success "usage: --format is incompatible with $flag" ' + test_expect_code 129 git ls-files --format="%(objectname)" $flag + ' +done + +test_expect_success 'setup' ' + printf "LINEONE\nLINETWO\nLINETHREE\n" >o1.txt && + printf "LINEONE\r\nLINETWO\r\nLINETHREE\r\n" >o2.txt && + printf "LINEONE\r\nLINETWO\nLINETHREE\n" >o3.txt && + git add o?.txt && + oid=$(git hash-object o1.txt) && + git update-index --add --cacheinfo 120000 $oid o4.txt && + git update-index --add --cacheinfo 160000 $oid o5.txt && + git update-index --add --cacheinfo 100755 $oid o6.txt && + git commit -m base +' + +test_expect_success 'git ls-files --format objectmode v.s. -s' ' + git ls-files -s >files && + cut -d" " -f1 files >expect && + git ls-files --format="%(objectmode)" >actual && + test_cmp expect actual +' + +test_expect_success 'git ls-files --format objectname v.s. -s' ' + git ls-files -s >files && + cut -d" " -f2 files >expect && + git ls-files --format="%(objectname)" >actual && + test_cmp expect actual +' + +test_expect_success 'git ls-files --format v.s. --eol' ' + git ls-files --eol >tmp && + sed -e "s/ / /g" -e "s/ */ /g" tmp >expect 2>err && + test_must_be_empty err && + git ls-files --format="i/%(eolinfo:index) w/%(eolinfo:worktree) attr/%(eolattr) %(path)" >actual 2>err && + test_must_be_empty err && + test_cmp expect actual +' + +test_expect_success 'git ls-files --format path v.s. -s' ' + git ls-files -s >files && + cut -f2 files >expect && + git ls-files --format="%(path)" >actual && + test_cmp expect actual +' + +test_expect_success 'git ls-files --format with -m' ' + echo change >o1.txt && + cat >expect <<-\EOF && + o1.txt + o4.txt + o5.txt + o6.txt + EOF + git ls-files --format="%(path)" -m >actual && + test_cmp expect actual +' + +test_expect_success 'git ls-files --format with -d' ' + echo o7 >o7.txt && + git add o7.txt && + rm o7.txt && + cat >expect <<-\EOF && + o4.txt + o5.txt + o6.txt + o7.txt + EOF + git ls-files --format="%(path)" -d >actual && + test_cmp expect actual +' + +test_expect_success 'git ls-files --format v.s -s' ' + git ls-files --stage >expect && + git ls-files --format="%(objectmode) %(objectname) %(stage)%x09%(path)" >actual && + test_cmp expect actual +' + +test_expect_success 'git ls-files --format with --debug' ' + git ls-files --debug >expect && + git ls-files --format="%(path)" --debug >actual && + test_cmp expect actual +' + +test_done diff --git a/t/t3304-notes-mixed.sh b/t/t3304-notes-mixed.sh index 03dfcd3954..2c3a245266 100755 --- a/t/t3304-notes-mixed.sh +++ b/t/t3304-notes-mixed.sh @@ -5,6 +5,7 @@ test_description='Test notes trees that also contain non-notes' GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME +TEST_PASSES_SANITIZE_LEAK=true . 
./test-lib.sh number_of_commits=100 diff --git a/t/t3305-notes-fanout.sh b/t/t3305-notes-fanout.sh index 64a9915761..22ffe5bcb9 100755 --- a/t/t3305-notes-fanout.sh +++ b/t/t3305-notes-fanout.sh @@ -51,7 +51,7 @@ test_expect_success 'creating many notes with git-notes' ' done ' -test_expect_success !SANITIZE_LEAK 'many notes created correctly with git-notes' ' +test_expect_success 'many notes created correctly with git-notes' ' git log >output.raw && grep "^ " output.raw >output && i=$num_notes && diff --git a/t/t3307-notes-man.sh b/t/t3307-notes-man.sh index 1aa366a410..ae316502c4 100755 --- a/t/t3307-notes-man.sh +++ b/t/t3307-notes-man.sh @@ -4,6 +4,7 @@ test_description='Examples from the git-notes man page Make sure the manual is not full of lies.' +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh test_expect_success 'setup' ' diff --git a/t/t3404-rebase-interactive.sh b/t/t3404-rebase-interactive.sh index f31afd4a54..688b01e3eb 100755 --- a/t/t3404-rebase-interactive.sh +++ b/t/t3404-rebase-interactive.sh @@ -1743,6 +1743,279 @@ test_expect_success 'ORIG_HEAD is updated correctly' ' test_cmp_rev ORIG_HEAD test-orig-head@{1} ' +test_expect_success '--update-refs adds label and update-ref commands' ' + git checkout -b update-refs no-conflict-branch && + git branch -f base HEAD~4 && + git branch -f first HEAD~3 && + git branch -f second HEAD~3 && + git branch -f third HEAD~1 && + git commit --allow-empty --fixup=third && + git branch -f is-not-reordered && + git commit --allow-empty --fixup=HEAD~4 && + git branch -f shared-tip && + ( + set_cat_todo_editor && + + cat >expect <<-EOF && + pick $(git log -1 --format=%h J) J + fixup $(git log -1 --format=%h update-refs) fixup! J # empty + update-ref refs/heads/second + update-ref refs/heads/first + pick $(git log -1 --format=%h K) K + pick $(git log -1 --format=%h L) L + fixup $(git log -1 --format=%h is-not-reordered) fixup! L # empty + update-ref refs/heads/third + pick $(git log -1 --format=%h M) M + update-ref refs/heads/no-conflict-branch + update-ref refs/heads/is-not-reordered + update-ref refs/heads/shared-tip + EOF + + test_must_fail git rebase -i --autosquash --update-refs primary >todo && + test_cmp expect todo && + + test_must_fail git -c rebase.autosquash=true \ + -c rebase.updaterefs=true \ + rebase -i primary >todo && + + test_cmp expect todo + ) +' + +test_expect_success '--update-refs adds commands with --rebase-merges' ' + git checkout -b update-refs-with-merge no-conflict-branch && + git branch -f base HEAD~4 && + git branch -f first HEAD~3 && + git branch -f second HEAD~3 && + git branch -f third HEAD~1 && + git merge -m merge branch2 && + git branch -f merge-branch && + git commit --fixup=third --allow-empty && + ( + set_cat_todo_editor && + + cat >expect <<-EOF && + label onto + reset onto + pick $(git log -1 --format=%h branch2~1) F + pick $(git log -1 --format=%h branch2) I + update-ref refs/heads/branch2 + label merge + reset onto + pick $(git log -1 --format=%h refs/heads/second) J + update-ref refs/heads/second + update-ref refs/heads/first + pick $(git log -1 --format=%h refs/heads/third~1) K + pick $(git log -1 --format=%h refs/heads/third) L + fixup $(git log -1 --format=%h update-refs-with-merge) fixup! 
L # empty + update-ref refs/heads/third + pick $(git log -1 --format=%h HEAD~2) M + update-ref refs/heads/no-conflict-branch + merge -C $(git log -1 --format=%h HEAD~1) merge # merge + update-ref refs/heads/merge-branch + EOF + + test_must_fail git rebase -i --autosquash \ + --rebase-merges=rebase-cousins \ + --update-refs primary >todo && + + test_cmp expect todo && + + test_must_fail git -c rebase.autosquash=true \ + -c rebase.updaterefs=true \ + rebase -i \ + --rebase-merges=rebase-cousins \ + primary >todo && + + test_cmp expect todo + ) +' + +test_expect_success '--update-refs updates refs correctly' ' + git checkout -B update-refs no-conflict-branch && + git branch -f base HEAD~4 && + git branch -f first HEAD~3 && + git branch -f second HEAD~3 && + git branch -f third HEAD~1 && + test_commit extra2 fileX && + git commit --amend --fixup=L && + + git rebase -i --autosquash --update-refs primary 2>err && + + test_cmp_rev HEAD~3 refs/heads/first && + test_cmp_rev HEAD~3 refs/heads/second && + test_cmp_rev HEAD~1 refs/heads/third && + test_cmp_rev HEAD refs/heads/no-conflict-branch && + + cat >expect <<-\EOF && + Successfully rebased and updated refs/heads/update-refs. + Updated the following refs with --update-refs: + refs/heads/first + refs/heads/no-conflict-branch + refs/heads/second + refs/heads/third + EOF + + # Clear "Rebasing (X/Y)" progress lines and drop leading tabs. + sed -e "s/Rebasing.*Successfully/Successfully/g" -e "s/^\t//g" \ + <err >err.trimmed && + test_cmp expect err.trimmed +' + +test_expect_success 'respect user edits to update-ref steps' ' + git checkout -B update-refs-break no-conflict-branch && + git branch -f base HEAD~4 && + git branch -f first HEAD~3 && + git branch -f second HEAD~3 && + git branch -f third HEAD~1 && + git branch -f unseen base && + + # First, we will add breaks to the expected todo file + cat >fake-todo-1 <<-EOF && + pick $(git rev-parse HEAD~3) + break + update-ref refs/heads/second + update-ref refs/heads/first + + pick $(git rev-parse HEAD~2) + pick $(git rev-parse HEAD~1) + update-ref refs/heads/third + + pick $(git rev-parse HEAD) + update-ref refs/heads/no-conflict-branch + EOF + + # Second, we will drop some update-refs commands (and move one) + cat >fake-todo-2 <<-EOF && + update-ref refs/heads/second + + pick $(git rev-parse HEAD~2) + update-ref refs/heads/third + pick $(git rev-parse HEAD~1) + break + + pick $(git rev-parse HEAD) + EOF + + # Third, we will: + # * insert a new one (new-branch), + # * re-add an old one (first), and + # * add a second instance of a previously-stored one (second) + cat >fake-todo-3 <<-EOF && + update-ref refs/heads/unseen + update-ref refs/heads/new-branch + pick $(git rev-parse HEAD) + update-ref refs/heads/first + update-ref refs/heads/second + EOF + + ( + set_replace_editor fake-todo-1 && + git rebase -i --update-refs primary && + + # These branches are currently locked. + for b in first second third no-conflict-branch + do + test_must_fail git branch -f $b base || return 1 + done && + + set_replace_editor fake-todo-2 && + git rebase --edit-todo && + + # These branches are currently locked. + for b in second third + do + test_must_fail git branch -f $b base || return 1 + done && + + # These branches are currently unlocked for checkout. + for b in first no-conflict-branch + do + git worktree add wt-$b $b && + git worktree remove wt-$b || return 1 + done && + + git rebase --continue && + + set_replace_editor fake-todo-3 && + git rebase --edit-todo && + + # These branches are currently locked. 
+ for b in second third first unseen + do + test_must_fail git branch -f $b base || return 1 + done && + + # These branches are currently unlocked for checkout. + for b in no-conflict-branch + do + git worktree add wt-$b $b && + git worktree remove wt-$b || return 1 + done && + + git rebase --continue + ) && + + test_cmp_rev HEAD~2 refs/heads/third && + test_cmp_rev HEAD~1 refs/heads/unseen && + test_cmp_rev HEAD~1 refs/heads/new-branch && + test_cmp_rev HEAD refs/heads/first && + test_cmp_rev HEAD refs/heads/second && + test_cmp_rev HEAD refs/heads/no-conflict-branch +' + +test_expect_success '--update-refs: check failed ref update' ' + git checkout -B update-refs-error no-conflict-branch && + git branch -f base HEAD~4 && + git branch -f first HEAD~3 && + git branch -f second HEAD~2 && + git branch -f third HEAD~1 && + + cat >fake-todo <<-EOF && + pick $(git rev-parse HEAD~3) + break + update-ref refs/heads/first + + pick $(git rev-parse HEAD~2) + update-ref refs/heads/second + + pick $(git rev-parse HEAD~1) + update-ref refs/heads/third + + pick $(git rev-parse HEAD) + update-ref refs/heads/no-conflict-branch + EOF + + ( + set_replace_editor fake-todo && + git rebase -i --update-refs base + ) && + + # At this point, the values of first, second, and third are + # recorded in the update-refs file. We will force-update the + # "second" ref, but "git branch -f" will not work because of + # the lock in the update-refs file. + git rev-parse third >.git/refs/heads/second && + + test_must_fail git rebase --continue 2>err && + grep "update_ref failed for ref '\''refs/heads/second'\''" err && + + cat >expect <<-\EOF && + Updated the following refs with --update-refs: + refs/heads/first + refs/heads/no-conflict-branch + refs/heads/third + Failed to update the following refs with --update-refs: + refs/heads/second + EOF + + # Clear "Rebasing (X/Y)" progress lines and drop leading tabs. + tail -n 6 err >err.last && + sed -e "s/Rebasing.*Successfully/Successfully/g" -e "s/^\t//g" \ + <err.last >err.trimmed && + test_cmp expect err.trimmed +' + # This must be the last test in this file test_expect_success '$EDITOR and friends are unchanged' ' test_editor_unchanged diff --git a/t/t3426-rebase-submodule.sh b/t/t3426-rebase-submodule.sh index 0ad3a07bf4..7a9f1127a4 100755 --- a/t/t3426-rebase-submodule.sh +++ b/t/t3426-rebase-submodule.sh @@ -35,6 +35,7 @@ git_rebase_interactive () { ls -1pR * >>actual && test_cmp expect actual && set_fake_editor && + mkdir .git/info && echo "fake-editor.sh" >.git/info/exclude && may_only_be_test_must_fail "$2" && $2 git rebase -i "$1" diff --git a/t/t3507-cherry-pick-conflict.sh b/t/t3507-cherry-pick-conflict.sh index 979e843c65..f32799e046 100755 --- a/t/t3507-cherry-pick-conflict.sh +++ b/t/t3507-cherry-pick-conflict.sh @@ -12,6 +12,7 @@ test_description='test cherry-pick and revert with conflicts GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME +TEST_CREATE_REPO_NO_TEMPLATE=1 . 
./test-lib.sh pristine_detach () { @@ -558,6 +559,7 @@ test_expect_success 'cherry-pick preserves sparse-checkout' ' echo \"/*\" >.git/info/sparse-checkout git read-tree --reset -u HEAD rm .git/info/sparse-checkout" && + mkdir .git/info && echo /unrelated >.git/info/sparse-checkout && git read-tree --reset -u HEAD && test_must_fail git cherry-pick -Xours picked>actual && diff --git a/t/t3701-add-interactive.sh b/t/t3701-add-interactive.sh index b354fb39de..3b7df9bed5 100755 --- a/t/t3701-add-interactive.sh +++ b/t/t3701-add-interactive.sh @@ -766,6 +766,19 @@ test_expect_success 'detect bogus diffFilter output' ' force_color test_must_fail git add -p <y ' +test_expect_success 'handle very large filtered diff' ' + git reset --hard && + # The specific number here is not important, but it must + # be large enough that the output of "git diff --color" + # fills up the pipe buffer. 10,000 results in ~200k of + # colored output. + test_seq 10000 >test && + test_config interactive.diffFilter cat && + printf y >y && + force_color git add -p >output 2>&1 <y && + git diff-files --exit-code -- test +' + test_expect_success 'diff.algorithm is passed to `git diff-files`' ' git reset --hard && diff --git a/t/t3920-crlf-messages.sh b/t/t3920-crlf-messages.sh index 0276edbe3d..4c661d4d54 100755 --- a/t/t3920-crlf-messages.sh +++ b/t/t3920-crlf-messages.sh @@ -1,6 +1,8 @@ #!/bin/sh test_description='Test ref-filter and pretty APIs for commit and tag messages using CRLF' + +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh LIB_CRLF_BRANCHES="" diff --git a/t/t4013-diff-various.sh b/t/t4013-diff-various.sh index 056e922164..dfcf3a0aaa 100755 --- a/t/t4013-diff-various.sh +++ b/t/t4013-diff-various.sh @@ -352,6 +352,8 @@ log -GF -p --pickaxe-all master log -IA -IB -I1 -I2 -p master log --decorate --all log --decorate=full --all +log --decorate --clear-decorations --all +log --decorate=full --clear-decorations --all rev-list --parents HEAD rev-list --children HEAD diff --git a/t/t4013/diff.log_--decorate=full_--all b/t/t4013/diff.log_--decorate=full_--all index 3f9b872ece..6b0b334a5d 100644 --- a/t/t4013/diff.log_--decorate=full_--all +++ b/t/t4013/diff.log_--decorate=full_--all @@ -20,7 +20,7 @@ Date: Mon Jun 26 00:06:00 2006 +0000 Rearranged lines in dir/sub -commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0 (refs/notes/commits) +commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0 Author: A U Thor <author@example.com> Date: Mon Jun 26 00:06:00 2006 +0000 diff --git a/t/t4013/diff.log_--decorate=full_--clear-decorations_--all b/t/t4013/diff.log_--decorate=full_--clear-decorations_--all new file mode 100644 index 0000000000..1c030a6554 --- /dev/null +++ b/t/t4013/diff.log_--decorate=full_--clear-decorations_--all @@ -0,0 +1,61 @@ +$ git log --decorate=full --clear-decorations --all +commit b7e0bc69303b488b47deca799a7d723971dfa6cd (refs/heads/mode) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + update mode + +commit a6f364368ca320bc5a92e18912e16fa6b3dff598 (refs/heads/note) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + update mode (file2) + +Notes: + note + +commit cd4e72fd96faed3f0ba949dc42967430374e2290 (refs/heads/rearrange) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + Rearranged lines in dir/sub + +commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0 (refs/notes/commits) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + Notes added by 'git notes add' + +commit 
59d314ad6f356dd08601a4cd5e530381da3e3c64 (HEAD -> refs/heads/master) +Merge: 9a6d494 c7a2ab9 +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:04:00 2006 +0000 + + Merge branch 'side' + +commit c7a2ab9e8eac7b117442a607d5a9b3950ae34d5a (refs/heads/side) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:03:00 2006 +0000 + + Side + +commit 9a6d4949b6b76956d9d5e26f2791ec2ceff5fdc0 +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:02:00 2006 +0000 + + Third + +commit 1bde4ae5f36c8d9abe3a0fce0c6aab3c4a12fe44 +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:01:00 2006 +0000 + + Second + + This is the second commit. + +commit 444ac553ac7612cc88969031b02b3767fb8a353a (refs/heads/initial) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:00:00 2006 +0000 + + Initial +$ diff --git a/t/t4013/diff.log_--decorate=full_--decorate-all_--all b/t/t4013/diff.log_--decorate=full_--decorate-all_--all new file mode 100644 index 0000000000..d6e7928784 --- /dev/null +++ b/t/t4013/diff.log_--decorate=full_--decorate-all_--all @@ -0,0 +1,61 @@ +$ git log --decorate=full --decorate-all --all +commit b7e0bc69303b488b47deca799a7d723971dfa6cd (refs/heads/mode) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + update mode + +commit a6f364368ca320bc5a92e18912e16fa6b3dff598 (refs/heads/note) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + update mode (file2) + +Notes: + note + +commit cd4e72fd96faed3f0ba949dc42967430374e2290 (refs/heads/rearrange) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + Rearranged lines in dir/sub + +commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0 (refs/notes/commits) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + Notes added by 'git notes add' + +commit 59d314ad6f356dd08601a4cd5e530381da3e3c64 (HEAD -> refs/heads/master) +Merge: 9a6d494 c7a2ab9 +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:04:00 2006 +0000 + + Merge branch 'side' + +commit c7a2ab9e8eac7b117442a607d5a9b3950ae34d5a (refs/heads/side) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:03:00 2006 +0000 + + Side + +commit 9a6d4949b6b76956d9d5e26f2791ec2ceff5fdc0 +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:02:00 2006 +0000 + + Third + +commit 1bde4ae5f36c8d9abe3a0fce0c6aab3c4a12fe44 +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:01:00 2006 +0000 + + Second + + This is the second commit. 
+ +commit 444ac553ac7612cc88969031b02b3767fb8a353a (refs/heads/initial) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:00:00 2006 +0000 + + Initial +$ diff --git a/t/t4013/diff.log_--decorate_--all b/t/t4013/diff.log_--decorate_--all index f5e20e1e14..c7df1f5814 100644 --- a/t/t4013/diff.log_--decorate_--all +++ b/t/t4013/diff.log_--decorate_--all @@ -20,7 +20,7 @@ Date: Mon Jun 26 00:06:00 2006 +0000 Rearranged lines in dir/sub -commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0 (refs/notes/commits) +commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0 Author: A U Thor <author@example.com> Date: Mon Jun 26 00:06:00 2006 +0000 diff --git a/t/t4013/diff.log_--decorate_--clear-decorations_--all b/t/t4013/diff.log_--decorate_--clear-decorations_--all new file mode 100644 index 0000000000..88be82cce3 --- /dev/null +++ b/t/t4013/diff.log_--decorate_--clear-decorations_--all @@ -0,0 +1,61 @@ +$ git log --decorate --clear-decorations --all +commit b7e0bc69303b488b47deca799a7d723971dfa6cd (mode) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + update mode + +commit a6f364368ca320bc5a92e18912e16fa6b3dff598 (note) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + update mode (file2) + +Notes: + note + +commit cd4e72fd96faed3f0ba949dc42967430374e2290 (rearrange) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + Rearranged lines in dir/sub + +commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0 (refs/notes/commits) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + Notes added by 'git notes add' + +commit 59d314ad6f356dd08601a4cd5e530381da3e3c64 (HEAD -> master) +Merge: 9a6d494 c7a2ab9 +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:04:00 2006 +0000 + + Merge branch 'side' + +commit c7a2ab9e8eac7b117442a607d5a9b3950ae34d5a (side) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:03:00 2006 +0000 + + Side + +commit 9a6d4949b6b76956d9d5e26f2791ec2ceff5fdc0 +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:02:00 2006 +0000 + + Third + +commit 1bde4ae5f36c8d9abe3a0fce0c6aab3c4a12fe44 +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:01:00 2006 +0000 + + Second + + This is the second commit. 
+ +commit 444ac553ac7612cc88969031b02b3767fb8a353a (initial) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:00:00 2006 +0000 + + Initial +$ diff --git a/t/t4013/diff.log_--decorate_--decorate-all_--all b/t/t4013/diff.log_--decorate_--decorate-all_--all new file mode 100644 index 0000000000..5d22618bb6 --- /dev/null +++ b/t/t4013/diff.log_--decorate_--decorate-all_--all @@ -0,0 +1,61 @@ +$ git log --decorate --decorate-all --all +commit b7e0bc69303b488b47deca799a7d723971dfa6cd (mode) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + update mode + +commit a6f364368ca320bc5a92e18912e16fa6b3dff598 (note) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + update mode (file2) + +Notes: + note + +commit cd4e72fd96faed3f0ba949dc42967430374e2290 (rearrange) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + Rearranged lines in dir/sub + +commit cbacedd14cb8b89255a2c02b59e77a2e9a8021a0 (refs/notes/commits) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:06:00 2006 +0000 + + Notes added by 'git notes add' + +commit 59d314ad6f356dd08601a4cd5e530381da3e3c64 (HEAD -> master) +Merge: 9a6d494 c7a2ab9 +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:04:00 2006 +0000 + + Merge branch 'side' + +commit c7a2ab9e8eac7b117442a607d5a9b3950ae34d5a (side) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:03:00 2006 +0000 + + Side + +commit 9a6d4949b6b76956d9d5e26f2791ec2ceff5fdc0 +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:02:00 2006 +0000 + + Third + +commit 1bde4ae5f36c8d9abe3a0fce0c6aab3c4a12fe44 +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:01:00 2006 +0000 + + Second + + This is the second commit. + +commit 444ac553ac7612cc88969031b02b3767fb8a353a (initial) +Author: A U Thor <author@example.com> +Date: Mon Jun 26 00:00:00 2006 +0000 + + Initial +$ diff --git a/t/t4017-diff-retval.sh b/t/t4017-diff-retval.sh index ed461f481e..5bc28ad9f0 100755 --- a/t/t4017-diff-retval.sh +++ b/t/t4017-diff-retval.sh @@ -5,6 +5,7 @@ test_description='Return value of diffs' GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh test_expect_success 'setup' ' diff --git a/t/t4020-diff-external.sh b/t/t4020-diff-external.sh index 858a5522f9..c1ac09ecc7 100755 --- a/t/t4020-diff-external.sh +++ b/t/t4020-diff-external.sh @@ -33,7 +33,7 @@ test_expect_success 'GIT_EXTERNAL_DIFF environment' ' ' -test_expect_success !SANITIZE_LEAK 'GIT_EXTERNAL_DIFF environment should apply only to diff' ' +test_expect_success 'GIT_EXTERNAL_DIFF environment should apply only to diff' ' GIT_EXTERNAL_DIFF=echo git log -p -1 HEAD >out && grep "^diff --git a/file b/file" out @@ -74,7 +74,7 @@ test_expect_success 'diff.external' ' test_cmp expect actual ' -test_expect_success !SANITIZE_LEAK 'diff.external should apply only to diff' ' +test_expect_success 'diff.external should apply only to diff' ' test_config diff.external echo && git log -p -1 HEAD >out && grep "^diff --git a/file b/file" out diff --git a/t/t4044-diff-index-unique-abbrev.sh b/t/t4044-diff-index-unique-abbrev.sh index 4701796d10..29e49d2290 100755 --- a/t/t4044-diff-index-unique-abbrev.sh +++ b/t/t4044-diff-index-unique-abbrev.sh @@ -1,6 +1,8 @@ #!/bin/sh test_description='test unique sha1 abbreviation on "index from..to" line' + +TEST_PASSES_SANITIZE_LEAK=true . 
./test-lib.sh test_expect_success 'setup' ' diff --git a/t/t4051-diff-function-context.sh b/t/t4051-diff-function-context.sh index 4838a1df8b..725278ad19 100755 --- a/t/t4051-diff-function-context.sh +++ b/t/t4051-diff-function-context.sh @@ -2,6 +2,7 @@ test_description='diff function context' +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh dir="$TEST_DIRECTORY/t4051" diff --git a/t/t4057-diff-combined-paths.sh b/t/t4057-diff-combined-paths.sh index 04b8a1542a..9a7505cbb8 100755 --- a/t/t4057-diff-combined-paths.sh +++ b/t/t4057-diff-combined-paths.sh @@ -5,6 +5,7 @@ test_description='combined diff show only paths that are different to all parent GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh # verify that diffc.expect matches output of diff --git a/t/t4069-remerge-diff.sh b/t/t4069-remerge-diff.sh index 35f94957fc..9e7cac68b1 100755 --- a/t/t4069-remerge-diff.sh +++ b/t/t4069-remerge-diff.sh @@ -2,6 +2,7 @@ test_description='remerge-diff handling' +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh # This test is ort-specific diff --git a/t/t4114-apply-typechange.sh b/t/t4114-apply-typechange.sh index da3e64f811..8ff3640766 100755 --- a/t/t4114-apply-typechange.sh +++ b/t/t4114-apply-typechange.sh @@ -7,6 +7,7 @@ test_description='git apply should not get confused with type changes. ' +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh test_expect_success 'setup repository and commits' ' diff --git a/t/t4140-apply-ita.sh b/t/t4140-apply-ita.sh index c614eaf04c..b375aca0d7 100755 --- a/t/t4140-apply-ita.sh +++ b/t/t4140-apply-ita.sh @@ -2,6 +2,7 @@ test_description='git apply of i-t-a file' +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh test_expect_success setup ' diff --git a/t/t4202-log.sh b/t/t4202-log.sh index 6e66352558..cc15cb4ff6 100755 --- a/t/t4202-log.sh +++ b/t/t4202-log.sh @@ -704,9 +704,12 @@ test_expect_success 'set up more tangled history' ' git checkout -b tangle HEAD~6 && test_commit tangle-a tangle-a a && git merge main~3 && + git update-ref refs/prefetch/merge HEAD && git merge side~1 && + git update-ref refs/rewritten/merge HEAD && git checkout main && git merge tangle && + git update-ref refs/hidden/tangle HEAD && git checkout -b reach && test_commit reach && git checkout main && @@ -974,9 +977,9 @@ test_expect_success 'decorate-refs-exclude and simplify-by-decoration' ' Merge-tag-reach (HEAD -> main) reach (tag: reach, reach) seventh (tag: seventh) - Merge-branch-tangle - Merge-branch-side-early-part-into-tangle (tangle) - tangle-a (tag: tangle-a) + Merge-branch-tangle (refs/hidden/tangle) + Merge-branch-side-early-part-into-tangle (refs/rewritten/merge, tangle) + Merge-branch-main-early-part-into-tangle (refs/prefetch/merge) EOF git log -n6 --decorate=short --pretty="tformat:%f%d" \ --decorate-refs-exclude="*octopus*" \ @@ -1025,6 +1028,115 @@ test_expect_success 'decorate-refs and simplify-by-decoration without output' ' test_cmp expect actual ' +test_expect_success 'decorate-refs-exclude HEAD' ' + git log --decorate=full --oneline \ + --decorate-refs-exclude="HEAD" >actual && + ! grep HEAD actual +' + +test_expect_success 'decorate-refs focus from default' ' + git log --decorate=full --oneline \ + --decorate-refs="refs/heads" >actual && + ! 
grep HEAD actual +' + +test_expect_success '--clear-decorations overrides defaults' ' + cat >expect.default <<-\EOF && + Merge-tag-reach (HEAD -> refs/heads/main) + Merge-tags-octopus-a-and-octopus-b + seventh (tag: refs/tags/seventh) + octopus-b (tag: refs/tags/octopus-b, refs/heads/octopus-b) + octopus-a (tag: refs/tags/octopus-a, refs/heads/octopus-a) + reach (tag: refs/tags/reach, refs/heads/reach) + Merge-branch-tangle + Merge-branch-side-early-part-into-tangle (refs/heads/tangle) + Merge-branch-main-early-part-into-tangle + tangle-a (tag: refs/tags/tangle-a) + Merge-branch-side + side-2 (tag: refs/tags/side-2, refs/heads/side) + side-1 (tag: refs/tags/side-1) + Second + sixth + fifth + fourth + third + second + initial + EOF + git log --decorate=full --pretty="tformat:%f%d" >actual && + test_cmp expect.default actual && + + cat >expect.all <<-\EOF && + Merge-tag-reach (HEAD -> refs/heads/main) + Merge-tags-octopus-a-and-octopus-b + seventh (tag: refs/tags/seventh) + octopus-b (tag: refs/tags/octopus-b, refs/heads/octopus-b) + octopus-a (tag: refs/tags/octopus-a, refs/heads/octopus-a) + reach (tag: refs/tags/reach, refs/heads/reach) + Merge-branch-tangle (refs/hidden/tangle) + Merge-branch-side-early-part-into-tangle (refs/rewritten/merge, refs/heads/tangle) + Merge-branch-main-early-part-into-tangle (refs/prefetch/merge) + tangle-a (tag: refs/tags/tangle-a) + Merge-branch-side + side-2 (tag: refs/tags/side-2, refs/heads/side) + side-1 (tag: refs/tags/side-1) + Second + sixth + fifth + fourth + third + second + initial + EOF + git log --decorate=full --pretty="tformat:%f%d" \ + --clear-decorations >actual && + test_cmp expect.all actual && + git -c log.initialDecorationSet=all log \ + --decorate=full --pretty="tformat:%f%d" >actual && + test_cmp expect.all actual +' + +test_expect_success '--clear-decorations clears previous exclusions' ' + cat >expect.all <<-\EOF && + Merge-tag-reach (HEAD -> refs/heads/main) + reach (tag: refs/tags/reach, refs/heads/reach) + Merge-tags-octopus-a-and-octopus-b + octopus-b (tag: refs/tags/octopus-b, refs/heads/octopus-b) + octopus-a (tag: refs/tags/octopus-a, refs/heads/octopus-a) + seventh (tag: refs/tags/seventh) + Merge-branch-tangle (refs/hidden/tangle) + Merge-branch-side-early-part-into-tangle (refs/rewritten/merge, refs/heads/tangle) + Merge-branch-main-early-part-into-tangle (refs/prefetch/merge) + tangle-a (tag: refs/tags/tangle-a) + side-2 (tag: refs/tags/side-2, refs/heads/side) + side-1 (tag: refs/tags/side-1) + initial + EOF + + git log --decorate=full --pretty="tformat:%f%d" \ + --simplify-by-decoration \ + --decorate-refs-exclude="heads/octopus*" \ + --decorate-refs="heads" \ + --clear-decorations >actual && + test_cmp expect.all actual && + + cat >expect.filtered <<-\EOF && + Merge-tags-octopus-a-and-octopus-b + octopus-b (refs/heads/octopus-b) + octopus-a (refs/heads/octopus-a) + initial + EOF + + git log --decorate=full --pretty="tformat:%f%d" \ + --simplify-by-decoration \ + --decorate-refs-exclude="heads/octopus" \ + --decorate-refs="heads" \ + --clear-decorations \ + --decorate-refs-exclude="tags/" \ + --decorate-refs="heads/octopus*" >actual && + test_cmp expect.filtered actual +' + test_expect_success 'log.decorate config parsing' ' git log --oneline --decorate=full >expect.full && git log --oneline --decorate=short >expect.short && @@ -2112,9 +2224,9 @@ test_expect_success REFFILES 'log diagnoses bogus HEAD hash' ' test_i18ngrep broken stderr ' -test_expect_success 'log diagnoses bogus HEAD symref' ' +test_expect_success 
REFFILES 'log diagnoses bogus HEAD symref' ' git init empty && - git --git-dir empty/.git symbolic-ref HEAD refs/heads/invalid.lock && + echo "ref: refs/heads/invalid.lock" > empty/.git/HEAD && test_must_fail git -C empty log 2>stderr && test_i18ngrep broken stderr && test_must_fail git -C empty log --default totally-bogus 2>stderr && @@ -2192,6 +2304,20 @@ test_expect_success 'log --decorate includes all levels of tag annotated tags' ' test_cmp expect actual ' +test_expect_success 'log --decorate does not include things outside filter' ' + reflist="refs/prefetch refs/rebase-merge refs/bundle" && + + for ref in $reflist + do + git update-ref $ref/fake HEAD || return 1 + done && + + git log --decorate=full --oneline >actual && + + # None of the refs are visible: + ! grep /fake actual +' + test_expect_success 'log --end-of-options' ' git update-ref refs/heads/--source HEAD && git log --end-of-options --source >actual && diff --git a/t/t4203-mailmap.sh b/t/t4203-mailmap.sh index 0b2d21ec55..cd1cab3e54 100755 --- a/t/t4203-mailmap.sh +++ b/t/t4203-mailmap.sh @@ -963,4 +963,63 @@ test_expect_success SYMLINKS 'symlinks not respected in-tree' ' test_cmp expect actual ' +test_expect_success 'prepare for cat-file --mailmap' ' + rm -f .mailmap && + git commit --allow-empty -m foo --author="Orig <orig@example.com>" +' + +test_expect_success '--no-use-mailmap disables mailmap in cat-file' ' + test_when_finished "rm .mailmap" && + cat >.mailmap <<-EOF && + A U Thor <author@example.com> Orig <orig@example.com> + EOF + cat >expect <<-EOF && + author Orig <orig@example.com> + EOF + git cat-file --no-use-mailmap commit HEAD >log && + sed -n "/^author /s/\([^>]*>\).*/\1/p" log >actual && + test_cmp expect actual +' + +test_expect_success '--use-mailmap enables mailmap in cat-file' ' + test_when_finished "rm .mailmap" && + cat >.mailmap <<-EOF && + A U Thor <author@example.com> Orig <orig@example.com> + EOF + cat >expect <<-EOF && + author A U Thor <author@example.com> + EOF + git cat-file --use-mailmap commit HEAD >log && + sed -n "/^author /s/\([^>]*>\).*/\1/p" log >actual && + test_cmp expect actual +' + +test_expect_success '--no-mailmap disables mailmap in cat-file for annotated tag objects' ' + test_when_finished "rm .mailmap" && + cat >.mailmap <<-EOF && + Orig <orig@example.com> C O Mitter <committer@example.com> + EOF + cat >expect <<-EOF && + tagger C O Mitter <committer@example.com> + EOF + git tag -a -m "annotated tag" v1 && + git cat-file --no-mailmap -p v1 >log && + sed -n "/^tagger /s/\([^>]*>\).*/\1/p" log >actual && + test_cmp expect actual +' + +test_expect_success '--mailmap enables mailmap in cat-file for annotated tag objects' ' + test_when_finished "rm .mailmap" && + cat >.mailmap <<-EOF && + Orig <orig@example.com> C O Mitter <committer@example.com> + EOF + cat >expect <<-EOF && + tagger Orig <orig@example.com> + EOF + git tag -a -m "annotated tag" v2 && + git cat-file --mailmap -p v2 >log && + sed -n "/^tagger /s/\([^>]*>\).*/\1/p" log >actual && + test_cmp expect actual +' + test_done diff --git a/t/t4207-log-decoration-colors.sh b/t/t4207-log-decoration-colors.sh index 36ac6aff1e..ded33a82e2 100755 --- a/t/t4207-log-decoration-colors.sh +++ b/t/t4207-log-decoration-colors.sh @@ -3,7 +3,7 @@ # Copyright (c) 2010 Nazri Ramliy # -test_description='Test for "git log --decorate" colors' +test_description='test "git log --decorate" colors' GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME @@ -17,6 +17,7 @@ test_expect_success setup ' git config 
color.decorate.remoteBranch red && git config color.decorate.tag "reverse bold yellow" && git config color.decorate.stash magenta && + git config color.decorate.grafted black && git config color.decorate.HEAD cyan && c_reset="<RESET>" && @@ -27,6 +28,7 @@ test_expect_success setup ' c_tag="<BOLD;REVERSE;YELLOW>" && c_stash="<MAGENTA>" && c_HEAD="<CYAN>" && + c_grafted="<BLACK>" && test_commit A && git clone . other && @@ -42,25 +44,79 @@ test_expect_success setup ' git stash save Changes to A.t ' -cat >expected <<EOF -${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_HEAD}HEAD ->\ - ${c_reset}${c_branch}main${c_reset}${c_commit},\ - ${c_reset}${c_tag}tag: v1.0${c_reset}${c_commit},\ - ${c_reset}${c_tag}tag: B${c_reset}${c_commit})${c_reset} B -${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: A1${c_reset}${c_commit},\ - ${c_reset}${c_remoteBranch}other/main${c_reset}${c_commit})${c_reset} A1 -${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_stash}refs/stash${c_reset}${c_commit})${c_reset}\ - On main: Changes to A.t -${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: A${c_reset}${c_commit})${c_reset} A -EOF +cmp_filtered_decorations () { + sed "s/$OID_REGEX/COMMIT_ID/" actual | test_decode_color >filtered && + test_cmp expect filtered +} # We want log to show all, but the second parent to refs/stash is irrelevant # to this test since it does not contain any decoration, hence --first-parent -test_expect_success 'Commit Decorations Colored Correctly' ' - git log --first-parent --abbrev=10 --all --decorate --oneline --color=always | - sed "s/[0-9a-f]\{10,10\}/COMMIT_ID/" | - test_decode_color >out && - test_cmp expected out +test_expect_success 'commit decorations colored correctly' ' + cat >expect <<-EOF && + ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_HEAD}HEAD -> \ +${c_reset}${c_branch}main${c_reset}${c_commit}, \ +${c_reset}${c_tag}tag: v1.0${c_reset}${c_commit}, \ +${c_reset}${c_tag}tag: B${c_reset}${c_commit})${c_reset} B +${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: A1${c_reset}${c_commit}, \ +${c_reset}${c_remoteBranch}other/main${c_reset}${c_commit})${c_reset} A1 + ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_stash}refs/stash${c_reset}${c_commit})${c_reset} \ +On main: Changes to A.t + ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: A${c_reset}${c_commit})${c_reset} A + EOF + + git log --first-parent --no-abbrev --decorate --oneline --color=always --all >actual && + cmp_filtered_decorations +' + +test_expect_success 'test coloring with replace-objects' ' + test_when_finished rm -rf .git/refs/replace* && + test_commit C && + test_commit D && + + git replace HEAD~1 HEAD~2 && + + cat >expect <<-EOF && + ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_HEAD}HEAD -> \ +${c_reset}${c_branch}main${c_reset}${c_commit}, \ +${c_reset}${c_tag}tag: D${c_reset}${c_commit})${c_reset} D + ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: C${c_reset}${c_commit}, \ +${c_reset}${c_grafted}replaced${c_reset}${c_commit})${c_reset} B + ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: A${c_reset}${c_commit})${c_reset} A +EOF + + git log --first-parent --no-abbrev --decorate --oneline --color=always HEAD >actual && + cmp_filtered_decorations && + git replace -d HEAD~1 && + + GIT_REPLACE_REF_BASE=refs/replace2/ git replace HEAD~1 HEAD~2 && + GIT_REPLACE_REF_BASE=refs/replace2/ git log --first-parent \ + --no-abbrev --decorate --oneline --color=always HEAD 
>actual && + cmp_filtered_decorations +' + +test_expect_success 'test coloring with grafted commit' ' + test_when_finished rm -rf .git/refs/replace* && + + git replace --graft HEAD HEAD~2 && + + cat >expect <<-EOF && + ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_HEAD}HEAD -> \ +${c_reset}${c_branch}main${c_reset}${c_commit}, \ +${c_reset}${c_tag}tag: D${c_reset}${c_commit}, \ +${c_reset}${c_grafted}replaced${c_reset}${c_commit})${c_reset} D + ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: v1.0${c_reset}${c_commit}, \ +${c_reset}${c_tag}tag: B${c_reset}${c_commit})${c_reset} B + ${c_commit}COMMIT_ID${c_reset}${c_commit} (${c_reset}${c_tag}tag: A${c_reset}${c_commit})${c_reset} A + EOF + + git log --first-parent --no-abbrev --decorate --oneline --color=always HEAD >actual && + cmp_filtered_decorations && + git replace -d HEAD && + + GIT_REPLACE_REF_BASE=refs/replace2/ git replace --graft HEAD HEAD~2 && + GIT_REPLACE_REF_BASE=refs/replace2/ git log --first-parent \ + --no-abbrev --decorate --oneline --color=always HEAD >actual && + cmp_filtered_decorations ' test_done diff --git a/t/t4301-merge-tree-write-tree.sh b/t/t4301-merge-tree-write-tree.sh new file mode 100755 index 0000000000..a243e3c517 --- /dev/null +++ b/t/t4301-merge-tree-write-tree.sh @@ -0,0 +1,241 @@ +#!/bin/sh + +test_description='git merge-tree --write-tree' + +TEST_PASSES_SANITIZE_LEAK=true +. ./test-lib.sh + +# This test is ort-specific +if test "$GIT_TEST_MERGE_ALGORITHM" != "ort" +then + skip_all="GIT_TEST_MERGE_ALGORITHM != ort" + test_done +fi + +test_expect_success setup ' + test_write_lines 1 2 3 4 5 >numbers && + echo hello >greeting && + echo foo >whatever && + git add numbers greeting whatever && + test_tick && + git commit -m initial && + + git branch side1 && + git branch side2 && + git branch side3 && + + git checkout side1 && + test_write_lines 1 2 3 4 5 6 >numbers && + echo hi >greeting && + echo bar >whatever && + git add numbers greeting whatever && + test_tick && + git commit -m modify-stuff && + + git checkout side2 && + test_write_lines 0 1 2 3 4 5 >numbers && + echo yo >greeting && + git rm whatever && + mkdir whatever && + >whatever/empty && + git add numbers greeting whatever/empty && + test_tick && + git commit -m other-modifications && + + git checkout side3 && + git mv numbers sequence && + test_tick && + git commit -m rename-numbers && + + git switch --orphan unrelated && + >something-else && + git add something-else && + test_tick && + git commit -m first-commit +' + +test_expect_success 'Clean merge' ' + TREE_OID=$(git merge-tree --write-tree side1 side3) && + q_to_tab <<-EOF >expect && + 100644 blob $(git rev-parse side1:greeting)Qgreeting + 100644 blob $(git rev-parse side1:numbers)Qsequence + 100644 blob $(git rev-parse side1:whatever)Qwhatever + EOF + + git ls-tree $TREE_OID >actual && + test_cmp expect actual +' + +test_expect_success 'Content merge and a few conflicts' ' + git checkout side1^0 && + test_must_fail git merge side2 && + expected_tree=$(git rev-parse AUTO_MERGE) && + + # We will redo the merge, while we are still in a conflicted state! + git ls-files -u >conflicted-file-info && + test_when_finished "git reset --hard" && + + test_expect_code 1 git merge-tree --write-tree side1 side2 >RESULT && + actual_tree=$(head -n 1 RESULT) && + + # Due to differences of e.g. "HEAD" vs "side1", the results will not + # exactly match. Dig into individual files. 
+ + # Numbers should have three-way merged cleanly + test_write_lines 0 1 2 3 4 5 6 >expect && + git show ${actual_tree}:numbers >actual && + test_cmp expect actual && + + # whatever and whatever~<branch> should have same HASHES + git rev-parse ${expected_tree}:whatever ${expected_tree}:whatever~HEAD >expect && + git rev-parse ${actual_tree}:whatever ${actual_tree}:whatever~side1 >actual && + test_cmp expect actual && + + # greeting should have a merge conflict + git show ${expected_tree}:greeting >tmp && + sed -e s/HEAD/side1/ tmp >expect && + git show ${actual_tree}:greeting >actual && + test_cmp expect actual +' + +test_expect_success 'Barf on misspelled option, with exit code other than 0 or 1' ' + # Mis-spell with single "s" instead of double "s" + test_expect_code 129 git merge-tree --write-tree --mesages FOOBAR side1 side2 2>expect && + + grep "error: unknown option.*mesages" expect +' + +test_expect_success 'Barf on too many arguments' ' + test_expect_code 129 git merge-tree --write-tree side1 side2 invalid 2>expect && + + grep "^usage: git merge-tree" expect +' + +anonymize_hash() { + sed -e "s/[0-9a-f]\{40,\}/HASH/g" "$@" +} + +test_expect_success 'test conflict notices and such' ' + test_expect_code 1 git merge-tree --write-tree --name-only side1 side2 >out && + anonymize_hash out >actual && + + # Expected results: + # "greeting" should merge with conflicts + # "numbers" should merge cleanly + # "whatever" has *both* a modify/delete and a file/directory conflict + cat <<-EOF >expect && + HASH + greeting + whatever~side1 + + Auto-merging greeting + CONFLICT (content): Merge conflict in greeting + Auto-merging numbers + CONFLICT (file/directory): directory in the way of whatever from side1; moving it to whatever~side1 instead. + CONFLICT (modify/delete): whatever~side1 deleted in side2 and modified in side1. Version side1 of whatever~side1 left in tree. 
+	EOF
+
+	test_cmp expect actual
+'
+
+for opt in $(git merge-tree --git-completion-helper-all)
+do
+	if test $opt = "--trivial-merge" || test $opt = "--write-tree"
+	then
+		continue
+	fi
+
+	test_expect_success "usage: --trivial-merge is incompatible with $opt" '
+		test_expect_code 128 git merge-tree --trivial-merge $opt side1 side2 side3
+	'
+done
+
+test_expect_success 'Just the conflicted files without the messages' '
+	test_expect_code 1 git merge-tree --write-tree --no-messages --name-only side1 side2 >out &&
+	anonymize_hash out >actual &&
+
+	test_write_lines HASH greeting whatever~side1 >expect &&
+
+	test_cmp expect actual
+'
+
+test_expect_success 'Check conflicted oids and modes without messages' '
+	test_expect_code 1 git merge-tree --write-tree --no-messages side1 side2 >out &&
+	anonymize_hash out >actual &&
+
+	# Compare the basic output format
+	q_to_tab >expect <<-\EOF &&
+	HASH
+	100644 HASH 1Qgreeting
+	100644 HASH 2Qgreeting
+	100644 HASH 3Qgreeting
+	100644 HASH 1Qwhatever~side1
+	100644 HASH 2Qwhatever~side1
+	EOF
+
+	test_cmp expect actual &&
+
+	# Check the actual hashes against the `ls-files -u` output too
+	tail -n +2 out | sed -e s/side1/HEAD/ >actual &&
+	test_cmp conflicted-file-info actual
+'
+
+test_expect_success 'NUL terminated conflicted file "lines"' '
+	git checkout -b tweak1 side1 &&
+	test_write_lines zero 1 2 3 4 5 6 >numbers &&
+	git add numbers &&
+	git mv numbers "Αυτά μου φαίνονται κινέζικα" &&
+	git commit -m "Renamed numbers" &&
+
+	test_expect_code 1 git merge-tree --write-tree -z tweak1 side2 >out &&
+	anonymize_hash out >actual &&
+	printf "\\n" >>actual &&
+
+	# Expected results:
+	#   "greeting" should merge with conflicts
+	#   "whatever" has *both* a modify/delete and a file/directory conflict
+	#   "Αυτά μου φαίνονται κινέζικα" should have a conflict
+	echo HASH | lf_to_nul >expect &&
+
+	q_to_tab <<-EOF | lf_to_nul >>expect &&
+	100644 HASH 1Qgreeting
+	100644 HASH 2Qgreeting
+	100644 HASH 3Qgreeting
+	100644 HASH 1Qwhatever~tweak1
+	100644 HASH 2Qwhatever~tweak1
+	100644 HASH 1QΑυτά μου φαίνονται κινέζικα
+	100644 HASH 2QΑυτά μου φαίνονται κινέζικα
+	100644 HASH 3QΑυτά μου φαίνονται κινέζικα
+
+	EOF
+
+	q_to_nul <<-EOF >>expect &&
+	1QgreetingQAuto-mergingQAuto-merging greeting
+	Q1QgreetingQCONFLICT (contents)QCONFLICT (content): Merge conflict in greeting
+	Q2Qwhatever~tweak1QwhateverQCONFLICT (file/directory)QCONFLICT (file/directory): directory in the way of whatever from tweak1; moving it to whatever~tweak1 instead.
+	Q1Qwhatever~tweak1QCONFLICT (modify/delete)QCONFLICT (modify/delete): whatever~tweak1 deleted in side2 and modified in tweak1. Version tweak1 of whatever~tweak1 left in tree.
+	Q1QΑυτά μου φαίνονται κινέζικαQAuto-mergingQAuto-merging Αυτά μου φαίνονται κινέζικα
+	Q1QΑυτά μου φαίνονται κινέζικαQCONFLICT (contents)QCONFLICT (content): Merge conflict in Αυτά μου φαίνονται κινέζικα
+	Q
+	EOF
+
+	test_cmp expect actual
+'
+
+test_expect_success 'error out by default for unrelated histories' '
+	test_expect_code 128 git merge-tree --write-tree side1 unrelated 2>error &&
+
+	grep "refusing to merge unrelated histories" error
+'
+
+test_expect_success 'can override merge of unrelated histories' '
+	git merge-tree --write-tree --allow-unrelated-histories side1 unrelated >tree &&
+	TREE=$(cat tree) &&
+
+	git rev-parse side1:numbers side1:greeting side1:whatever unrelated:something-else >expect &&
+	git rev-parse $TREE:numbers $TREE:greeting $TREE:whatever $TREE:something-else >actual &&
+
+	test_cmp expect actual
+'
+
+test_done
diff --git a/t/t5000-tar-tree.sh b/t/t5000-tar-tree.sh index 7f8d2ab0a7..eaa0b22ece 100755 --- a/t/t5000-tar-tree.sh +++ b/t/t5000-tar-tree.sh @@ -24,6 +24,7 @@ commit id embedding: ' +TEST_CREATE_REPO_NO_TEMPLATE=1 . ./test-lib.sh SUBSTFORMAT=%H%n @@ -143,6 +144,7 @@ test_expect_success 'populate workdir' ' test_expect_success \ 'add ignored file' \ 'echo ignore me >a/ignored && + mkdir .git/info && echo ignored export-ignore >.git/info/attributes' test_expect_success 'add files to repository' ' @@ -157,7 +159,8 @@ test_expect_success 'setup export-subst' ' ' test_expect_success 'create bare clone' ' - git clone --bare . bare.git && + git clone --template= --bare . bare.git && + mkdir bare.git/info && cp .git/info/attributes bare.git/info/attributes ' @@ -339,21 +342,21 @@ test_expect_success 'only enabled filters are available remotely' ' test_cmp_bin remote.bar config.bar ' -test_expect_success GZIP 'git archive --format=tgz' ' +test_expect_success 'git archive --format=tgz' ' git archive --format=tgz HEAD >j.tgz ' -test_expect_success GZIP 'git archive --format=tar.gz' ' +test_expect_success 'git archive --format=tar.gz' ' git archive --format=tar.gz HEAD >j1.tar.gz && test_cmp_bin j.tgz j1.tar.gz ' -test_expect_success GZIP 'infer tgz from .tgz filename' ' +test_expect_success 'infer tgz from .tgz filename' ' git archive --output=j2.tgz HEAD && test_cmp_bin j.tgz j2.tgz ' -test_expect_success GZIP 'infer tgz from .tar.gz filename' ' +test_expect_success 'infer tgz from .tar.gz filename' ' git archive --output=j3.tar.gz HEAD && test_cmp_bin j.tgz j3.tar.gz ' @@ -363,17 +366,33 @@ test_expect_success GZIP 'extract tgz file' ' test_cmp_bin b.tar j.tar ' -test_expect_success GZIP 'remote tar.gz is allowed by default' ' +test_expect_success 'remote tar.gz is allowed by default' ' git archive --remote=. --format=tar.gz HEAD >remote.tar.gz && test_cmp_bin j.tgz remote.tar.gz ' -test_expect_success GZIP 'remote tar.gz can be disabled' ' +test_expect_success 'remote tar.gz can be disabled' ' git config tar.tar.gz.remote false && test_must_fail git archive --remote=.
--format=tar.gz HEAD \ >remote.tar.gz ' +test_expect_success GZIP 'git archive --format=tgz (external gzip)' ' + test_config tar.tgz.command "gzip -cn" && + git archive --format=tgz HEAD >external_gzip.tgz +' + +test_expect_success GZIP 'git archive --format=tar.gz (external gzip)' ' + test_config tar.tar.gz.command "gzip -cn" && + git archive --format=tar.gz HEAD >external_gzip.tar.gz && + test_cmp_bin external_gzip.tgz external_gzip.tar.gz +' + +test_expect_success GZIP 'extract tgz file (external gzip)' ' + gzip -d -c <external_gzip.tgz >external_gzip.tar && + test_cmp_bin b.tar external_gzip.tar +' + test_expect_success 'archive and :(glob)' ' git archive -v HEAD -- ":(glob)**/sh" >/dev/null 2>actual && cat >expect <<EOF && diff --git a/t/t5001-archive-attr.sh b/t/t5001-archive-attr.sh index 712ae52299..2f6eef5e37 100755 --- a/t/t5001-archive-attr.sh +++ b/t/t5001-archive-attr.sh @@ -2,6 +2,7 @@ test_description='git archive attribute tests' +TEST_CREATE_REPO_NO_TEMPLATE=1 . ./test-lib.sh SUBSTFORMAT='%H (%h)%n' @@ -20,6 +21,7 @@ extract_tar_to_dir () { test_expect_success 'setup' ' echo ignored >ignored && + mkdir .git/info && echo ignored export-ignore >>.git/info/attributes && git add ignored && @@ -46,7 +48,8 @@ test_expect_success 'setup' ' git commit -m. && - git clone --bare . bare && + git clone --template= --bare . bare && + mkdir bare/info && cp .git/info/attributes bare/info/attributes ' diff --git a/t/t5002-archive-attr-pattern.sh b/t/t5002-archive-attr-pattern.sh index a66b5ba27e..78ab75f1bc 100755 --- a/t/t5002-archive-attr-pattern.sh +++ b/t/t5002-archive-attr-pattern.sh @@ -3,6 +3,7 @@ test_description='git archive attribute pattern tests' TEST_PASSES_SANITIZE_LEAK=true +TEST_CREATE_REPO_NO_TEMPLATE=1 . ./test-lib.sh test_expect_exists() { @@ -15,6 +16,7 @@ test_expect_missing() { test_expect_success 'setup' ' echo ignored >ignored && + mkdir .git/info && echo ignored export-ignore >>.git/info/attributes && git add ignored && @@ -54,7 +56,8 @@ test_expect_success 'setup' ' git commit -m. && - git clone --bare . bare && + git clone --template= --bare . bare && + mkdir bare/info && cp .git/info/attributes bare/info/attributes ' diff --git a/t/t5003-archive-zip.sh b/t/t5003-archive-zip.sh index 3992d08158..fc499cdff0 100755 --- a/t/t5003-archive-zip.sh +++ b/t/t5003-archive-zip.sh @@ -2,6 +2,7 @@ test_description='git archive --format=zip test' +TEST_CREATE_REPO_NO_TEMPLATE=1 . ./test-lib.sh SUBSTFORMAT=%H%n @@ -121,6 +122,7 @@ test_expect_success 'prepare file list' ' test_expect_success \ 'add ignored file' \ 'echo ignore me >a/ignored && + mkdir .git/info && echo ignored export-ignore >.git/info/attributes' test_expect_success 'add files to repository' ' @@ -139,7 +141,8 @@ test_expect_success 'setup export-subst and diff attributes' ' ' test_expect_success 'create bare clone' ' - git clone --bare . bare.git && + git clone --template= --bare . bare.git && + mkdir bare.git/info && cp .git/info/attributes bare.git/info/attributes && # Recreate our changes to .git/config rather than just copying it, as # we do not want to clobber core.bare or other settings. diff --git a/t/t5303-pack-corruption-resilience.sh b/t/t5303-pack-corruption-resilience.sh index 41e6dc4dcf..2926e8dfc4 100755 --- a/t/t5303-pack-corruption-resilience.sh +++ b/t/t5303-pack-corruption-resilience.sh @@ -4,6 +4,8 @@ # test_description='resilience to pack corruptions with redundant objects' + +TEST_PASSES_SANITIZE_LEAK=true . 
./test-lib.sh # Note: the test objects are created with knowledge of their pack encoding diff --git a/t/t5308-pack-detect-duplicates.sh b/t/t5308-pack-detect-duplicates.sh index 693b2411c8..655cafa054 100755 --- a/t/t5308-pack-detect-duplicates.sh +++ b/t/t5308-pack-detect-duplicates.sh @@ -1,6 +1,8 @@ #!/bin/sh test_description='handling of duplicate objects in incoming packfiles' + +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh . "$TEST_DIRECTORY"/lib-pack.sh diff --git a/t/t5309-pack-delta-cycles.sh b/t/t5309-pack-delta-cycles.sh index 55b787630f..4e910c5b9d 100755 --- a/t/t5309-pack-delta-cycles.sh +++ b/t/t5309-pack-delta-cycles.sh @@ -1,6 +1,8 @@ #!/bin/sh test_description='test index-pack handling of delta cycles in packfiles' + +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh . "$TEST_DIRECTORY"/lib-pack.sh diff --git a/t/t5314-pack-cycle-detection.sh b/t/t5314-pack-cycle-detection.sh index 0aec8619e2..73a241743a 100755 --- a/t/t5314-pack-cycle-detection.sh +++ b/t/t5314-pack-cycle-detection.sh @@ -49,9 +49,9 @@ Then no matter which order we start looking at the packs in, we know that we will always find a delta for "file", because its lookup will always come immediately after the lookup for "dummy". ' -. ./test-lib.sh - +TEST_PASSES_SANITIZE_LEAK=true +. ./test-lib.sh # Create a pack containing the tree $1 and blob $1:file, with # the latter stored as a delta against $2:file. diff --git a/t/t5315-pack-objects-compression.sh b/t/t5315-pack-objects-compression.sh index 8bacd96275..c80ea9e8b7 100755 --- a/t/t5315-pack-objects-compression.sh +++ b/t/t5315-pack-objects-compression.sh @@ -2,6 +2,7 @@ test_description='pack-object compression configuration' +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh test_expect_success setup ' diff --git a/t/t5318-commit-graph.sh b/t/t5318-commit-graph.sh index db89542dfb..1b0cd82359 100755 --- a/t/t5318-commit-graph.sh +++ b/t/t5318-commit-graph.sh @@ -361,13 +361,14 @@ test_expect_success 'replace-objects invalidates commit-graph' ' test_expect_success 'commit grafts invalidate commit-graph' ' cd "$TRASH_DIRECTORY" && test_when_finished rm -rf graft && - git clone full graft && + git clone --template= full graft && ( cd graft && git commit-graph write --reachable && test_path_is_file .git/objects/info/commit-graph && H1=$(git rev-parse --verify HEAD~1) && H3=$(git rev-parse --verify HEAD~3) && + mkdir .git/info && echo "$H1 $H3" >.git/info/grafts && git -c core.commitGraph=false log >expect && git -c core.commitGraph=true log >actual && diff --git a/t/t5321-pack-large-objects.sh b/t/t5321-pack-large-objects.sh index 8a56d98a0e..70770fe274 100755 --- a/t/t5321-pack-large-objects.sh +++ b/t/t5321-pack-large-objects.sh @@ -6,6 +6,8 @@ test_description='git pack-object with "large" deltas ' + +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh . 
"$TEST_DIRECTORY"/lib-pack.sh diff --git a/t/t5329-pack-objects-cruft.sh b/t/t5329-pack-objects-cruft.sh index 8968f7a08d..303f7a5d84 100755 --- a/t/t5329-pack-objects-cruft.sh +++ b/t/t5329-pack-objects-cruft.sh @@ -29,7 +29,8 @@ basic_cruft_pack_tests () { while read oid do path="$objdir/$(test_oid_to_path "$oid")" && - printf "%s %d\n" "$oid" "$(test-tool chmtime --get "$path")" + printf "%s %d\n" "$oid" "$(test-tool chmtime --get "$path")" || + echo "object list generation failed for $oid" done | sort -k1 ) >expect && @@ -232,7 +233,7 @@ test_expect_success 'cruft tags rescue tagged objects' ' while read oid do test-tool chmtime -1000 \ - "$objdir/$(test_oid_to_path $oid)" + "$objdir/$(test_oid_to_path $oid)" || exit 1 done <objects && test-tool chmtime -500 \ @@ -272,7 +273,7 @@ test_expect_success 'cruft commits rescue parents, trees' ' while read object do test-tool chmtime -1000 \ - "$objdir/$(test_oid_to_path $object)" + "$objdir/$(test_oid_to_path $object)" || exit 1 done <objects && test-tool chmtime +500 "$objdir/$(test_oid_to_path \ $(git rev-parse HEAD))" && @@ -345,7 +346,7 @@ test_expect_success 'expired objects are pruned' ' while read object do test-tool chmtime -1000 \ - "$objdir/$(test_oid_to_path $object)" + "$objdir/$(test_oid_to_path $object)" || exit 1 done <objects && keep="$(basename "$(ls $packdir/pack-*.pack)")" && diff --git a/t/t5351-unpack-large-objects.sh b/t/t5351-unpack-large-objects.sh new file mode 100755 index 0000000000..8c8af99b84 --- /dev/null +++ b/t/t5351-unpack-large-objects.sh @@ -0,0 +1,103 @@ +#!/bin/sh +# +# Copyright (c) 2022 Han Xin +# + +test_description='git unpack-objects with large objects' + +TEST_PASSES_SANITIZE_LEAK=true +. ./test-lib.sh + +prepare_dest () { + test_when_finished "rm -rf dest.git" && + git init --bare dest.git && + git -C dest.git config core.bigFileThreshold "$1" +} + +test_expect_success "create large objects (1.5 MB) and PACK" ' + test-tool genrandom foo 1500000 >big-blob && + test_commit --append foo big-blob && + test-tool genrandom bar 1500000 >big-blob && + test_commit --append bar big-blob && + PACK=$(echo HEAD | git pack-objects --revs pack) && + git verify-pack -v pack-$PACK.pack >out && + sed -n -e "s/^\([0-9a-f][0-9a-f]*\).*\(commit\|tree\|blob\).*/\1/p" \ + <out >obj-list +' + +test_expect_success 'set memory limitation to 1MB' ' + GIT_ALLOC_LIMIT=1m && + export GIT_ALLOC_LIMIT +' + +test_expect_success 'unpack-objects failed under memory limitation' ' + prepare_dest 2m && + test_must_fail git -C dest.git unpack-objects <pack-$PACK.pack 2>err && + grep "fatal: attempting to allocate" err +' + +test_expect_success 'unpack-objects works with memory limitation in dry-run mode' ' + prepare_dest 2m && + git -C dest.git unpack-objects -n <pack-$PACK.pack && + test_stdout_line_count = 0 find dest.git/objects -type f && + test_dir_is_empty dest.git/objects/pack +' + +test_expect_success 'unpack big object in stream' ' + prepare_dest 1m && + git -C dest.git unpack-objects <pack-$PACK.pack && + test_dir_is_empty dest.git/objects/pack +' + +check_fsync_events () { + local trace="$1" && + shift && + + cat >expect && + sed -n \ + -e '/^{"event":"data",.*"category":"fsync",/ { + s/.*"category":"fsync",//; + s/}$//; + p; + }' \ + <"$trace" >actual && + test_cmp expect actual +} + +BATCH_CONFIGURATION='-c core.fsync=loose-object -c core.fsyncmethod=batch' + +test_expect_success 'unpack big object in stream (core.fsyncmethod=batch)' ' + prepare_dest 1m && + GIT_TRACE2_EVENT="$(pwd)/trace2.txt" \ + GIT_TEST_FSYNC=true \ + 
git -C dest.git $BATCH_CONFIGURATION unpack-objects <pack-$PACK.pack && + if grep "core.fsyncMethod = batch is unsupported" trace2.txt + then + flush_count=7 + else + flush_count=1 + fi && + check_fsync_events trace2.txt <<-EOF && + "key":"fsync/writeout-only","value":"6" + "key":"fsync/hardware-flush","value":"$flush_count" + EOF + + test_dir_is_empty dest.git/objects/pack && + git -C dest.git cat-file --batch-check="%(objectname)" <obj-list >current && + cmp obj-list current +' + +test_expect_success 'do not unpack existing large objects' ' + prepare_dest 1m && + git -C dest.git index-pack --stdin <pack-$PACK.pack && + git -C dest.git unpack-objects <pack-$PACK.pack && + + # The destination came up with the exact same pack... + DEST_PACK=$(echo dest.git/objects/pack/pack-*.pack) && + cmp pack-$PACK.pack $DEST_PACK && + + # ...and wrote no loose objects + test_stdout_line_count = 0 find dest.git/objects -type f ! -name "pack-*" +' + +test_done diff --git a/t/t5402-post-merge-hook.sh b/t/t5402-post-merge-hook.sh index 915af2de95..46ebdfbeeb 100755 --- a/t/t5402-post-merge-hook.sh +++ b/t/t5402-post-merge-hook.sh @@ -7,6 +7,7 @@ test_description='Test the post-merge hook.' GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh test_expect_success setup ' diff --git a/t/t5500-fetch-pack.sh b/t/t5500-fetch-pack.sh index ee6d2dde9f..d18f2823d8 100755 --- a/t/t5500-fetch-pack.sh +++ b/t/t5500-fetch-pack.sh @@ -407,6 +407,7 @@ test_expect_success 'in_vain not triggered before first ACK' ' ' test_expect_success 'in_vain resetted upon ACK' ' + test_when_finished rm -f log trace2 && rm -rf myserver myclient && git init myserver && @@ -432,7 +433,8 @@ test_expect_success 'in_vain resetted upon ACK' ' # first. The 256th commit is common between the client and the server, # and should reset in_vain. This allows negotiation to continue until # the client reports that first_anotherbranch_commit is common. - git -C myclient fetch --progress origin main 2>log && + GIT_TRACE2_EVENT="$(pwd)/trace2" git -C myclient fetch --progress origin main 2>log && + grep \"key\":\"total_rounds\",\"value\":\"6\" trace2 && test_i18ngrep "Total 3 " log ' diff --git a/t/t5503-tagfollow.sh b/t/t5503-tagfollow.sh index 195fc64dd4..5ebbaa4896 100755 --- a/t/t5503-tagfollow.sh +++ b/t/t5503-tagfollow.sh @@ -5,6 +5,7 @@ test_description='test automatic tag following' GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME +TEST_PASSES_SANITIZE_LEAK=true . 
./test-lib.sh # End state of the repository: diff --git a/t/t5505-remote.sh b/t/t5505-remote.sh index fff14e13ed..6c7370f87f 100755 --- a/t/t5505-remote.sh +++ b/t/t5505-remote.sh @@ -302,6 +302,52 @@ test_expect_success 'show' ' ) ' +cat >expect <<EOF +* remote origin + Fetch URL: $(pwd)/one + Push URL: $(pwd)/one + HEAD branch: main + Remote branches: + main skipped + side tracked + Local branches configured for 'git pull': + ahead merges with remote main + main merges with remote main + Local refs configured for 'git push': + main pushes to main (local out of date) + main pushes to upstream (create) +EOF + +test_expect_success 'show with negative refspecs' ' + test_when_finished "git -C test config --unset-all --fixed-value remote.origin.fetch ^refs/heads/main" && + git -C test config --add remote.origin.fetch ^refs/heads/main && + git -C test remote show origin >output && + test_cmp expect output +' + +cat >expect <<EOF +* remote origin + Fetch URL: $(pwd)/one + Push URL: $(pwd)/one + HEAD branch: main + Remote branches: + main new (next fetch will store in remotes/origin) + side stale (use 'git remote prune' to remove) + Local branches configured for 'git pull': + ahead merges with remote main + main merges with remote main + Local refs configured for 'git push': + main pushes to main (local out of date) + main pushes to upstream (create) +EOF + +test_expect_failure 'show stale with negative refspecs' ' + test_when_finished "git -C test config --unset-all --fixed-value remote.origin.fetch ^refs/heads/side" && + git -C test config --add remote.origin.fetch ^refs/heads/side && + git -C test remote show origin >output && + test_cmp expect output +' + cat >test/expect <<EOF * remote origin Fetch URL: $(pwd)/one @@ -957,11 +1003,12 @@ test_expect_success 'migrate a remote from named file in $GIT_DIR/remotes' ' ' test_expect_success 'migrate a remote from named file in $GIT_DIR/branches' ' - git clone one six && + git clone --template= one six && origin_url=$(pwd)/one && ( cd six && git remote rm origin && + mkdir .git/branches && echo "$origin_url#main" >.git/branches/origin && git remote rename origin origin && test_path_is_missing .git/branches/origin && @@ -972,10 +1019,11 @@ test_expect_success 'migrate a remote from named file in $GIT_DIR/branches' ' ' test_expect_success 'migrate a remote from named file in $GIT_DIR/branches (2)' ' - git clone one seven && + git clone --template= one seven && ( cd seven && git remote rm origin && + mkdir .git/branches && echo "quux#foom" > .git/branches/origin && git remote rename origin origin && test_path_is_missing .git/branches/origin && diff --git a/t/t5516-fetch-push.sh b/t/t5516-fetch-push.sh index 541adbb310..3211002d46 100755 --- a/t/t5516-fetch-push.sh +++ b/t/t5516-fetch-push.sh @@ -18,6 +18,7 @@ This test checks the following functionality: GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME +TEST_CREATE_REPO_NO_TEMPLATE=1 . 
./test-lib.sh D=$(pwd) @@ -26,7 +27,8 @@ mk_empty () { repo_name="$1" test_when_finished "rm -rf \"$repo_name\"" && test_path_is_missing "$repo_name" && - git init "$repo_name" && + git init --template= "$repo_name" && + mkdir "$repo_name"/.git/hooks && git -C "$repo_name" config receive.denyCurrentBranch warn } @@ -78,7 +80,7 @@ mk_test_with_hooks() { mk_child() { test_when_finished "rm -rf \"$2\"" && - git clone "$1" "$2" + git clone --template= "$1" "$2" } check_push_result () { @@ -198,7 +200,10 @@ test_expect_success 'push with negotiation' ' test_commit -C testrepo unrelated_commit && git -C testrepo config receive.hideRefs refs/remotes/origin/first_commit && test_when_finished "rm event" && - GIT_TRACE2_EVENT="$(pwd)/event" git -c protocol.version=2 -c push.negotiate=1 push testrepo refs/heads/main:refs/remotes/origin/main && + GIT_TRACE2_EVENT="$(pwd)/event" \ + git -c protocol.version=2 -c push.negotiate=1 \ + push testrepo refs/heads/main:refs/remotes/origin/main && + grep \"key\":\"total_rounds\",\"value\":\"1\" event && grep_wrote 2 event # 1 commit, 1 tree ' @@ -222,7 +227,10 @@ test_expect_success 'push with negotiation does not attempt to fetch submodules' git push testrepo $the_first_commit:refs/remotes/origin/first_commit && test_commit -C testrepo unrelated_commit && git -C testrepo config receive.hideRefs refs/remotes/origin/first_commit && - git -c submodule.recurse=true -c protocol.version=2 -c push.negotiate=1 push testrepo refs/heads/main:refs/remotes/origin/main 2>err && + GIT_TRACE2_EVENT="$(pwd)/event" git -c submodule.recurse=true \ + -c protocol.version=2 -c push.negotiate=1 \ + push testrepo refs/heads/main:refs/remotes/origin/main 2>err && + grep \"key\":\"total_rounds\",\"value\":\"1\" event && ! grep "Fetching submodule" err ' @@ -937,6 +945,7 @@ test_expect_success 'fetch with branches' ' mk_empty testrepo && git branch second $the_first_commit && git checkout second && + mkdir testrepo/.git/branches && echo ".." 
> testrepo/.git/branches/branch1 && ( cd testrepo && @@ -950,6 +959,7 @@ test_expect_success 'fetch with branches' ' test_expect_success 'fetch with branches containing #' ' mk_empty testrepo && + mkdir testrepo/.git/branches && echo "..#second" > testrepo/.git/branches/branch2 && ( cd testrepo && @@ -964,7 +974,11 @@ test_expect_success 'fetch with branches containing #' ' test_expect_success 'push with branches' ' mk_empty testrepo && git checkout second && + + test_when_finished "rm -rf .git/branches" && + mkdir .git/branches && echo "testrepo" > .git/branches/branch1 && + git push branch1 && ( cd testrepo && @@ -976,7 +990,11 @@ test_expect_success 'push with branches' ' test_expect_success 'push with branches containing #' ' mk_empty testrepo && + + test_when_finished "rm -rf .git/branches" && + mkdir .git/branches && echo "testrepo#branch3" > .git/branches/branch2 && + git push branch2 && ( cd testrepo && @@ -1865,4 +1883,26 @@ test_expect_success LIBCURL 'push warns or fails when using username:password' ' test_line_count = 1 warnings ' +test_expect_success 'push with config push.useBitmaps' ' + mk_test testrepo heads/main && + git checkout main && + test_unconfig push.useBitmaps && + GIT_TRACE2_EVENT="$PWD/default" \ + git push testrepo main:test && + test_subcommand git pack-objects --all-progress-implied --revs --stdout \ + --thin --delta-base-offset -q <default && + + test_config push.useBitmaps true && + GIT_TRACE2_EVENT="$PWD/true" \ + git push testrepo main:test2 && + test_subcommand git pack-objects --all-progress-implied --revs --stdout \ + --thin --delta-base-offset -q <true && + + test_config push.useBitmaps false && + GIT_TRACE2_EVENT="$PWD/false" \ + git push testrepo main:test3 && + test_subcommand git pack-objects --all-progress-implied --revs --stdout \ + --thin --delta-base-offset -q --no-use-bitmap-index <false +' + test_done diff --git a/t/t5524-pull-msg.sh b/t/t5524-pull-msg.sh index b2be3605f5..56716e29dd 100755 --- a/t/t5524-pull-msg.sh +++ b/t/t5524-pull-msg.sh @@ -2,6 +2,7 @@ test_description='git pull message generation' +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh dollar='$Dollar' diff --git a/t/t5541-http-push-smart.sh b/t/t5541-http-push-smart.sh index 2f09ff4fac..fbad2d5ff5 100755 --- a/t/t5541-http-push-smart.sh +++ b/t/t5541-http-push-smart.sh @@ -80,6 +80,25 @@ test_expect_success 'push to remote repository (standard)' ' test $HEAD = $(git rev-parse --verify HEAD)) ' +test_expect_success 'push to remote repository (standard) with sending Accept-Language' ' + cat >exp <<-\EOF && + => Send header: Accept-Language: ko-KR, *;q=0.9 + => Send header: Accept-Language: ko-KR, *;q=0.9 + EOF + + cd "$ROOT_PATH"/test_repo_clone && + : >path_lang && + git add path_lang && + test_tick && + git commit -m path_lang && + HEAD=$(git rev-parse --verify HEAD) && + GIT_TRACE_CURL=true LANGUAGE="ko_KR.UTF-8" git push -v -v 2>err && + ! grep "Expect: 100-continue" err && + + grep "=> Send header: Accept-Language:" err >err.language && + test_cmp exp err.language +' + test_expect_success 'push already up-to-date' ' git push ' diff --git a/t/t5544-pack-objects-hook.sh b/t/t5544-pack-objects-hook.sh index dd5f44d986..54f54f8d2e 100755 --- a/t/t5544-pack-objects-hook.sh +++ b/t/t5544-pack-objects-hook.sh @@ -56,7 +56,12 @@ test_expect_success 'hook does not run from repo config' ' ! 
grep "hook running" stderr && test_path_is_missing .git/hook.args && test_path_is_missing .git/hook.stdin && - test_path_is_missing .git/hook.stdout + test_path_is_missing .git/hook.stdout && + + # check that global config is used instead + test_config_global uploadpack.packObjectsHook ./hook && + git clone --no-local . dst2.git 2>stderr && + grep "hook running" stderr ' test_expect_success 'hook works with partial clone' ' diff --git a/t/t5550-http-fetch-dumb.sh b/t/t5550-http-fetch-dumb.sh index f0d9cd584d..d7cf85ffea 100755 --- a/t/t5550-http-fetch-dumb.sh +++ b/t/t5550-http-fetch-dumb.sh @@ -369,7 +369,7 @@ ja;q=0.95, zh;q=0.94, sv;q=0.93, pt;q=0.92, nb;q=0.91, *;q=0.90" \ ko_KR.EUC-KR:en_US.UTF-8:fr_CA:de.UTF-8@euro:sr@latin:ja:zh:sv:pt:nb ' -test_expect_success 'git client does not send an empty Accept-Language' ' +test_expect_success 'git client send an empty Accept-Language' ' GIT_TRACE_CURL=true LANGUAGE= git ls-remote "$HTTPD_URL/dumb/repo.git" 2>stderr && ! grep "^=> Send header: Accept-Language:" stderr ' @@ -422,7 +422,8 @@ test_expect_success 'set up evil alternates scheme' ' sha1=$(git -C "$victim" rev-parse HEAD) && evil=$HTTPD_DOCUMENT_ROOT_PATH/evil.git && - git init --bare "$evil" && + git init --template= --bare "$evil" && + mkdir "$evil/info" && # do this by hand to avoid object existence check printf "%s\\t%s\\n" $sha1 refs/heads/main >"$evil/info/refs" ' diff --git a/t/t5551-http-fetch-smart.sh b/t/t5551-http-fetch-smart.sh index b9351a732f..6a38294a47 100755 --- a/t/t5551-http-fetch-smart.sh +++ b/t/t5551-http-fetch-smart.sh @@ -31,6 +31,7 @@ test_expect_success 'clone http repository' ' > GET /smart/repo.git/info/refs?service=git-upload-pack HTTP/1.1 > Accept: */* > Accept-Encoding: ENCODINGS + > Accept-Language: ko-KR, *;q=0.9 > Pragma: no-cache < HTTP/1.1 200 OK < Pragma: no-cache @@ -40,13 +41,15 @@ test_expect_success 'clone http repository' ' > Accept-Encoding: ENCODINGS > Content-Type: application/x-git-upload-pack-request > Accept: application/x-git-upload-pack-result + > Accept-Language: ko-KR, *;q=0.9 > Content-Length: xxx < HTTP/1.1 200 OK < Pragma: no-cache < Cache-Control: no-cache, max-age=0, must-revalidate < Content-Type: application/x-git-upload-pack-result EOF - GIT_TRACE_CURL=true GIT_TEST_PROTOCOL_VERSION=0 \ + + GIT_TRACE_CURL=true GIT_TEST_PROTOCOL_VERSION=0 LANGUAGE="ko_KR.UTF-8" \ git clone --quiet $HTTPD_URL/smart/repo.git clone 2>err && test_cmp file clone/file && tr '\''\015'\'' Q <err | @@ -94,7 +97,10 @@ test_expect_success 'clone http repository' ' test_cmp exp actual.smudged && grep "Accept-Encoding:.*gzip" actual >actual.gzip && - test_line_count = 2 actual.gzip + test_line_count = 2 actual.gzip && + + grep "Accept-Language: ko-KR, *" actual >actual.language && + test_line_count = 2 actual.language fi ' @@ -175,8 +181,8 @@ test_expect_success 'no-op half-auth fetch does not require a password' ' # This is not possible with protocol v2, since both objects and refs # are obtained from the "git-upload-pack" path. A solution to this is # to teach the server and client to be able to inline ls-refs requests - # as an Extra Parameter (see pack-protocol.txt), so that "info/refs" - # can serve refs, just like it does in protocol v0. + # as an Extra Parameter (see "git help gitformat-pack-protocol"), so that + # "info/refs" can serve refs, just like it does in protocol v0. 
GIT_TEST_PROTOCOL_VERSION=0 git --git-dir=half-auth fetch && expect_askpass none ' diff --git a/t/t5601-clone.sh b/t/t5601-clone.sh index cf3be0584f..2e57de9c12 100755 --- a/t/t5601-clone.sh +++ b/t/t5601-clone.sh @@ -743,7 +743,11 @@ test_expect_success 'batch missing blob request during checkout' ' # Ensure that there is only one negotiation by checking that there is # only "done" line sent. ("done" marks the end of negotiation.) - GIT_TRACE_PACKET="$(pwd)/trace" git -C client checkout HEAD^ && + GIT_TRACE_PACKET="$(pwd)/trace" \ + GIT_TRACE2_EVENT="$(pwd)/trace2_event" \ + git -C client -c trace2.eventNesting=5 checkout HEAD^ && + grep \"key\":\"total_rounds\",\"value\":\"1\" trace2_event >trace_lines && + test_line_count = 1 trace_lines && grep "fetch> done" trace >done_lines && test_line_count = 1 done_lines ' diff --git a/t/t5616-partial-clone.sh b/t/t5616-partial-clone.sh index 4a3778d04a..9aeacc2f6a 100755 --- a/t/t5616-partial-clone.sh +++ b/t/t5616-partial-clone.sh @@ -49,6 +49,13 @@ test_expect_success 'do partial clone 1' ' test "$(git -C pc1 config --local remote.origin.partialclonefilter)" = "blob:none" ' +test_expect_success 'rev-list --missing=allow-promisor on partial clone' ' + git -C pc1 rev-list --objects --missing=allow-promisor HEAD >actual && + git -C pc1 rev-list --objects --missing=print HEAD >expect.raw && + grep -v "^?" expect.raw >expect && + test_cmp expect actual +' + test_expect_success 'verify that .promisor file contains refs fetched' ' ls pc1/.git/objects/pack/pack-*.promisor >promisorlist && test_line_count = 1 promisorlist && diff --git a/t/t5703-upload-pack-ref-in-want.sh b/t/t5703-upload-pack-ref-in-want.sh index 9d6cd7d986..df74f80061 100755 --- a/t/t5703-upload-pack-ref-in-want.sh +++ b/t/t5703-upload-pack-ref-in-want.sh @@ -229,14 +229,16 @@ test_expect_success 'setup repos for fetching with ref-in-want tests' ' ' test_expect_success 'fetching with exact OID' ' - test_when_finished "rm -f log" && + test_when_finished "rm -f log trace2" && rm -rf local && cp -r "$LOCAL_PRISTINE" local && oid=$(git -C "$REPO" rev-parse d) && - GIT_TRACE_PACKET="$(pwd)/log" git -C local fetch origin \ + GIT_TRACE_PACKET="$(pwd)/log" GIT_TRACE2_EVENT="$(pwd)/trace2" \ + git -C local fetch origin \ "$oid":refs/heads/actual && + grep \"key\":\"total_rounds\",\"value\":\"2\" trace2 && git -C "$REPO" rev-parse "d" >expected && git -C local rev-parse refs/heads/actual >actual && test_cmp expected actual && diff --git a/t/t6001-rev-list-graft.sh b/t/t6001-rev-list-graft.sh index 7294147334..16635ecc33 100755 --- a/t/t6001-rev-list-graft.sh +++ b/t/t6001-rev-list-graft.sh @@ -99,6 +99,7 @@ do " test_expect_success 'with grafts' " + mkdir -p .git/info && echo '$B0 $A2' >.git/info/grafts && check $type $B2 -- $B2 $B1 $B0 $A2 $A1 $A0 " diff --git a/t/t6019-rev-list-ancestry-path.sh b/t/t6019-rev-list-ancestry-path.sh index af57a04b7f..738da23628 100755 --- a/t/t6019-rev-list-ancestry-path.sh +++ b/t/t6019-rev-list-ancestry-path.sh @@ -8,8 +8,13 @@ test_description='--ancestry-path' # / \ # A-------K---------------L--M # -# D..M == E F G H I J K L M -# --ancestry-path D..M == E F H I J L M +# D..M == E F G H I J K L M +# --ancestry-path D..M == E F H I J L M +# --ancestry-path=F D..M == E F J L M +# --ancestry-path=G D..M == G H I J L M +# --ancestry-path=H D..M == E G H I J L M +# --ancestry-path=K D..M == K L M +# --ancestry-path=K --ancestry-path=F D..M == E F J K L M # # D..M -- M.t == M # --ancestry-path D..M -- M.t == M @@ -50,73 +55,41 @@ test_expect_success setup ' 
test_commit M ' -test_expect_success 'rev-list D..M' ' - test_write_lines E F G H I J K L M >expect && - git rev-list --format=%s D..M | - sed -e "/^commit /d" | - sort >actual && - test_cmp expect actual -' - -test_expect_success 'rev-list --ancestry-path D..M' ' - test_write_lines E F H I J L M >expect && - git rev-list --ancestry-path --format=%s D..M | - sed -e "/^commit /d" | - sort >actual && - test_cmp expect actual -' - -test_expect_success 'rev-list D..M -- M.t' ' - echo M >expect && - git rev-list --format=%s D..M -- M.t | - sed -e "/^commit /d" >actual && - test_cmp expect actual -' - -test_expect_success 'rev-list --ancestry-path D..M -- M.t' ' - echo M >expect && - git rev-list --ancestry-path --format=%s D..M -- M.t | - sed -e "/^commit /d" >actual && - test_cmp expect actual -' +test_ancestry () { + args=$1 + expected=$2 + test_expect_success "log $args" " + test_write_lines $expected >expect && + git log --format=%s $args >raw && + + if test -n \"$expected\" + then + sort raw >actual && + test_cmp expect actual + else + test_must_be_empty raw + fi + " +} -test_expect_success 'rev-list F...I' ' - test_write_lines F G H I >expect && - git rev-list --format=%s F...I | - sed -e "/^commit /d" | - sort >actual && - test_cmp expect actual -' +test_ancestry "D..M" "E F G H I J K L M" -test_expect_success 'rev-list --ancestry-path F...I' ' - test_write_lines F H I >expect && - git rev-list --ancestry-path --format=%s F...I | - sed -e "/^commit /d" | - sort >actual && - test_cmp expect actual -' +test_ancestry "--ancestry-path D..M" "E F H I J L M" +test_ancestry "--ancestry-path=F D..M" "E F J L M" +test_ancestry "--ancestry-path=G D..M" "G H I J L M" +test_ancestry "--ancestry-path=H D..M" "E G H I J L M" +test_ancestry "--ancestry-path=K D..M" "K L M" +test_ancestry "--ancestry-path=F --ancestry-path=K D..M" "E F J K L M" -# G.t is dropped in an "-s ours" merge -test_expect_success 'rev-list G..M -- G.t' ' - git rev-list --format=%s G..M -- G.t | - sed -e "/^commit /d" >actual && - test_must_be_empty actual -' +test_ancestry "D..M -- M.t" "M" +test_ancestry "--ancestry-path D..M -- M.t" "M" -test_expect_success 'rev-list --ancestry-path G..M -- G.t' ' - echo L >expect && - git rev-list --ancestry-path --format=%s G..M -- G.t | - sed -e "/^commit /d" >actual && - test_cmp expect actual -' +test_ancestry "F...I" "F G H I" +test_ancestry "--ancestry-path F...I" "F H I" -test_expect_success 'rev-list --ancestry-path --simplify-merges G^..M -- G.t' ' - test_write_lines G L >expect && - git rev-list --ancestry-path --simplify-merges --format=%s G^..M -- G.t | - sed -e "/^commit /d" | - sort >actual && - test_cmp expect actual -' +test_ancestry "G..M -- G.t" "" +test_ancestry "--ancestry-path G..M -- G.t" "L" +test_ancestry "--ancestry-path --simplify-merges G^..M -- G.t" "G L" # b---bc # / \ / diff --git a/t/t6101-rev-parse-parents.sh b/t/t6101-rev-parse-parents.sh index a3a41c7a3e..d20723d627 100755 --- a/t/t6101-rev-parse-parents.sh +++ b/t/t6101-rev-parse-parents.sh @@ -9,6 +9,7 @@ GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME TEST_PASSES_SANITIZE_LEAK=true +TEST_CREATE_REPO_NO_TEMPLATE=1 . 
./test-lib.sh test_cmp_rev_output () { @@ -26,6 +27,7 @@ test_expect_success 'setup' ' git merge -m next --allow-unrelated-histories start2 && test_commit final && + mkdir .git/info && test_seq 40 | while read i do diff --git a/t/t6102-rev-list-unexpected-objects.sh b/t/t6102-rev-list-unexpected-objects.sh index cf0195e826..4a9a4436e2 100755 --- a/t/t6102-rev-list-unexpected-objects.sh +++ b/t/t6102-rev-list-unexpected-objects.sh @@ -17,7 +17,7 @@ test_expect_success 'setup unexpected non-blob entry' ' broken_tree="$(git hash-object -w --literally -t tree broken-tree)" ' -test_expect_success !SANITIZE_LEAK 'TODO (should fail!): traverse unexpected non-blob entry (lone)' ' +test_expect_success 'TODO (should fail!): traverse unexpected non-blob entry (lone)' ' sed "s/Z$//" >expect <<-EOF && $broken_tree Z $tree foo @@ -121,7 +121,7 @@ test_expect_success 'setup unexpected non-blob tag' ' tag=$(git hash-object -w --literally -t tag broken-tag) ' -test_expect_success !SANITIZE_LEAK 'TODO (should fail!): traverse unexpected non-blob tag (lone)' ' +test_expect_success 'TODO (should fail!): traverse unexpected non-blob tag (lone)' ' git rev-list --objects $tag ' diff --git a/t/t6115-rev-list-du.sh b/t/t6115-rev-list-du.sh index b4aef32b71..d59111dede 100755 --- a/t/t6115-rev-list-du.sh +++ b/t/t6115-rev-list-du.sh @@ -48,4 +48,26 @@ check_du HEAD check_du --objects HEAD check_du --objects HEAD^..HEAD +# As mentioned above, don't use hardcode sizes as actual size, but use the +# output from git cat-file. +test_expect_success 'rev-list --disk-usage=human' ' + git rev-list --objects HEAD --disk-usage=human >actual && + disk_usage_slow --objects HEAD >actual_size && + grep "$(cat actual_size) bytes" actual +' + +test_expect_success 'rev-list --disk-usage=human with bitmaps' ' + git rev-list --objects HEAD --use-bitmap-index --disk-usage=human >actual && + disk_usage_slow --objects HEAD >actual_size && + grep "$(cat actual_size) bytes" actual +' + +test_expect_success 'rev-list use --disk-usage unproperly' ' + test_must_fail git rev-list --objects HEAD --disk-usage=typo 2>err && + cat >expect <<-\EOF && + fatal: invalid value for '\''--disk-usage=<format>'\'': '\''typo'\'', the only allowed format is '\''human'\'' + EOF + test_cmp err expect +' + test_done diff --git a/t/t6402-merge-rename.sh b/t/t6402-merge-rename.sh index 3a32b1a45c..772238e582 100755 --- a/t/t6402-merge-rename.sh +++ b/t/t6402-merge-rename.sh @@ -210,7 +210,7 @@ test_expect_success 'updated working tree file should prevent the merge' ' echo >>M one line addition && cat M >M.saved && git update-index M && - test_expect_code 128 git pull --no-rebase . yellow && + test_expect_code 2 git pull --no-rebase . yellow && test_cmp M M.saved && rm -f M.saved ' diff --git a/t/t6403-merge-file.sh b/t/t6403-merge-file.sh index 2f421d967a..1a7082323d 100755 --- a/t/t6403-merge-file.sh +++ b/t/t6403-merge-file.sh @@ -1,6 +1,8 @@ #!/bin/sh test_description='RCS merge replacement: merge-file' + +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh test_expect_success 'setup' ' diff --git a/t/t6404-recursive-merge.sh b/t/t6404-recursive-merge.sh index b8735c6db4..36215518b6 100755 --- a/t/t6404-recursive-merge.sh +++ b/t/t6404-recursive-merge.sh @@ -4,6 +4,7 @@ test_description='Test merge without common ancestors' GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh # This scenario is based on a real-world repository of Shawn Pearce. 
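# A minimal illustrative sketch, not taken from the patch above: the rewritten
# t6019 hunk exercises the new commit-limited form of --ancestry-path. Using
# the D..M history drawn in that test's comment (refs D, F, K and M exist only
# in that test's setup; elsewhere they are placeholders), the option narrows
# the listing to commits whose ancestry chain passes through the named commit:
git log --format=%s --ancestry-path    D..M   # E F H I J L M (in log order)
git log --format=%s --ancestry-path=F  D..M   # E F J L M
git log --format=%s --ancestry-path=K  D..M   # K L M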
diff --git a/t/t6405-merge-symlinks.sh b/t/t6405-merge-symlinks.sh index 7435fce71e..29e2b25ce5 100755 --- a/t/t6405-merge-symlinks.sh +++ b/t/t6405-merge-symlinks.sh @@ -11,6 +11,7 @@ if core.symlinks is false.' GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh test_expect_success 'setup' ' diff --git a/t/t6407-merge-binary.sh b/t/t6407-merge-binary.sh index 0753fc95f4..e8a28717ce 100755 --- a/t/t6407-merge-binary.sh +++ b/t/t6407-merge-binary.sh @@ -5,7 +5,6 @@ test_description='ask merge-recursive to merge binary files' GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME -TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh test_expect_success setup ' diff --git a/t/t6408-merge-up-to-date.sh b/t/t6408-merge-up-to-date.sh index 7763c1ba98..8a1ba6d23a 100755 --- a/t/t6408-merge-up-to-date.sh +++ b/t/t6408-merge-up-to-date.sh @@ -2,6 +2,7 @@ test_description='merge fast-forward and up to date' +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh test_expect_success setup ' diff --git a/t/t6411-merge-filemode.sh b/t/t6411-merge-filemode.sh index 6ae2489286..b6182723aa 100755 --- a/t/t6411-merge-filemode.sh +++ b/t/t6411-merge-filemode.sh @@ -4,6 +4,7 @@ test_description='merge: handle file mode' GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh test_expect_success 'set up mode change in one branch' ' diff --git a/t/t6413-merge-crlf.sh b/t/t6413-merge-crlf.sh index affea255fe..b4f4a313f4 100755 --- a/t/t6413-merge-crlf.sh +++ b/t/t6413-merge-crlf.sh @@ -11,6 +11,7 @@ test_description='merge conflict in crlf repo GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh test_expect_success setup ' diff --git a/t/t6417-merge-ours-theirs.sh b/t/t6417-merge-ours-theirs.sh index 62d1406119..482b73a998 100755 --- a/t/t6417-merge-ours-theirs.sh +++ b/t/t6417-merge-ours-theirs.sh @@ -4,6 +4,7 @@ test_description='Merge-recursive ours and theirs variants' GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh test_expect_success setup ' diff --git a/t/t6422-merge-rename-corner-cases.sh b/t/t6422-merge-rename-corner-cases.sh index bf4ce3c63d..9b65768aed 100755 --- a/t/t6422-merge-rename-corner-cases.sh +++ b/t/t6422-merge-rename-corner-cases.sh @@ -6,6 +6,7 @@ test_description="recursive merge corner cases w/ renames but not criss-crosses" GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh . 
"$TEST_DIRECTORY"/lib-merge.sh diff --git a/t/t6424-merge-unrelated-index-changes.sh b/t/t6424-merge-unrelated-index-changes.sh index b6e424a427..a61f20c22f 100755 --- a/t/t6424-merge-unrelated-index-changes.sh +++ b/t/t6424-merge-unrelated-index-changes.sh @@ -114,6 +114,39 @@ test_expect_success 'resolve, non-trivial' ' test_path_is_missing .git/MERGE_HEAD ' +test_expect_success 'resolve, trivial, related file removed' ' + git reset --hard && + git checkout B^0 && + + git rm a && + test_path_is_missing a && + + test_must_fail git merge -s resolve C^0 && + + test_path_is_missing a && + test_path_is_missing .git/MERGE_HEAD +' + +test_expect_success 'resolve, non-trivial, related file removed' ' + git reset --hard && + git checkout B^0 && + + git rm a && + test_path_is_missing a && + + # We also ask for recursive in order to turn off the "allow_trivial" + # setting in builtin/merge.c, and ensure that resolve really does + # correctly fail the merge (I guess this also tests that recursive + # correctly fails the merge, but the main thing we are attempting + # to test here is resolve and are just using the side effect of + # adding recursive to ensure that resolve is actually tested rather + # than the trivial merge codepath) + test_must_fail git merge -s resolve -s recursive D^0 && + + test_path_is_missing a && + test_path_is_missing .git/MERGE_HEAD +' + test_expect_success 'recursive' ' git reset --hard && git checkout B^0 && @@ -242,4 +275,36 @@ test_expect_success 'subtree' ' test_path_is_missing .git/MERGE_HEAD ' +test_expect_success 'avoid failure due to stat-dirty files' ' + git reset --hard && + git checkout B^0 && + + # Make "a" be stat-dirty + test-tool chmtime =+1 a && + + # stat-dirty file should not prevent stash creation in builtin/merge.c + git merge -s resolve -s recursive D^0 +' + +test_expect_success 'with multiple strategies, recursive or ort failure do not early abort' ' + git reset --hard && + git checkout B^0 && + + test_seq 0 10 >a && + git add a && + git rev-parse :a >expect && + + sane_unset GIT_TEST_MERGE_ALGORITHM && + test_must_fail git merge -s recursive -s ort -s octopus C^0 >output 2>&1 && + + grep "Trying merge strategy recursive..." output && + grep "Trying merge strategy ort..." output && + grep "Trying merge strategy octopus..." output && + grep "No merge strategy handled the merge." output && + + # Changes to "a" should remain staged + git rev-parse :a >actual && + test_cmp expect actual +' + test_done diff --git a/t/t6425-merge-rename-delete.sh b/t/t6425-merge-rename-delete.sh index 459b431a60..93cd2869b1 100755 --- a/t/t6425-merge-rename-delete.sh +++ b/t/t6425-merge-rename-delete.sh @@ -4,6 +4,7 @@ test_description='Merge-recursive rename/delete conflict message' GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME +TEST_PASSES_SANITIZE_LEAK=true . 
./test-lib.sh test_expect_success 'rename/delete' ' diff --git a/t/t6429-merge-sequence-rename-caching.sh b/t/t6429-merge-sequence-rename-caching.sh index e1ce919916..650b3cd14f 100755 --- a/t/t6429-merge-sequence-rename-caching.sh +++ b/t/t6429-merge-sequence-rename-caching.sh @@ -725,7 +725,7 @@ test_expect_success 'avoid assuming we detected renames' ' mkdir unrelated && for i in $(test_seq 1 10) do - >unrelated/$i + >unrelated/$i || exit 1 done && test_seq 2 10 >numbers && test_seq 12 20 >values && diff --git a/t/t6431-merge-criscross.sh b/t/t6431-merge-criscross.sh index 3824756a02..3fe14cd73e 100755 --- a/t/t6431-merge-criscross.sh +++ b/t/t6431-merge-criscross.sh @@ -2,6 +2,7 @@ test_description='merge-recursive backend test' +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh # A <- create some files diff --git a/t/t6435-merge-sparse.sh b/t/t6435-merge-sparse.sh index 74562e1235..fde4aa3cd1 100755 --- a/t/t6435-merge-sparse.sh +++ b/t/t6435-merge-sparse.sh @@ -2,6 +2,7 @@ test_description='merge with sparse files' +TEST_CREATE_REPO_NO_TEMPLATE=1 . ./test-lib.sh # test_file $filename $content @@ -26,6 +27,7 @@ test_expect_success 'setup' ' git rm modify_delete && test_commit_this ours && git config core.sparseCheckout true && + mkdir .git/info && echo "/checked-out" >.git/info/sparse-checkout && git reset --hard && test_must_fail git merge theirs diff --git a/t/t6437-submodule-merge.sh b/t/t6437-submodule-merge.sh index 178413c22f..d5f3e0fed6 100755 --- a/t/t6437-submodule-merge.sh +++ b/t/t6437-submodule-merge.sh @@ -103,8 +103,25 @@ test_expect_success 'setup for merge search' ' echo "file-c" > file-c && git add file-c && git commit -m "sub-c") && - git commit -a -m "c" && + git commit -a -m "c") +' +test_expect_success 'merging should conflict for non fast-forward' ' + test_when_finished "git -C merge-search reset --hard" && + (cd merge-search && + git checkout -b test-nonforward-a b && + if test "$GIT_TEST_MERGE_ALGORITHM" = ort + then + test_must_fail git merge c >actual && + sub_expect="go to submodule (sub), and either merge commit $(git -C sub rev-parse --short sub-c)" && + grep "$sub_expect" actual + else + test_must_fail git merge c 2> actual + fi) +' + +test_expect_success 'finish setup for merge-search' ' + (cd merge-search && git checkout -b d a && (cd sub && git checkout -b sub-d sub-b && @@ -129,14 +146,16 @@ test_expect_success 'merge with one side as a fast-forward of the other' ' test_cmp expect actual) ' -test_expect_success 'merging should conflict for non fast-forward' ' +test_expect_success 'merging should conflict for non fast-forward (resolution exists)' ' (cd merge-search && - git checkout -b test-nonforward b && + git checkout -b test-nonforward-b b && (cd sub && - git rev-parse sub-d > ../expect) && + git rev-parse --short sub-d > ../expect) && if test "$GIT_TEST_MERGE_ALGORITHM" = ort then - test_must_fail git merge c >actual + test_must_fail git merge c >actual && + sub_expect="go to submodule (sub), and either merge commit $(git -C sub rev-parse --short sub-c)" && + grep "$sub_expect" actual else test_must_fail git merge c 2> actual fi && @@ -161,7 +180,9 @@ test_expect_success 'merging should fail for ambiguous common parent' ' ) && if test "$GIT_TEST_MERGE_ALGORITHM" = ort then - test_must_fail git merge c >actual + test_must_fail git merge c >actual && + sub_expect="go to submodule (sub), and either merge commit $(git -C sub rev-parse --short sub-c)" && + grep "$sub_expect" actual else test_must_fail git merge c 2> actual fi && @@ -205,7 +226,12 @@ 
test_expect_success 'merging should fail for changes that are backwards' ' git commit -a -m "f" && git checkout -b test-backward e && - test_must_fail git merge f) + test_must_fail git merge f >actual && + if test "$GIT_TEST_MERGE_ALGORITHM" = ort + then + sub_expect="go to submodule (sub), and either merge commit $(git -C sub rev-parse --short sub-d)" && + grep "$sub_expect" actual + fi) ' @@ -476,4 +502,44 @@ test_expect_failure 'directory/submodule conflict; merge --abort works afterward ) ' +# Setup: +# - Submodule has 2 commits: a and b +# - Superproject branch 'a' adds and commits submodule pointing to 'commit a' +# - Superproject branch 'b' adds and commits submodule pointing to 'commit b' +# If these two branches are now merged, there is no merge base +test_expect_success 'setup for null merge base' ' + mkdir no-merge-base && + (cd no-merge-base && + git init && + mkdir sub && + (cd sub && + git init && + echo "file-a" > file-a && + git add file-a && + git commit -m "commit a") && + git commit --allow-empty -m init && + git branch init && + git checkout -b a init && + git add sub && + git commit -m "a" && + git switch main && + (cd sub && + echo "file-b" > file-b && + git add file-b && + git commit -m "commit b")) +' + +test_expect_success 'merging should fail with no merge base' ' + (cd no-merge-base && + git checkout -b b init && + git add sub && + git commit -m "b" && + test_must_fail git merge a >actual && + if test "$GIT_TEST_MERGE_ALGORITHM" = ort + then + sub_expect="go to submodule (sub), and either merge commit $(git -C sub rev-parse --short HEAD^1)" && + grep "$sub_expect" actual + fi) +' + test_done diff --git a/t/t6439-merge-co-error-msgs.sh b/t/t6439-merge-co-error-msgs.sh index 5bfb027099..52cf0c8769 100755 --- a/t/t6439-merge-co-error-msgs.sh +++ b/t/t6439-merge-co-error-msgs.sh @@ -47,6 +47,7 @@ test_expect_success 'untracked files overwritten by merge (fast and non-fast for export GIT_MERGE_VERBOSITY && test_must_fail git merge branch 2>out2 ) && + echo "Merge with strategy ${GIT_TEST_MERGE_ALGORITHM:-ort} failed." >>expect && test_cmp out2 expect && git reset --hard HEAD^ ' diff --git a/t/t7002-mv-sparse-checkout.sh b/t/t7002-mv-sparse-checkout.sh index f0f7cbfcdb..71fe29690f 100755 --- a/t/t7002-mv-sparse-checkout.sh +++ b/t/t7002-mv-sparse-checkout.sh @@ -4,6 +4,18 @@ test_description='git mv in sparse working trees' . 
./test-lib.sh +setup_sparse_checkout () { + mkdir folder1 && + touch folder1/file1 && + git add folder1 && + git sparse-checkout set --cone sub +} + +cleanup_sparse_checkout () { + git sparse-checkout disable && + git reset --hard +} + test_expect_success 'setup' " mkdir -p sub/dir sub/dir2 && touch a b c sub/d sub/dir/e sub/dir2/e && @@ -196,6 +208,7 @@ test_expect_success 'can move files to non-sparse dir' ' ' test_expect_success 'refuse to move file to non-skip-worktree sparse path' ' + test_when_finished "cleanup_sparse_checkout" && git reset --hard && git sparse-checkout init --no-cone && git sparse-checkout set a !/x y/ !x/y/z && @@ -206,4 +219,75 @@ test_expect_success 'refuse to move file to non-skip-worktree sparse path' ' test_cmp expect stderr ' +test_expect_success 'refuse to move out-of-cone directory without --sparse' ' + test_when_finished "cleanup_sparse_checkout" && + setup_sparse_checkout && + + test_must_fail git mv folder1 sub 2>stderr && + cat sparse_error_header >expect && + echo folder1/file1 >>expect && + cat sparse_hint >>expect && + test_cmp expect stderr +' + +test_expect_success 'can move out-of-cone directory with --sparse' ' + test_when_finished "cleanup_sparse_checkout" && + setup_sparse_checkout && + + git mv --sparse folder1 sub 2>stderr && + test_must_be_empty stderr && + + test_path_is_dir sub/folder1 && + test_path_is_file sub/folder1/file1 +' + +test_expect_success 'refuse to move out-of-cone file without --sparse' ' + test_when_finished "cleanup_sparse_checkout" && + setup_sparse_checkout && + + test_must_fail git mv folder1/file1 sub 2>stderr && + cat sparse_error_header >expect && + echo folder1/file1 >>expect && + cat sparse_hint >>expect && + test_cmp expect stderr +' + +test_expect_success 'can move out-of-cone file with --sparse' ' + test_when_finished "cleanup_sparse_checkout" && + setup_sparse_checkout && + + git mv --sparse folder1/file1 sub 2>stderr && + test_must_be_empty stderr && + + test_path_is_file sub/file1 +' + +test_expect_success 'refuse to move sparse file to existing destination' ' + test_when_finished "cleanup_sparse_checkout" && + mkdir folder1 && + touch folder1/file1 && + touch sub/file1 && + git add folder1 sub/file1 && + git sparse-checkout set --cone sub && + + test_must_fail git mv --sparse folder1/file1 sub 2>stderr && + echo "fatal: destination exists, source=folder1/file1, destination=sub/file1" >expect && + test_cmp expect stderr +' + +test_expect_success 'move sparse file to existing destination with --force and --sparse' ' + test_when_finished "cleanup_sparse_checkout" && + mkdir folder1 && + touch folder1/file1 && + touch sub/file1 && + echo "overwrite" >folder1/file1 && + git add folder1 sub/file1 && + git sparse-checkout set --cone sub && + + git mv --sparse --force folder1/file1 sub 2>stderr && + test_must_be_empty stderr && + echo "overwrite" >expect && + test_cmp expect sub/file1 +' + test_done diff --git a/t/t7007-show.sh b/t/t7007-show.sh index d6cc69e0f2..f908a4d1ab 100755 --- a/t/t7007-show.sh +++ b/t/t7007-show.sh @@ -2,6 +2,7 @@ test_description='git show' +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh test_expect_success setup ' diff --git a/t/t7060-wtstatus.sh b/t/t7060-wtstatus.sh index 0f4344c55e..aaeb4a5334 100755 --- a/t/t7060-wtstatus.sh +++ b/t/t7060-wtstatus.sh @@ -5,6 +5,7 @@ test_description='basic work tree status reporting' GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME +TEST_PASSES_SANITIZE_LEAK=true . 
./test-lib.sh test_expect_success setup ' diff --git a/t/t7062-wtstatus-ignorecase.sh b/t/t7062-wtstatus-ignorecase.sh index 73709dbeee..caf372a3d4 100755 --- a/t/t7062-wtstatus-ignorecase.sh +++ b/t/t7062-wtstatus-ignorecase.sh @@ -2,6 +2,7 @@ test_description='git-status with core.ignorecase=true' +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh test_expect_success 'status with hash collisions' ' diff --git a/t/t7063-status-untracked-cache.sh b/t/t7063-status-untracked-cache.sh index c1f0d95036..8929ef481f 100755 --- a/t/t7063-status-untracked-cache.sh +++ b/t/t7063-status-untracked-cache.sh @@ -86,7 +86,7 @@ test_expect_success 'core.untrackedCache is unset' ' ' test_expect_success 'setup' ' - git init worktree && + git init --template= worktree && cd worktree && mkdir done dtwo dthree && touch one two three done/one dtwo/two dthree/three && @@ -94,6 +94,7 @@ test_expect_success 'setup' ' test-tool chmtime =-300 done dtwo dthree && test-tool chmtime =-300 . && git add one two done/one && + mkdir .git/info && : >.git/info/exclude && git update-index --untracked-cache && test_oid_cache <<-EOF diff --git a/t/t7110-reset-merge.sh b/t/t7110-reset-merge.sh index 3d62e10b53..eb881be95b 100755 --- a/t/t7110-reset-merge.sh +++ b/t/t7110-reset-merge.sh @@ -5,6 +5,7 @@ test_description='Tests for "git reset" with "--merge" and "--keep" options' +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh test_expect_success setup ' diff --git a/t/t7111-reset-table.sh b/t/t7111-reset-table.sh index ce421ad5ac..78f25c1c7e 100755 --- a/t/t7111-reset-table.sh +++ b/t/t7111-reset-table.sh @@ -5,6 +5,7 @@ test_description='Tests to check that "reset" options follow a known table' +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh diff --git a/t/t7402-submodule-rebase.sh b/t/t7402-submodule-rebase.sh index 8e32f19007..ebeca12a71 100755 --- a/t/t7402-submodule-rebase.sh +++ b/t/t7402-submodule-rebase.sh @@ -104,7 +104,7 @@ test_expect_success 'rebasing submodule that should conflict' ' test_tick && git commit -m fourth && - test_must_fail git rebase --onto HEAD^^ HEAD^ HEAD^0 && + test_must_fail git rebase --onto HEAD^^ HEAD^ HEAD^0 >actual_output && git ls-files -s submodule >actual && ( cd submodule && @@ -112,7 +112,12 @@ test_expect_success 'rebasing submodule that should conflict' ' echo "160000 $(git rev-parse HEAD^^) 2 submodule" && echo "160000 $(git rev-parse HEAD) 3 submodule" ) >expect && - test_cmp expect actual + test_cmp expect actual && + if test "$GIT_TEST_MERGE_ALGORITHM" = ort + then + sub_expect="go to submodule (submodule), and either merge commit $(git -C submodule rev-parse --short HEAD^0)" && + grep "$sub_expect" actual_output + fi ' test_done diff --git a/t/t7406-submodule-update.sh b/t/t7406-submodule-update.sh index 43f779d751..6cc07460dd 100755 --- a/t/t7406-submodule-update.sh +++ b/t/t7406-submodule-update.sh @@ -1074,7 +1074,7 @@ test_expect_success 'submodule update --quiet passes quietness to merge/rebase' git submodule update --rebase --quiet >out 2>err && test_must_be_empty out && test_must_be_empty err && - git submodule update --rebase -v >out 2>err && + git submodule update --rebase >out 2>err && test_file_not_empty out && test_must_be_empty err ) @@ -1116,4 +1116,66 @@ test_expect_success 'submodule update --filter sets partial clone settings' ' test_cmp_config -C super-filter/submodule blob:none remote.origin.partialclonefilter ' +# NEEDSWORK: Clean up the tests so that we can reuse the test setup. 
+# Don't reuse the existing repos because the earlier tests have +# intentionally disruptive configurations. +test_expect_success 'setup clean recursive superproject' ' + git init bottom && + test_commit -C bottom "bottom" && + git init middle && + git -C middle submodule add ../bottom bottom && + git -C middle commit -m "middle" && + git init top && + git -C top submodule add ../middle middle && + git -C top commit -m "top" && + git clone --recurse-submodules top top-clean +' + +test_expect_success 'submodule update should skip unmerged submodules' ' + test_when_finished "rm -fr top-cloned" && + cp -r top-clean top-cloned && + + # Create an upstream commit in each repo, starting with bottom + test_commit -C bottom upstream_commit && + # Create middle commit + git -C middle/bottom fetch && + git -C middle/bottom checkout -f FETCH_HEAD && + git -C middle add bottom && + git -C middle commit -m "upstream_commit" && + # Create top commit + git -C top/middle fetch && + git -C top/middle checkout -f FETCH_HEAD && + git -C top add middle && + git -C top commit -m "upstream_commit" && + + # Create a downstream conflict + test_commit -C top-cloned/middle/bottom downstream_commit && + git -C top-cloned/middle add bottom && + git -C top-cloned/middle commit -m "downstream_commit" && + git -C top-cloned/middle fetch --recurse-submodules origin && + test_must_fail git -C top-cloned/middle merge origin/main && + + # Make the update of "middle" a no-op, otherwise we error out + # because of its unmerged state + test_config -C top-cloned submodule.middle.update !true && + git -C top-cloned submodule update --recursive 2>actual.err && + cat >expect.err <<-\EOF && + Skipping unmerged submodule middle/bottom + EOF + test_cmp expect.err actual.err +' + +test_expect_success 'submodule update --recursive skip submodules with strategy=none' ' + test_when_finished "rm -fr top-cloned" && + cp -r top-clean top-cloned && + + test_commit -C top-cloned/middle/bottom downstream_commit && + git -C top-cloned/middle config submodule.bottom.update none && + git -C top-cloned submodule update --recursive 2>actual.err && + cat >expect.err <<-\EOF && + Skipping submodule '\''middle/bottom'\'' + EOF + test_cmp expect.err actual.err +' + test_done diff --git a/t/t7418-submodule-sparse-gitmodules.sh b/t/t7418-submodule-sparse-gitmodules.sh index f87e524d6d..57d7ab3ece 100755 --- a/t/t7418-submodule-sparse-gitmodules.sh +++ b/t/t7418-submodule-sparse-gitmodules.sh @@ -31,8 +31,9 @@ test_expect_success 'sparse checkout setup which hides .gitmodules' ' test_tick && git commit -m "Add submodule" ) && - git clone upstream super && + git clone --template= upstream super && (cd super && + mkdir .git/info && cat >.git/info/sparse-checkout <<-\EOF && /* !/.gitmodules diff --git a/t/t7503-pre-commit-and-pre-merge-commit-hooks.sh b/t/t7503-pre-commit-and-pre-merge-commit-hooks.sh index ad1eb64ba0..aa004b70a8 100755 --- a/t/t7503-pre-commit-and-pre-merge-commit-hooks.sh +++ b/t/t7503-pre-commit-and-pre-merge-commit-hooks.sh @@ -5,6 +5,7 @@ test_description='pre-commit and pre-merge-commit hooks' GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME +TEST_PASSES_SANITIZE_LEAK=true . 
./test-lib.sh test_expect_success 'root commit' ' diff --git a/t/t7607-merge-state.sh b/t/t7607-merge-state.sh new file mode 100755 index 0000000000..89a62ac53b --- /dev/null +++ b/t/t7607-merge-state.sh @@ -0,0 +1,32 @@ +#!/bin/sh + +test_description="Test that merge state is as expected after failed merge" + +GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main +export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME +. ./test-lib.sh + +test_expect_success 'Ensure we restore original state if no merge strategy handles it' ' + test_commit --no-tag "Initial" base base && + + for b in branch1 branch2 branch3 + do + git checkout -b $b main && + test_commit --no-tag "Change on $b" base $b || return 1 + done && + + git checkout branch1 && + # This is a merge that octopus cannot handle. Note, that it does not + # just hit conflicts, it completely fails and says that it cannot + # handle this type of merge. + test_expect_code 2 git merge branch2 branch3 >output 2>&1 && + grep "fatal: merge program failed" output && + grep "Should not be doing an octopus" output && + + # Make sure we did not leave stray changes around when no appropriate + # merge strategy was found + git diff --exit-code --name-status && + test_path_is_missing .git/MERGE_HEAD +' + +test_done diff --git a/t/t7609-mergetool--lib.sh b/t/t7609-mergetool--lib.sh index 330d6d603d..8b1c3bd39f 100755 --- a/t/t7609-mergetool--lib.sh +++ b/t/t7609-mergetool--lib.sh @@ -4,6 +4,7 @@ test_description='git mergetool Testing basic merge tools options' +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh test_expect_success 'mergetool --tool=vimdiff creates the expected layout' ' diff --git a/t/t7810-grep.sh b/t/t7810-grep.sh index 6935601171..0f937990a0 100755 --- a/t/t7810-grep.sh +++ b/t/t7810-grep.sh @@ -77,6 +77,7 @@ test_expect_success setup ' # Say hello. function hello() { echo "Hello world." + echo "Hello again." } # hello # Still a no-op. @@ -595,6 +596,92 @@ test_expect_success 'grep --files-without-match --quiet' ' test_must_be_empty actual ' +test_expect_success 'grep --max-count 0 (must exit with non-zero)' ' + test_must_fail git grep --max-count 0 foo >actual && + test_must_be_empty actual +' + +test_expect_success 'grep --max-count 3' ' + cat >expected <<-EOF && + file:foo mmap bar + file:foo_mmap bar + file:foo_mmap bar mmap + EOF + git grep --max-count 3 foo >actual && + test_cmp expected actual +' + +test_expect_success 'grep --max-count -1 (no limit)' ' + cat >expected <<-EOF && + file:foo mmap bar + file:foo_mmap bar + file:foo_mmap bar mmap + file:foo mmap bar_mmap + file:foo_mmap bar mmap baz + EOF + git grep --max-count -1 foo >actual && + test_cmp expected actual +' + +test_expect_success 'grep --max-count 1 --context 2' ' + cat >expected <<-EOF && + file-foo mmap bar + file:foo_mmap bar + file-foo_mmap bar mmap + EOF + git grep --max-count 1 --context 1 foo_mmap >actual && + test_cmp expected actual +' + +test_expect_success 'grep --max-count 1 --show-function' ' + cat >expected <<-EOF && + hello.ps1=function hello() { + hello.ps1: echo "Hello world." + EOF + git grep --max-count 1 --show-function Hello hello.ps1 >actual && + test_cmp expected actual +' + +test_expect_success 'grep --max-count 2 --show-function' ' + cat >expected <<-EOF && + hello.ps1=function hello() { + hello.ps1: echo "Hello world." + hello.ps1: echo "Hello again." 
+ EOF + git grep --max-count 2 --show-function Hello hello.ps1 >actual && + test_cmp expected actual +' + +test_expect_success 'grep --max-count 1 --count' ' + cat >expected <<-EOF && + hello.ps1:1 + EOF + git grep --max-count 1 --count Hello hello.ps1 >actual && + test_cmp expected actual +' + +test_expect_success 'grep --max-count 1 (multiple files)' ' + cat >expected <<-EOF && + hello.c:#include <stdio.h> + hello.ps1:# No-op. + EOF + git grep --max-count 1 -e o -- hello.\* >actual && + test_cmp expected actual +' + +test_expect_success 'grep --max-count 1 --context 1 (multiple files)' ' + cat >expected <<-EOF && + hello.c-#include <assert.h> + hello.c:#include <stdio.h> + hello.c- + -- + hello.ps1:# No-op. + hello.ps1-function dummy() {} + EOF + git grep --max-count 1 --context 1 -e o -- hello.\* >actual && + test_cmp expected actual +' + cat >expected <<EOF file:foo mmap bar_mmap EOF diff --git a/t/t7812-grep-icase-non-ascii.sh b/t/t7812-grep-icase-non-ascii.sh index ac7be54714..31c66b63c2 100755 --- a/t/t7812-grep-icase-non-ascii.sh +++ b/t/t7812-grep-icase-non-ascii.sh @@ -2,6 +2,7 @@ test_description='grep icase on non-English locales' +TEST_PASSES_SANITIZE_LEAK=true . ./lib-gettext.sh doalarm () { diff --git a/t/t7814-grep-recurse-submodules.sh b/t/t7814-grep-recurse-submodules.sh index a4476dc492..3ad80526c4 100755 --- a/t/t7814-grep-recurse-submodules.sh +++ b/t/t7814-grep-recurse-submodules.sh @@ -6,6 +6,7 @@ This test verifies the recurse-submodules feature correctly greps across submodules. ' +TEST_CREATE_REPO_NO_TEMPLATE=1 . ./test-lib.sh GIT_TEST_FATAL_REGISTER_SUBMODULE_ODB=1 @@ -471,8 +472,10 @@ test_expect_failure 'grep --textconv: superproject .gitattributes (from index) d test_expect_failure 'grep --textconv: superproject .git/info/attributes does not affect submodules' ' reset_and_clean && test_config_global diff.d2x.textconv "sed -e \"s/d/x/\"" && - super_attr="$(git rev-parse --git-path info/attributes)" && + super_info="$(git rev-parse --git-path info)" && + super_attr="$super_info/attributes" && test_when_finished "rm -f \"$super_attr\"" && + mkdir "$super_info" && echo "a diff=d2x" >"$super_attr" && cat >expect <<-\EOF && @@ -516,7 +519,8 @@ test_expect_failure 'grep --textconv correctly reads submodule .git/info/attribu reset_and_clean && test_config_global diff.d2x.textconv "sed -e \"s/d/x/\"" && - submodule_attr="$(git -C submodule rev-parse --path-format=absolute --git-path info/attributes)" && + submodule_info="$(git -C submodule rev-parse --path-format=absolute --git-path info)" && + submodule_attr="$submodule_info/attributes" && test_when_finished "rm -f \"$submodule_attr\"" && echo "a diff=d2x" >"$submodule_attr" && diff --git a/t/t7900-maintenance.sh b/t/t7900-maintenance.sh index 74aa638475..62ed694a40 100755 --- a/t/t7900-maintenance.sh +++ b/t/t7900-maintenance.sh @@ -162,7 +162,6 @@ test_expect_success 'prefetch multiple remotes' ' test_cmp_rev refs/remotes/remote1/one refs/prefetch/remotes/remote1/one && test_cmp_rev refs/remotes/remote2/two refs/prefetch/remotes/remote2/two && - test_cmp_config refs/prefetch/ log.excludedecoration && git log --oneline --decorate --all >log && ! 
grep "prefetch" log && @@ -173,26 +172,6 @@ test_expect_success 'prefetch multiple remotes' ' test_subcommand git fetch remote2 $fetchargs <skip-remote1.txt ' -test_expect_success 'prefetch and existing log.excludeDecoration values' ' - git config --unset-all log.excludeDecoration && - git config log.excludeDecoration refs/remotes/remote1/ && - git maintenance run --task=prefetch && - - git config --get-all log.excludeDecoration >out && - grep refs/remotes/remote1/ out && - grep refs/prefetch/ out && - - git log --oneline --decorate --all >log && - ! grep "prefetch" log && - ! grep "remote1" log && - grep "remote2" log && - - # a second run does not change the config - git maintenance run --task=prefetch && - git log --oneline --decorate --all >log2 && - test_cmp log log2 -' - test_expect_success 'loose-objects task' ' # Repack everything so we know the state of the object dir git repack -adk && diff --git a/t/t8001-annotate.sh b/t/t8001-annotate.sh index a536a621b2..d7167f5539 100755 --- a/t/t8001-annotate.sh +++ b/t/t8001-annotate.sh @@ -4,6 +4,7 @@ test_description='git annotate' GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME +TEST_CREATE_REPO_NO_TEMPLATE=1 . ./test-lib.sh PROG='git annotate' diff --git a/t/t8002-blame.sh b/t/t8002-blame.sh index ee4fdd8f18..0147de304b 100755 --- a/t/t8002-blame.sh +++ b/t/t8002-blame.sh @@ -4,6 +4,7 @@ test_description='git blame' GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME +TEST_CREATE_REPO_NO_TEMPLATE=1 . ./test-lib.sh PROG='git blame -c' diff --git a/t/t8007-cat-file-textconv.sh b/t/t8007-cat-file-textconv.sh index b067983ba1..c8266f17f1 100755 --- a/t/t8007-cat-file-textconv.sh +++ b/t/t8007-cat-file-textconv.sh @@ -1,6 +1,8 @@ #!/bin/sh test_description='git cat-file textconv support' + +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh cat >helper <<'EOF' diff --git a/t/t8010-cat-file-filters.sh b/t/t8010-cat-file-filters.sh index 31de4b64dc..ca04242ca0 100755 --- a/t/t8010-cat-file-filters.sh +++ b/t/t8010-cat-file-filters.sh @@ -1,6 +1,8 @@ #!/bin/sh test_description='git cat-file filters support' + +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh test_expect_success 'setup ' ' diff --git a/t/t8012-blame-colors.sh b/t/t8012-blame-colors.sh index 90c75dbb28..c3a5f6d01f 100755 --- a/t/t8012-blame-colors.sh +++ b/t/t8012-blame-colors.sh @@ -4,6 +4,7 @@ test_description='colored git blame' GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME +TEST_CREATE_REPO_NO_TEMPLATE=1 . ./test-lib.sh PROG='git blame -c' diff --git a/t/t9100-git-svn-basic.sh b/t/t9100-git-svn-basic.sh index 7c5b847f58..fea41b3c36 100755 --- a/t/t9100-git-svn-basic.sh +++ b/t/t9100-git-svn-basic.sh @@ -8,7 +8,6 @@ test_description='git svn basic tests' GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME -TEST_FAILS_SANITIZE_LEAK=true . ./lib-git-svn.sh prepare_utf8_locale diff --git a/t/t9101-git-svn-props.sh b/t/t9101-git-svn-props.sh index d043e80fc3..52046e60d5 100755 --- a/t/t9101-git-svn-props.sh +++ b/t/t9101-git-svn-props.sh @@ -5,7 +5,6 @@ test_description='git svn property tests' -TEST_FAILS_SANITIZE_LEAK=true . ./lib-git-svn.sh mkdir import diff --git a/t/t9104-git-svn-follow-parent.sh b/t/t9104-git-svn-follow-parent.sh index 5cf2ef4b8b..85d735861f 100755 --- a/t/t9104-git-svn-follow-parent.sh +++ b/t/t9104-git-svn-follow-parent.sh @@ -5,7 +5,6 @@ test_description='git svn fetching' -TEST_FAILS_SANITIZE_LEAK=true . 
./lib-git-svn.sh test_expect_success 'initialize repo' ' diff --git a/t/t9122-git-svn-author.sh b/t/t9122-git-svn-author.sh index 527ba3d293..0fc289ae0f 100755 --- a/t/t9122-git-svn-author.sh +++ b/t/t9122-git-svn-author.sh @@ -2,7 +2,6 @@ test_description='git svn authorship' -TEST_FAILS_SANITIZE_LEAK=true . ./lib-git-svn.sh test_expect_success 'setup svn repository' ' diff --git a/t/t9132-git-svn-broken-symlink.sh b/t/t9132-git-svn-broken-symlink.sh index 4d8d0584b7..aeceffaf7b 100755 --- a/t/t9132-git-svn-broken-symlink.sh +++ b/t/t9132-git-svn-broken-symlink.sh @@ -2,7 +2,6 @@ test_description='test that git handles an svn repository with empty symlinks' -TEST_FAILS_SANITIZE_LEAK=true . ./lib-git-svn.sh test_expect_success 'load svn dumpfile' ' svnadmin load "$rawsvnrepo" <<EOF diff --git a/t/t9162-git-svn-dcommit-interactive.sh b/t/t9162-git-svn-dcommit-interactive.sh index e2aa8ed88a..b3ce033a0d 100755 --- a/t/t9162-git-svn-dcommit-interactive.sh +++ b/t/t9162-git-svn-dcommit-interactive.sh @@ -4,7 +4,6 @@ test_description='git svn dcommit --interactive series' -TEST_FAILS_SANITIZE_LEAK=true . ./lib-git-svn.sh test_expect_success 'initialize repo' ' diff --git a/t/t9301-fast-import-notes.sh b/t/t9301-fast-import-notes.sh index 1ae4d7c0d3..58413221e6 100755 --- a/t/t9301-fast-import-notes.sh +++ b/t/t9301-fast-import-notes.sh @@ -7,6 +7,7 @@ test_description='test git fast-import of notes objects' GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME +TEST_PASSES_SANITIZE_LEAK=true . ./test-lib.sh diff --git a/t/t9700-perl-git.sh b/t/t9700-perl-git.sh index 102c133112..4aa5d90d32 100755 --- a/t/t9700-perl-git.sh +++ b/t/t9700-perl-git.sh @@ -4,17 +4,12 @@ # test_description='perl interface (Git.pm)' -. ./test-lib.sh -if ! test_have_prereq PERL; then - skip_all='skipping perl interface tests, perl not available' - test_done -fi +TEST_PASSES_SANITIZE_LEAK=true +. ./test-lib.sh +. "$TEST_DIRECTORY"/lib-perl.sh -perl -MTest::More -e 0 2>/dev/null || { - skip_all="Perl Test::More unavailable, skipping test" - test_done -} +skip_all_if_no_Test_More # set up test repository @@ -50,11 +45,9 @@ test_expect_success \ git config --add test.pathmulti bar ' -# The external test will outputs its own plan -test_external_has_tap=1 - -test_external_without_stderr \ - 'Perl API' \ - perl "$TEST_DIRECTORY"/t9700/test.pl +test_expect_success 'use t9700/test.pl to test Git.pm' ' + "$PERL_PATH" "$TEST_DIRECTORY"/t9700/test.pl 2>stderr && + test_must_be_empty stderr +' test_done diff --git a/t/t9901-git-web--browse.sh b/t/t9901-git-web--browse.sh index de7152f827..19f56e5680 100755 --- a/t/t9901-git-web--browse.sh +++ b/t/t9901-git-web--browse.sh @@ -5,6 +5,7 @@ test_description='git web--browse basic tests This test checks that git web--browse can handle various valid URLs.' +TEST_PASSES_SANITIZE_LEAK=true . 
./test-lib.sh test_web_browse () { diff --git a/t/t9903-bash-prompt.sh b/t/t9903-bash-prompt.sh index 6a30f5719c..d459fae655 100755 --- a/t/t9903-bash-prompt.sh +++ b/t/t9903-bash-prompt.sh @@ -759,4 +759,20 @@ test_expect_success 'prompt - hide if pwd ignored - inside gitdir' ' test_cmp expected "$actual" ' +test_expect_success 'prompt - conflict indicator' ' + printf " (main|CONFLICT)" >expected && + echo "stash" >file && + git stash && + test_when_finished "git stash drop" && + echo "commit" >file && + git commit -m "commit" file && + test_when_finished "git reset --hard HEAD~" && + test_must_fail git stash apply && + ( + GIT_PS1_SHOWCONFLICTSTATE="yes" && + __git_ps1 >"$actual" + ) && + test_cmp expected "$actual" +' + test_done diff --git a/t/test-lib-functions.sh b/t/test-lib-functions.sh index 8c44856eae..c6479f24eb 100644 --- a/t/test-lib-functions.sh +++ b/t/test-lib-functions.sh @@ -633,7 +633,7 @@ test_hook () { # - Explicitly using test_have_prereq. # # - Implicitly by specifying the prerequisite tag in the calls to -# test_expect_{success,failure} and test_external{,_without_stderr}. +# test_expect_{success,failure} # # The single parameter is the prerequisite tag (a simple word, in all # capital letters by convention). @@ -835,93 +835,6 @@ test_expect_success () { test_finish_ } -# test_external runs external test scripts that provide continuous -# test output about their progress, and succeeds/fails on -# zero/non-zero exit code. It outputs the test output on stdout even -# in non-verbose mode, and announces the external script with "# run -# <n>: ..." before running it. When providing relative paths, keep in -# mind that all scripts run in "trash directory". -# Usage: test_external description command arguments... -# Example: test_external 'Perl API' perl ../path/to/test.pl -test_external () { - test "$#" = 4 && { test_prereq=$1; shift; } || test_prereq= - test "$#" = 3 || - BUG "not 3 or 4 parameters to test_external" - descr="$1" - shift - test_verify_prereq - export test_prereq - if ! test_skip "$descr" "$@" - then - # Announce the script to reduce confusion about the - # test output that follows. - say_color "" "# run $test_count: $descr ($*)" - # Export TEST_DIRECTORY, TRASH_DIRECTORY and GIT_TEST_LONG - # to be able to use them in script - export TEST_DIRECTORY TRASH_DIRECTORY GIT_TEST_LONG - # Run command; redirect its stderr to &4 as in - # test_run_, but keep its stdout on our stdout even in - # non-verbose mode. - "$@" 2>&4 - if test "$?" = 0 - then - if test $test_external_has_tap -eq 0; then - test_ok_ "$descr" - else - say_color "" "# test_external test $descr was ok" - test_success=$(($test_success + 1)) - fi - else - if test $test_external_has_tap -eq 0; then - test_failure_ "$descr" "$@" - else - say_color error "# test_external test $descr failed: $@" - test_failure=$(($test_failure + 1)) - fi - fi - fi -} - -# Like test_external, but in addition tests that the command generated -# no output on stderr. -test_external_without_stderr () { - # The temporary file has no (and must have no) security - # implications. - tmp=${TMPDIR:-/tmp} - stderr="$tmp/git-external-stderr.$$.tmp" - test_external "$@" 4> "$stderr" - test -f "$stderr" || error "Internal error: $stderr disappeared." - descr="no stderr: $1" - shift - say >&3 "# expecting no stderr from previous command" - if test ! 
-s "$stderr" - then - rm "$stderr" - - if test $test_external_has_tap -eq 0; then - test_ok_ "$descr" - else - say_color "" "# test_external_without_stderr test $descr was ok" - test_success=$(($test_success + 1)) - fi - else - if test "$verbose" = t - then - output=$(echo; echo "# Stderr is:"; cat "$stderr") - else - output= - fi - # rm first in case test_failure exits. - rm "$stderr" - if test $test_external_has_tap -eq 0; then - test_failure_ "$descr" "$@" "$output" - else - say_color error "# test_external_without_stderr test $descr failed: $@: $output" - test_failure=$(($test_failure + 1)) - fi - fi -} - # debugging-friendly alternatives to "test [-f|-d|-e]" # The commands test the existence or non-existence of $1 test_path_is_file () { diff --git a/t/test-lib.sh b/t/test-lib.sh index 120f11812c..377cc1c120 100644 --- a/t/test-lib.sh +++ b/t/test-lib.sh @@ -238,6 +238,9 @@ parse_option () { ;; esac ;; + --invert-exit-code) + invert_exit_code=t + ;; *) echo "error: unknown test option '$opt'" >&2; exit 1 ;; esac @@ -302,6 +305,11 @@ TEST_NUMBER="${TEST_NAME%%-*}" TEST_NUMBER="${TEST_NUMBER#t}" TEST_RESULTS_DIR="$TEST_OUTPUT_DIRECTORY/test-results" TEST_RESULTS_BASE="$TEST_RESULTS_DIR/$TEST_NAME$TEST_STRESS_JOB_SFX" +TEST_RESULTS_SAN_FILE_PFX=trace +TEST_RESULTS_SAN_DIR_SFX=leak +TEST_RESULTS_SAN_FILE= +TEST_RESULTS_SAN_DIR="$TEST_RESULTS_DIR/$TEST_NAME.$TEST_RESULTS_SAN_DIR_SFX" +TEST_RESULTS_SAN_DIR_NR_LEAKS_STARTUP= TRASH_DIRECTORY="trash directory.$TEST_NAME$TEST_STRESS_JOB_SFX" test -n "$root" && TRASH_DIRECTORY="$root/$TRASH_DIRECTORY" case "$TRASH_DIRECTORY" in @@ -309,6 +317,16 @@ case "$TRASH_DIRECTORY" in *) TRASH_DIRECTORY="$TEST_OUTPUT_DIRECTORY/$TRASH_DIRECTORY" ;; esac +# Utility functions using $TEST_RESULTS_* variables +nr_san_dir_leaks_ () { + # stderr piped to /dev/null because the directory may have + # been "rmdir"'d already. + find "$TEST_RESULTS_SAN_DIR" \ + -type f \ + -name "$TEST_RESULTS_SAN_FILE_PFX.*" 2>/dev/null | + wc -l +} + # If --stress was passed, run this test repeatedly in several parallel loops. 
if test "$GIT_TEST_STRESS_STARTED" = "done" then @@ -793,15 +811,31 @@ test_ok_ () { finalize_test_case_output ok "$@" } +_invert_exit_code_failure_end_blurb () { + say_color warn "# faked up failures as TODO & now exiting with 0 due to --invert-exit-code" +} + test_failure_ () { failure_label=$1 test_failure=$(($test_failure + 1)) - say_color error "not ok $test_count - $1" + local pfx="" + if test -n "$invert_exit_code" # && test -n "$HARNESS_ACTIVE" + then + pfx="# TODO induced breakage (--invert-exit-code):" + fi + say_color error "not ok $test_count - ${pfx:+$pfx }$1" shift printf '%s\n' "$*" | sed -e 's/^/# /' if test -n "$immediate" then say_color error "1..$test_count" + if test -n "$invert_exit_code" + then + finalize_test_output + _invert_exit_code_failure_end_blurb + GIT_EXIT_OK=t + exit 0 + fi _error_exit fi finalize_test_case_output failure "$failure_label" "$@" @@ -809,14 +843,14 @@ test_failure_ () { test_known_broken_ok_ () { test_fixed=$(($test_fixed+1)) - say_color error "ok $test_count - $@ # TODO known breakage vanished" - finalize_test_case_output fixed "$@" + say_color error "ok $test_count - $1 # TODO known breakage vanished" + finalize_test_case_output fixed "$1" } test_known_broken_failure_ () { test_broken=$(($test_broken+1)) - say_color warn "not ok $test_count - $@ # TODO known breakage" - finalize_test_case_output broken "$@" + say_color warn "not ok $test_count - $1 # TODO known breakage" + finalize_test_case_output broken "$1" } test_debug () { @@ -1173,9 +1207,67 @@ test_atexit_handler () { teardown_malloc_check } -test_done () { - GIT_EXIT_OK=t +sanitize_leak_log_message_ () { + local new="$1" && + local old="$2" && + local file="$3" && + + printf "With SANITIZE=leak at exit we have %d leak logs, but started with %d + +This means that we have a blindspot where git is leaking but we're +losing the exit code somewhere, or not propagating it appropriately +upwards! + +See the logs at \"%s.*\"; +those logs are reproduced below." \ + "$new" "$old" "$file" +} +check_test_results_san_file_ () { + if test -z "$TEST_RESULTS_SAN_FILE" + then + return + fi && + local old="$TEST_RESULTS_SAN_DIR_NR_LEAKS_STARTUP" && + local new="$(nr_san_dir_leaks_)" && + + if test $new -le $old + then + return + fi && + local out="$(sanitize_leak_log_message_ "$new" "$old" "$TEST_RESULTS_SAN_FILE")" && + say_color error "$out" && + if test "$old" != 0 + then + echo && + say_color error "The logs include output from past runs to avoid" && + say_color error "that remove 'test-results' between runs." + fi && + say_color error "$(cat "$TEST_RESULTS_SAN_FILE".*)" && + + if test -n "$passes_sanitize_leak" && test "$test_failure" = 0 + then + say "As TEST_PASSES_SANITIZE_LEAK=true and our logs show we're leaking, exit non-zero!" && + invert_exit_code=t + elif test -n "$passes_sanitize_leak" + then + say "As TEST_PASSES_SANITIZE_LEAK=true and our logs show we're leaking, and we're failing for other reasons too..." && + invert_exit_code= + elif test -n "$sanitize_leak_check" && test "$test_failure" = 0 + then + say "As TEST_PASSES_SANITIZE_LEAK=true isn't set the above leak is 'ok' with GIT_TEST_PASSING_SANITIZE_LEAK=check" && + invert_exit_code= + elif test -n "$sanitize_leak_check" + then + say "As TEST_PASSES_SANITIZE_LEAK=true isn't set the above leak is 'ok' with GIT_TEST_PASSING_SANITIZE_LEAK=check" && + invert_exit_code=t + else + say "With GIT_TEST_SANITIZE_LEAK_LOG=true our logs revealed a memory leak, exit non-zero!" 
&& + invert_exit_code=t + fi +} + +test_done () { # Run the atexit commands _before_ the trash directory is # removed, so the commands can access pidfiles and socket files. test_atexit_handler @@ -1215,28 +1307,32 @@ test_done () { fi case "$test_failure" in 0) - if test $test_external_has_tap -eq 0 + if test $test_remaining -gt 0 then - if test $test_remaining -gt 0 - then - say_color pass "# passed all $msg" - fi - - # Maybe print SKIP message - test -z "$skip_all" || skip_all="# SKIP $skip_all" - case "$test_count" in - 0) - say "1..$test_count${skip_all:+ $skip_all}" - ;; - *) - test -z "$skip_all" || - say_color warn "$skip_all" - say "1..$test_count" - ;; - esac + say_color pass "# passed all $msg" fi - if test -z "$debug" && test -n "$remove_trash" + # Maybe print SKIP message + test -z "$skip_all" || skip_all="# SKIP $skip_all" + case "$test_count" in + 0) + say "1..$test_count${skip_all:+ $skip_all}" + ;; + *) + test -z "$skip_all" || + say_color warn "$skip_all" + say "1..$test_count" + ;; + esac + + if test -n "$stress" && test -n "$invert_exit_code" + then + # We're about to move our "$TRASH_DIRECTORY" + # to "$TRASH_DIRECTORY.stress-failed" if + # --stress is combined with + # --invert-exit-code. + say "with --stress and --invert-exit-code we're not removing '$TRASH_DIRECTORY'" + elif test -z "$debug" && test -n "$remove_trash" then test -d "$TRASH_DIRECTORY" || error "Tests passed but trash directory already removed before test cleanup; aborting" @@ -1249,17 +1345,35 @@ test_done () { } || error "Tests passed but test cleanup failed; aborting" fi + + check_test_results_san_file_ "$test_failure" + + if test -z "$skip_all" && test -n "$invert_exit_code" + then + say_color warn "# faking up non-zero exit with --invert-exit-code" + GIT_EXIT_OK=t + exit 1 + fi + test_at_end_hook_ + GIT_EXIT_OK=t exit 0 ;; *) - if test $test_external_has_tap -eq 0 + say_color error "# failed $test_failure among $msg" + say "1..$test_count" + + check_test_results_san_file_ "$test_failure" + + if test -n "$invert_exit_code" then - say_color error "# failed $test_failure among $msg" - say "1..$test_count" + _invert_exit_code_failure_end_blurb + GIT_EXIT_OK=t + exit 0 fi + GIT_EXIT_OK=t exit 1 ;; esac @@ -1392,14 +1506,12 @@ fi GITPERLLIB="$GIT_BUILD_DIR"/perl/build/lib export GITPERLLIB test -d "$GIT_BUILD_DIR"/templates/blt || { - error "You haven't built things yet, have you?" + BAIL_OUT "You haven't built things yet, have you?" } if ! test -x "$GIT_BUILD_DIR"/t/helper/test-tool$X then - echo >&2 'You need to build test-tool:' - echo >&2 'Run "make t/helper/test-tool" in the source (toplevel) directory' - exit 1 + BAIL_OUT 'You need to build test-tool; Run "make t/helper/test-tool" in the source (toplevel) directory' fi # Are we running this test at all? @@ -1413,24 +1525,70 @@ then test_done fi -# skip non-whitelisted tests when compiled with SANITIZE=leak +BAIL_OUT_ENV_NEEDS_SANITIZE_LEAK () { + BAIL_OUT "$1 has no effect except when compiled with SANITIZE=leak" +} + if test -n "$SANITIZE_LEAK" then - if test_bool_env GIT_TEST_PASSING_SANITIZE_LEAK false + # Normalize with test_bool_env + passes_sanitize_leak= + + # We need to see TEST_PASSES_SANITIZE_LEAK in "git + # env--helper" (via test_bool_env) + export TEST_PASSES_SANITIZE_LEAK + if test_bool_env TEST_PASSES_SANITIZE_LEAK false then - # We need to see it in "git env--helper" (via - # test_bool_env) - export TEST_PASSES_SANITIZE_LEAK + passes_sanitize_leak=t + fi - if ! 
test_bool_env TEST_PASSES_SANITIZE_LEAK false + if test "$GIT_TEST_PASSING_SANITIZE_LEAK" = "check" + then + sanitize_leak_check=t + if test -n "$invert_exit_code" then - skip_all="skipping $this_test under GIT_TEST_PASSING_SANITIZE_LEAK=true" - test_done + BAIL_OUT "cannot use --invert-exit-code under GIT_TEST_PASSING_SANITIZE_LEAK=check" fi + + if test -z "$passes_sanitize_leak" + then + say "in GIT_TEST_PASSING_SANITIZE_LEAK=check mode, setting --invert-exit-code for TEST_PASSES_SANITIZE_LEAK != true" + invert_exit_code=t + fi + elif test -z "$passes_sanitize_leak" && + test_bool_env GIT_TEST_PASSING_SANITIZE_LEAK false + then + skip_all="skipping $this_test under GIT_TEST_PASSING_SANITIZE_LEAK=true" + test_done + fi + + if test_bool_env GIT_TEST_SANITIZE_LEAK_LOG false + then + if ! mkdir -p "$TEST_RESULTS_SAN_DIR" + then + BAIL_OUT "cannot create $TEST_RESULTS_SAN_DIR" + fi && + TEST_RESULTS_SAN_FILE="$TEST_RESULTS_SAN_DIR/$TEST_RESULTS_SAN_FILE_PFX" + + # In case "test-results" is left over from a previous + # run: Only report if new leaks show up. + TEST_RESULTS_SAN_DIR_NR_LEAKS_STARTUP=$(nr_san_dir_leaks_) + + # Don't litter *.leak dirs if there was nothing to report + test_atexit "rmdir \"$TEST_RESULTS_SAN_DIR\" 2>/dev/null || :" + + prepend_var LSAN_OPTIONS : dedup_token_length=9999 + prepend_var LSAN_OPTIONS : log_exe_name=1 + prepend_var LSAN_OPTIONS : log_path=\"$TEST_RESULTS_SAN_FILE\" + export LSAN_OPTIONS fi -elif test_bool_env GIT_TEST_PASSING_SANITIZE_LEAK false +elif test "$GIT_TEST_PASSING_SANITIZE_LEAK" = "check" || + test_bool_env GIT_TEST_PASSING_SANITIZE_LEAK false +then + BAIL_OUT_ENV_NEEDS_SANITIZE_LEAK "GIT_TEST_PASSING_SANITIZE_LEAK=true" +elif test_bool_env GIT_TEST_SANITIZE_LEAK_LOG false then - BAIL_OUT "GIT_TEST_PASSING_SANITIZE_LEAK=true has no effect except when compiled with SANITIZE=leak" + BAIL_OUT_ENV_NEEDS_SANITIZE_LEAK "GIT_TEST_SANITIZE_LEAK_LOG=true" fi # Last-minute variable setup @@ -1453,15 +1611,15 @@ remove_trash_directory () { # Test repository remove_trash_directory "$TRASH_DIRECTORY" || { - GIT_EXIT_OK=t - echo >&5 "FATAL: Cannot prepare test area" - exit 1 + BAIL_OUT 'cannot prepare test area' } remove_trash=t if test -z "$TEST_NO_CREATE_REPO" then - git init "$TRASH_DIRECTORY" >&3 2>&4 || + git init \ + ${TEST_CREATE_REPO_NO_TEMPLATE:+--template=} \ + "$TRASH_DIRECTORY" >&3 2>&4 || error "cannot run git init" else mkdir -p "$TRASH_DIRECTORY" @@ -1469,7 +1627,7 @@ fi # Use -P to resolve symlinks in our working directory so that the cwd # in subprocesses like git equals our $PWD (for pathname comparisons). 
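
A usage note on the SANITIZE=leak plumbing above: LSAN_OPTIONS gains log_path, log_exe_name and dedup_token_length so that every leaking process writes its own log underneath test-results/<test-name>.leak/, and nr_san_dir_leaks_ simply counts those files when the script finishes. The commands below are a hedged sketch of how to poke at this; they assume git was built with SANITIZE=leak, and the exact file names LSan produces are its own choice rather than anything the diff promises.

# ---- illustrative sketch, not part of the diff above ----
# Build with the leak checker enabled (existing Makefile knob).
make SANITIZE=leak

cd t

# Leave one LSan log per leaking process under test-results/t0006-date.leak/
# and fail the script if any appear, even when every test case passes:
GIT_TEST_SANITIZE_LEAK_LOG=true ./t0006-date.sh -v

# In "check" mode, a script that is not marked TEST_PASSES_SANITIZE_LEAK=true
# gets --invert-exit-code turned on for it automatically:
GIT_TEST_PASSING_SANITIZE_LEAK=check ./t0006-date.sh

# Count whatever logs were left behind (this is what nr_san_dir_leaks_ does):
find test-results/t0006-date.leak -type f -name 'trace.*' 2>/dev/null | wc -l
# ---- end sketch ----
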
-cd -P "$TRASH_DIRECTORY" || exit 1 +cd -P "$TRASH_DIRECTORY" || BAIL_OUT "cannot cd -P to \"$TRASH_DIRECTORY\"" start_test_output "$0" diff --git a/trace2/tr2_tgt_event.c b/trace2/tr2_tgt_event.c index c5c8cfbbaa..37a3163be1 100644 --- a/trace2/tr2_tgt_event.c +++ b/trace2/tr2_tgt_event.c @@ -479,9 +479,12 @@ static void fn_param_fl(const char *file, int line, const char *param, { const char *event_name = "def_param"; struct json_writer jw = JSON_WRITER_INIT; + enum config_scope scope = current_config_scope(); + const char *scope_name = config_scope_name(scope); jw_object_begin(&jw, 0); event_fmt_prepare(event_name, file, line, NULL, &jw); + jw_object_string(&jw, "scope", scope_name); jw_object_string(&jw, "param", param); jw_object_string(&jw, "value", value); jw_end(&jw); diff --git a/trace2/tr2_tgt_normal.c b/trace2/tr2_tgt_normal.c index c42fbade7f..69f8033077 100644 --- a/trace2/tr2_tgt_normal.c +++ b/trace2/tr2_tgt_normal.c @@ -298,8 +298,11 @@ static void fn_param_fl(const char *file, int line, const char *param, const char *value) { struct strbuf buf_payload = STRBUF_INIT; + enum config_scope scope = current_config_scope(); + const char *scope_name = config_scope_name(scope); - strbuf_addf(&buf_payload, "def_param %s=%s", param, value); + strbuf_addf(&buf_payload, "def_param scope:%s %s=%s", scope_name, param, + value); normal_io_write_fl(file, line, &buf_payload); strbuf_release(&buf_payload); } diff --git a/trace2/tr2_tgt_perf.c b/trace2/tr2_tgt_perf.c index a1eff8bea3..8cb792488c 100644 --- a/trace2/tr2_tgt_perf.c +++ b/trace2/tr2_tgt_perf.c @@ -441,12 +441,17 @@ static void fn_param_fl(const char *file, int line, const char *param, { const char *event_name = "def_param"; struct strbuf buf_payload = STRBUF_INIT; + struct strbuf scope_payload = STRBUF_INIT; + enum config_scope scope = current_config_scope(); + const char *scope_name = config_scope_name(scope); strbuf_addf(&buf_payload, "%s:%s", param, value); + strbuf_addf(&scope_payload, "%s:%s", "scope", scope_name); - perf_io_write_fl(file, line, event_name, NULL, NULL, NULL, NULL, - &buf_payload); + perf_io_write_fl(file, line, event_name, NULL, NULL, NULL, + scope_payload.buf, &buf_payload); strbuf_release(&buf_payload); + strbuf_release(&scope_payload); } static void fn_repo_fl(const char *file, int line, diff --git a/upload-pack.c b/upload-pack.c index 3a851b3606..b217a1f469 100644 --- a/upload-pack.c +++ b/upload-pack.c @@ -455,6 +455,7 @@ static void create_pack_file(struct upload_pack_data *pack_data, return; fail: + free(output_state); send_client_data(3, abort_msg, sizeof(abort_msg), pack_data->use_sideband); die("git upload-pack: %s", abort_msg); @@ -1321,18 +1322,27 @@ static int upload_pack_config(const char *var, const char *value, void *cb_data) data->advertise_sid = git_config_bool(var, value); } - if (current_config_scope() != CONFIG_SCOPE_LOCAL && - current_config_scope() != CONFIG_SCOPE_WORKTREE) { - if (!strcmp("uploadpack.packobjectshook", var)) - return git_config_string(&data->pack_objects_hook, var, value); - } - if (parse_object_filter_config(var, value, data) < 0) return -1; return parse_hide_refs_config(var, value, "uploadpack"); } +static int upload_pack_protected_config(const char *var, const char *value, void *cb_data) +{ + struct upload_pack_data *data = cb_data; + + if (!strcmp("uploadpack.packobjectshook", var)) + return git_config_string(&data->pack_objects_hook, var, value); + return 0; +} + +static void get_upload_pack_config(struct upload_pack_data *data) +{ + git_config(upload_pack_config, 
data); + git_protected_config(upload_pack_protected_config, data); +} + void upload_pack(const int advertise_refs, const int stateless_rpc, const int timeout) { @@ -1340,8 +1350,7 @@ void upload_pack(const int advertise_refs, const int stateless_rpc, struct upload_pack_data data; upload_pack_data_init(&data); - - git_config(upload_pack_config, &data); + get_upload_pack_config(&data); data.stateless_rpc = stateless_rpc; data.timeout = timeout; @@ -1695,8 +1704,7 @@ int upload_pack_v2(struct repository *r, struct packet_reader *request) upload_pack_data_init(&data); data.use_sideband = LARGE_PACKET_MAX; - - git_config(upload_pack_config, &data); + get_upload_pack_config(&data); while (state != FETCH_DONE) { switch (state) { @@ -161,28 +161,6 @@ void xsetenv(const char *name, const char *value, int overwrite) die_errno(_("could not setenv '%s'"), name ? name : "(null)"); } -/* - * Limit size of IO chunks, because huge chunks only cause pain. OS X - * 64-bit is buggy, returning EINVAL if len >= INT_MAX; and even in - * the absence of bugs, large chunks can result in bad latencies when - * you decide to kill the process. - * - * We pick 8 MiB as our default, but if the platform defines SSIZE_MAX - * that is smaller than that, clip it to SSIZE_MAX, as a call to - * read(2) or write(2) larger than that is allowed to fail. As the last - * resort, we allow a port to pass via CFLAGS e.g. "-DMAX_IO_SIZE=value" - * to override this, if the definition of SSIZE_MAX given by the platform - * is broken. - */ -#ifndef MAX_IO_SIZE -# define MAX_IO_SIZE_DEFAULT (8*1024*1024) -# if defined(SSIZE_MAX) && (SSIZE_MAX < MAX_IO_SIZE_DEFAULT) -# define MAX_IO_SIZE SSIZE_MAX -# else -# define MAX_IO_SIZE MAX_IO_SIZE_DEFAULT -# endif -#endif - /** * xopen() is the same as open(), but it die()s if the open() fails. */ @@ -616,10 +594,16 @@ int git_fsync(int fd, enum fsync_action action) } } +static void log_trace_fsync_if(const char *key, intmax_t value) +{ + if (value) + trace2_data_intmax("fsync", the_repository, key, value); +} + void trace_git_fsync_stats(void) { - trace2_data_intmax("fsync", the_repository, "fsync/writeout-only", count_fsync_writeout_only); - trace2_data_intmax("fsync", the_repository, "fsync/hardware-flush", count_fsync_hardware_flush); + log_trace_fsync_if("fsync/writeout-only", count_fsync_writeout_only); + log_trace_fsync_if("fsync/hardware-flush", count_fsync_hardware_flush); } static int warn_if_unremovable(const char *op, const char *file, int rc) diff --git a/xdiff/xdiff.h b/xdiff/xdiff.h index 72e25a9ffa..bb56b23f34 100644 --- a/xdiff/xdiff.h +++ b/xdiff/xdiff.h @@ -120,6 +120,7 @@ typedef struct s_bdiffparam { #define xdl_malloc(x) xmalloc(x) +#define xdl_calloc(n, sz) xcalloc(n, sz) #define xdl_free(ptr) free(ptr) #define xdl_realloc(ptr,x) xrealloc(ptr,x) diff --git a/xdiff/xdiffi.c b/xdiff/xdiffi.c index 758410c11a..32652ded2d 100644 --- a/xdiff/xdiffi.c +++ b/xdiff/xdiffi.c @@ -321,12 +321,12 @@ int xdl_do_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp, return -1; if (XDF_DIFF_ALG(xpp->flags) == XDF_PATIENCE_DIFF) { - res = xdl_do_patience_diff(mf1, mf2, xpp, xe); + res = xdl_do_patience_diff(xpp, xe); goto out; } if (XDF_DIFF_ALG(xpp->flags) == XDF_HISTOGRAM_DIFF) { - res = xdl_do_histogram_diff(mf1, mf2, xpp, xe); + res = xdl_do_histogram_diff(xpp, xe); goto out; } @@ -337,7 +337,7 @@ int xdl_do_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp, * One is to store the forward path and one to store the backward path. 
*/ ndiags = xe->xdf1.nreff + xe->xdf2.nreff + 3; - if (!(kvd = (long *) xdl_malloc((2 * ndiags + 2) * sizeof(long)))) { + if (!XDL_ALLOC_ARRAY(kvd, 2 * ndiags + 2)) { xdl_free_env(xe); return -1; diff --git a/xdiff/xdiffi.h b/xdiff/xdiffi.h index 8f1c7c8b04..126c9d8ff4 100644 --- a/xdiff/xdiffi.h +++ b/xdiff/xdiffi.h @@ -56,9 +56,7 @@ int xdl_build_script(xdfenv_t *xe, xdchange_t **xscr); void xdl_free_script(xdchange_t *xscr); int xdl_emit_diff(xdfenv_t *xe, xdchange_t *xscr, xdemitcb_t *ecb, xdemitconf_t const *xecfg); -int xdl_do_patience_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp, - xdfenv_t *env); -int xdl_do_histogram_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp, - xdfenv_t *env); +int xdl_do_patience_diff(xpparam_t const *xpp, xdfenv_t *env); +int xdl_do_histogram_diff(xpparam_t const *xpp, xdfenv_t *env); #endif /* #if !defined(XDIFFI_H) */ diff --git a/xdiff/xhistogram.c b/xdiff/xhistogram.c index 01decffc33..16a8fe2f3f 100644 --- a/xdiff/xhistogram.c +++ b/xdiff/xhistogram.c @@ -251,7 +251,7 @@ static int find_lcs(xpparam_t const *xpp, xdfenv_t *env, int line1, int count1, int line2, int count2) { int b_ptr; - int sz, ret = -1; + int ret = -1; struct histindex index; memset(&index, 0, sizeof(index)); @@ -265,23 +265,16 @@ static int find_lcs(xpparam_t const *xpp, xdfenv_t *env, index.rcha.head = NULL; index.table_bits = xdl_hashbits(count1); - sz = index.records_size = 1 << index.table_bits; - sz *= sizeof(struct record *); - if (!(index.records = (struct record **) xdl_malloc(sz))) + index.records_size = 1 << index.table_bits; + if (!XDL_CALLOC_ARRAY(index.records, index.records_size)) goto cleanup; - memset(index.records, 0, sz); - sz = index.line_map_size = count1; - sz *= sizeof(struct record *); - if (!(index.line_map = (struct record **) xdl_malloc(sz))) + index.line_map_size = count1; + if (!XDL_CALLOC_ARRAY(index.line_map, index.line_map_size)) goto cleanup; - memset(index.line_map, 0, sz); - sz = index.line_map_size; - sz *= sizeof(unsigned int); - if (!(index.next_ptrs = (unsigned int *) xdl_malloc(sz))) + if (!XDL_CALLOC_ARRAY(index.next_ptrs, index.line_map_size)) goto cleanup; - memset(index.next_ptrs, 0, sz); /* lines / 4 + 1 comes from xprepare.c:xdl_prepare_ctx() */ if (xdl_cha_init(&index.rcha, sizeof(struct record), count1 / 4 + 1) < 0) @@ -369,8 +362,7 @@ out: return result; } -int xdl_do_histogram_diff(mmfile_t *file1, mmfile_t *file2, - xpparam_t const *xpp, xdfenv_t *env) +int xdl_do_histogram_diff(xpparam_t const *xpp, xdfenv_t *env) { return histogram_diff(xpp, env, env->xdf1.dstart + 1, env->xdf1.dend - env->xdf1.dstart + 1, diff --git a/xdiff/xmacros.h b/xdiff/xmacros.h index ae4636c247..8487bb396f 100644 --- a/xdiff/xmacros.h +++ b/xdiff/xmacros.h @@ -49,5 +49,23 @@ do { \ ((unsigned long) __p[2]) << 16 | ((unsigned long) __p[3]) << 24; \ } while (0) +/* Allocate an array of nr elements, returns NULL on failure */ +#define XDL_ALLOC_ARRAY(p, nr) \ + ((p) = SIZE_MAX / sizeof(*(p)) >= (size_t)(nr) \ + ? xdl_malloc((nr) * sizeof(*(p))) \ + : NULL) + +/* Allocate an array of nr zeroed out elements, returns NULL on failure */ +#define XDL_CALLOC_ARRAY(p, nr) ((p) = xdl_calloc(nr, sizeof(*(p)))) + +/* + * Ensure array p can accommodate at least nr elements, growing the + * array and updating alloc (which is the number of allocated + * elements) as necessary. 
Frees p and returns -1 on failure, returns + * 0 on success + */ +#define XDL_ALLOC_GROW(p, nr, alloc) \ + (-!((nr) <= (alloc) || \ + ((p) = xdl_alloc_grow_helper((p), (nr), &(alloc), sizeof(*(p)))))) #endif /* #if !defined(XMACROS_H) */ diff --git a/xdiff/xpatience.c b/xdiff/xpatience.c index 1a21c6a74b..a2d8955537 100644 --- a/xdiff/xpatience.c +++ b/xdiff/xpatience.c @@ -69,7 +69,6 @@ struct hashmap { } *entries, *first, *last; /* were common records found? */ unsigned long has_matches; - mmfile_t *file1, *file2; xdfenv_t *env; xpparam_t const *xpp; }; @@ -139,23 +138,17 @@ static void insert_record(xpparam_t const *xpp, int line, struct hashmap *map, * * It is assumed that env has been prepared using xdl_prepare(). */ -static int fill_hashmap(mmfile_t *file1, mmfile_t *file2, - xpparam_t const *xpp, xdfenv_t *env, +static int fill_hashmap(xpparam_t const *xpp, xdfenv_t *env, struct hashmap *result, int line1, int count1, int line2, int count2) { - result->file1 = file1; - result->file2 = file2; result->xpp = xpp; result->env = env; /* We know exactly how large we want the hash map */ result->alloc = count1 * 2; - result->entries = (struct entry *) - xdl_malloc(result->alloc * sizeof(struct entry)); - if (!result->entries) + if (!XDL_CALLOC_ARRAY(result->entries, result->alloc)) return -1; - memset(result->entries, 0, result->alloc * sizeof(struct entry)); /* First, fill with entries from the first file */ while (count1--) @@ -200,7 +193,7 @@ static int binary_search(struct entry **sequence, int longest, */ static int find_longest_common_sequence(struct hashmap *map, struct entry **res) { - struct entry **sequence = xdl_malloc(map->nr * sizeof(struct entry *)); + struct entry **sequence; int longest = 0, i; struct entry *entry; @@ -211,7 +204,7 @@ static int find_longest_common_sequence(struct hashmap *map, struct entry **res) */ int anchor_i = -1; - if (!sequence) + if (!XDL_ALLOC_ARRAY(sequence, map->nr)) return -1; for (entry = map->first; entry; entry = entry->next) { @@ -257,8 +250,7 @@ static int match(struct hashmap *map, int line1, int line2) return record1->ha == record2->ha; } -static int patience_diff(mmfile_t *file1, mmfile_t *file2, - xpparam_t const *xpp, xdfenv_t *env, +static int patience_diff(xpparam_t const *xpp, xdfenv_t *env, int line1, int count1, int line2, int count2); static int walk_common_sequence(struct hashmap *map, struct entry *first, @@ -289,8 +281,7 @@ static int walk_common_sequence(struct hashmap *map, struct entry *first, /* Recurse */ if (next1 > line1 || next2 > line2) { - if (patience_diff(map->file1, map->file2, - map->xpp, map->env, + if (patience_diff(map->xpp, map->env, line1, next1 - line1, line2, next2 - line2)) return -1; @@ -329,8 +320,7 @@ static int fall_back_to_classic_diff(struct hashmap *map, * * This function assumes that env was prepared with xdl_prepare_env(). 
*/ -static int patience_diff(mmfile_t *file1, mmfile_t *file2, - xpparam_t const *xpp, xdfenv_t *env, +static int patience_diff(xpparam_t const *xpp, xdfenv_t *env, int line1, int count1, int line2, int count2) { struct hashmap map; @@ -349,7 +339,7 @@ static int patience_diff(mmfile_t *file1, mmfile_t *file2, } memset(&map, 0, sizeof(map)); - if (fill_hashmap(file1, file2, xpp, env, &map, + if (fill_hashmap(xpp, env, &map, line1, count1, line2, count2)) return -1; @@ -377,9 +367,7 @@ static int patience_diff(mmfile_t *file1, mmfile_t *file2, return result; } -int xdl_do_patience_diff(mmfile_t *file1, mmfile_t *file2, - xpparam_t const *xpp, xdfenv_t *env) +int xdl_do_patience_diff(xpparam_t const *xpp, xdfenv_t *env) { - return patience_diff(file1, file2, xpp, env, - 1, env->xdf1.nrec, 1, env->xdf2.nrec); + return patience_diff(xpp, env, 1, env->xdf1.nrec, 1, env->xdf2.nrec); } diff --git a/xdiff/xprepare.c b/xdiff/xprepare.c index 105752758f..c84549f6c5 100644 --- a/xdiff/xprepare.c +++ b/xdiff/xprepare.c @@ -78,15 +78,14 @@ static int xdl_init_classifier(xdlclassifier_t *cf, long size, long flags) { return -1; } - if (!(cf->rchash = (xdlclass_t **) xdl_malloc(cf->hsize * sizeof(xdlclass_t *)))) { + if (!XDL_CALLOC_ARRAY(cf->rchash, cf->hsize)) { xdl_cha_free(&cf->ncha); return -1; } - memset(cf->rchash, 0, cf->hsize * sizeof(xdlclass_t *)); cf->alloc = size; - if (!(cf->rcrecs = (xdlclass_t **) xdl_malloc(cf->alloc * sizeof(xdlclass_t *)))) { + if (!XDL_ALLOC_ARRAY(cf->rcrecs, cf->alloc)) { xdl_free(cf->rchash); xdl_cha_free(&cf->ncha); @@ -112,7 +111,6 @@ static int xdl_classify_record(unsigned int pass, xdlclassifier_t *cf, xrecord_t long hi; char const *line; xdlclass_t *rcrec; - xdlclass_t **rcrecs; line = rec->ptr; hi = (long) XDL_HASHLONG(rec->ha, cf->hbits); @@ -128,14 +126,8 @@ static int xdl_classify_record(unsigned int pass, xdlclassifier_t *cf, xrecord_t return -1; } rcrec->idx = cf->count++; - if (cf->count > cf->alloc) { - cf->alloc *= 2; - if (!(rcrecs = (xdlclass_t **) xdl_realloc(cf->rcrecs, cf->alloc * sizeof(xdlclass_t *)))) { - + if (XDL_ALLOC_GROW(cf->rcrecs, cf->count, cf->alloc)) return -1; - } - cf->rcrecs = rcrecs; - } cf->rcrecs[rcrec->idx] = rcrec; rcrec->line = line; rcrec->size = rec->size; @@ -164,7 +156,7 @@ static int xdl_prepare_ctx(unsigned int pass, mmfile_t *mf, long narec, xpparam_ unsigned long hav; char const *blk, *cur, *top, *prev; xrecord_t *crec; - xrecord_t **recs, **rrecs; + xrecord_t **recs; xrecord_t **rhash; unsigned long *ha; char *rchg; @@ -178,26 +170,21 @@ static int xdl_prepare_ctx(unsigned int pass, mmfile_t *mf, long narec, xpparam_ if (xdl_cha_init(&xdf->rcha, sizeof(xrecord_t), narec / 4 + 1) < 0) goto abort; - if (!(recs = (xrecord_t **) xdl_malloc(narec * sizeof(xrecord_t *)))) + if (!XDL_ALLOC_ARRAY(recs, narec)) goto abort; hbits = xdl_hashbits((unsigned int) narec); hsize = 1 << hbits; - if (!(rhash = (xrecord_t **) xdl_malloc(hsize * sizeof(xrecord_t *)))) + if (!XDL_CALLOC_ARRAY(rhash, hsize)) goto abort; - memset(rhash, 0, hsize * sizeof(xrecord_t *)); nrec = 0; if ((cur = blk = xdl_mmfile_first(mf, &bsize))) { for (top = blk + bsize; cur < top; ) { prev = cur; hav = xdl_hash_record(&cur, top, xpp->flags); - if (nrec >= narec) { - narec *= 2; - if (!(rrecs = (xrecord_t **) xdl_realloc(recs, narec * sizeof(xrecord_t *)))) - goto abort; - recs = rrecs; - } + if (XDL_ALLOC_GROW(recs, nrec + 1, narec)) + goto abort; if (!(crec = xdl_cha_alloc(&xdf->rcha))) goto abort; crec->ptr = prev; @@ -209,15 +196,14 @@ static int 
xdl_prepare_ctx(unsigned int pass, mmfile_t *mf, long narec, xpparam_
 		}
 	}
 
-	if (!(rchg = (char *) xdl_malloc((nrec + 2) * sizeof(char))))
+	if (!XDL_CALLOC_ARRAY(rchg, nrec + 2))
 		goto abort;
-	memset(rchg, 0, (nrec + 2) * sizeof(char));
 
 	if ((XDF_DIFF_ALG(xpp->flags) != XDF_PATIENCE_DIFF) &&
 	    (XDF_DIFF_ALG(xpp->flags) != XDF_HISTOGRAM_DIFF)) {
-		if (!(rindex = xdl_malloc((nrec + 1) * sizeof(*rindex))))
+		if (!XDL_ALLOC_ARRAY(rindex, nrec + 1))
 			goto abort;
-		if (!(ha = xdl_malloc((nrec + 1) * sizeof(*ha))))
+		if (!XDL_ALLOC_ARRAY(ha, nrec + 1))
 			goto abort;
 	}
 
@@ -383,11 +369,8 @@ static int xdl_cleanup_records(xdlclassifier_t *cf, xdfile_t *xdf1, xdfile_t *xd
 	xdlclass_t *rcrec;
 	char *dis, *dis1, *dis2;
 
-	if (!(dis = (char *) xdl_malloc(xdf1->nrec + xdf2->nrec + 2))) {
-
+	if (!XDL_CALLOC_ARRAY(dis, xdf1->nrec + xdf2->nrec + 2))
 		return -1;
-	}
-	memset(dis, 0, xdf1->nrec + xdf2->nrec + 2);
 	dis1 = dis;
 	dis2 = dis1 + xdf1->nrec + 1;
 
diff --git a/xdiff/xutils.c b/xdiff/xutils.c
index 115b2b1640..9e36f24875 100644
--- a/xdiff/xutils.c
+++ b/xdiff/xutils.c
@@ -432,3 +432,20 @@ int xdl_fall_back_diff(xdfenv_t *diff_env, xpparam_t const *xpp,
 
 	return 0;
 }
+
+void* xdl_alloc_grow_helper(void *p, long nr, long *alloc, size_t size)
+{
+	void *tmp = NULL;
+	size_t n = ((LONG_MAX - 16) / 2 >= *alloc) ? 2 * *alloc + 16 : LONG_MAX;
+	if (nr > n)
+		n = nr;
+	if (SIZE_MAX / size >= n)
+		tmp = xdl_realloc(p, n * size);
+	if (tmp) {
+		*alloc = n;
+	} else {
+		xdl_free(p);
+		*alloc = 0;
+	}
+	return tmp;
+}
diff --git a/xdiff/xutils.h b/xdiff/xutils.h
index fba7bae03c..fd0bba94e8 100644
--- a/xdiff/xutils.h
+++ b/xdiff/xutils.h
@@ -42,6 +42,7 @@ int xdl_emit_hunk_hdr(long s1, long c1, long s2, long c2,
 int xdl_fall_back_diff(xdfenv_t *diff_env, xpparam_t const *xpp,
 		       int line1, int count1, int line2, int count2);
 
-
+/* Do not call this function, use XDL_ALLOC_GROW instead */
+void* xdl_alloc_grow_helper(void* p, long nr, long* alloc, size_t size);
 
 #endif /* #if !defined(XUTILS_H) */
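
Stepping back to the trace2 target changes earlier in this diff (trace2/tr2_tgt_event.c, tr2_tgt_normal.c and tr2_tgt_perf.c): def_param events now record the config scope a parameter came from. The sketch below shows how the new field might be observed with the normal target; GIT_TRACE2 and GIT_TRACE2_CONFIG_PARAMS are pre-existing trace2 knobs rather than part of this diff, and the sample output line is only an expectation derived from the new "def_param scope:%s %s=%s" format string, not captured output.

# ---- illustrative sketch, not part of the diff above ----
# Ask trace2 to report core.abbrev as a def_param and send the normal
# target to a file; run this inside any repository.
GIT_TRACE2="$PWD/trace.normal" \
GIT_TRACE2_CONFIG_PARAMS=core.abbrev \
	git -c core.abbrev=16 status >/dev/null

grep def_param trace.normal
# Expected shape (scope "command" because the value came from -c):
#   ... def_param scope:command core.abbrev=16
# ---- end sketch ----
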

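Finally, referring back to the t9903-bash-prompt.sh hunk, __git_ps1 gains an opt-in conflict indicator. The sketch below turns it on in an interactive bash; the location of git-prompt.sh is installation-specific and therefore an assumption, while GIT_PS1_SHOWCONFLICTSTATE and the "(branch|CONFLICT)" rendering come straight from the new test.

# ---- illustrative sketch, not part of the diff above ----
# Source the prompt helper; the path is installation-specific.
. "$HOME/src/git/contrib/completion/git-prompt.sh"

GIT_PS1_SHOWCONFLICTSTATE=yes
PS1='\w$(__git_ps1 " (%s)")\$ '

# While unmerged paths exist (for example after a conflicting
# "git stash apply"), the prompt renders roughly as:
#   ~/repo (main|CONFLICT)$
# ---- end sketch ----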