diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index 4017ed82ca4341..c19530b086311a 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -1,8 +1,10 @@
- - [ ] I was not able to find an [open](https://github.com/git-for-windows/git/issues?q=is%3Aopen) or [closed](https://github.com/git-for-windows/git/issues?q=is%3Aclosed) issue matching what I'm seeing
+ - [ ] I was not able to find an [open](https://github.com/microsoft/git/issues?q=is%3Aopen)
+ or [closed](https://github.com/microsoft/git/issues?q=is%3Aclosed) issue matching
+ what I'm seeing, including in [the `git-for-windows/git` tracker](https://github.com/git-for-windows/git/issues).
### Setup
- - Which version of Git for Windows are you using? Is it 32-bit or 64-bit?
+ - Which version of `microsoft/git` are you using? Is it 32-bit or 64-bit?
```
$ git --version --build-options
@@ -10,24 +12,22 @@ $ git --version --build-options
** insert your machine's response here **
```
- - Which version of Windows are you running? Vista, 7, 8, 10? Is it 32-bit or 64-bit?
+Are you using Scalar or VFS for Git?
+
+** insert your answer here **
+
+If VFS for Git, then what version?
```
-$ cmd.exe /c ver
+$ gvfs version
** insert your machine's response here **
```
- - What options did you set as part of the installation? Or did you choose the
- defaults?
+ - Which version of Windows are you running? Vista, 7, 8, 10? Is it 32-bit or 64-bit?
```
-# One of the following:
-> type "C:\Program Files\Git\etc\install-options.txt"
-> type "C:\Program Files (x86)\Git\etc\install-options.txt"
-> type "%USERPROFILE%\AppData\Local\Programs\Git\etc\install-options.txt"
-> type "$env:USERPROFILE\AppData\Local\Programs\Git\etc\install-options.txt"
-$ cat /etc/install-options.txt
+$ cmd.exe /c ver
** insert your machine's response here **
```
@@ -58,7 +58,11 @@ $ cat /etc/install-options.txt
** insert here **
- - If the problem was occurring with a specific repository, can you provide the
- URL to that repository to help us with testing?
+ - If the problem was occurring with a specific repository, can you specify
+ the repository?
-** insert URL here **
+ * [ ] Public repo: **insert URL here**
+ * [ ] Windows monorepo
+ * [ ] Office monorepo
+ * [ ] Other Microsoft-internal repo: **insert name here**
+ * [ ] Other internal repo.
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 831ef6f19f1d11..3cb48d8582f31c 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,19 +1,10 @@
Thanks for taking the time to contribute to Git!
-Those seeking to contribute to the Git for Windows fork should see
-http://gitforwindows.org/#contribute on how to contribute Windows specific
-enhancements.
-
-If your contribution is for the core Git functions and documentation
-please be aware that the Git community does not use the github.com issues
-or pull request mechanism for their contributions.
-
-Instead, we use the Git mailing list (git@vger.kernel.org) for code and
-documentation submissions, code reviews, and bug reports. The
-mailing list is plain text only (anything with HTML is sent directly
-to the spam folder).
-
-Nevertheless, you can use GitGitGadget (https://gitgitgadget.github.io/)
-to conveniently send your Pull Requests commits to our mailing list.
-
-Please read the "guidelines for contributing" linked above!
+This fork contains changes specific to monorepo scenarios. If you are an
+external contributor, then please detail your reason for submitting to
+this fork:
+
+* [ ] This is an early version of work already under review upstream.
+* [ ] This change only applies to interactions with Azure DevOps and the
+ GVFS Protocol.
+* [ ] This change only applies to the virtualization hook and VFS for Git.
diff --git a/.github/macos-installer/Makefile b/.github/macos-installer/Makefile
new file mode 100644
index 00000000000000..df339bd921df23
--- /dev/null
+++ b/.github/macos-installer/Makefile
@@ -0,0 +1,116 @@
+SHELL := /bin/bash
+SUDO := sudo
+C_INCLUDE_PATH := /usr/include
+CPLUS_INCLUDE_PATH := /usr/include
+LD_LIBRARY_PATH := /usr/lib
+
+OSX_VERSION := $(shell sw_vers -productVersion)
+TARGET_FLAGS := -mmacosx-version-min=$(OSX_VERSION) -DMACOSX_DEPLOYMENT_TARGET=$(OSX_VERSION)
+
+ARCH := x86_64
+ARCH_CODE := x86_64
+ARCH_FLAGS_x86_64 := -arch x86_64
+
+CFLAGS := $(TARGET_FLAGS) $(ARCH_FLAGS_${ARCH_CODE})
+LDFLAGS := $(TARGET_FLAGS) $(ARCH_FLAGS_${ARCH_CODE})
+
+PREFIX := /usr/local
+GIT_PREFIX := $(PREFIX)/git
+
+BUILD_CODE := intel-$(ARCH_CODE)
+BUILD_DIR := $(GITHUB_WORKSPACE)/payload
+DESTDIR := $(PWD)/stage/git-$(BUILD_CODE)-$(VERSION)
+ARTIFACTDIR := build_artifacts
+SUBMAKE := $(MAKE) C_INCLUDE_PATH="$(C_INCLUDE_PATH)" CPLUS_INCLUDE_PATH="$(CPLUS_INCLUDE_PATH)" LD_LIBRARY_PATH="$(LD_LIBRARY_PATH)" TARGET_FLAGS="$(TARGET_FLAGS)" CFLAGS="$(CFLAGS)" LDFLAGS="$(LDFLAGS)" NO_GETTEXT=1 NO_DARWIN_PORTS=1 prefix=$(GIT_PREFIX) GIT_BUILT_FROM_COMMIT="$(GIT_BUILT_FROM_COMMIT)" DESTDIR=$(DESTDIR)
+CORES := $(shell bash -c "sysctl hw.ncpu | awk '{print \$$2}'")
+
+.PHONY: image pkg payload
+
+.SECONDARY:
+
+$(DESTDIR)$(GIT_PREFIX)/VERSION-$(VERSION)-$(BUILD_CODE):
+ rm -f $(BUILD_DIR)/git-$(VERSION)/osx-installed*
+ mkdir -p $(DESTDIR)$(GIT_PREFIX)
+ touch $@
+
+$(BUILD_DIR)/git-$(VERSION)/osx-built-keychain:
+ cd $(BUILD_DIR)/git-$(VERSION)/contrib/credential/osxkeychain; $(SUBMAKE) CFLAGS="$(CFLAGS) -g -O2 -Wall"
+ touch $@
+
+$(BUILD_DIR)/git-$(VERSION)/osx-built:
+ [ -d $(DESTDIR)$(GIT_PREFIX) ] && $(SUDO) rm -rf $(DESTDIR) || echo ok
+ cd $(BUILD_DIR)/git-$(VERSION); $(SUBMAKE) -j $(CORES) all strip
+ touch $@
+
+$(BUILD_DIR)/git-$(VERSION)/osx-installed-bin: $(BUILD_DIR)/git-$(VERSION)/osx-built $(BUILD_DIR)/git-$(VERSION)/osx-built-keychain
+ cd $(BUILD_DIR)/git-$(VERSION); $(SUBMAKE) install
+ cp $(BUILD_DIR)/git-$(VERSION)/contrib/credential/osxkeychain/git-credential-osxkeychain $(DESTDIR)$(GIT_PREFIX)/bin/git-credential-osxkeychain
+ mkdir -p $(DESTDIR)$(GIT_PREFIX)/contrib/completion
+ cp $(BUILD_DIR)/git-$(VERSION)/contrib/completion/git-completion.bash $(DESTDIR)$(GIT_PREFIX)/contrib/completion/
+ cp $(BUILD_DIR)/git-$(VERSION)/contrib/completion/git-completion.zsh $(DESTDIR)$(GIT_PREFIX)/contrib/completion/
+ cp $(BUILD_DIR)/git-$(VERSION)/contrib/completion/git-prompt.sh $(DESTDIR)$(GIT_PREFIX)/contrib/completion/
+ # This is needed for Git-Gui, GitK
+ mkdir -p $(DESTDIR)$(GIT_PREFIX)/lib/perl5/site_perl
+ [ ! -f $(DESTDIR)$(GIT_PREFIX)/lib/perl5/site_perl/Error.pm ] && cp $(BUILD_DIR)/git-$(VERSION)/perl/private-Error.pm $(DESTDIR)$(GIT_PREFIX)/lib/perl5/site_perl/Error.pm || echo done
+ touch $@
+
+$(BUILD_DIR)/git-$(VERSION)/osx-installed-man: $(BUILD_DIR)/git-$(VERSION)/osx-installed-bin
+ mkdir -p $(DESTDIR)$(GIT_PREFIX)/share/man
+ cp -R $(GITHUB_WORKSPACE)/manpages/ $(DESTDIR)$(GIT_PREFIX)/share/man
+ touch $@
+
+$(BUILD_DIR)/git-$(VERSION)/osx-built-subtree:
+ cd $(BUILD_DIR)/git-$(VERSION)/contrib/subtree; $(SUBMAKE) XML_CATALOG_FILES="$(XML_CATALOG_FILES)" all git-subtree.1
+ touch $@
+
+$(BUILD_DIR)/git-$(VERSION)/osx-installed-subtree: $(BUILD_DIR)/git-$(VERSION)/osx-built-subtree
+ mkdir -p $(DESTDIR)
+ cd $(BUILD_DIR)/git-$(VERSION)/contrib/subtree; $(SUBMAKE) XML_CATALOG_FILES="$(XML_CATALOG_FILES)" install install-man
+ touch $@
+
+$(BUILD_DIR)/git-$(VERSION)/osx-installed-assets: $(BUILD_DIR)/git-$(VERSION)/osx-installed-bin
+ mkdir -p $(DESTDIR)$(GIT_PREFIX)/etc
+ cat assets/etc/gitconfig.osxkeychain >> $(DESTDIR)$(GIT_PREFIX)/etc/gitconfig
+ cp assets/uninstall.sh $(DESTDIR)$(GIT_PREFIX)/uninstall.sh
+ sh -c "echo .DS_Store >> $(DESTDIR)$(GIT_PREFIX)/share/git-core/templates/info/exclude"
+
+symlinks:
+ mkdir -p $(ARTIFACTDIR)$(PREFIX)/bin
+ cd $(ARTIFACTDIR)$(PREFIX)/bin; find ../git/bin -type f -exec ln -sf {} \;
+ for man in man1 man3 man5 man7; do mkdir -p $(ARTIFACTDIR)$(PREFIX)/share/man/$$man; (cd $(ARTIFACTDIR)$(PREFIX)/share/man/$$man; ln -sf ../../../git/share/man/$$man/* ./); done
+ ruby ../scripts/symlink-git-hardlinks.rb $(ARTIFACTDIR)
+ touch $@
+
+$(BUILD_DIR)/git-$(VERSION)/osx-installed: $(DESTDIR)$(GIT_PREFIX)/VERSION-$(VERSION)-$(BUILD_CODE) $(BUILD_DIR)/git-$(VERSION)/osx-installed-man $(BUILD_DIR)/git-$(VERSION)/osx-installed-assets $(BUILD_DIR)/git-$(VERSION)/osx-installed-subtree
+ find $(DESTDIR)$(GIT_PREFIX) -type d -exec chmod ugo+rx {} \;
+ find $(DESTDIR)$(GIT_PREFIX) -type f -exec chmod ugo+r {} \;
+ touch $@
+
+$(BUILD_DIR)/git-$(VERSION)/osx-built-assert-$(ARCH_CODE): $(BUILD_DIR)/git-$(VERSION)/osx-built
+ifeq ("$(ARCH_CODE)", "universal")
+ file $(BUILD_DIR)/git-$(VERSION)/git
+ file $(BUILD_DIR)/git-$(VERSION)/contrib/credential/osxkeychain/git-credential-osxkeychain
+else
+ [ "$$(File $(BUILD_DIR)/git-$(VERSION)/git | cut -f 5 -d' ')" == "$(ARCH_CODE)" ]
+ [ "$$(File $(BUILD_DIR)/git-$(VERSION)/contrib/credential/osxkeychain/git-credential-osxkeychain | cut -f 5 -d' ')" == "$(ARCH_CODE)" ]
+endif
+ touch $@
+
+disk-image/VERSION-$(VERSION)-$(ARCH_CODE):
+ rm -f disk-image/*.pkg disk-image/VERSION-* disk-image/.DS_Store
+ mkdir disk-image
+ touch "$@"
+
+disk-image/git-$(VERSION)-$(BUILD_CODE).pkg: disk-image/VERSION-$(VERSION)-$(ARCH_CODE) symlinks
+ pkgbuild --identifier com.git.pkg --version $(VERSION) --root $(ARTIFACTDIR)$(PREFIX) --scripts assets/scripts --install-location $(PREFIX) --component-plist ./assets/git-components.plist disk-image/git-$(VERSION)-$(BUILD_CODE).pkg
+
+git-%-$(BUILD_CODE).dmg:
+ hdiutil create git-$(VERSION)-$(BUILD_CODE).uncompressed.dmg -fs HFS+ -srcfolder disk-image -volname "Git $(VERSION) Intel $(ARCH)" -ov
+ hdiutil convert -format UDZO -o $@ git-$(VERSION)-$(BUILD_CODE).uncompressed.dmg
+ rm -f git-$(VERSION)-$(BUILD_CODE).uncompressed.dmg
+
+payload: $(BUILD_DIR)/git-$(VERSION)/osx-installed $(BUILD_DIR)/git-$(VERSION)/osx-built-assert-$(ARCH_CODE)
+
+pkg: disk-image/git-$(VERSION)-$(BUILD_CODE).pkg
+
+image: git-$(VERSION)-$(BUILD_CODE).dmg
diff --git a/.github/macos-installer/assets/etc/gitconfig.osxkeychain b/.github/macos-installer/assets/etc/gitconfig.osxkeychain
new file mode 100644
index 00000000000000..788266b3a40a9d
--- /dev/null
+++ b/.github/macos-installer/assets/etc/gitconfig.osxkeychain
@@ -0,0 +1,2 @@
+[credential]
+ helper = osxkeychain
diff --git a/.github/macos-installer/assets/git-components.plist b/.github/macos-installer/assets/git-components.plist
new file mode 100644
index 00000000000000..78db36777df3ed
--- /dev/null
+++ b/.github/macos-installer/assets/git-components.plist
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<array>
+	<dict>
+		<key>BundleHasStrictIdentifier</key>
+		<true/>
+		<key>BundleIsRelocatable</key>
+		<false/>
+		<key>BundleIsVersionChecked</key>
+		<true/>
+		<key>BundleOverwriteAction</key>
+		<string>upgrade</string>
+		<key>RootRelativeBundlePath</key>
+		<string>git/share/git-gui/lib/Git Gui.app</string>
+	</dict>
+</array>
+</plist>
diff --git a/.github/macos-installer/assets/scripts/postinstall b/.github/macos-installer/assets/scripts/postinstall
new file mode 100755
index 00000000000000..94056db9b7b864
--- /dev/null
+++ b/.github/macos-installer/assets/scripts/postinstall
@@ -0,0 +1,62 @@
+#!/bin/bash
+INSTALL_DST="$2"
+SCALAR_C_CMD="$INSTALL_DST/git/bin/scalar"
+SCALAR_DOTNET_CMD="/usr/local/scalar/scalar"
+SCALAR_UNINSTALL_SCRIPT="/usr/local/scalar/uninstall_scalar.sh"
+
+function cleanupScalar()
+{
+ echo "checking whether Scalar was installed"
+ if [ ! -f "$SCALAR_C_CMD" ]; then
+ echo "Scalar not installed; exiting..."
+ return 0
+ fi
+ echo "Scalar is installed!"
+
+ echo "looking for Scalar.NET"
+ if [ ! -f "$SCALAR_DOTNET_CMD" ]; then
+ echo "Scalar.NET not found; exiting..."
+ return 0
+ fi
+ echo "Scalar.NET found!"
+
+ currentUser=$(echo "show State:/Users/ConsoleUser" | scutil | awk '/Name :/ { print $3 }')
+
+ # Re-register Scalar.NET repositories with the newly-installed Scalar
+ for repo in $($SCALAR_DOTNET_CMD list); do
+ (
+ PATH="$INSTALL_DST/git/bin:$PATH"
+ sudo -u "$currentUser" scalar register $repo || \
+ echo "warning: skipping re-registration of $repo"
+ )
+ done
+
+ # Uninstall Scalar.NET
+ echo "removing Scalar.NET"
+
+ # Add /usr/local/bin to path - default install location of Homebrew
+ PATH="/usr/local/bin:$PATH"
+ if (sudo -u "$currentUser" brew list --cask scalar); then
+ # Remove from Homebrew
+ sudo -u "$currentUser" brew remove --cask scalar || echo "warning: Scalar.NET uninstall via Homebrew completed with code $?"
+ echo "Scalar.NET uninstalled via Homebrew!"
+ elif (sudo -u "$currentUser" brew list --cask scalar-azrepos); then
+ sudo -u "$currentUser" brew remove --cask scalar-azrepos || echo "warning: Scalar.NET with GVFS uninstall via Homebrew completed with code $?"
+ echo "Scalar.NET with GVFS uninstalled via Homebrew!"
+ elif [ -f $SCALAR_UNINSTALL_SCRIPT ]; then
+ # If not installed with Homebrew, manually remove package
+ sudo -S sh $SCALAR_UNINSTALL_SCRIPT || echo "warning: Scalar.NET uninstall completed with code $?"
+ echo "Scalar.NET uninstalled!"
+ else
+ echo "warning: Scalar.NET uninstall script not found"
+ fi
+
+ # Re-create the Scalar symlink, in case it was removed by the Scalar.NET uninstall operation
+ mkdir -p $INSTALL_DST/bin
+ /bin/ln -Fs "$SCALAR_C_CMD" "$INSTALL_DST/bin/scalar"
+}
+
+# Run Scalar cleanup (will exit if not applicable)
+cleanupScalar
+
+exit 0
\ No newline at end of file
diff --git a/.github/macos-installer/assets/uninstall.sh b/.github/macos-installer/assets/uninstall.sh
new file mode 100755
index 00000000000000..4fc79fbaa2e652
--- /dev/null
+++ b/.github/macos-installer/assets/uninstall.sh
@@ -0,0 +1,34 @@
+#!/bin/bash -e
+if [ ! -r "/usr/local/git" ]; then
+ echo "Git doesn't appear to be installed via this installer. Aborting"
+ exit 1
+fi
+
+if [ "$1" != "--yes" ]; then
+ echo "This will uninstall git by removing /usr/local/git/, and symlinks"
+ printf "Type 'yes' if you are sure you wish to continue: "
+ read response
+else
+ response="yes"
+fi
+
+if [ "$response" == "yes" ]; then
+ # remove all of the symlinks we've created
+ pkgutil --files com.git.pkg | grep bin | while read f; do
+ if [ -L /usr/local/$f ]; then
+ sudo rm /usr/local/$f
+ fi
+ done
+
+ # forget receipts.
+ pkgutil --packages | grep com.git.pkg | xargs -I {} sudo pkgutil --forget {}
+ echo "Uninstalled"
+
+ # The guts all go here.
+ sudo rm -rf /usr/local/git/
+else
+ echo "Aborted"
+ exit 1
+fi
+
+exit 0
diff --git a/.github/scripts/run-esrp-signing.py b/.github/scripts/run-esrp-signing.py
new file mode 100644
index 00000000000000..725bf4580f5f1b
--- /dev/null
+++ b/.github/scripts/run-esrp-signing.py
@@ -0,0 +1,135 @@
+import argparse
+import json
+import os
+import glob
+import pprint
+import subprocess
+import sys
+import re
+
+parser = argparse.ArgumentParser(description='Sign binaries for macOS')
+parser.add_argument('path', help='Path to file for signing')
+parser.add_argument('keycode', help='Platform-specific key code for signing')
+parser.add_argument('opcode', help='Platform-specific operation code for signing')
+# Setting nargs=argparse.REMAINDER allows us to pass in params that begin with `--`
+parser.add_argument('--params', nargs=argparse.REMAINDER, help='Parameters for signing')
+args = parser.parse_args()
+
+esrp_tool = os.path.join("esrp", "tools", "EsrpClient.exe")
+
+aad_id = os.environ['AZURE_AAD_ID'].strip()
+workspace = os.environ['GITHUB_WORKSPACE'].strip()
+
+source_location = args.path
+files = glob.glob(os.path.join(source_location, "*"))
+
+print("Found files:")
+pprint.pp(files)
+
+auth_json = {
+ "Version": "1.0.0",
+ "AuthenticationType": "AAD_CERT",
+ "TenantId": "72f988bf-86f1-41af-91ab-2d7cd011db47",
+ "ClientId": f"{aad_id}",
+ "AuthCert": {
+ "SubjectName": f"CN={aad_id}.microsoft.com",
+ "StoreLocation": "LocalMachine",
+ "StoreName": "My",
+ "SendX5c" : "true"
+ },
+ "RequestSigningCert": {
+ "SubjectName": f"CN={aad_id}",
+ "StoreLocation": "LocalMachine",
+ "StoreName": "My"
+ }
+}
+
+input_json = {
+ "Version": "1.0.0",
+ "SignBatches": [
+ {
+ "SourceLocationType": "UNC",
+ "SourceRootDirectory": source_location,
+ "DestinationLocationType": "UNC",
+ "DestinationRootDirectory": workspace,
+ "SignRequestFiles": [],
+ "SigningInfo": {
+ "Operations": [
+ {
+ "KeyCode": f"{args.keycode}",
+ "OperationCode": f"{args.opcode}",
+ "Parameters": {},
+ "ToolName": "sign",
+ "ToolVersion": "1.0",
+ }
+ ]
+ }
+ }
+ ]
+}
+
+# add files to sign
+for f in files:
+ name = os.path.basename(f)
+ input_json["SignBatches"][0]["SignRequestFiles"].append(
+ {
+ "SourceLocation": name,
+ "DestinationLocation": os.path.join("signed", name),
+ }
+ )
+
+# add parameters to input.json (e.g. enabling the hardened runtime for macOS)
+if args.params is not None:
+ i = 0
+ while i < len(args.params):
+ input_json["SignBatches"][0]["SigningInfo"]["Operations"][0]["Parameters"][args.params[i]] = args.params[i + 1]
+ i += 2
+
+policy_json = {
+ "Version": "1.0.0",
+ "Intent": "production release",
+ "ContentType": "binary",
+}
+
+configs = [
+ ("auth.json", auth_json),
+ ("input.json", input_json),
+ ("policy.json", policy_json),
+]
+
+for filename, data in configs:
+ with open(filename, 'w') as fp:
+ json.dump(data, fp)
+
+# Run ESRP Client
+esrp_out = "esrp_out.json"
+result = subprocess.run(
+ [esrp_tool, "sign",
+ "-a", "auth.json",
+ "-i", "input.json",
+ "-p", "policy.json",
+ "-o", esrp_out,
+ "-l", "Verbose"],
+ capture_output=True,
+ text=True,
+ cwd=workspace)
+
+# Scrub log before printing
+log = re.sub(r'^.+Uploading.*to\s*destinationUrl\s*(.+?),.+$',
+ '***',
+ result.stdout,
+ flags=re.IGNORECASE|re.MULTILINE)
+print(log)
+
+if result.returncode != 0:
+ print("Failed to run ESRPClient.exe")
+ sys.exit(1)
+
+if os.path.isfile(esrp_out):
+ print("ESRP output json:")
+ with open(esrp_out, 'r') as fp:
+ pprint.pp(json.load(fp))
+
+for file in files:
+ if os.path.isfile(os.path.join("signed", file)):
+ print(f"Success!\nSigned {file}")
\ No newline at end of file
diff --git a/.github/scripts/set-up-esrp.ps1 b/.github/scripts/set-up-esrp.ps1
new file mode 100644
index 00000000000000..ca56266e33f553
--- /dev/null
+++ b/.github/scripts/set-up-esrp.ps1
@@ -0,0 +1,12 @@
+# Install ESRP client
+az storage blob download --file esrp.zip --auth-mode login --account-name esrpsigningstorage --container signing-resources --name microsoft.esrpclient.1.2.76.nupkg
+Expand-Archive -Path esrp.zip -DestinationPath .\esrp
+
+# Install certificates
+az keyvault secret download --vault-name "$env:AZURE_VAULT" --name "$env:AUTH_CERT" --file out.pfx
+certutil -f -importpfx out.pfx
+Remove-Item out.pfx
+
+az keyvault secret download --vault-name "$env:AZURE_VAULT" --name "$env:REQUEST_SIGNING_CERT" --file out.pfx
+certutil -f -importpfx out.pfx
+Remove-Item out.pfx
\ No newline at end of file
diff --git a/.github/scripts/symlink-git-hardlinks.rb b/.github/scripts/symlink-git-hardlinks.rb
new file mode 100644
index 00000000000000..174802ccc85d93
--- /dev/null
+++ b/.github/scripts/symlink-git-hardlinks.rb
@@ -0,0 +1,19 @@
+#!/usr/bin/env ruby
+
+install_prefix = ARGV[0]
+puts install_prefix
+git_binary = File.join(install_prefix, '/usr/local/git/bin/git')
+
+[
+ ['git' , File.join(install_prefix, '/usr/local/git/bin')],
+ ['../../bin/git', File.join(install_prefix, '/usr/local/git/libexec/git-core')]
+].each do |link, path|
+ Dir.glob(File.join(path, '*')).each do |file|
+ next if file == git_binary
+ puts "#{file} #{File.size(file)} == #{File.size(git_binary)}"
+ next unless File.size(file) == File.size(git_binary)
+ puts "Symlinking #{file}"
+ puts `ln -sf #{link} #{file}`
+ exit $?.exitstatus if $?.exitstatus != 0
+ end
+end
\ No newline at end of file
diff --git a/.github/workflows/build-git-installers.yml b/.github/workflows/build-git-installers.yml
new file mode 100644
index 00000000000000..74b730d5b7b0d7
--- /dev/null
+++ b/.github/workflows/build-git-installers.yml
@@ -0,0 +1,853 @@
+name: build-git-installers
+
+on:
+ push:
+ tags:
+ - 'v[0-9]*vfs*' # matches "v<number>vfs<number>"
+
+jobs:
+ # Check prerequisites for the workflow
+ prereqs:
+ runs-on: ubuntu-latest
+ environment: release
+ env:
+ AZ_SUB: ${{ secrets.AZURE_SUBSCRIPTION }}
+ AZ_CREDS: ${{ secrets.AZURE_CREDENTIALS }}
+ outputs:
+ tag_name: ${{ steps.tag.outputs.name }} # The full name of the tag, e.g. v2.32.0.vfs.0.0
+ tag_version: ${{ steps.tag.outputs.version }} # The version number (without preceding "v"), e.g. 2.32.0.vfs.0.0
+ deb_signable: ${{ steps.deb.outputs.signable }} # Whether the credentials needed to sign the .deb package are available
+ steps:
+ - name: Validate tag
+ run: |
+ echo "$GITHUB_REF" |
+ grep '^refs/tags/v2\.\(0\|[1-9][0-9]*\)\.\(0\|[1-9][0-9]*\)\.vfs\.0\.\(0\|[1-9][0-9]*\)$' || {
+ echo "::error::${GITHUB_REF#refs/tags/} is not of the form v2...vfs.0." >&2
+ exit 1
+ }
+ - name: Determine tag to build
+ run: |
+ echo "name=${GITHUB_REF#refs/tags/}" >>$GITHUB_OUTPUT
+ echo "version=${GITHUB_REF#refs/tags/v}" >>$GITHUB_OUTPUT
+ id: tag
+ - name: Determine whether signing certificates are present
+ run: echo "signable=$([[ $AZ_SUB != '' && $AZ_CREDS != '' ]] && echo 'true' || echo 'false')" >>$GITHUB_OUTPUT
+ id: deb
+ - name: Clone git
+ uses: actions/checkout@v3
+ - name: Validate the tag identified with trigger
+ run: |
+ die () {
+ echo "::error::$*" >&2
+ exit 1
+ }
+
+ # `actions/checkout` only downloads the peeled tag (i.e. the commit)
+ git fetch origin +$GITHUB_REF:$GITHUB_REF
+
+ # Verify that the tag is annotated
+ test $(git cat-file -t "$GITHUB_REF") == "tag" || die "Tag ${{ steps.tag.outputs.name }} is not annotated"
+
+ # Verify tag follows rules in GIT-VERSION-GEN (i.e., matches the specified "DEF_VER" in
+ # GIT-VERSION-FILE) and matches tag determined from trigger
+ make GIT-VERSION-FILE
+ test "${{ steps.tag.outputs.version }}" == "$(sed -n 's/^GIT_VERSION = //p'< GIT-VERSION-FILE)" || die "GIT-VERSION-FILE tag does not match ${{ steps.tag.outputs.name }}"
+ # End check prerequisites for the workflow
+
+ # Build Windows installers (x86_64 installer & portable)
+ windows_pkg:
+ runs-on: windows-2019
+ environment: release
+ needs: prereqs
+ env:
+ GPG_OPTIONS: "--batch --yes --no-tty --list-options no-show-photos --verify-options no-show-photos --pinentry-mode loopback"
+ HOME: "${{github.workspace}}\\home"
+ USERPROFILE: "${{github.workspace}}\\home"
+ steps:
+ - name: Configure user
+ shell: bash
+ run:
+ USER_NAME="${{github.actor}}" &&
+ USER_EMAIL="${{github.actor}}@users.noreply.github.com" &&
+ mkdir -p "$HOME" &&
+ git config --global user.name "$USER_NAME" &&
+ git config --global user.email "$USER_EMAIL" &&
+ echo "PACKAGER=$USER_NAME <$USER_EMAIL>" >>$GITHUB_ENV
+ - uses: git-for-windows/setup-git-for-windows-sdk@v1
+ with:
+ flavor: build-installers
+ - name: Clone build-extra
+ shell: bash
+ run: |
+ git clone --filter=blob:none --single-branch -b main https://github.com/git-for-windows/build-extra /usr/src/build-extra
+ - name: Clone git
+ shell: bash
+ run: |
+ # Since we cannot directly clone a specified tag (as we would a branch with `git clone -b `),
+ # this clone has to be done manually (via init->fetch->reset).
+
+ tag_name="${{ needs.prereqs.outputs.tag_name }}" &&
+ git -c init.defaultBranch=main init &&
+ git remote add -f origin https://github.com/git-for-windows/git &&
+ git fetch "https://github.com/${{github.repository}}" refs/tags/${tag_name}:refs/tags/${tag_name} &&
+ git reset --hard ${tag_name}
+ - name: Prepare home directory for code-signing
+ env:
+ CODESIGN_P12: ${{secrets.CODESIGN_P12}}
+ CODESIGN_PASS: ${{secrets.CODESIGN_PASS}}
+ if: env.CODESIGN_P12 != '' && env.CODESIGN_PASS != ''
+ shell: bash
+ run: |
+ cd home &&
+ mkdir -p .sig &&
+ echo -n "$CODESIGN_P12" | tr % '\n' | base64 -d >.sig/codesign.p12 &&
+ echo -n "$CODESIGN_PASS" >.sig/codesign.pass
+ git config --global alias.signtool '!sh "/usr/src/build-extra/signtool.sh"'
+ - name: Prepare home directory for GPG signing
+ if: env.GPGKEY != ''
+ shell: bash
+ run: |
+ # This section ensures that the identity for the GPG key matches the git user identity, otherwise
+ # signing will fail
+
+ echo '${{secrets.PRIVGPGKEY}}' | tr % '\n' | gpg $GPG_OPTIONS --import &&
+ info="$(gpg --list-keys --with-colons "${GPGKEY%% *}" | cut -d : -f 1,10 | sed -n '/^uid/{s|uid:||p;q}')" &&
+ git config --global user.name "${info% <*}" &&
+ git config --global user.email "<${info#*<}"
+ env:
+ GPGKEY: ${{secrets.GPGKEY}}
+ - name: Build mingw-w64-x86_64-git
+ env:
+ GPGKEY: "${{secrets.GPGKEY}}"
+ shell: bash
+ run: |
+ set -x
+
+ # Make sure that there is a `/usr/bin/git` that can be used by `makepkg-mingw`
+ printf '#!/bin/sh\n\nexec /mingw64/bin/git.exe "$@"\n' >/usr/bin/git &&
+
+ # Restrict `PATH` to MSYS2 and to Visual Studio (to let `cv2pdb` find the relevant DLLs)
+ PATH="/mingw64/bin:/usr/bin:/C/Program Files (x86)/Microsoft Visual Studio 14.0/VC/bin/amd64:/C/Windows/system32"
+
+ type -p mspdb140.dll || exit 1
+
+ sh -x /usr/src/build-extra/please.sh build-mingw-w64-git --only-64-bit --build-src-pkg -o artifacts HEAD &&
+ if test -n "$GPGKEY"
+ then
+ for tar in artifacts/*.tar*
+ do
+ /usr/src/build-extra/gnupg-with-gpgkey.sh --detach-sign --no-armor $tar
+ done
+ fi &&
+
+ b=$PWD/artifacts &&
+ version=${{ needs.prereqs.outputs.tag_name }} &&
+ (cd /usr/src/MINGW-packages/mingw-w64-git &&
+ cp PKGBUILD.$version PKGBUILD &&
+ git commit -s -m "mingw-w64-git: new version ($version)" PKGBUILD &&
+ git bundle create "$b"/MINGW-packages.bundle origin/main..main)
+ - name: Publish mingw-w64-x86_64-git
+ uses: actions/upload-artifact@v3
+ with:
+ name: pkg-x86_64
+ path: artifacts
+ windows_artifacts:
+ runs-on: windows-2019
+ environment: release
+ needs: [prereqs, windows_pkg]
+ env:
+ HOME: "${{github.workspace}}\\home"
+ strategy:
+ matrix:
+ artifact:
+ - name: installer
+ fileprefix: Git
+ - name: portable
+ fileprefix: PortableGit
+ fail-fast: false
+ steps:
+ - name: Download pkg-x86_64
+ uses: actions/download-artifact@v3
+ with:
+ name: pkg-x86_64
+ path: pkg-x86_64
+ - uses: git-for-windows/setup-git-for-windows-sdk@v1
+ with:
+ flavor: build-installers
+ - name: Clone build-extra
+ shell: bash
+ run: |
+ git clone --filter=blob:none --single-branch -b main https://github.com/git-for-windows/build-extra /usr/src/build-extra
+ - name: Prepare home directory for code-signing
+ env:
+ CODESIGN_P12: ${{secrets.CODESIGN_P12}}
+ CODESIGN_PASS: ${{secrets.CODESIGN_PASS}}
+ if: env.CODESIGN_P12 != '' && env.CODESIGN_PASS != ''
+ shell: bash
+ run: |
+ mkdir -p home/.sig &&
+ echo -n "$CODESIGN_P12" | tr % '\n' | base64 -d >home/.sig/codesign.p12 &&
+ echo -n "$CODESIGN_PASS" >home/.sig/codesign.pass &&
+ git config --global alias.signtool '!sh "/usr/src/build-extra/signtool.sh"'
+ - name: Retarget auto-update to microsoft/git
+ shell: bash
+ run: |
+ set -x
+
+ b=/usr/src/build-extra &&
+
+ filename=$b/git-update-git-for-windows.config
+ tr % '\t' >$filename <<-\EOF &&
+ [update]
+ %fromFork = microsoft/git
+ EOF
+
+ sed -i -e '/^#include "file-list.iss"/a\
+ Source: {#SourcePath}\\..\\git-update-git-for-windows.config; DestDir: {app}\\mingw64\\bin; Flags: replacesameversion; AfterInstall: DeleteFromVirtualStore' \
+ -e '/^Type: dirifempty; Name: {app}\\{#MINGW_BITNESS}$/i\
+ Type: files; Name: {app}\\{#MINGW_BITNESS}\\bin\\git-update-git-for-windows.config\
+ Type: dirifempty; Name: {app}\\{#MINGW_BITNESS}\\bin' \
+ $b/installer/install.iss
+ - name: Set alerts to continue until upgrade is taken
+ shell: bash
+ run: |
+ set -x
+
+ b=/mingw64/bin &&
+
+ sed -i -e '6 a use_recently_seen=no' \
+ $b/git-update-git-for-windows
+ - name: Set the installer Publisher to the Git Fundamentals team
+ shell: bash
+ run: |
+ b=/usr/src/build-extra &&
+ sed -i -e 's/^\(AppPublisher=\).*/\1The Git Fundamentals Team at GitHub/' $b/installer/install.iss
+ - name: Let the installer configure Visual Studio to use the installed Git
+ shell: bash
+ run: |
+ set -x
+
+ b=/usr/src/build-extra &&
+
+ sed -i -e '/^ *InstallAutoUpdater();$/a\
+ CustomPostInstall();' \
+ -e '/^ *UninstallAutoUpdater();$/a\
+ CustomPostUninstall();' \
+ $b/installer/install.iss &&
+
+ cat >>$b/installer/helpers.inc.iss <<\EOF
+
+ procedure CustomPostInstall();
+ begin
+ if not RegWriteStringValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\15.0\TeamFoundation\GitSourceControl','GitPath',ExpandConstant('{app}')) or
+ not RegWriteStringValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\16.0\TeamFoundation\GitSourceControl','GitPath',ExpandConstant('{app}')) or
+ not RegWriteStringValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\17.0\TeamFoundation\GitSourceControl','GitPath',ExpandConstant('{app}')) or
+ not RegWriteStringValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\18.0\TeamFoundation\GitSourceControl','GitPath',ExpandConstant('{app}')) or
+ not RegWriteStringValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\19.0\TeamFoundation\GitSourceControl','GitPath',ExpandConstant('{app}')) or
+ not RegWriteStringValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\20.0\TeamFoundation\GitSourceControl','GitPath',ExpandConstant('{app}')) then
+ LogError('Could not register TeamFoundation\GitSourceControl');
+ end;
+
+ procedure CustomPostUninstall();
+ begin
+ if not RegDeleteValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\15.0\TeamFoundation\GitSourceControl','GitPath') or
+ not RegDeleteValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\16.0\TeamFoundation\GitSourceControl','GitPath') or
+ not RegDeleteValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\17.0\TeamFoundation\GitSourceControl','GitPath') or
+ not RegDeleteValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\18.0\TeamFoundation\GitSourceControl','GitPath') or
+ not RegDeleteValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\19.0\TeamFoundation\GitSourceControl','GitPath') or
+ not RegDeleteValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\20.0\TeamFoundation\GitSourceControl','GitPath') then
+ LogError('Could not unregister TeamFoundation\GitSourceControl');
+ end;
+ EOF
+ - name: Enable Scalar/C and the auto-updater in the installer by default
+ shell: bash
+ run: |
+ set -x
+
+ b=/usr/src/build-extra &&
+
+ sed -i -e "/ChosenOptions:=''/a\\
+ if (ExpandConstant('{param:components|/}')='/') then begin\n\
+ WizardSelectComponents('autoupdate');\n\
+ #ifdef WITH_SCALAR\n\
+ WizardSelectComponents('scalar');\n\
+ #endif\n\
+ end;" $b/installer/install.iss
+ - name: Build 64-bit ${{matrix.artifact.name}}
+ shell: bash
+ run: |
+ set -x
+
+ # Copy the PDB archive to the directory where `--include-pdbs` expects it
+ b=/usr/src/build-extra &&
+ mkdir -p $b/cached-source-packages &&
+ cp pkg-x86_64/*-pdb* $b/cached-source-packages/ &&
+
+ # Build the installer, embedding PDBs
+ eval $b/please.sh make_installers_from_mingw_w64_git --include-pdbs \
+ --version=${{ needs.prereqs.outputs.tag_version }} \
+ -o artifacts --${{matrix.artifact.name}} \
+ --pkg=pkg-x86_64/mingw-w64-x86_64-git-[0-9]*.tar.xz \
+ --pkg=pkg-x86_64/mingw-w64-x86_64-git-doc-html-[0-9]*.tar.xz &&
+
+ if test portable = '${{matrix.artifact.name}}' && test -n "$(git config alias.signtool)"
+ then
+ git signtool artifacts/PortableGit-*.exe
+ fi &&
+ openssl dgst -sha256 artifacts/${{matrix.artifact.fileprefix}}-*.exe | sed "s/.* //" >artifacts/sha-256.txt
+ - name: Verify that .exe files are code-signed
+ if: env.CODESIGN_P12 != '' && env.CODESIGN_PASS != ''
+ shell: bash
+ run: |
+ PATH=$PATH:"/c/Program Files (x86)/Windows Kits/10/App Certification Kit/" \
+ signtool verify //pa artifacts/${{matrix.artifact.fileprefix}}-*.exe
+ - name: Publish ${{matrix.artifact.name}}-x86_64
+ uses: actions/upload-artifact@v3
+ with:
+ name: win-${{matrix.artifact.name}}-x86_64
+ path: artifacts
+ # End build Windows installers
+
+ # Build and sign Mac OSX installers & upload artifacts
+ osx_build:
+ runs-on: macos-latest
+ needs: prereqs
+ env:
+ # `gettext` is keg-only
+ LDFLAGS: -L/usr/local/opt/gettext/lib
+ CFLAGS: -I/usr/local/opt/gettext/include
+ # To make use of the catalogs...
+ XML_CATALOG_FILES: /usr/local/etc/xml/catalog
+ VERSION: "${{ needs.prereqs.outputs.tag_version }}"
+ steps:
+ - name: Check out repository
+ uses: actions/checkout@v3
+ with:
+ path: 'git'
+
+ - name: Install Git dependencies
+ run: |
+ set -x
+ brew install automake asciidoc xmlto docbook
+ brew link --force gettext
+
+ - name: Build payload
+ run: |
+ # Configure the environment
+ set -x
+ PATH=/usr/local/bin:$PATH
+ export CURL_LDFLAGS=$(curl-config --libs)
+
+ # Write to "version" file to force match with trigger payload version
+ echo "${{ needs.prereqs.outputs.tag_version }}" >>git/version
+ make -C git -j$(sysctl -n hw.physicalcpu) GIT-VERSION-FILE dist dist-doc
+
+ export GIT_BUILT_FROM_COMMIT=$(gunzip -c git/git-$VERSION.tar.gz | git get-tar-commit-id) ||
+ die "Could not determine commit for build"
+
+ # Extract tarballs
+ mkdir payload manpages
+ tar -xvf git/git-$VERSION.tar.gz -C payload
+ tar -xvf git/git-manpages-$VERSION.tar.gz -C manpages
+
+ # Lay out payload
+ make -C git/.github/macos-installer V=1 payload
+
+ # This step is necessary because we cannot use the $VERSION
+ # environment variable or the tag_version output from the prereqs
+ # job in the upload-artifact task.
+ mkdir -p build_artifacts
+ cp -R stage/git-intel-x86_64-$VERSION/ build_artifacts
+
+ # We keep a list of executable files because their executable bits are
+ # removed when they are zipped, and we need to re-add.
+ find build_artifacts -type f -a -perm -u=x >executable-files.txt
+
+ - name: Upload macOS artifacts
+ uses: actions/upload-artifact@v3
+ with:
+ name: tmp.osx-build
+ path: |
+ build_artifacts
+
+ - name: Upload list of executable files
+ uses: actions/upload-artifact@v3
+ with:
+ name: tmp.executable-files
+ path: |
+ executable-files.txt
+
+ osx_sign_payload:
+ # ESRP service requires signing to run on Windows
+ runs-on: windows-latest
+ environment: release
+ needs: osx_build
+ steps:
+ - name: Check out repository
+ uses: actions/checkout@v3
+ with:
+ path: 'git'
+
+      - name: Download unsigned build artifacts
+ uses: actions/download-artifact@v3
+ with:
+ name: tmp.osx-build
+ path: build_artifacts
+
+ - name: Zip unsigned build artifacts
+ shell: pwsh
+ run: |
+ Compress-Archive -Path build_artifacts build_artifacts/build_artifacts.zip
+ cd build_artifacts
+ Get-ChildItem -Exclude build_artifacts.zip | Remove-Item -Recurse -Force
+
+ - uses: azure/login@v1
+ with:
+ creds: ${{ secrets.AZURE_CREDENTIALS }}
+
+ - name: Set up ESRP client
+ shell: pwsh
+ env:
+ AZURE_VAULT: ${{ secrets.AZURE_VAULT }}
+ AUTH_CERT: ${{ secrets.AZURE_VAULT_AUTH_CERT_NAME }}
+ REQUEST_SIGNING_CERT: ${{ secrets.AZURE_VAULT_REQUEST_SIGNING_CERT_NAME }}
+ run: |
+ git\.github\scripts\set-up-esrp.ps1
+
+ - name: Run ESRP client
+ shell: pwsh
+ env:
+ AZURE_AAD_ID: ${{ secrets.AZURE_AAD_ID }}
+ APPLE_KEY_CODE: ${{ secrets.APPLE_KEY_CODE }}
+ APPLE_SIGNING_OP_CODE: ${{ secrets.APPLE_SIGNING_OPERATION_CODE }}
+ run: |
+ python git\.github\scripts\run-esrp-signing.py build_artifacts `
+ $env:APPLE_KEY_CODE $env:APPLE_SIGNING_OP_CODE `
+ --params 'Hardening' '--options=runtime'
+
+ - name: Unzip signed build artifacts
+ shell: pwsh
+ run: |
+ Expand-Archive signed/build_artifacts.zip -DestinationPath signed
+ Remove-Item signed/build_artifacts.zip
+
+ - name: Upload signed payload
+ uses: actions/upload-artifact@v3
+ with:
+ name: osx-signed-payload
+ path: |
+ signed
+
+ osx_pack:
+ runs-on: macos-latest
+ needs: [prereqs, osx_sign_payload]
+ steps:
+ - name: Check out repository
+ uses: actions/checkout@v3
+ with:
+ path: 'git'
+
+ - name: Download signed artifacts
+ uses: actions/download-artifact@v3
+ with:
+ name: osx-signed-payload
+
+ - name: Download list of executable files
+ uses: actions/download-artifact@v3
+ with:
+ name: tmp.executable-files
+
+ - name: Build macOS pkg
+ env:
+ VERSION: "${{ needs.prereqs.outputs.tag_version }}"
+ run: |
+ # Install findutils to use gxargs below
+ brew install findutils
+
+ # Configure the environment
+ export CURL_LDFLAGS=$(curl-config --libs)
+
+ # Add executable bits and move build_artifacts into
+ # the same directory as Makefile (so that executable bits
+ # will be recognized).
+ gxargs -r -d '\n' chmod a+x &2
+ exit 1
+ }
+
+ echo "${{ needs.prereqs.outputs.tag_version }}" >>git/version
+ make -C git GIT-VERSION-FILE
+
+ VERSION="${{ needs.prereqs.outputs.tag_version }}"
+
+ ARCH="$(dpkg-architecture -q DEB_HOST_ARCH)"
+ if test -z "$ARCH"; then
+ die "Could not determine host architecture!"
+ fi
+
+ PKGNAME="microsoft-git_$VERSION"
+ PKGDIR="$(dirname $(pwd))/$PKGNAME"
+
+ rm -rf "$PKGDIR"
+ mkdir -p "$PKGDIR"
+
+ DESTDIR="$PKGDIR" make -C git -j5 V=1 DEVELOPER=1 \
+ USE_LIBPCRE=1 \
+ NO_CROSS_DIRECTORY_HARDLINKS=1 \
+ ASCIIDOC8=1 ASCIIDOC_NO_ROFF=1 \
+ ASCIIDOC='TZ=UTC asciidoc' \
+ prefix=/usr/local \
+ gitexecdir=/usr/local/lib/git-core \
+ libexecdir=/usr/local/lib/git-core \
+ htmldir=/usr/local/share/doc/git/html \
+ install install-doc install-html
+
+ cd ..
+ mkdir "$PKGNAME/DEBIAN"
+
+ # Based on https://packages.ubuntu.com/xenial/vcs/git
+ cat >"$PKGNAME/DEBIAN/control" <
+ Description: Git client built from the https://github.com/microsoft/git repository,
+ specialized in supporting monorepo scenarios. Includes the Scalar CLI.
+ EOF
+
+ dpkg-deb --build "$PKGNAME"
+
+ mkdir $GITHUB_WORKSPACE/artifacts
+ mv "$PKGNAME.deb" $GITHUB_WORKSPACE/artifacts/
+ - name: Publish unsigned .deb package
+ uses: actions/upload-artifact@v3
+ with:
+ name: deb-package-unsigned
+ path: artifacts/
+ ubuntu_sign-artifacts:
+ runs-on: windows-latest # Must be run on Windows due to ESRP executable OS compatibility
+ environment: release
+ needs: [ubuntu_build, prereqs]
+ if: needs.prereqs.outputs.deb_signable == 'true'
+ env:
+ ARTIFACTS_DIR: artifacts
+ steps:
+ - name: Clone repository
+ uses: actions/checkout@v3
+ with:
+ path: 'git'
+ - name: Download unsigned packages
+ uses: actions/download-artifact@v3
+ with:
+ name: deb-package-unsigned
+ path: unsigned
+ - uses: azure/login@v1
+ with:
+ creds: ${{ secrets.AZURE_CREDENTIALS }}
+ - name: Set up ESRP client
+ shell: pwsh
+ env:
+ AZURE_VAULT: ${{ secrets.AZURE_VAULT }}
+ AUTH_CERT: ${{ secrets.AZURE_VAULT_AUTH_CERT_NAME }}
+ REQUEST_SIGNING_CERT: ${{ secrets.AZURE_VAULT_REQUEST_SIGNING_CERT_NAME }}
+ run: |
+ git\.github\scripts\set-up-esrp.ps1
+ - name: Sign package
+ shell: pwsh
+ env:
+ AZURE_AAD_ID: ${{ secrets.AZURE_AAD_ID }}
+ LINUX_KEY_CODE: ${{ secrets.LINUX_KEY_CODE }}
+ LINUX_OP_CODE: ${{ secrets.LINUX_OPERATION_CODE }}
+ run: |
+ python git\.github\scripts\run-esrp-signing.py unsigned $env:LINUX_KEY_CODE $env:LINUX_OP_CODE
+ - name: Upload signed artifact
+ uses: actions/upload-artifact@v3
+ with:
+ name: deb-package-signed
+ path: signed
+ # End build & sign Ubuntu package
+
+ # Validate installers
+ validate-installers:
+ name: Validate installers
+ strategy:
+ matrix:
+ component:
+ - os: ubuntu-latest
+ artifact: deb-package-signed
+ command: git
+ - os: macos-latest
+ artifact: osx-signed-pkg
+ command: git
+ - os: windows-latest
+ artifact: win-installer-x86_64
+ command: $PROGRAMFILES\Git\cmd\git.exe
+ runs-on: ${{ matrix.component.os }}
+ needs: [prereqs, windows_artifacts, osx_publish_dmg, ubuntu_sign-artifacts]
+ steps:
+ - name: Download artifacts
+ uses: actions/download-artifact@v3
+ with:
+ name: ${{ matrix.component.artifact }}
+
+ - name: Install Windows
+ if: contains(matrix.component.os, 'windows')
+ shell: pwsh
+ run: |
+ $exePath = Get-ChildItem -Path ./*.exe | %{$_.FullName}
+ Start-Process -Wait -FilePath "$exePath" -ArgumentList "/SILENT /VERYSILENT /NORESTART /SUPPRESSMSGBOXES /ALLOWDOWNGRADE=1"
+
+ - name: Install Linux
+ if: contains(matrix.component.os, 'ubuntu')
+ run: |
+ debpath=$(find ./*.deb)
+ sudo apt install $debpath
+
+ - name: Install macOS
+ if: contains(matrix.component.os, 'macos')
+ run: |
+ pkgpath=$(find ./*.pkg)
+ sudo installer -pkg $pkgpath -target /
+
+ - name: Validate
+ shell: bash
+ run: |
+ "${{ matrix.component.command }}" --version | sed 's/git version //' >actual
+ echo ${{ needs.prereqs.outputs.tag_version }} >expect
+ cmp expect actual || exit 1
+ # End validate installers
+
+ create-github-release:
+ runs-on: ubuntu-latest
+ needs: [validate-installers]
+ if: |
+ success() ||
+ (needs.ubuntu_sign-artifacts.result == 'skipped' &&
+ needs.osx_publish_dmg.result == 'success' &&
+ needs.windows_artifacts.result == 'success')
+ steps:
+ - name: Download Windows portable installer
+ uses: actions/download-artifact@v3
+ with:
+ name: win-portable-x86_64
+ path: win-portable-x86_64
+ - name: Download Windows x86_64 installer
+ uses: actions/download-artifact@v3
+ with:
+ name: win-installer-x86_64
+ path: win-installer-x86_64
+ - name: Download Mac dmg
+ uses: actions/download-artifact@v3
+ with:
+ name: osx-dmg
+ path: osx-dmg
+ - name: Download Mac pkg
+ uses: actions/download-artifact@v3
+ with:
+ name: osx-signed-pkg
+ path: osx-pkg
+ - name: Download Ubuntu package (signed)
+ if: needs.prereqs.outputs.deb_signable == 'true'
+ uses: actions/download-artifact@v3
+ with:
+ name: deb-package-signed
+ path: deb-package
+ - name: Download Ubuntu package (unsigned)
+ if: needs.prereqs.outputs.deb_signable != 'true'
+ uses: actions/download-artifact@v3
+ with:
+ name: deb-package-unsigned
+ path: deb-package
+ - uses: actions/github-script@v6
+ with:
+ script: |
+ const fs = require('fs');
+ const path = require('path');
+
+ var releaseMetadata = {
+ owner: context.repo.owner,
+ repo: context.repo.repo
+ };
+
+ // Create the release
+ var tagName = "${{ needs.prereqs.outputs.tag_name }}";
+ var createdRelease = await github.rest.repos.createRelease({
+ ...releaseMetadata,
+ draft: true,
+ tag_name: tagName,
+ name: tagName
+ });
+ releaseMetadata.release_id = createdRelease.data.id;
+
+ // Uploads contents of directory to the release created above
+ async function uploadDirectoryToRelease(directory, includeExtensions=[]) {
+ return fs.promises.readdir(directory)
+ .then(async(files) => Promise.all(
+ files.filter(file => {
+ return includeExtensions.length==0 || includeExtensions.includes(path.extname(file).toLowerCase());
+ })
+ .map(async (file) => {
+ var filePath = path.join(directory, file);
+ github.rest.repos.uploadReleaseAsset({
+ ...releaseMetadata,
+ name: file,
+ headers: {
+ "content-length": (await fs.promises.stat(filePath)).size
+ },
+ data: fs.createReadStream(filePath)
+ });
+ }))
+ );
+ }
+
+ await Promise.all([
+ // Upload Windows artifacts
+ uploadDirectoryToRelease('win-installer-x86_64', ['.exe']),
+ uploadDirectoryToRelease('win-portable-x86_64', ['.exe']),
+
+ // Upload Mac artifacts
+ uploadDirectoryToRelease('osx-dmg'),
+ uploadDirectoryToRelease('osx-pkg'),
+
+ // Upload Ubuntu artifacts
+ uploadDirectoryToRelease('deb-package')
+ ]);
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 9e214a16d986cd..a09456afce5be5 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -147,7 +147,7 @@ jobs:
vs-build:
name: win+VS build
needs: ci-config
- if: github.event.repository.owner.login == 'git-for-windows' && needs.ci-config.outputs.enabled == 'yes'
+ if: github.event.repository.owner.login == 'microsoft' && needs.ci-config.outputs.enabled == 'yes'
env:
NO_PERL: 1
GIT_CONFIG_PARAMETERS: "'user.name=CI' 'user.email=ci@git'"
diff --git a/.github/workflows/monitor-components.yml b/.github/workflows/monitor-components.yml
deleted file mode 100644
index 73967950eb7a09..00000000000000
--- a/.github/workflows/monitor-components.yml
+++ /dev/null
@@ -1,93 +0,0 @@
-name: Monitor component updates
-
-# Git for Windows is a slightly modified subset of MSYS2. Some of its
-# components are maintained by Git for Windows, others by MSYS2. To help
-# keeping the former up to date, this workflow monitors the Atom/RSS feeds
-# and opens new tickets for each new component version.
-
-on:
- schedule:
- - cron: "23 8,11,14,17 * * *"
- workflow_dispatch:
-
-env:
- CHARACTER_LIMIT: 5000
- MAX_AGE: 7d
-
-jobs:
- job:
- # Only run this in Git for Windows' fork
- if: github.event.repository.owner.login == 'git-for-windows'
- runs-on: ubuntu-latest
- permissions:
- issues: write
- strategy:
- matrix:
- component:
- - label: git
- feed: https://github.com/git/git/tags.atom
- - label: git-lfs
- feed: https://github.com/git-lfs/git-lfs/tags.atom
- - label: git-credential-manager
- feed: https://github.com/git-ecosystem/git-credential-manager/tags.atom
- - label: tig
- feed: https://github.com/jonas/tig/tags.atom
- - label: cygwin
- feed: https://github.com/cygwin/cygwin/releases.atom
- title-pattern: ^(?!.*newlib)
- - label: msys2-runtime-package
- feed: https://github.com/msys2/MSYS2-packages/commits/master/msys2-runtime.atom
- - label: msys2-runtime
- feed: https://github.com/msys2/msys2-runtime/commits/HEAD.atom
- aggregate: true
- - label: openssh
- feed: https://github.com/openssh/openssh-portable/tags.atom
- - label: libfido2
- feed: https://github.com/Yubico/libfido2/tags.atom
- - label: libcbor
- feed: https://github.com/PJK/libcbor/tags.atom
- - label: openssl
- feed: https://github.com/openssl/openssl/tags.atom
- title-pattern: ^(?!.*alpha)
- - label: gnutls
- feed: https://gnutls.org/news.atom
- - label: heimdal
- feed: https://github.com/heimdal/heimdal/tags.atom
- - label: git-sizer
- feed: https://github.com/github/git-sizer/tags.atom
- - label: gitflow
- feed: https://github.com/petervanderdoes/gitflow-avh/tags.atom
- - label: curl
- feed: https://github.com/curl/curl/tags.atom
- - label: libgpg-error
- feed: https://github.com/gpg/libgpg-error/releases.atom
- title-pattern: ^libgpg-error-[0-9\.]*$
- - label: libgcrypt
- feed: https://github.com/gpg/libgcrypt/releases.atom
- title-pattern: ^libgcrypt-[0-9\.]*$
- - label: gpg
- feed: https://github.com/gpg/gnupg/releases.atom
- - label: mintty
- feed: https://github.com/mintty/mintty/releases.atom
- - label: p7zip
- feed: https://sourceforge.net/projects/p7zip/rss?path=/p7zip
- - label: bash
- feed: https://git.savannah.gnu.org/cgit/bash.git/atom/?h=master
- aggregate: true
- - label: perl
- feed: https://github.com/Perl/perl5/tags.atom
- title-pattern: ^(?!.*(5\.[0-9]+[13579]|RC))
- - label: pcre2
- feed: https://github.com/PCRE2Project/pcre2/tags.atom
- fail-fast: false
- steps:
- - uses: git-for-windows/rss-to-issues@v0
- with:
- feed: ${{matrix.component.feed}}
- prefix: "[New ${{matrix.component.label}} version]"
- labels: component-update
- github-token: ${{ secrets.GITHUB_TOKEN }}
- character-limit: ${{ env.CHARACTER_LIMIT }}
- max-age: ${{ env.MAX_AGE }}
- aggregate: ${{matrix.component.aggregate}}
- title-pattern: ${{matrix.component.title-pattern}}
diff --git a/.github/workflows/release-homebrew.yml b/.github/workflows/release-homebrew.yml
new file mode 100644
index 00000000000000..e2a2634ff60c97
--- /dev/null
+++ b/.github/workflows/release-homebrew.yml
@@ -0,0 +1,31 @@
+name: Update Homebrew Tap
+on:
+ release:
+ types: [released]
+
+jobs:
+ release:
+ runs-on: ubuntu-latest
+ environment: release
+ steps:
+ - id: version
+ name: Compute version number
+ run: |
+ echo "result=$(echo $GITHUB_REF | sed -e "s/^refs\/tags\/v//")" >>$GITHUB_OUTPUT
+ - id: hash
+ name: Compute release asset hash
+ uses: mjcheetham/asset-hash@v1.1
+ with:
+ asset: /git-(.*)\.pkg/
+ hash: sha256
+ token: ${{ secrets.GITHUB_TOKEN }}
+ - name: Update scalar Cask
+ uses: mjcheetham/update-homebrew@v1.3
+ with:
+ token: ${{ secrets.HOMEBREW_TOKEN }}
+ tap: microsoft/git
+ name: microsoft-git
+ type: cask
+ version: ${{ steps.version.outputs.result }}
+ sha256: ${{ steps.hash.outputs.result }}
+ alwaysUsePullRequest: false
diff --git a/.github/workflows/release-winget.yml b/.github/workflows/release-winget.yml
new file mode 100644
index 00000000000000..61010a5ce65abb
--- /dev/null
+++ b/.github/workflows/release-winget.yml
@@ -0,0 +1,41 @@
+name: "release-winget"
+on:
+ release:
+ types: [released]
+
+ workflow_dispatch:
+ inputs:
+ release:
+ description: 'Release Id'
+ required: true
+ default: 'latest'
+
+jobs:
+ release:
+ runs-on: windows-latest
+ environment: release
+ steps:
+ - name: Publish manifest with winget-create
+ run: |
+ # Get correct release asset
+ $github = Get-Content '${{ github.event_path }}' | ConvertFrom-Json
+ $asset = $github.release.assets | Where-Object -Property name -match '64-bit.exe$'
+
+ # Remove 'v' and 'vfs' from the version
+ $github.release.tag_name -match '\d.*'
+ $version = $Matches[0] -replace ".vfs",""
+
+ # Download wingetcreate and create manifests
+ Invoke-WebRequest https://aka.ms/wingetcreate/latest -OutFile wingetcreate.exe
+ .\wingetcreate.exe update Microsoft.Git -u $asset.browser_download_url -v $version -o manifests
+
+ # Manually substitute the name of the default branch in the License
+ # and Copyright URLs since the tooling cannot do that for us.
+ $shortenedVersion = $version -replace ".{4}$"
+ $manifestPath = dir -Path ./manifests -Filter Microsoft.Git.locale.en-US.yaml -Recurse | %{$_.FullName}
+ sed -i "s/vfs-[.0-9]*/vfs-$shortenedVersion/g" "$manifestPath"
+
+ # Submit manifests
+ $manifestDirectory = Split-Path "$manifestPath"
+ .\wingetcreate.exe submit -t "${{ secrets.WINGET_TOKEN }}" $manifestDirectory
+ shell: powershell
diff --git a/.github/workflows/scalar-functional-tests.yml b/.github/workflows/scalar-functional-tests.yml
new file mode 100644
index 00000000000000..7226054aebbcec
--- /dev/null
+++ b/.github/workflows/scalar-functional-tests.yml
@@ -0,0 +1,220 @@
+name: Scalar Functional Tests
+
+env:
+ SCALAR_REPOSITORY: microsoft/scalar
+ SCALAR_REF: main
+ DEBUG_WITH_TMATE: false
+ SCALAR_TEST_SKIP_VSTS_INFO: true
+
+on:
+ push:
+ branches: [ vfs-*, tentative/vfs-* ]
+ pull_request:
+ branches: [ vfs-*, features/* ]
+
+jobs:
+ scalar:
+ name: "Scalar Functional Tests"
+
+ strategy:
+ fail-fast: false
+ matrix:
+ # Order by runtime (in descending order)
+ os: [windows-2019, macos-11, ubuntu-20.04, ubuntu-22.04]
+ # Scalar.NET used to be tested using `features: [false, experimental]`
+ # But currently, Scalar/C ignores `feature.scalar` altogether, so let's
+ # save some electrons and run only one of them...
+ features: [ignored]
+ exclude:
+ # The built-in FSMonitor is not (yet) supported on Linux
+ - os: ubuntu-20.04
+ features: experimental
+ - os: ubuntu-22.04
+ features: experimental
+ runs-on: ${{ matrix.os }}
+
+ env:
+ BUILD_FRAGMENT: bin/Release/netcoreapp3.1
+ GIT_FORCE_UNTRACKED_CACHE: 1
+
+ steps:
+ - name: Check out Git's source code
+ uses: actions/checkout@v3
+
+ - name: Setup build tools on Windows
+ if: runner.os == 'Windows'
+ uses: git-for-windows/setup-git-for-windows-sdk@v1
+
+ - name: Provide a minimal `install` on Windows
+ if: runner.os == 'Windows'
+ shell: bash
+ run: |
+ test -x /usr/bin/install ||
+ tr % '\t' >/usr/bin/install <<-\EOF
+ #!/bin/sh
+
+ cmd=cp
+ while test $# != 0
+ do
+ %case "$1" in
+ %-d) cmd="mkdir -p";;
+ %-m) shift;; # ignore mode
+ %*) break;;
+ %esac
+ %shift
+ done
+
+ exec $cmd "$@"
+ EOF
+
+ - name: Install build dependencies for Git (Linux)
+ if: runner.os == 'Linux'
+ run: |
+ sudo apt-get update
+ sudo apt-get -q -y install libssl-dev libcurl4-openssl-dev gettext
+
+ - name: Build and install Git
+ shell: bash
+ env:
+ NO_TCLTK: Yup
+ run: |
+ # We do require a VFS version
+ def_ver="$(sed -n 's/DEF_VER=\(.*vfs.*\)/\1/p' GIT-VERSION-GEN)"
+ test -n "$def_ver"
+
+ # Ensure that `git version` reflects DEF_VER
+ case "$(git describe --match "v[0-9]*vfs*" HEAD)" in
+ ${def_ver%%.vfs.*}.vfs.*) ;; # okay, we can use this
+ *) git -c user.name=ci -c user.email=ci@github tag -m for-testing ${def_ver}.NNN.g$(git rev-parse --short HEAD);;
+ esac
+
+ SUDO=
+ extra=
+ case "${{ runner.os }}" in
+ Windows)
+ extra=DESTDIR=/c/Progra~1/Git
+ cygpath -aw "/c/Program Files/Git/cmd" >>$GITHUB_PATH
+ ;;
+ Linux)
+ SUDO=sudo
+ extra=prefix=/usr
+ ;;
+ macOS)
+ SUDO=sudo
+ extra=prefix=/usr/local
+ ;;
+ esac
+
+ $SUDO make -j5 $extra install
+
+ - name: Ensure that we use the built Git and Scalar
+ shell: bash
+ run: |
+ type -p git
+ git version
+ case "$(git version)" in *.vfs.*) echo Good;; *) exit 1;; esac
+ type -p scalar
+ scalar version
+ case "$(scalar version 2>&1)" in *.vfs.*) echo Good;; *) exit 1;; esac
+
+ - name: Check out Scalar's source code
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0 # Indicate full history so Nerdbank.GitVersioning works.
+ path: scalar
+ repository: ${{ env.SCALAR_REPOSITORY }}
+ ref: ${{ env.SCALAR_REF }}
+
+ - name: Setup .NET Core
+ uses: actions/setup-dotnet@v3
+ with:
+ dotnet-version: '3.1.x'
+
+ - name: Install dependencies
+ run: dotnet restore
+ working-directory: scalar
+ env:
+ DOTNET_NOLOGO: 1
+
+ - name: Build
+ working-directory: scalar
+ run: dotnet build --configuration Release --no-restore -p:UseAppHost=true # Force generation of executable on macOS.
+
+ - name: Setup platform (Linux)
+ if: runner.os == 'Linux'
+ run: |
+ echo "BUILD_PLATFORM=${{ runner.os }}" >>$GITHUB_ENV
+ echo "TRACE2_BASENAME=Trace2.${{ github.run_id }}__${{ github.run_number }}__${{ matrix.os }}__${{ matrix.features }}" >>$GITHUB_ENV
+
+ - name: Setup platform (Mac)
+ if: runner.os == 'macOS'
+ run: |
+ echo 'BUILD_PLATFORM=Mac' >>$GITHUB_ENV
+ echo "TRACE2_BASENAME=Trace2.${{ github.run_id }}__${{ github.run_number }}__${{ matrix.os }}__${{ matrix.features }}" >>$GITHUB_ENV
+
+ - name: Setup platform (Windows)
+ if: runner.os == 'Windows'
+ run: |
+ echo "BUILD_PLATFORM=${{ runner.os }}" >>$env:GITHUB_ENV
+ echo 'BUILD_FILE_EXT=.exe' >>$env:GITHUB_ENV
+ echo "TRACE2_BASENAME=Trace2.${{ github.run_id }}__${{ github.run_number }}__${{ matrix.os }}__${{ matrix.features }}" >>$env:GITHUB_ENV
+
+ - name: Configure feature.scalar
+ run: git config --global feature.scalar ${{ matrix.features }}
+
+ - id: functional_test
+ name: Functional test
+ timeout-minutes: 60
+ working-directory: scalar
+ shell: bash
+ run: |
+ export GIT_TRACE2_EVENT="$PWD/$TRACE2_BASENAME/Event"
+ export GIT_TRACE2_PERF="$PWD/$TRACE2_BASENAME/Perf"
+ export GIT_TRACE2_EVENT_BRIEF=true
+ export GIT_TRACE2_PERF_BRIEF=true
+ mkdir -p "$TRACE2_BASENAME"
+ mkdir -p "$TRACE2_BASENAME/Event"
+ mkdir -p "$TRACE2_BASENAME/Perf"
+ git version --build-options
+ cd ../out
+ Scalar.FunctionalTests/$BUILD_FRAGMENT/Scalar.FunctionalTests$BUILD_FILE_EXT --test-scalar-on-path --test-git-on-path --timeout=300000 --full-suite
+
+ - name: Force-stop FSMonitor daemons and Git processes (Windows)
+ if: runner.os == 'Windows' && (success() || failure())
+ shell: bash
+ run: |
+ set -x
+ wmic process get CommandLine,ExecutablePath,HandleCount,Name,ParentProcessID,ProcessID
+ wmic process where "CommandLine Like '%fsmonitor--daemon %run'" delete
+ wmic process where "ExecutablePath Like '%git.exe'" delete
+
+ - id: trace2_zip_unix
+ if: runner.os != 'Windows' && ( success() || failure() ) && ( steps.functional_test.conclusion == 'success' || steps.functional_test.conclusion == 'failure' )
+ name: Zip Trace2 Logs (Unix)
+ shell: bash
+ working-directory: scalar
+ run: zip -q -r $TRACE2_BASENAME.zip $TRACE2_BASENAME/
+
+ - id: trace2_zip_windows
+ if: runner.os == 'Windows' && ( success() || failure() ) && ( steps.functional_test.conclusion == 'success' || steps.functional_test.conclusion == 'failure' )
+ name: Zip Trace2 Logs (Windows)
+ working-directory: scalar
+ run: Compress-Archive -DestinationPath ${{ env.TRACE2_BASENAME }}.zip -Path ${{ env.TRACE2_BASENAME }}
+
+ - name: Archive Trace2 Logs
+ if: ( success() || failure() ) && ( steps.trace2_zip_unix.conclusion == 'success' || steps.trace2_zip_windows.conclusion == 'success' )
+ uses: actions/upload-artifact@v3
+ with:
+ name: ${{ env.TRACE2_BASENAME }}.zip
+ path: scalar/${{ env.TRACE2_BASENAME }}.zip
+ retention-days: 3
+
+ # The GitHub Action `action-tmate` allows developers to connect to the running agent
+ # using SSH (it will be a `tmux` session; on Windows agents it will be inside the MSYS2
+ # environment in `C:\msys64`, therefore it can be slightly tricky to interact with
+ # Git for Windows, which runs a slightly incompatible MSYS2 runtime).
+ - name: action-tmate
+ if: env.DEBUG_WITH_TMATE == 'true' && failure()
+ uses: mxschmitt/action-tmate@v3
+ with:
+ limit-access-to-actor: true
diff --git a/.gitignore b/.gitignore
index 53066da66bf925..16fa37a9211546 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,7 @@
/fuzz_corpora
/GIT-BUILD-DIR
/GIT-BUILD-OPTIONS
+/GIT-BUILT-FROM-COMMIT
/GIT-CFLAGS
/GIT-LDFLAGS
/GIT-PREFIX
@@ -73,6 +74,7 @@
/git-gc
/git-get-tar-commit-id
/git-grep
+/git-gvfs-helper
/git-hash-object
/git-help
/git-hook
@@ -169,6 +171,7 @@
/git-unpack-file
/git-unpack-objects
/git-update-index
+/git-update-microsoft-git
/git-update-ref
/git-update-server-info
/git-upload-archive
diff --git a/BRANCHES.md b/BRANCHES.md
new file mode 100644
index 00000000000000..364158375e7d55
--- /dev/null
+++ b/BRANCHES.md
@@ -0,0 +1,59 @@
+Branches used in this repo
+==========================
+
+This document explains the branching structure that we use in the VFSForGit repository, as well as the forking strategy that we have adopted for contributing.
+
+Repo Branches
+-------------
+
+1. `vfs-#`
+
+    These branches are used to track the specific versions that match Git for Windows releases, with the VFSForGit-specific patches applied on top. When a new version of Git for Windows is released, the VFSForGit patches will be rebased onto that Windows version and a new vfs-# branch will be created for pull requests to target.
+
+ #### Examples
+
+ ```
+ vfs-2.27.0
+ vfs-2.30.0
+ ```
+
+    The versions of Git for VFSForGit are based on the Git for Windows versions. v2.20.0.vfs.1 corresponds to v2.20.0.windows.1 with the VFSForGit-specific patches applied to that Windows version.
+
+2. `vfs-#-exp`
+
+ These branches are for releasing experimental features to early adopters. They
+ should contain everything within the corresponding `vfs-#` branch; if the base
+ branch updates, then merge into the `vfs-#-exp` branch as well.
+
+Tags
+----
+
+We are using annotated tags to build the version number for git. The build will look back through the commit history to find the first tag matching `v[0-9]*vfs*` and build the git version number using that tag.
+
+Full releases are of the form `v2.XX.Y.vfs.Z.W` where `v2.XX.Y` comes from the
+upstream version and `Z.W` are custom updates within our fork. Specifically,
+the `.Z` value represents the "compatibility level" with VFS for Git. Only
+increase this version when making a breaking change with a released version
+of VFS for Git. The `.W` version is used for minor updates between major
+versions.
+
+Experimental releases are of the form `v2.XX.Y.vfs.Z.W.exp`. The `.exp`
+suffix indicates that experimental features are available. The rest of the
+version string comes from the full release tag. These versions will only
+be made available as pre-releases on the releases page, never a full release.
+
+Forking
+-------
+
+A personal fork of this repository and a branch in that repository should be used for development.
+
+These branches should be based on the latest vfs-# branch. If you have work-in-progress pull requests based on a previous version branch when a new version branch is created, you will need to move your patches to the new branch so that they are included in the latest version.
+
+#### Example
+
+```
+git clone
+git remote add ms https://github.com/Microsoft/git.git
+git checkout -b my-changes ms/vfs-2.20.0 --no-track
+git push -fu origin HEAD
+```
diff --git a/Documentation/config.txt b/Documentation/config.txt
index ad7c6f9c87f7b1..bf07fe0addc550 100644
--- a/Documentation/config.txt
+++ b/Documentation/config.txt
@@ -441,6 +441,8 @@ include::config/gui.txt[]
include::config/guitool.txt[]
+include::config/gvfs.txt[]
+
include::config/help.txt[]
include::config/http.txt[]
diff --git a/Documentation/config/core.txt b/Documentation/config/core.txt
index b6740b750f03aa..6dbbafb2bf064c 100644
--- a/Documentation/config/core.txt
+++ b/Documentation/config/core.txt
@@ -111,6 +111,14 @@ Version 2 uses an opaque string so that the monitor can return
something that can be used to determine what files have changed
without race conditions.
+core.virtualFilesystem::
+ If set, the value of this variable is used as a command which
+ will identify all files and directories that are present in
+ the working directory. Git will only track and update files
+ listed in the virtual file system. Using the virtual file system
+	will supersede the sparse-checkout settings, which will be ignored.
+ See the "virtual file system" section of linkgit:githooks[5].
+
core.trustctime::
If false, the ctime differences between the index and the
working tree are ignored; useful when the inode change time
@@ -728,6 +736,55 @@ core.multiPackIndex::
single index. See linkgit:git-multi-pack-index[1] for more
information. Defaults to true.
+core.gvfs::
+ Enable the features needed for GVFS. This value can be set to true
+ to indicate all features should be turned on or the bit values listed
+ below can be used to turn on specific features.
++
+--
+ GVFS_SKIP_SHA_ON_INDEX::
+ Bit value 1
+ Disables the calculation of the sha when writing the index
+ GVFS_MISSING_OK::
+ Bit value 4
+ Normally git write-tree ensures that the objects referenced by the
+ directory exist in the object database. This option disables this check.
+ GVFS_NO_DELETE_OUTSIDE_SPARSECHECKOUT::
+ Bit value 8
+ When marking entries to remove from the index and the working
+ directory this option will take into account what the
+ skip-worktree bit was set to so that if the entry has the
+ skip-worktree bit set it will not be removed from the working
+ directory. This will allow virtualized working directories to
+ detect the change to HEAD and use the new commit tree to show
+ the files that are in the working directory.
+ GVFS_FETCH_SKIP_REACHABILITY_AND_UPLOADPACK::
+ Bit value 16
+ While performing a fetch with a virtual file system we know
+ that there will be missing objects and we don't want to download
+ them just because of the reachability of the commits. We also
+ don't want to download a pack file with commits, trees, and blobs
+ since these will be downloaded on demand. This flag will skip the
+ checks on the reachability of objects during a fetch as well as
+ the upload pack so that extraneous objects don't get downloaded.
+ GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS::
+ Bit value 64
+ With a virtual file system we only know the file size before any
+ CRLF or smudge/clean filters processing is done on the client.
+ To prevent file corruption due to truncation or expansion with
+ garbage at the end, these filters must not run when the file
+ is first accessed and brought down to the client. Git.exe can't
+ currently tell the first access vs subsequent accesses so this
+ flag just blocks them from occurring at all.
+ GVFS_PREFETCH_DURING_FETCH::
+ Bit value 128
+ While performing a `git fetch` command, use the gvfs-helper to
+ perform a "prefetch" of commits and trees.
+--
+
+core.useGvfsHelper::
+ TODO
+
core.sparseCheckout::
Enable "sparse checkout" feature. See linkgit:git-sparse-checkout[1]
for more information.
@@ -755,3 +812,12 @@ core.WSLCompat::
The default value is false. When set to true, Git will set the mode
bits of the file in the way of wsl, so that the executable flag of
files can be set or read correctly.
+
+core.configWriteLockTimeoutMS::
+ When processes try to write to the config concurrently, it is likely
+ that one process "wins" and the other process(es) fail to lock the
+ config file. By configuring a timeout larger than zero, Git can be
+ told to try to lock the config again a couple times within the
+	specified timeout. If the timeout is configured to zero (which is the
+ default), Git will fail immediately when the config is already
+ locked.
diff --git a/Documentation/config/credential.txt b/Documentation/config/credential.txt
index 512f31876e17ed..0ed570ff979629 100644
--- a/Documentation/config/credential.txt
+++ b/Documentation/config/credential.txt
@@ -9,6 +9,14 @@ credential.helper::
Note that multiple helpers may be defined. See linkgit:gitcredentials[7]
for details and examples.
+credential.interactive::
+ By default, Git and any configured credential helpers will ask for
+ user input when new credentials are required. Many of these helpers
+ will succeed based on stored credentials if those credentials are
+ still valid. To avoid the possibility of user interactivity from
+ Git, set `credential.interactive=false`. Some credential helpers
+ respect this option as well.
+
credential.useHttpPath::
When acquiring credentials, consider the "path" component of an http
or https URL to be important. Defaults to false. See
diff --git a/Documentation/config/gvfs.txt b/Documentation/config/gvfs.txt
new file mode 100644
index 00000000000000..6ab221ded36c91
--- /dev/null
+++ b/Documentation/config/gvfs.txt
@@ -0,0 +1,5 @@
+gvfs.cache-server::
+ TODO
+
+gvfs.sharedcache::
+ TODO
diff --git a/Documentation/config/index.txt b/Documentation/config/index.txt
index 23c7985eb40974..404e7bd37f80f3 100644
--- a/Documentation/config/index.txt
+++ b/Documentation/config/index.txt
@@ -1,3 +1,9 @@
+index.deleteSparseDirectories::
+ When enabled, the cone mode sparse-checkout feature will delete
+ directories that are outside of the sparse-checkout cone, unless
+ such a directory contains an untracked, non-ignored file. Defaults
+ to true.
+
index.recordEndOfIndexEntries::
Specifies whether the index file should include an "End Of Index
Entry" section. This reduces index load time on multiprocessor
diff --git a/Documentation/config/status.txt b/Documentation/config/status.txt
index 0fc704ab80b223..af043d7e26f269 100644
--- a/Documentation/config/status.txt
+++ b/Documentation/config/status.txt
@@ -75,3 +75,25 @@ status.submoduleSummary::
the --ignore-submodules=dirty command-line option or the 'git
submodule summary' command, which shows a similar output but does
not honor these settings.
+
+status.deserializePath::
+ EXPERIMENTAL, Pathname to a file containing cached status results
+ generated by `--serialize`. This will be overridden by
+ `--deserialize=` on the command line. If the cache file is
+ invalid or stale, git will fall-back and compute status normally.
+
+status.deserializeWait::
+ EXPERIMENTAL, Specifies what `git status --deserialize` should do
+ if the serialization cache file is stale and whether it should
+ fall-back and compute status normally. This will be overridden by
+	`--deserialize-wait=<value>` on the command line.
++
+--
+* `fail` - cause git to exit with an error when the status cache file
+is stale; this is intended for testing and debugging.
+* `block` - cause git to spin and periodically retry the cache file
+every 100 ms; this is intended to help coordinate with another git
+instance concurrently computing the cache file.
+* `no` - to immediately fall-back if cache file is stale. This is the default.
+* `<timeout>` - time (in tenths of a second) to spin and retry.
+--
diff --git a/Documentation/git-status.txt b/Documentation/git-status.txt
index a051b1e8f383ab..38b15dad6c686e 100644
--- a/Documentation/git-status.txt
+++ b/Documentation/git-status.txt
@@ -149,6 +149,21 @@ ignored, then the directory is not shown, but all contents are shown.
threshold.
See also linkgit:git-diff[1] `--find-renames`.
+--serialize[=<path>]::
+ (EXPERIMENTAL) Serialize raw status results to a file or stdout
+ in a format suitable for use by `--deserialize`. If a path is
+ given, serialize data will be written to that path *and* normal
+ status output will be written to stdout. If path is omitted,
+ only binary serialization data will be written to stdout.
+
+--deserialize[=<path>]::
+	(EXPERIMENTAL) Deserialize raw status results from a file or
+	stdin rather than scanning the worktree. If `<path>` is omitted
+	and `status.deserializePath` is unset, input is read from stdin.
+--no-deserialize::
+ (EXPERIMENTAL) Disable implicit deserialization of status results
+ from the value of `status.deserializePath`.
+
...::
See the 'pathspec' entry in linkgit:gitglossary[7].
@@ -421,6 +436,26 @@ quoted as explained for the configuration variable `core.quotePath`
(see linkgit:git-config[1]).
+SERIALIZATION and DESERIALIZATION (EXPERIMENTAL)
+------------------------------------------------
+
+The `--serialize` option allows git to cache the result of a
+possibly time-consuming status scan to a binary file. A local
+service/daemon watching file system events could use this to
+periodically pre-compute a fresh status result.
+
+Interactive users could then use `--deserialize` to simply
+(and immediately) print the last-known-good result without
+waiting for the status scan.
+
+The binary serialization file format includes some worktree state
+information allowing `--deserialize` to reject the cached data
+and force a normal status scan if, for example, the commit, branch,
+or status modes/options change. The format cannot, however, indicate
+when the cached data is otherwise stale -- that coordination belongs
+to the task driving the serializations.
+
+
CONFIGURATION
-------------
diff --git a/Documentation/git-update-microsoft-git.txt b/Documentation/git-update-microsoft-git.txt
new file mode 100644
index 00000000000000..724bfc172f8ab7
--- /dev/null
+++ b/Documentation/git-update-microsoft-git.txt
@@ -0,0 +1,24 @@
+git-update-microsoft-git(1)
+===========================
+
+NAME
+----
+git-update-microsoft-git - Update the installed version of Git
+
+
+SYNOPSIS
+--------
+[verse]
+'git update-microsoft-git'
+
+DESCRIPTION
+-----------
+This version of Git is based on the Microsoft fork of Git, which
+has custom capabilities focused on supporting monorepos. This
+command checks for the latest release of that fork and installs
+it on your machine.
+
+
+GIT
+---
+Part of the linkgit:git[1] suite
diff --git a/Documentation/githooks.txt b/Documentation/githooks.txt
index 86f804720ae71f..b6eb60a758af92 100644
--- a/Documentation/githooks.txt
+++ b/Documentation/githooks.txt
@@ -751,6 +751,26 @@ and "0" meaning they were not.
Only one parameter should be set to "1" when the hook runs. The hook
running passing "1", "1" should not be possible.
+virtualFilesystem
+~~~~~~~~~~~~~~~~~~
+
+"Virtual File System" allows populating the working directory sparsely.
+The projection data is typically automatically generated by an external
+process. Git will limit what files it checks for changes as well as which
+directories are checked for untracked files based on the path names given.
+Git will also only update those files listed in the projection.
+
+The hook is invoked when the configuration option core.virtualFilesystem
+is set. It takes one argument, a version (currently 1).
+
+The hook should output to stdout the list of all files in the working
+directory that git should track. The paths are relative to the root
+of the working directory and are separated by a single NUL. Full paths
+('dir1/a.txt') as well as directories are supported (i.e. 'dir1/').
+
+The exit status determines whether git will use the data from the
+hook. On error, git will abort the command with an error message.
+
SEE ALSO
--------
linkgit:git-hook[1]
diff --git a/Documentation/scalar.txt b/Documentation/scalar.txt
index f33436c7f65ff9..35f67801e92a98 100644
--- a/Documentation/scalar.txt
+++ b/Documentation/scalar.txt
@@ -8,7 +8,9 @@ scalar - A tool for managing large Git repositories
SYNOPSIS
--------
[verse]
-scalar clone [--single-branch] [--branch <main-branch>] [--full-clone] [<enlistment>]
+scalar clone [--single-branch] [--branch <main-branch>] [--full-clone]
+	[--local-cache-path <path>] [--cache-server-url <url>] [--[no-]src]
+	[<enlistment>]
scalar list
scalar register [<enlistment>]
scalar unregister [<enlistment>]
@@ -16,6 +18,7 @@ scalar run ( all | config | commit-graph | fetch | loose-objects | pack-files )
scalar reconfigure [ --all | <enlistment> ]
scalar diagnose [<enlistment>]
scalar delete <enlistment>
+scalar cache-server ( --get | --set <url> | --list [<remote>] ) [<enlistment>]
DESCRIPTION
-----------
@@ -80,10 +83,24 @@ remote-tracking branch for the branch this option was used for the initial
cloning. If the HEAD at the remote did not point at any branch when
`--single-branch` clone was made, no remote-tracking branch is created.
+--no-src::
+ Skip adding a `src` directory within the target enlistment.
+
--[no-]full-clone::
A sparse-checkout is initialized by default. This behavior can be
turned off via `--full-clone`.
+--local-cache-path <path>::
+ Override the path to the local cache root directory; Pre-fetched objects
+ are stored into a repository-dependent subdirectory of that path.
++
+The default is `<drive>:\.scalarCache` on Windows (on the same drive as the
+clone), and `~/.scalarCache` on macOS.
+
+--cache-server-url <url>::
+ Retrieve missing objects from the specified remote, which is expected to
+ understand the GVFS protocol.
+
List
~~~~
delete <enlistment>::
This subcommand lets you delete an existing Scalar enlistment from your
local file system, unregistering the repository.
+Cache-server
+~~~~~~~~~~~~
+
+cache-server ( --get | --set <url> | --list [<remote>] ) [<enlistment>]::
+ This command lets you query or set the GVFS-enabled cache server used
+ to fetch missing objects.
+
+--get::
+ This is the default command mode: query the currently-configured cache
+ server URL, if any.
+
+--list::
+ Access the `gvfs/info` endpoint of the specified remote (default:
+ `origin`) to figure out which cache servers are available, if any.
++
+In contrast to the `--get` command mode (which only accesses the local
+repository), this command mode triggers a request via the network that
+potentially requires authentication. If authentication is required, the
+configured credential helper is employed (see linkgit:git-credential[1]
+for details).
+
SEE ALSO
--------
linkgit:git-clone[1], linkgit:git-maintenance[1].
diff --git a/Documentation/technical/read-object-protocol.txt b/Documentation/technical/read-object-protocol.txt
new file mode 100644
index 00000000000000..a893b46e7c28a9
--- /dev/null
+++ b/Documentation/technical/read-object-protocol.txt
@@ -0,0 +1,102 @@
+Read Object Process
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The read-object process enables Git to read all missing blobs with a
+single process invocation for the entire life of a single Git command.
+This is achieved by using a packet format (pkt-line, see technical/
+protocol-common.txt) based protocol over standard input and standard
+output as follows. All packets, except for the "*CONTENT" packets and
+the "0000" flush packet, are considered text and therefore are
+terminated by a LF.
+
+Git starts the process when it encounters the first missing object that
+needs to be retrieved. After the process is started, Git sends a welcome
+message ("git-read-object-client"), a list of supported protocol version
+numbers, and a flush packet. Git expects to read a welcome response
+message ("git-read-object-server"), exactly one protocol version number
+from the previously sent list, and a flush packet. All further
+communication will be based on the selected version.
+
+The remaining protocol description below documents "version=1". Please
+note that "version=42" in the example below does not exist and is only
+there to illustrate how the protocol would look with more than one
+version.
+
+After the version negotiation Git sends a list of all capabilities that
+it supports and a flush packet. Git expects to read a list of desired
+capabilities, which must be a subset of the supported capabilities list,
+and a flush packet as response:
+------------------------
+packet: git> git-read-object-client
+packet: git> version=1
+packet: git> version=42
+packet: git> 0000
+packet: git< git-read-object-server
+packet: git< version=1
+packet: git< 0000
+packet: git> capability=get
+packet: git> capability=have
+packet: git> capability=put
+packet: git> capability=not-yet-invented
+packet: git> 0000
+packet: git< capability=get
+packet: git< 0000
+------------------------
+The only supported capability in version 1 is "get".
+
+Afterwards Git sends a list of "key=value" pairs terminated with a flush
+packet. The list will contain at least the command (based on the
+supported capabilities) and the sha1 of the object to retrieve. Please
+note that the process must not send any response before it has received the
+final flush packet.
+
+When the process receives the "get" command, it should make the requested
+object available in the git object store and then return success. Git will
+then check the object store again and this time find it and proceed.
+------------------------
+packet: git> command=get
+packet: git> sha1=0a214a649e1b3d5011e14a3dc227753f2bd2be05
+packet: git> 0000
+------------------------
+
+The process is expected to respond with a list of "key=value" pairs
+terminated with a flush packet. If the process does not experience
+problems then the list must contain a "success" status.
+------------------------
+packet: git< status=success
+packet: git< 0000
+------------------------
+
+In case the process cannot or does not want to process the content, it
+is expected to respond with an "error" status.
+------------------------
+packet: git< status=error
+packet: git< 0000
+------------------------
+
+In case the process cannot or does not want to process the content as
+well as any future content for the lifetime of the Git process, then it
+is expected to respond with an "abort" status at any point in the
+protocol.
+------------------------
+packet: git< status=abort
+packet: git< 0000
+------------------------
+
+Git neither stops nor restarts the process in case the "error"/"abort"
+status is set.
+
+If the process dies during the communication or does not adhere to the
+protocol then Git will stop the process and restart it with the next
+object that needs to be processed.
+
+After the read-object process has processed an object it is expected to
+wait for the next "key=value" list containing a command. Git will close
+the command pipe on exit. The process is expected to detect EOF and exit
+gracefully on its own. Git will wait until the process has stopped.
+
+A long running read-object process demo implementation can be found in
+`contrib/long-running-read-object/example.pl` located in the Git core
+repository. If you develop your own long running process then the
+`GIT_TRACE_PACKET` environment variable can be very helpful for
+debugging (see linkgit:git[1]).
diff --git a/Documentation/technical/status-serialization-format.txt b/Documentation/technical/status-serialization-format.txt
new file mode 100644
index 00000000000000..475ae814495581
--- /dev/null
+++ b/Documentation/technical/status-serialization-format.txt
@@ -0,0 +1,107 @@
+Git status serialization format
+===============================
+
+Git status serialization enables git to dump the results of a status scan
+to a binary file. This file can then be loaded by later status invocations
+to print the cached status results.
+
+The file contains the essential fields from:
+() the index
+() the "struct wt_status" for the overall results
+() the contents of "struct wt_status_change_data" for tracked changed files
+() the list of untracked and ignored files
+
+Version 1 Format:
+=================
+
+The V1 file begins with a required header section followed by optional
+sections for each type of item (changed, untracked, ignored). Individual
+item sections are only present if necessary. Each item section begins
+with an item-type header with the number of items in the section.
+
+Each "line" in the format is encoded using pkt-line with a final LF.
+Flush packets are used to terminate sections.
+
+-----------------
+PKT-LINE("version" SP "1")
+
+[]
+[]
+[]
+-----------------
+
+
+V1 Header
+---------
+
+The v1-header-section fields are taken directly from "struct wt_status".
+Each field is printed on a separate pkt-line. Lines for NULL string
+values are omitted. All integers are printed with "%d". OIDs are
+printed in hex.
+
+v1-header-section =
+
+ PKT-LINE()
+
+v1-index-headers = PKT-LINE("index_mtime" SP SP LF)
+
+v1-wt-status-headers = PKT-LINE("is_initial" SP LF)
+ [ PKT-LINE("branch" SP LF) ]
+ [ PKT-LINE("reference" SP LF) ]
+ PKT-LINE("show_ignored_files" SP LF)
+ PKT-LINE("show_untracked_files" SP LF)
+ PKT-LINE("show_ignored_directory" SP LF)
+ [ PKT-LINE("ignore_submodule_arg" SP LF) ]
+ PKT-LINE("detect_rename" SP LF)
+ PKT-LINE("rename_score" SP LF)
+ PKT-LINE("rename_limit" SP LF)
+ PKT-LINE("detect_break" SP LF)
+ PKT-LINE("sha1_commit" SP LF)
+ PKT-LINE("committable" SP LF)
+ PKT-LINE("workdir_dirty" SP LF)
+
+
+V1 Changed Items
+----------------
+
+The v1-changed-item-section lists all of the changed items with one
+item per pkt-line. Each pkt-line contains: a binary block of data
+from "struct wt_status_serialize_data_fixed" in a fixed header where
+integers are in network byte order and OIDs are in raw (non-hex) form.
+This is followed by one or two raw pathnames (not c-quoted) with NUL
+terminators (both NULs are always present even if there is no rename).
+
+v1-changed-item-section = PKT-LINE("changed" SP LF)
+ [ PKT-LINE( LF) ]+
+ PKT-LINE()
+
+changed_item =
+
+
+
+
+
+
+
+
+
+
+
+ NUL
+ [ ]
+ NUL
+
+
+V1 Untracked and Ignored Items
+------------------------------
+
+These sections are simple lists of pathnames. They ARE NOT
+c-quoted.
+
+v1-untracked-item-section = PKT-LINE("untracked" SP LF)
+ [ PKT-LINE( LF) ]+
+ PKT-LINE()
+
+v1-ignored-item-section = PKT-LINE("ignored" SP LF)
+ [ PKT-LINE( LF) ]+
+ PKT-LINE()
diff --git a/GIT-VERSION-GEN b/GIT-VERSION-GEN
index 2c8dae398f661b..88af5a9152e093 100755
--- a/GIT-VERSION-GEN
+++ b/GIT-VERSION-GEN
@@ -1,7 +1,7 @@
#!/bin/sh
GVF=GIT-VERSION-FILE
-DEF_VER=v2.42.0
+DEF_VER=v2.42.0.vfs.0.0
LF='
'
@@ -12,10 +12,15 @@ if test -f version
then
VN=$(cat version) || VN="$DEF_VER"
elif test -d ${GIT_DIR:-.git} -o -f .git &&
- VN=$(git describe --match "v[0-9]*" HEAD 2>/dev/null) &&
+ VN=$(git describe --match "v[0-9]*vfs*" HEAD 2>/dev/null) &&
case "$VN" in
*$LF*) (exit 1) ;;
v[0-9]*)
+ if test "${VN%%.vfs.*}" != "${DEF_VER%%.vfs.*}"
+ then
+ echo "Found version $VN, which is not based on $DEF_VER" >&2
+ exit 1
+ fi
git update-index -q --refresh
test -z "$(git diff-index --name-only HEAD --)" ||
VN="$VN-dirty" ;;
diff --git a/Makefile b/Makefile
index 939553a72e150a..470c929b60726d 100644
--- a/Makefile
+++ b/Makefile
@@ -321,6 +321,10 @@ include shared.mak
# Define GIT_USER_AGENT if you want to change how git identifies itself during
# network interactions. The default is "git/$(GIT_VERSION)".
#
+# Define GIT_BUILT_FROM_COMMIT if you want to force the commit hash identified
+# in 'git version --build-options' to a specific value. The default is the
+# commit hash of the current HEAD.
+#
# Define DEFAULT_HELP_FORMAT to "man", "info" or "html"
# (defaults to "man") if you want to have a different default when
# "git help" is called without a parameter specifying the format.
@@ -1042,6 +1046,8 @@ LIB_OBJS += git-zlib.o
LIB_OBJS += gpg-interface.o
LIB_OBJS += graph.o
LIB_OBJS += grep.o
+LIB_OBJS += gvfs.o
+LIB_OBJS += gvfs-helper-client.o
LIB_OBJS += hash-lookup.o
LIB_OBJS += hashmap.o
LIB_OBJS += help.o
@@ -1192,6 +1198,7 @@ LIB_OBJS += utf8.o
LIB_OBJS += varint.o
LIB_OBJS += version.o
LIB_OBJS += versioncmp.o
+LIB_OBJS += virtualfilesystem.o
LIB_OBJS += walker.o
LIB_OBJS += wildmatch.o
LIB_OBJS += worktree.o
@@ -1199,6 +1206,8 @@ LIB_OBJS += wrapper.o
LIB_OBJS += write-or-die.o
LIB_OBJS += ws.o
LIB_OBJS += wt-status.o
+LIB_OBJS += wt-status-deserialize.o
+LIB_OBJS += wt-status-serialize.o
LIB_OBJS += xdiff-interface.o
BUILTIN_OBJS += builtin/add.o
@@ -1313,6 +1322,7 @@ BUILTIN_OBJS += builtin/tag.o
BUILTIN_OBJS += builtin/unpack-file.o
BUILTIN_OBJS += builtin/unpack-objects.o
BUILTIN_OBJS += builtin/update-index.o
+BUILTIN_OBJS += builtin/update-microsoft-git.o
BUILTIN_OBJS += builtin/update-ref.o
BUILTIN_OBJS += builtin/update-server-info.o
BUILTIN_OBJS += builtin/upload-archive.o
@@ -1628,6 +1638,9 @@ endif
endif
BASIC_CFLAGS += $(CURL_CFLAGS)
+ PROGRAM_OBJS += gvfs-helper.o
+ TEST_PROGRAMS_NEED_X += test-gvfs-protocol
+
REMOTE_CURL_PRIMARY = git-remote-http$X
REMOTE_CURL_ALIASES = git-remote-https$X git-remote-ftp$X git-remote-ftps$X
REMOTE_CURL_NAMES = $(REMOTE_CURL_PRIMARY) $(REMOTE_CURL_ALIASES)
@@ -2356,6 +2369,15 @@ GIT-USER-AGENT: FORCE
echo '$(GIT_USER_AGENT_SQ)' >GIT-USER-AGENT; \
fi
+GIT_BUILT_FROM_COMMIT = $(eval GIT_BUILT_FROM_COMMIT := $$(shell \
+ GIT_CEILING_DIRECTORIES="$$(CURDIR)/.." \
+ git rev-parse -q --verify HEAD 2>/dev/null))$(GIT_BUILT_FROM_COMMIT)
+GIT-BUILT-FROM-COMMIT: FORCE
+ @if test x'$(GIT_BUILT_FROM_COMMIT)' != x"`cat GIT-BUILT-FROM-COMMIT 2>/dev/null`" ; then \
+ echo >&2 " * new built-from commit"; \
+ echo '$(GIT_BUILT_FROM_COMMIT)' >GIT-BUILT-FROM-COMMIT; \
+ fi
+
ifdef DEFAULT_HELP_FORMAT
BASIC_CFLAGS += -DDEFAULT_HELP_FORMAT='"$(DEFAULT_HELP_FORMAT)"'
endif
@@ -2470,13 +2492,11 @@ PAGER_ENV_CQ_SQ = $(subst ','\'',$(PAGER_ENV_CQ))
pager.sp pager.s pager.o: EXTRA_CPPFLAGS = \
-DPAGER_ENV='$(PAGER_ENV_CQ_SQ)'
-version.sp version.s version.o: GIT-VERSION-FILE GIT-USER-AGENT
+version.sp version.s version.o: GIT-VERSION-FILE GIT-USER-AGENT GIT-BUILT-FROM-COMMIT
version.sp version.s version.o: EXTRA_CPPFLAGS = \
'-DGIT_VERSION="$(GIT_VERSION)"' \
'-DGIT_USER_AGENT=$(GIT_USER_AGENT_CQ_SQ)' \
- '-DGIT_BUILT_FROM_COMMIT="$(shell \
- GIT_CEILING_DIRECTORIES="$(CURDIR)/.." \
- git rev-parse -q --verify HEAD 2>/dev/null)"'
+ '-DGIT_BUILT_FROM_COMMIT="$(GIT_BUILT_FROM_COMMIT)"'
$(BUILT_INS): git$X
$(QUIET_BUILT_IN)$(RM) $@ && \
@@ -2717,6 +2737,7 @@ GIT_OBJS += git.o
.PHONY: git-objs
git-objs: $(GIT_OBJS)
+SCALAR_OBJS := json-parser.o
SCALAR_OBJS += scalar.o
.PHONY: scalar-objs
scalar-objs: $(SCALAR_OBJS)
@@ -2814,7 +2835,7 @@ gettext.sp gettext.s gettext.o: GIT-PREFIX
gettext.sp gettext.s gettext.o: EXTRA_CPPFLAGS = \
-DGIT_LOCALE_PATH='"$(localedir_relative_SQ)"'
-http-push.sp http.sp http-walker.sp remote-curl.sp imap-send.sp: SP_EXTRA_FLAGS += \
+http-push.sp http.sp http-walker.sp remote-curl.sp imap-send.sp gvfs-helper.sp: SP_EXTRA_FLAGS += \
-DCURL_DISABLE_TYPECHECK
pack-revindex.sp: SP_EXTRA_FLAGS += -Wno-memcpy-max-count
@@ -2865,10 +2886,14 @@ $(REMOTE_CURL_PRIMARY): remote-curl.o http.o http-walker.o $(LAZYLOAD_LIBCURL_OB
$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \
$(CURL_LIBCURL) $(EXPAT_LIBEXPAT) $(LIBS)
-scalar$X: scalar.o GIT-LDFLAGS $(GITLIBS)
+scalar$X: $(SCALAR_OBJS) GIT-LDFLAGS $(GITLIBS)
$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) \
$(filter %.o,$^) $(LIBS)
+git-gvfs-helper$X: gvfs-helper.o http.o GIT-LDFLAGS $(GITLIBS) $(LAZYLOAD_LIBCURL_OBJ)
+ $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \
+ $(CURL_LIBCURL) $(EXPAT_LIBEXPAT) $(LIBS)
+
$(LIB_FILE): $(LIB_OBJS)
$(QUIET_AR)$(RM) $@ && $(AR) $(ARFLAGS) $@ $^
@@ -3646,7 +3671,7 @@ dist: git-archive$(X) configure
@$(MAKE) -C git-gui TARDIR=../.dist-tmp-dir/git-gui dist-version
./git-archive --format=tar \
$(GIT_ARCHIVE_EXTRA_FILES) \
- --prefix=$(GIT_TARNAME)/ HEAD^{tree} > $(GIT_TARNAME).tar
+ --prefix=$(GIT_TARNAME)/ HEAD > $(GIT_TARNAME).tar
@$(RM) -r .dist-tmp-dir
gzip -f -9 $(GIT_TARNAME).tar
diff --git a/README.md b/README.md
index e2314c5a313c08..fe82df58415570 100644
--- a/README.md
+++ b/README.md
@@ -1,147 +1,168 @@
-Git for Windows
-===============
-
-[![Open in Visual Studio Code](https://img.shields.io/static/v1?logo=visualstudiocode&label=&message=Open%20in%20Visual%20Studio%20Code&labelColor=2c2c32&color=007acc&logoColor=007acc)](https://open.vscode.dev/git-for-windows/git)
-[![Build status](https://github.com/git-for-windows/git/workflows/CI/badge.svg)](https://github.com/git-for-windows/git/actions?query=branch%3Amain+event%3Apush)
-[![Join the chat at https://gitter.im/git-for-windows/git](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/git-for-windows/git?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
-
-This is [Git for Windows](http://git-for-windows.github.io/), the Windows port
-of [Git](http://git-scm.com/).
-
-The Git for Windows project is run using a [governance
-model](http://git-for-windows.github.io/governance-model.html). If you
-encounter problems, you can report them as [GitHub
-issues](https://github.com/git-for-windows/git/issues), discuss them on Git
-for Windows' [Google Group](http://groups.google.com/group/git-for-windows),
-and [contribute bug
-fixes](https://github.com/git-for-windows/git/wiki/How-to-participate).
-
-To build Git for Windows, please either install [Git for Windows'
-SDK](https://gitforwindows.org/#download-sdk), start its `git-bash.exe`, `cd`
-to your Git worktree and run `make`, or open the Git worktree as a folder in
-Visual Studio.
-
-To verify that your build works, use one of the following methods:
-
-- If you want to test the built executables within Git for Windows' SDK,
- prepend `/bin-wrappers` to the `PATH`.
-- Alternatively, run `make install` in the Git worktree.
-- If you need to test this in a full installer, run `sdk build
- git-and-installer`.
-- You can also "install" Git into an existing portable Git via `make install
- DESTDIR=` where `` refers to the top-level directory of the
- portable Git. In this instance, you will want to prepend that portable Git's
- `/cmd` directory to the `PATH`, or test by running that portable Git's
- `git-bash.exe` or `git-cmd.exe`.
-- If you built using a recent Visual Studio, you can use the menu item
- `Build>Install git` (you will want to click on `Project>CMake Settings for
- Git` first, then click on `Edit JSON` and then point `installRoot` to the
- `mingw64` directory of an already-unpacked portable Git).
-
- As in the previous bullet point, you will then prepend `/cmd` to the `PATH`
- or run using the portable Git's `git-bash.exe` or `git-cmd.exe`.
-- If you want to run the built executables in-place, but in a CMD instead of
- inside a Bash, you can run a snippet like this in the `git-bash.exe` window
- where Git was built (ensure that the `EOF` line has no leading spaces), and
- then paste into the CMD window what was put in the clipboard:
-
- ```sh
- clip.exe <
+```
+
+Double-check that you have the right version by running these commands,
+which should have the same output:
+
+```shell
+git version
+scalar version
+```
+
+To upgrade, you will need to repeat these steps to reinstall.
+
+*Older distributions are missing some required dependencies. Even
+though the package may appear to install successfully, `microsoft/git`
+will not function as expected. If you are running Ubuntu 18.04 or
+older, please follow the install from source instructions below
+instead of installing the debian package.
+
+### Other distributions
+
+You will need to compile and install `microsoft/git` from source:
+
+```shell
+git clone https://github.com/microsoft/git microsoft-git
+cd microsoft-git
+make -j12 prefix=/usr/local
+sudo make -j12 prefix=/usr/local install
+```
+
+For more assistance building Git from source, see
+[the INSTALL file in the core Git project](https://github.com/git/git/blob/master/INSTALL).
+
+Contributing
=========================================================
-Git is a fast, scalable, distributed revision control system with an
-unusually rich command set that provides both high-level operations
-and full access to internals.
-
-Git is an Open Source project covered by the GNU General Public
-License version 2 (some parts of it are under different licenses,
-compatible with the GPLv2). It was originally written by Linus
-Torvalds with help of a group of hackers around the net.
-
-Please read the file [INSTALL][] for installation instructions.
-
-Many Git online resources are accessible from
-including full documentation and Git related tools.
-
-See [Documentation/gittutorial.txt][] to get started, then see
-[Documentation/giteveryday.txt][] for a useful minimum set of commands, and
-`Documentation/git-<commandname>.txt` for documentation of each command.
-If git has been correctly installed, then the tutorial can also be
-read with `man gittutorial` or `git help tutorial`, and the
-documentation of each command with `man git-<commandname>` or `git help
-<commandname>`.
-
-CVS users may also want to read [Documentation/gitcvs-migration.txt][]
-(`man gitcvs-migration` or `git help cvs-migration` if git is
-installed).
-
-The user discussion and development of core Git take place on the Git
-mailing list -- everyone is welcome to post bug reports, feature
-requests, comments and patches to git@vger.kernel.org (read
-[Documentation/SubmittingPatches][] for instructions on patch submission
-and [Documentation/CodingGuidelines][]).
-
-Those wishing to help with error message, usage and informational message
-string translations (localization l10) should see [po/README.md][]
-(a `po` file is a Portable Object file that holds the translations).
-
-To subscribe to the list, send an email with just "subscribe git" in
-the body to majordomo@vger.kernel.org (not the Git list). The mailing
-list archives are available at ,
- and other archival sites.
-The core git mailing list is plain text (no HTML!).
-
-Issues which are security relevant should be disclosed privately to
-the Git Security mailing list .
-
-The maintainer frequently sends the "What's cooking" reports that
-list the current status of various development topics to the mailing
-list. The discussion following them give a good reference for
-project status, development direction and remaining tasks.
-
-The name "git" was given by Linus Torvalds when he wrote the very
-first version. He described the tool as "the stupid content tracker"
-and the name as (depending on your mood):
-
- - random three-letter combination that is pronounceable, and not
- actually used by any common UNIX command. The fact that it is a
- mispronunciation of "get" may or may not be relevant.
- - stupid. contemptible and despicable. simple. Take your pick from the
- dictionary of slang.
- - "global information tracker": you're in a good mood, and it actually
- works for you. Angels sing, and a light suddenly fills the room.
- - "goddamn idiotic truckload of sh*t": when it breaks
-
-[INSTALL]: INSTALL
-[Documentation/gittutorial.txt]: Documentation/gittutorial.txt
-[Documentation/giteveryday.txt]: Documentation/giteveryday.txt
-[Documentation/gitcvs-migration.txt]: Documentation/gitcvs-migration.txt
-[Documentation/SubmittingPatches]: Documentation/SubmittingPatches
-[Documentation/CodingGuidelines]: Documentation/CodingGuidelines
-[po/README.md]: po/README.md
+This project welcomes contributions and suggestions. Most contributions require you to agree to a
+Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
+the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.
+
+When you submit a pull request, a CLA-bot will automatically determine whether you need to provide
+a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
+provided by the bot. You will only need to do this once across all repos using our CLA.
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
+contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
diff --git a/abspath.c b/abspath.c
index 0c17e98654e4b0..e899f46d02097a 100644
--- a/abspath.c
+++ b/abspath.c
@@ -14,7 +14,7 @@ int is_directory(const char *path)
}
/* removes the last path component from 'path' except if 'path' is root */
-static void strip_last_component(struct strbuf *path)
+void strip_last_path_component(struct strbuf *path)
{
size_t offset = offset_1st_component(path->buf);
size_t len = path->len;
@@ -119,7 +119,7 @@ static char *strbuf_realpath_1(struct strbuf *resolved, const char *path,
continue; /* '.' component */
} else if (next.len == 2 && !strcmp(next.buf, "..")) {
/* '..' component; strip the last path component */
- strip_last_component(resolved);
+ strip_last_path_component(resolved);
continue;
}
@@ -171,7 +171,7 @@ static char *strbuf_realpath_1(struct strbuf *resolved, const char *path,
* strip off the last component since it will
* be replaced with the contents of the symlink
*/
- strip_last_component(resolved);
+ strip_last_path_component(resolved);
}
/*
diff --git a/abspath.h b/abspath.h
index 4653080d5e4b7a..06241ba13cf646 100644
--- a/abspath.h
+++ b/abspath.h
@@ -10,6 +10,11 @@ char *real_pathdup(const char *path, int die_on_error);
const char *absolute_path(const char *path);
char *absolute_pathdup(const char *path);
+/**
+ * Remove the last path component from 'path' except if 'path' is root.
+ */
+void strip_last_path_component(struct strbuf *path);
+
/*
* Concatenate "prefix" (if len is non-zero) and "path", with no
* connecting characters (so "prefix" should end with a "/").
diff --git a/apply.c b/apply.c
index 818f3766d95847..6726b24ee3cc1c 100644
--- a/apply.c
+++ b/apply.c
@@ -3380,6 +3380,24 @@ static int checkout_target(struct index_state *istate,
{
struct checkout costate = CHECKOUT_INIT;
+ /*
+ * Do not checkout the entry if the skipworktree bit is set
+ *
+ * Both callers of this method (check_preimage and load_current)
+ * check for the existence of the file before calling this
+ * method so we know that the file doesn't exist at this point
+ * and we don't need to perform that check again here.
+ * We just need to check the skip-worktree and return.
+ *
+ * This is to prevent git from creating a file in the
+ * working directory that has the skip-worktree bit on,
+ * then updating the index from the patch and not keeping
+ * the working directory version up to date with what it
+ * changed the index version to be.
+ */
+ if (ce_skip_worktree(ce))
+ return 0;
+
costate.refresh_cache = 1;
costate.istate = istate;
if (checkout_entry(ce, &costate, NULL, NULL) ||
diff --git a/builtin.h b/builtin.h
index d560baa6618ac9..696ae4caf8ceff 100644
--- a/builtin.h
+++ b/builtin.h
@@ -234,6 +234,7 @@ int cmd_tag(int argc, const char **argv, const char *prefix);
int cmd_unpack_file(int argc, const char **argv, const char *prefix);
int cmd_unpack_objects(int argc, const char **argv, const char *prefix);
int cmd_update_index(int argc, const char **argv, const char *prefix);
+int cmd_update_microsoft_git(int argc, const char **argv, const char *prefix);
int cmd_update_ref(int argc, const char **argv, const char *prefix);
int cmd_update_server_info(int argc, const char **argv, const char *prefix);
int cmd_upload_archive(int argc, const char **argv, const char *prefix);
diff --git a/builtin/add.c b/builtin/add.c
index 03ee5a3a7a2e35..620430cabf6f3d 100644
--- a/builtin/add.c
+++ b/builtin/add.c
@@ -5,6 +5,7 @@
*/
#define USE_THE_INDEX_VARIABLE
#include "builtin.h"
+#include "environment.h"
#include "advice.h"
#include "config.h"
#include "lockfile.h"
@@ -48,6 +49,7 @@ static int chmod_pathspec(struct pathspec *pathspec, char flip, int show_only)
int err;
if (!include_sparse &&
+ !core_virtualfilesystem &&
(ce_skip_worktree(ce) ||
!path_in_sparse_checkout(ce->name, &the_index)))
continue;
@@ -128,8 +130,9 @@ static int refresh(int verbose, const struct pathspec *pathspec)
if (!seen[i]) {
const char *path = pathspec->items[i].original;
- if (matches_skip_worktree(pathspec, i, &skip_worktree_seen) ||
- !path_in_sparse_checkout(path, &the_index)) {
+ if (!core_virtualfilesystem &&
+ (matches_skip_worktree(pathspec, i, &skip_worktree_seen) ||
+ !path_in_sparse_checkout(path, &the_index))) {
string_list_append(&only_match_skip_worktree,
pathspec->items[i].original);
} else {
@@ -139,7 +142,11 @@ static int refresh(int verbose, const struct pathspec *pathspec)
}
}
- if (only_match_skip_worktree.nr) {
+ /*
+ * When using a virtual filesystem, we might re-add a path
+ * that is currently virtual and we want that to succeed.
+ */
+ if (!core_virtualfilesystem && only_match_skip_worktree.nr) {
advise_on_updating_sparse_paths(&only_match_skip_worktree);
ret = 1;
}
@@ -517,7 +524,11 @@ int cmd_add(int argc, const char **argv, const char *prefix)
if (seen[i])
continue;
- if (!include_sparse &&
+ /*
+ * When using a virtual filesystem, we might re-add a path
+ * that is currently virtual and we want that to succeed.
+ */
+ if (!include_sparse && !core_virtualfilesystem &&
matches_skip_worktree(&pathspec, i, &skip_worktree_seen)) {
string_list_append(&only_match_skip_worktree,
pathspec.items[i].original);
@@ -541,7 +552,6 @@ int cmd_add(int argc, const char **argv, const char *prefix)
}
}
-
if (only_match_skip_worktree.nr) {
advise_on_updating_sparse_paths(&only_match_skip_worktree);
exit_status = 1;
diff --git a/builtin/checkout.c b/builtin/checkout.c
index 277197d41d9cb0..d0fbcf97f74b85 100644
--- a/builtin/checkout.c
+++ b/builtin/checkout.c
@@ -19,6 +19,7 @@
#include "merge-recursive.h"
#include "object-name.h"
#include "object-store-ll.h"
+#include "packfile.h"
#include "parse-options.h"
#include "path.h"
#include "preload-index.h"
@@ -1018,8 +1019,16 @@ static void update_refs_for_switch(const struct checkout_opts *opts,
remove_branch_state(the_repository, !opts->quiet);
strbuf_release(&msg);
if (!opts->quiet &&
- (new_branch_info->path || (!opts->force_detach && !strcmp(new_branch_info->name, "HEAD"))))
+ (new_branch_info->path || (!opts->force_detach && !strcmp(new_branch_info->name, "HEAD")))) {
+ unsigned long nr_unpack_entry_at_start;
+
+ trace2_region_enter("tracking", "report_tracking", the_repository);
+ nr_unpack_entry_at_start = get_nr_unpack_entry();
report_tracking(new_branch_info);
+ trace2_data_intmax("tracking", NULL, "report_tracking/nr_unpack_entries",
+ (intmax_t)(get_nr_unpack_entry() - nr_unpack_entry_at_start));
+ trace2_region_leave("tracking", "report_tracking", the_repository);
+ }
}
static int add_pending_uninteresting_ref(const char *refname,
diff --git a/builtin/commit.c b/builtin/commit.c
index 82bfe0d3d36aed..5bebd6af0cff3b 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -46,6 +46,7 @@
#include "commit-reach.h"
#include "commit-graph.h"
#include "pretty.h"
+#include "trace2.h"
static const char * const builtin_commit_usage[] = {
N_("git commit [-a | --interactive | --patch] [-s] [-v] [-u] [--amend]\n"
@@ -175,6 +176,122 @@ static int opt_parse_porcelain(const struct option *opt, const char *arg, int un
return 0;
}
+static int do_serialize = 0;
+static char *serialize_path = NULL;
+
+static int reject_implicit = 0;
+static int do_implicit_deserialize = 0;
+static int do_explicit_deserialize = 0;
+static char *deserialize_path = NULL;
+
+static enum wt_status_deserialize_wait implicit_deserialize_wait = DESERIALIZE_WAIT__UNSET;
+static enum wt_status_deserialize_wait explicit_deserialize_wait = DESERIALIZE_WAIT__UNSET;
+
+/*
+ * --serialize | --serialize=<path>
+ *
+ * Request that we serialize status output rather than or in addition to
+ * printing in any of the established formats.
+ *
+ * Without a path, we write binary serialization data to stdout (and omit
+ * the normal status output).
+ *
+ * With a path, we write binary serialization data to the <path> and then
+ * write normal status output.
+ */
+static int opt_parse_serialize(const struct option *opt, const char *arg, int unset)
+{
+ enum wt_status_format *value = (enum wt_status_format *)opt->value;
+ if (unset || !arg)
+ *value = STATUS_FORMAT_SERIALIZE_V1;
+
+ if (arg) {
+ free(serialize_path);
+ serialize_path = xstrdup(arg);
+ }
+
+ if (do_explicit_deserialize)
+ die("cannot mix --serialize and --deserialize");
+ do_implicit_deserialize = 0;
+
+ do_serialize = 1;
+ return 0;
+}
+
+/*
+ * --deserialize | --deserialize=<path> |
+ * --no-deserialize
+ *
+ * Request that we deserialize status data from some existing resource
+ * rather than performing a status scan.
+ *
+ * The input source can come from stdin or a path given here -- or be
+ * inherited from the config settings.
+ */
+static int opt_parse_deserialize(const struct option *opt, const char *arg, int unset)
+{
+ if (unset) {
+ do_implicit_deserialize = 0;
+ do_explicit_deserialize = 0;
+ } else {
+ if (do_serialize)
+ die("cannot mix --serialize and --deserialize");
+ if (arg) {
+ /* override config or stdin */
+ free(deserialize_path);
+ deserialize_path = xstrdup(arg);
+ }
+ if (!deserialize_path || !*deserialize_path)
+ do_explicit_deserialize = 1; /* read stdin */
+ else if (wt_status_deserialize_access(deserialize_path, R_OK) == 0)
+ do_explicit_deserialize = 1; /* can read from this file */
+ else {
+ /*
+ * otherwise, silently fallback to the normal
+ * collection scan
+ */
+ do_implicit_deserialize = 0;
+ do_explicit_deserialize = 0;
+ }
+ }
+
+ return 0;
+}
+
+static enum wt_status_deserialize_wait parse_dw(const char *arg)
+{
+ int tenths;
+
+ if (!strcmp(arg, "fail"))
+ return DESERIALIZE_WAIT__FAIL;
+ else if (!strcmp(arg, "block"))
+ return DESERIALIZE_WAIT__BLOCK;
+ else if (!strcmp(arg, "no"))
+ return DESERIALIZE_WAIT__NO;
+
+ /*
+ * Otherwise, assume it is a timeout in tenths of a second.
+ * If it contains a bogus value, atol() will return zero
+ * which is OK.
+ */
+ tenths = atol(arg);
+ if (tenths < 0)
+ tenths = DESERIALIZE_WAIT__NO;
+ return tenths;
+}
+
+static int opt_parse_deserialize_wait(const struct option *opt,
+ const char *arg,
+ int unset)
+{
+ if (unset)
+ explicit_deserialize_wait = DESERIALIZE_WAIT__UNSET;
+ else
+ explicit_deserialize_wait = parse_dw(arg);
+
+ return 0;
+}
+
static int opt_parse_m(const struct option *opt, const char *arg, int unset)
{
struct strbuf *buf = opt->value;
@@ -1176,6 +1293,8 @@ static void handle_untracked_files_arg(struct wt_status *s)
s->show_untracked_files = SHOW_NORMAL_UNTRACKED_FILES;
else if (!strcmp(untracked_files_arg, "all"))
s->show_untracked_files = SHOW_ALL_UNTRACKED_FILES;
+ else if (!strcmp(untracked_files_arg,"complete"))
+ s->show_untracked_files = SHOW_COMPLETE_UNTRACKED_FILES;
/*
* Please update $__git_untracked_file_modes in
* git-completion.bash when you add new options
@@ -1463,6 +1582,28 @@ static int git_status_config(const char *k, const char *v,
s->relative_paths = git_config_bool(k, v);
return 0;
}
+ if (!strcmp(k, "status.deserializepath")) {
+ /*
+ * Automatically assume deserialization if this is
+ * set in the config and the file exists. Do not
+ * complain if the file does not exist, because we
+ * silently fall back to normal mode.
+ */
+ if (v && *v && access(v, R_OK) == 0) {
+ do_implicit_deserialize = 1;
+ deserialize_path = xstrdup(v);
+ } else {
+ reject_implicit = 1;
+ }
+ return 0;
+ }
+ if (!strcmp(k, "status.deserializewait")) {
+ if (!v || !*v)
+ implicit_deserialize_wait = DESERIALIZE_WAIT__UNSET;
+ else
+ implicit_deserialize_wait = parse_dw(v);
+ return 0;
+ }
if (!strcmp(k, "status.showuntrackedfiles")) {
if (!v)
return config_error_nonbool(k);
@@ -1503,7 +1644,8 @@ int cmd_status(int argc, const char **argv, const char *prefix)
static const char *rename_score_arg = (const char *)-1;
static struct wt_status s;
unsigned int progress_flag = 0;
- int fd;
+ int try_deserialize;
+ int fd = -1;
struct object_id oid;
static struct option builtin_status_options[] = {
OPT__VERBOSE(&verbose, N_("be verbose")),
@@ -1518,6 +1660,15 @@ int cmd_status(int argc, const char **argv, const char *prefix)
OPT_CALLBACK_F(0, "porcelain", &status_format,
N_("version"), N_("machine-readable output"),
PARSE_OPT_OPTARG, opt_parse_porcelain),
+ { OPTION_CALLBACK, 0, "serialize", &status_format,
+ N_("path"), N_("serialize raw status data to path or stdout"),
+ PARSE_OPT_OPTARG | PARSE_OPT_NONEG, opt_parse_serialize },
+ { OPTION_CALLBACK, 0, "deserialize", NULL,
+ N_("path"), N_("deserialize raw status data from file"),
+ PARSE_OPT_OPTARG, opt_parse_deserialize },
+ { OPTION_CALLBACK, 0, "deserialize-wait", NULL,
+ N_("fail|block|no"), N_("how to wait if status cache file is invalid"),
+ PARSE_OPT_OPTARG, opt_parse_deserialize_wait },
OPT_SET_INT(0, "long", &status_format,
N_("show status in long format (default)"),
STATUS_FORMAT_LONG),
@@ -1562,10 +1713,53 @@ int cmd_status(int argc, const char **argv, const char *prefix)
s.show_untracked_files == SHOW_NO_UNTRACKED_FILES)
die(_("Unsupported combination of ignored and untracked-files arguments"));
+ if (s.show_untracked_files == SHOW_COMPLETE_UNTRACKED_FILES &&
+ s.show_ignored_mode == SHOW_NO_IGNORED)
+ die(_("Complete Untracked only supported with ignored files"));
+
parse_pathspec(&s.pathspec, 0,
PATHSPEC_PREFER_FULL,
prefix, argv);
+ /*
+ * If we want to try to deserialize status data from a cache file,
+ * we need to re-order the initialization code. The problem is that
+ * this makes for a very nasty diff and causes merge conflicts as we
+ * carry it forward. And it easy to mess up the merge, so we
+ * duplicate some code here to hopefully reduce conflicts.
+ */
+ try_deserialize = (!do_serialize &&
+ (do_implicit_deserialize || do_explicit_deserialize));
+
+ /*
+ * Disable deserialize when verbose is set because it causes us to
+ * print diffs for each modified file, but that requires us to have
+ * the index loaded and we don't want to do that (at least not now for
+ * this seldom used feature). My fear is that would further tangle
+ * the merge conflict with upstream.
+ *
+ * TODO Reconsider this in the future.
+ */
+ if (try_deserialize && verbose) {
+ trace2_data_string("status", the_repository, "deserialize/reject",
+ "args/verbose");
+ try_deserialize = 0;
+ }
+
+ if (try_deserialize)
+ goto skip_init;
+ /*
+ * If we implicitly received a status cache pathname from the config
+ * and the file does not exist, we silently reject it and do the normal
+ * status "collect". Fake up some trace2 messages to reflect this and
+ * assist post-processors know this case is different.
+ */
+ if (!do_serialize && reject_implicit) {
+ trace2_cmd_mode("implicit-deserialize");
+ trace2_data_string("status", the_repository, "deserialize/reject",
+ "status-cache/access");
+ }
+
enable_fscache(0);
if (status_format != STATUS_FORMAT_PORCELAIN &&
status_format != STATUS_FORMAT_PORCELAIN_V2)
@@ -1580,6 +1774,7 @@ int cmd_status(int argc, const char **argv, const char *prefix)
else
fd = -1;
+skip_init:
s.is_initial = repo_get_oid(the_repository, s.reference, &oid) ? 1 : 0;
if (!s.is_initial)
oidcpy(&s.oid_commit, &oid);
@@ -1596,6 +1791,36 @@ int cmd_status(int argc, const char **argv, const char *prefix)
s.rename_score = parse_rename_score(&rename_score_arg);
}
+ if (try_deserialize) {
+ int result;
+ enum wt_status_deserialize_wait dw = implicit_deserialize_wait;
+ if (explicit_deserialize_wait != DESERIALIZE_WAIT__UNSET)
+ dw = explicit_deserialize_wait;
+ if (dw == DESERIALIZE_WAIT__UNSET)
+ dw = DESERIALIZE_WAIT__NO;
+
+ if (s.relative_paths)
+ s.prefix = prefix;
+
+ trace2_cmd_mode("deserialize");
+ result = wt_status_deserialize(&s, deserialize_path, dw);
+ if (result == DESERIALIZE_OK)
+ return 0;
+ if (dw == DESERIALIZE_WAIT__FAIL)
+ die(_("Rejected status serialization cache"));
+
+ /* deserialize failed, so force the initialization we skipped above. */
+ enable_fscache(1);
+ repo_read_index_preload(the_repository, &s.pathspec, 0);
+ refresh_index(&the_index, REFRESH_QUIET|REFRESH_UNMERGED, &s.pathspec, NULL, NULL);
+
+ if (use_optional_locks())
+ fd = repo_hold_locked_index(the_repository, &index_lock, 0);
+ else
+ fd = -1;
+ }
+
+ trace2_cmd_mode("collect");
wt_status_collect(&s);
if (0 <= fd)
@@ -1604,6 +1829,17 @@ int cmd_status(int argc, const char **argv, const char *prefix)
if (s.relative_paths)
s.prefix = prefix;
+ if (serialize_path) {
+ int fd_serialize = xopen(serialize_path,
+ O_WRONLY | O_CREAT | O_TRUNC, 0666);
+ if (fd_serialize < 0)
+ die_errno(_("could not serialize to '%s'"),
+ serialize_path);
+ trace2_cmd_mode("serialize");
+ wt_status_serialize_v1(fd_serialize, &s);
+ close(fd_serialize);
+ }
+
wt_status_print(&s);
wt_status_collect_free_buffers(&s);
diff --git a/builtin/fetch.c b/builtin/fetch.c
index eed4a7cdb6c1d6..614434fdd429a9 100644
--- a/builtin/fetch.c
+++ b/builtin/fetch.c
@@ -18,6 +18,8 @@
#include "string-list.h"
#include "remote.h"
#include "transport.h"
+#include "gvfs.h"
+#include "gvfs-helper-client.h"
#include "run-command.h"
#include "parse-options.h"
#include "sigchain.h"
@@ -1145,6 +1147,13 @@ static int store_updated_refs(struct display_state *display_state,
opt.exclude_hidden_refs_section = "fetch";
rm = ref_map;
+
+ /*
+ * Before checking connectivity, be really sure we have the
+ * latest pack-files loaded into memory.
+ */
+ reprepare_packed_git(the_repository);
+
if (check_connected(iterate_ref_map, &rm, &opt)) {
rc = error(_("%s did not send all necessary objects\n"),
display_state->url);
@@ -2369,6 +2378,9 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
}
string_list_remove_duplicates(&list, 0);
+ if (core_gvfs & GVFS_PREFETCH_DURING_FETCH)
+ gh_client__prefetch(0, NULL);
+
if (negotiate_only) {
struct oidset acked_commits = OIDSET_INIT;
struct oidset_iter iter;
diff --git a/builtin/gc.c b/builtin/gc.c
index 5c4315f0d816c8..0de1c4dfafca91 100644
--- a/builtin/gc.c
+++ b/builtin/gc.c
@@ -16,6 +16,7 @@
#include "environment.h"
#include "hex.h"
#include "repository.h"
+#include "gvfs.h"
#include "config.h"
#include "tempfile.h"
#include "lockfile.h"
@@ -621,6 +622,9 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
if (quiet)
strvec_push(&repack, "-q");
+ if ((!auto_gc || (auto_gc && gc_auto_threshold > 0)) && gvfs_config_is_set(GVFS_BLOCK_COMMANDS))
+ die(_("'git gc' is not supported on a GVFS repo"));
+
if (auto_gc) {
/*
* Auto-gc should be least intrusive as possible.
@@ -1026,6 +1030,8 @@ static int write_loose_object_to_stdin(const struct object_id *oid,
return ++(d->count) > d->batch_size;
}
+static const char *object_dir = NULL;
+
static int pack_loose(struct maintenance_run_opts *opts)
{
struct repository *r = the_repository;
@@ -1033,11 +1039,14 @@ static int pack_loose(struct maintenance_run_opts *opts)
struct write_loose_object_data data;
struct child_process pack_proc = CHILD_PROCESS_INIT;
+ if (!object_dir)
+ object_dir = r->objects->odb->path;
+
/*
* Do not start pack-objects process
* if there are no loose objects.
*/
- if (!for_each_loose_file_in_objdir(r->objects->odb->path,
+ if (!for_each_loose_file_in_objdir(object_dir,
bail_on_loose,
NULL, NULL, NULL))
return 0;
@@ -1047,7 +1056,7 @@ static int pack_loose(struct maintenance_run_opts *opts)
strvec_push(&pack_proc.args, "pack-objects");
if (opts->quiet)
strvec_push(&pack_proc.args, "--quiet");
- strvec_pushf(&pack_proc.args, "%s/pack/loose", r->objects->odb->path);
+ strvec_pushf(&pack_proc.args, "%s/pack/loose", object_dir);
pack_proc.in = -1;
@@ -1060,7 +1069,7 @@ static int pack_loose(struct maintenance_run_opts *opts)
data.count = 0;
data.batch_size = 50000;
- for_each_loose_file_in_objdir(r->objects->odb->path,
+ for_each_loose_file_in_objdir(object_dir,
write_loose_object_to_stdin,
NULL,
NULL,
@@ -1438,6 +1447,7 @@ static int maintenance_run(int argc, const char **argv, const char *prefix)
{
int i;
struct maintenance_run_opts opts;
+ const char *tmp_obj_dir = NULL;
struct option builtin_maintenance_run_options[] = {
OPT_BOOL(0, "auto", &opts.auto_flag,
N_("run tasks based on the state of the repository")),
@@ -1471,6 +1481,18 @@ static int maintenance_run(int argc, const char **argv, const char *prefix)
if (argc != 0)
usage_with_options(builtin_maintenance_run_usage,
builtin_maintenance_run_options);
+
+ /*
+ * To enable the VFS for Git/Scalar shared object cache, use
+ * the gvfs.sharedcache config option to redirect the
+ * maintenance to that location.
+ */
+ if (!git_config_get_value("gvfs.sharedcache", &tmp_obj_dir) &&
+ tmp_obj_dir) {
+ object_dir = xstrdup(tmp_obj_dir);
+ setenv(DB_ENVIRONMENT, object_dir, 1);
+ }
+
return maintenance_run_tasks(&opts);
}
@@ -1635,6 +1657,42 @@ static const char *get_frequency(enum schedule_priority schedule)
}
}
+static const char *extraconfig[] = {
+ "credential.interactive=false",
+ "core.askPass=true", /* 'true' returns success, but no output. */
+ NULL
+};
+
+static const char *get_extra_config_parameters(void) {
+ static const char *result = NULL;
+ struct strbuf builder = STRBUF_INIT;
+
+ if (result)
+ return result;
+
+ for (const char **s = extraconfig; s && *s; s++)
+ strbuf_addf(&builder, "-c %s ", *s);
+
+ result = strbuf_detach(&builder, NULL);
+ return result;
+}
+
+static const char *get_extra_launchctl_strings(void) {
+ static const char *result = NULL;
+ struct strbuf builder = STRBUF_INIT;
+
+ if (result)
+ return result;
+
+ for (const char **s = extraconfig; s && *s; s++) {
+ strbuf_addstr(&builder, "-c\n");
+ strbuf_addf(&builder, "%s\n", *s);
+ }
+
+ result = strbuf_detach(&builder, NULL);
+ return result;
+}
+
/*
* get_schedule_cmd` reads the GIT_TEST_MAINT_SCHEDULER environment variable
* to mock the schedulers that `git maintenance start` rely on.
@@ -1708,6 +1766,15 @@ static int get_schedule_cmd(const char **cmd, int *is_available)
return 1;
}
+static int get_random_minute(void)
+{
+ /* Use a static value when under tests. */
+ if (getenv("GIT_TEST_MAINT_SCHEDULER"))
+ return 13;
+
+ return git_rand() % 60;
+}
+
static int is_launchctl_available(void)
{
const char *cmd = "launchctl";
@@ -1820,6 +1887,7 @@ static int launchctl_schedule_plist(const char *exec_path, enum schedule_priorit
struct strbuf plist = STRBUF_INIT, plist2 = STRBUF_INIT;
struct stat st;
const char *cmd = "launchctl";
+ int minute = get_random_minute();
get_schedule_cmd(&cmd, NULL);
preamble = "\n"
@@ -1831,6 +1899,7 @@ static int launchctl_schedule_plist(const char *exec_path, enum schedule_priorit
"\n"
"%s/git\n"
"--exec-path=%s\n"
+ "%s" /* For extra config parameters. */
"for-each-repo\n"
"--config=maintenance.repo\n"
"maintenance\n"
@@ -1839,35 +1908,37 @@ static int launchctl_schedule_plist(const char *exec_path, enum schedule_priorit
"\n"
"StartCalendarInterval\n"
"\n";
- strbuf_addf(&plist, preamble, name, exec_path, exec_path, frequency);
+ strbuf_addf(&plist, preamble, name, exec_path, exec_path,
+ get_extra_launchctl_strings(), frequency);
switch (schedule) {
case SCHEDULE_HOURLY:
repeat = "\n"
"Hour%d\n"
- "Minute0\n"
+ "Minute%d\n"
"\n";
for (i = 1; i <= 23; i++)
- strbuf_addf(&plist, repeat, i);
+ strbuf_addf(&plist, repeat, i, minute);
break;
case SCHEDULE_DAILY:
repeat = "\n"
"Day%d\n"
"Hour0\n"
- "Minute0\n"
+ "Minute%d\n"
"\n";
for (i = 1; i <= 6; i++)
- strbuf_addf(&plist, repeat, i);
+ strbuf_addf(&plist, repeat, i, minute);
break;
case SCHEDULE_WEEKLY:
- strbuf_addstr(&plist,
- "\n"
- "Day0\n"
- "Hour0\n"
- "Minute0\n"
- "\n");
+ strbuf_addf(&plist,
+ "\n"
+ "Day0\n"
+ "Hour0\n"
+ "Minute%d\n"
+ "\n",
+ minute);
break;
default:
@@ -1984,6 +2055,7 @@ static int schtasks_schedule_task(const char *exec_path, enum schedule_priority
const char *frequency = get_frequency(schedule);
char *name = schtasks_task_name(frequency);
struct strbuf tfilename = STRBUF_INIT;
+ int minute = get_random_minute();
get_schedule_cmd(&cmd, NULL);
@@ -2004,7 +2076,7 @@ static int schtasks_schedule_task(const char *exec_path, enum schedule_priority
switch (schedule) {
case SCHEDULE_HOURLY:
fprintf(tfile->fp,
- "2020-01-01T01:00:00\n"
+ "2020-01-01T01:%02d:00\n"
"true\n"
"\n"
"1\n"
@@ -2013,12 +2085,13 @@ static int schtasks_schedule_task(const char *exec_path, enum schedule_priority
"PT1H\n"
"PT23H\n"
"false\n"
- "\n");
+ "\n",
+ minute);
break;
case SCHEDULE_DAILY:
fprintf(tfile->fp,
- "2020-01-01T00:00:00\n"
+ "2020-01-01T00:%02d:00\n"
"true\n"
"\n"
"\n"
@@ -2030,19 +2103,21 @@ static int schtasks_schedule_task(const char *exec_path, enum schedule_priority
"\n"
"\n"
"1\n"
- "\n");
+ "\n",
+ minute);
break;
case SCHEDULE_WEEKLY:
fprintf(tfile->fp,
- "2020-01-01T00:00:00\n"
+ "2020-01-01T00:%02d:00\n"
"true\n"
"\n"
"\n"
"\n"
"\n"
"1\n"
- "\n");
+ "\n",
+ minute);
break;
default:
@@ -2069,11 +2144,12 @@ static int schtasks_schedule_task(const char *exec_path, enum schedule_priority
"\n"
"\n"
"\"%s\\headless-git.exe\"\n"
- "--exec-path=\"%s\" for-each-repo --config=maintenance.repo maintenance run --schedule=%s\n"
+ "--exec-path=\"%s\" %s for-each-repo --config=maintenance.repo maintenance run --schedule=%s\n"
"\n"
"\n"
"\n";
- fprintf(tfile->fp, xml, exec_path, exec_path, frequency);
+ fprintf(tfile->fp, xml, exec_path, exec_path,
+ get_extra_config_parameters(), frequency);
strvec_split(&child.args, cmd);
strvec_pushl(&child.args, "/create", "/tn", name, "/f", "/xml",
get_tempfile_path(tfile), NULL);
@@ -2159,6 +2235,7 @@ static int crontab_update_schedule(int run_maintenance, int fd)
FILE *cron_list, *cron_in;
struct strbuf line = STRBUF_INIT;
struct tempfile *tmpedit = NULL;
+ int minute = get_random_minute();
get_schedule_cmd(&cmd, NULL);
strvec_split(&crontab_list.args, cmd);
@@ -2213,11 +2290,11 @@ static int crontab_update_schedule(int run_maintenance, int fd)
"# replaced in the future by a Git command.\n\n");
strbuf_addf(&line_format,
- "%%s %%s * * %%s \"%s/git\" --exec-path=\"%s\" for-each-repo --config=maintenance.repo maintenance run --schedule=%%s\n",
- exec_path, exec_path);
- fprintf(cron_in, line_format.buf, "0", "1-23", "*", "hourly");
- fprintf(cron_in, line_format.buf, "0", "0", "1-6", "daily");
- fprintf(cron_in, line_format.buf, "0", "0", "0", "weekly");
+ "%%d %%s * * %%s \"%s/git\" --exec-path=\"%s\" %s for-each-repo --config=maintenance.repo maintenance run --schedule=%%s\n",
+ exec_path, exec_path, get_extra_config_parameters());
+ fprintf(cron_in, line_format.buf, minute, "1-23", "*", "hourly");
+ fprintf(cron_in, line_format.buf, minute, "0", "1-6", "daily");
+ fprintf(cron_in, line_format.buf, minute, "0", "0", "weekly");
strbuf_release(&line_format);
fprintf(cron_in, "\n%s\n", END_LINE);
@@ -2276,77 +2353,54 @@ static char *xdg_config_home_systemd(const char *filename)
return xdg_config_home_for("systemd/user", filename);
}
-static int systemd_timer_enable_unit(int enable,
- enum schedule_priority schedule)
-{
- const char *cmd = "systemctl";
- struct child_process child = CHILD_PROCESS_INIT;
- const char *frequency = get_frequency(schedule);
-
- /*
- * Disabling the systemd unit while it is already disabled makes
- * systemctl print an error.
- * Let's ignore it since it means we already are in the expected state:
- * the unit is disabled.
- *
- * On the other hand, enabling a systemd unit which is already enabled
- * produces no error.
- */
- if (!enable)
- child.no_stderr = 1;
-
- get_schedule_cmd(&cmd, NULL);
- strvec_split(&child.args, cmd);
- strvec_pushl(&child.args, "--user", enable ? "enable" : "disable",
- "--now", NULL);
- strvec_pushf(&child.args, "git-maintenance@%s.timer", frequency);
-
- if (start_command(&child))
- return error(_("failed to start systemctl"));
- if (finish_command(&child))
- /*
- * Disabling an already disabled systemd unit makes
- * systemctl fail.
- * Let's ignore this failure.
- *
- * Enabling an enabled systemd unit doesn't fail.
- */
- if (enable)
- return error(_("failed to run systemctl"));
- return 0;
-}
+#define SYSTEMD_UNIT_FORMAT "git-maintenance@%s.%s"
-static int systemd_timer_delete_unit_templates(void)
+static int systemd_timer_delete_timer_file(enum schedule_priority priority)
{
int ret = 0;
- char *filename = xdg_config_home_systemd("git-maintenance@.timer");
- if (unlink(filename) && !is_missing_file_error(errno))
- ret = error_errno(_("failed to delete '%s'"), filename);
- FREE_AND_NULL(filename);
+ const char *frequency = get_frequency(priority);
+ char *local_timer_name = xstrfmt(SYSTEMD_UNIT_FORMAT, frequency, "timer");
+ char *filename = xdg_config_home_systemd(local_timer_name);
- filename = xdg_config_home_systemd("git-maintenance@.service");
if (unlink(filename) && !is_missing_file_error(errno))
ret = error_errno(_("failed to delete '%s'"), filename);
free(filename);
+ free(local_timer_name);
return ret;
}
-static int systemd_timer_delete_units(void)
+static int systemd_timer_delete_service_template(void)
{
- return systemd_timer_enable_unit(0, SCHEDULE_HOURLY) ||
- systemd_timer_enable_unit(0, SCHEDULE_DAILY) ||
- systemd_timer_enable_unit(0, SCHEDULE_WEEKLY) ||
- systemd_timer_delete_unit_templates();
+ int ret = 0;
+ char *local_service_name = xstrfmt(SYSTEMD_UNIT_FORMAT, "", "service");
+ char *filename = xdg_config_home_systemd(local_service_name);
+ if (unlink(filename) && !is_missing_file_error(errno))
+ ret = error_errno(_("failed to delete '%s'"), filename);
+
+ free(filename);
+ free(local_service_name);
+ return ret;
}
-static int systemd_timer_write_unit_templates(const char *exec_path)
+/*
+ * Write the schedule information into a git-maintenance@<schedule>.timer
+ * file using a custom minute. This timer file cannot use the templating
+ * system, so we generate a specific file for each.
+ */
+static int systemd_timer_write_timer_file(enum schedule_priority schedule,
+ int minute)
{
+ int res = -1;
char *filename;
FILE *file;
const char *unit;
+ char *schedule_pattern = NULL;
+ const char *frequency = get_frequency(schedule);
+ char *local_timer_name = xstrfmt(SYSTEMD_UNIT_FORMAT, frequency, "timer");
+
+ filename = xdg_config_home_systemd(local_timer_name);
- filename = xdg_config_home_systemd("git-maintenance@.timer");
if (safe_create_leading_directories(filename)) {
error(_("failed to create directories for '%s'"), filename);
goto error;
@@ -2355,6 +2409,23 @@ static int systemd_timer_write_unit_templates(const char *exec_path)
if (!file)
goto error;
+ switch (schedule) {
+ case SCHEDULE_HOURLY:
+ schedule_pattern = xstrfmt("*-*-* 1..23:%02d:00", minute);
+ break;
+
+ case SCHEDULE_DAILY:
+ schedule_pattern = xstrfmt("Tue..Sun *-*-* 0:%02d:00", minute);
+ break;
+
+ case SCHEDULE_WEEKLY:
+ schedule_pattern = xstrfmt("Mon 0:%02d:00", minute);
+ break;
+
+ default:
+ BUG("Unhandled schedule_priority");
+ }
+
unit = "# This file was created and is maintained by Git.\n"
"# Any edits made in this file might be replaced in the future\n"
"# by a Git command.\n"
@@ -2363,12 +2434,12 @@ static int systemd_timer_write_unit_templates(const char *exec_path)
"Description=Optimize Git repositories data\n"
"\n"
"[Timer]\n"
- "OnCalendar=%i\n"
+ "OnCalendar=%s\n"
"Persistent=true\n"
"\n"
"[Install]\n"
"WantedBy=timers.target\n";
- if (fputs(unit, file) == EOF) {
+ if (fprintf(file, unit, schedule_pattern) < 0) {
error(_("failed to write to '%s'"), filename);
fclose(file);
goto error;
@@ -2377,9 +2448,36 @@ static int systemd_timer_write_unit_templates(const char *exec_path)
error_errno(_("failed to flush '%s'"), filename);
goto error;
}
+
+ res = 0;
+
+error:
+ free(schedule_pattern);
+ free(local_timer_name);
free(filename);
+ return res;
+}
- filename = xdg_config_home_systemd("git-maintenance@.service");
+/*
+ * No matter the schedule, we use the same service and can make use of the
+ * templating system. When installing git-maintenance@<schedule>.timer,
+ * systemd will notice that git-maintenance@.service exists as a template
+ * and will use this file and insert the <schedule> into the template at
+ * the position of "%i".
+ */
+static int systemd_timer_write_service_template(const char *exec_path)
+{
+ int res = -1;
+ char *filename;
+ FILE *file;
+ const char *unit;
+ char *local_service_name = xstrfmt(SYSTEMD_UNIT_FORMAT, "", "service");
+
+ filename = xdg_config_home_systemd(local_service_name);
+ if (safe_create_leading_directories(filename)) {
+ error(_("failed to create directories for '%s'"), filename);
+ goto error;
+ }
file = fopen_or_warn(filename, "w");
if (!file)
goto error;
@@ -2393,7 +2491,7 @@ static int systemd_timer_write_unit_templates(const char *exec_path)
"\n"
"[Service]\n"
"Type=oneshot\n"
- "ExecStart=\"%s/git\" --exec-path=\"%s\" for-each-repo --config=maintenance.repo maintenance run --schedule=%%i\n"
+ "ExecStart=\"%s/git\" --exec-path=\"%s\" %s for-each-repo --config=maintenance.repo maintenance run --schedule=%%i\n"
"LockPersonality=yes\n"
"MemoryDenyWriteExecute=yes\n"
"NoNewPrivileges=yes\n"
@@ -2403,7 +2501,7 @@ static int systemd_timer_write_unit_templates(const char *exec_path)
"RestrictSUIDSGID=yes\n"
"SystemCallArchitectures=native\n"
"SystemCallFilter=@system-service\n";
- if (fprintf(file, unit, exec_path, exec_path) < 0) {
+ if (fprintf(file, unit, exec_path, exec_path, get_extra_config_parameters()) < 0) {
error(_("failed to write to '%s'"), filename);
fclose(file);
goto error;
@@ -2412,25 +2510,110 @@ static int systemd_timer_write_unit_templates(const char *exec_path)
error_errno(_("failed to flush '%s'"), filename);
goto error;
}
+
+ res = 0;
+
+error:
+ free(local_service_name);
free(filename);
+ return res;
+}
+
+static int systemd_timer_enable_unit(int enable,
+ enum schedule_priority schedule,
+ int minute)
+{
+ const char *cmd = "systemctl";
+ struct child_process child = CHILD_PROCESS_INIT;
+ const char *frequency = get_frequency(schedule);
+
+ /*
+ * Disabling the systemd unit while it is already disabled makes
+ * systemctl print an error.
+ * Let's ignore it since it means we already are in the expected state:
+ * the unit is disabled.
+ *
+ * On the other hand, enabling a systemd unit which is already enabled
+ * produces no error.
+ */
+ if (!enable)
+ child.no_stderr = 1;
+ else if (systemd_timer_write_timer_file(schedule, minute))
+ return -1;
+
+ get_schedule_cmd(&cmd, NULL);
+ strvec_split(&child.args, cmd);
+ strvec_pushl(&child.args, "--user", enable ? "enable" : "disable",
+ "--now", NULL);
+ strvec_pushf(&child.args, SYSTEMD_UNIT_FORMAT, frequency, "timer");
+
+ if (start_command(&child))
+ return error(_("failed to start systemctl"));
+ if (finish_command(&child))
+ /*
+ * Disabling an already disabled systemd unit makes
+ * systemctl fail.
+ * Let's ignore this failure.
+ *
+ * Enabling an enabled systemd unit doesn't fail.
+ */
+ if (enable)
+ return error(_("failed to run systemctl"));
return 0;
+}
+
+/*
+ * A previous version of Git wrote the timer units as template files.
+ * Clean these up, if they exist.
+ */
+static void systemd_timer_delete_stale_timer_templates(void)
+{
+ char *timer_template_name = xstrfmt(SYSTEMD_UNIT_FORMAT, "", "timer");
+ char *filename = xdg_config_home_systemd(timer_template_name);
+
+ if (unlink(filename) && !is_missing_file_error(errno))
+ warning(_("failed to delete '%s'"), filename);
-error:
free(filename);
- systemd_timer_delete_unit_templates();
- return -1;
+ free(timer_template_name);
+}
+
+static int systemd_timer_delete_unit_files(void)
+{
+ systemd_timer_delete_stale_timer_templates();
+
+ /* Purposefully not short-circuited to make sure all are called. */
+ return systemd_timer_delete_timer_file(SCHEDULE_HOURLY) |
+ systemd_timer_delete_timer_file(SCHEDULE_DAILY) |
+ systemd_timer_delete_timer_file(SCHEDULE_WEEKLY) |
+ systemd_timer_delete_service_template();
+}
+
+static int systemd_timer_delete_units(void)
+{
+ int minute = get_random_minute();
+ /* Purposefully not short-circuited to make sure all are called. */
+ return systemd_timer_enable_unit(0, SCHEDULE_HOURLY, minute) |
+ systemd_timer_enable_unit(0, SCHEDULE_DAILY, minute) |
+ systemd_timer_enable_unit(0, SCHEDULE_WEEKLY, minute) |
+ systemd_timer_delete_unit_files();
}
static int systemd_timer_setup_units(void)
{
+ int minute = get_random_minute();
const char *exec_path = git_exec_path();
- int ret = systemd_timer_write_unit_templates(exec_path) ||
- systemd_timer_enable_unit(1, SCHEDULE_HOURLY) ||
- systemd_timer_enable_unit(1, SCHEDULE_DAILY) ||
- systemd_timer_enable_unit(1, SCHEDULE_WEEKLY);
+ int ret = systemd_timer_write_service_template(exec_path) ||
+ systemd_timer_enable_unit(1, SCHEDULE_HOURLY, minute) ||
+ systemd_timer_enable_unit(1, SCHEDULE_DAILY, minute) ||
+ systemd_timer_enable_unit(1, SCHEDULE_WEEKLY, minute);
+
if (ret)
systemd_timer_delete_units();
+ else
+ systemd_timer_delete_stale_timer_templates();
+
return ret;
}
@@ -2606,9 +2789,12 @@ static int maintenance_start(int argc, const char **argv, const char *prefix)
opts.scheduler = resolve_scheduler(opts.scheduler);
validate_scheduler(opts.scheduler);
+ if (update_background_schedule(&opts, 1))
+ die(_("failed to set up maintenance schedule"));
+
if (maintenance_register(ARRAY_SIZE(register_args)-1, register_args, NULL))
warning(_("failed to add repo to global config"));
- return update_background_schedule(&opts, 1);
+ return 0;
}
static const char *const builtin_maintenance_stop_usage[] = {
diff --git a/builtin/index-pack.c b/builtin/index-pack.c
index 006ffdc9c550d7..b8cfb1328bbfc0 100644
--- a/builtin/index-pack.c
+++ b/builtin/index-pack.c
@@ -812,7 +812,7 @@ static void sha1_object(const void *data, struct object_entry *obj_entry,
read_lock();
collision_test_needed =
repo_has_object_file_with_flags(the_repository, oid,
- OBJECT_INFO_QUICK);
+ OBJECT_INFO_FOR_PREFETCH);
read_unlock();
}
diff --git a/builtin/reset.c b/builtin/reset.c
index 7a3e08bd1388b2..349a9d0d036c79 100644
--- a/builtin/reset.c
+++ b/builtin/reset.c
@@ -40,6 +40,8 @@
#include "add-interactive.h"
#include "strbuf.h"
#include "quote.h"
+#include "dir.h"
+#include "entry.h"
#define REFRESH_INDEX_DELAY_WARNING_IN_MS (2 * 1000)
@@ -156,9 +158,47 @@ static void update_index_from_diff(struct diff_queue_struct *q,
for (i = 0; i < q->nr; i++) {
int pos;
+ int respect_skip_worktree = 1;
struct diff_filespec *one = q->queue[i]->one;
+ struct diff_filespec *two = q->queue[i]->two;
int is_in_reset_tree = one->mode && !is_null_oid(&one->oid);
+ int is_missing = !(one->mode && !is_null_oid(&one->oid));
+ int was_missing = !two->mode && is_null_oid(&two->oid);
struct cache_entry *ce;
+ struct cache_entry *ceBefore;
+ struct checkout state = CHECKOUT_INIT;
+
+ /*
+ * When using the virtual filesystem feature, the cache entries that are
+ * added here will not have the skip-worktree bit set.
+ *
+ * Without this code there is data that is lost because the files that
+ * would normally be in the working directory are not there and show as
+ * deleted for the next status or in the case of added files just disappear.
+ * We need to create the previous version of the files in the working
+ * directory so that they will have the right content and the next
+ * status call will show modified or untracked files correctly.
+ */
+ if (core_virtualfilesystem && !file_exists(two->path))
+ {
+ pos = index_name_pos(&the_index, two->path, strlen(two->path));
+ if ((pos >= 0 && ce_skip_worktree(the_index.cache[pos])) &&
+ (is_missing || !was_missing))
+ {
+ state.force = 1;
+ state.refresh_cache = 1;
+ state.istate = &the_index;
+ ceBefore = make_cache_entry(&the_index, two->mode,
+ &two->oid, two->path,
+ 0, 0);
+ if (!ceBefore)
+ die(_("make_cache_entry failed for path '%s'"),
+ two->path);
+
+ checkout_entry(ceBefore, &state, NULL, NULL);
+ respect_skip_worktree = 0;
+ }
+ }
if (!is_in_reset_tree && !intent_to_add) {
remove_file_from_index(&the_index, one->path);
@@ -177,8 +217,14 @@ static void update_index_from_diff(struct diff_queue_struct *q,
* to properly construct the reset sparse directory.
*/
pos = index_name_pos(&the_index, one->path, strlen(one->path));
- if ((pos >= 0 && ce_skip_worktree(the_index.cache[pos])) ||
- (pos < 0 && !path_in_sparse_checkout(one->path, &the_index)))
+
+ /*
+ * Do not add the SKIP_WORKTREE bit back if we populated the
+ * file on purpose in a virtual filesystem scenario.
+ */
+ if (respect_skip_worktree &&
+ ((pos >= 0 && ce_skip_worktree(the_index.cache[pos])) ||
+ (pos < 0 && !path_in_sparse_checkout(one->path, &the_index))))
ce->ce_flags |= CE_SKIP_WORKTREE;
if (!ce)
diff --git a/builtin/rm.c b/builtin/rm.c
index dff819ae5098ff..82501a3df8ada1 100644
--- a/builtin/rm.c
+++ b/builtin/rm.c
@@ -5,6 +5,7 @@
*/
#define USE_THE_INDEX_VARIABLE
#include "builtin.h"
+#include "environment.h"
#include "advice.h"
#include "config.h"
#include "lockfile.h"
@@ -312,7 +313,7 @@ int cmd_rm(int argc, const char **argv, const char *prefix)
for (i = 0; i < the_index.cache_nr; i++) {
const struct cache_entry *ce = the_index.cache[i];
- if (!include_sparse &&
+ if (!include_sparse && !core_virtualfilesystem &&
(ce_skip_worktree(ce) ||
!path_in_sparse_checkout(ce->name, &the_index)))
continue;
@@ -349,7 +350,11 @@ int cmd_rm(int argc, const char **argv, const char *prefix)
*original ? original : ".");
}
- if (only_match_skip_worktree.nr) {
+ /*
+ * When using a virtual filesystem, we might re-add a path
+ * that is currently virtual and we want that to succeed.
+ */
+ if (!core_virtualfilesystem && only_match_skip_worktree.nr) {
advise_on_updating_sparse_paths(&only_match_skip_worktree);
ret = 1;
}
diff --git a/builtin/sparse-checkout.c b/builtin/sparse-checkout.c
index 5c8ffb1f7598b0..75295951eb7974 100644
--- a/builtin/sparse-checkout.c
+++ b/builtin/sparse-checkout.c
@@ -111,7 +111,7 @@ static int sparse_checkout_list(int argc, const char **argv, const char *prefix)
static void clean_tracked_sparse_directories(struct repository *r)
{
- int i, was_full = 0;
+ int i, value, was_full = 0;
struct strbuf path = STRBUF_INIT;
size_t pathlen;
struct string_list_item *item;
@@ -127,6 +127,13 @@ static void clean_tracked_sparse_directories(struct repository *r)
!r->index->sparse_checkout_patterns->use_cone_patterns)
return;
+ /*
+ * Users can disable this behavior.
+ */
+ if (!repo_config_get_bool(r, "index.deletesparsedirectories", &value) &&
+ !value)
+ return;
+
/*
* Use the sparse index as a data structure to assist finding
* directories that are safe to delete. This conversion to a
diff --git a/builtin/update-index.c b/builtin/update-index.c
index aee3cb8cbd3a03..51567300629053 100644
--- a/builtin/update-index.c
+++ b/builtin/update-index.c
@@ -5,6 +5,7 @@
*/
#define USE_THE_INDEX_VARIABLE
#include "builtin.h"
+#include "gvfs.h"
#include "bulk-checkin.h"
#include "config.h"
#include "environment.h"
@@ -1181,7 +1182,13 @@ int cmd_update_index(int argc, const char **argv, const char *prefix)
argc = parse_options_end(&ctx);
getline_fn = nul_term_line ? strbuf_getline_nul : strbuf_getline_lf;
+ if (mark_skip_worktree_only && gvfs_config_is_set(GVFS_BLOCK_COMMANDS))
+ die(_("modifying the skip worktree bit is not supported on a GVFS repo"));
+
if (preferred_index_format) {
+ if (preferred_index_format != 4 && gvfs_config_is_set(GVFS_BLOCK_COMMANDS))
+ die(_("changing the index version is not supported on a GVFS repo"));
+
if (preferred_index_format < INDEX_FORMAT_LB ||
INDEX_FORMAT_UB < preferred_index_format)
die("index-version %d not in range: %d..%d",
@@ -1222,6 +1229,9 @@ int cmd_update_index(int argc, const char **argv, const char *prefix)
end_odb_transaction();
if (split_index > 0) {
+ if (gvfs_config_is_set(GVFS_BLOCK_COMMANDS))
+ die(_("split index is not supported on a GVFS repo"));
+
if (git_config_get_split_index() == 0)
warning(_("core.splitIndex is set to false; "
"remove or change it, if you really want to "
diff --git a/builtin/update-microsoft-git.c b/builtin/update-microsoft-git.c
new file mode 100644
index 00000000000000..3152ee23c30096
--- /dev/null
+++ b/builtin/update-microsoft-git.c
@@ -0,0 +1,69 @@
+#include "builtin.h"
+#include "repository.h"
+#include "parse-options.h"
+#include "run-command.h"
+#include "strvec.h"
+
+#if defined(GIT_WINDOWS_NATIVE)
+/*
+ * On Windows, run 'git update-git-for-windows' which
+ * is installed by the installer, based on the script
+ * in git-for-windows/build-extra.
+ */
+static int platform_specific_upgrade(void)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ strvec_push(&cp.args, "git-update-git-for-windows");
+ return run_command(&cp);
+}
+#elif defined(__APPLE__)
+/*
+ * On macOS, we expect the user to have the microsoft-git
+ * cask installed via Homebrew. We check using these
+ * commands:
+ *
+ * 1. 'brew update' to get latest versions.
+ * 2. 'brew upgrade --cask microsoft-git' to get the
+ * latest version.
+ */
+static int platform_specific_upgrade(void)
+{
+ int res;
+ struct child_process update = CHILD_PROCESS_INIT;
+ struct child_process upgrade = CHILD_PROCESS_INIT;
+
+ printf("Updating Homebrew with 'brew update'\n");
+
+ strvec_pushl(&update.args, "brew", "update", NULL);
+ res = run_command(&update);
+
+ if (res) {
+ error(_("'brew update' failed; is brew installed?"));
+ return 1;
+ }
+
+ printf("Upgrading microsoft-git with 'brew upgrade --cask microsoft-git'\n");
+ strvec_pushl(&upgrade.args, "brew", "upgrade", "--cask", "microsoft-git", NULL);
+ res = run_command(&upgrade);
+
+ return res;
+}
+#else
+static int platform_specific_upgrade(void)
+{
+ error(_("update-microsoft-git is not supported on this platform"));
+ return 1;
+}
+#endif
+
+static const char builtin_update_microsoft_git_usage[] =
+ N_("git update-microsoft-git");
+
+int cmd_update_microsoft_git(int argc, const char **argv, const char *prefix)
+{
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage(builtin_update_microsoft_git_usage);
+
+ return platform_specific_upgrade();
+}
diff --git a/builtin/worktree.c b/builtin/worktree.c
index 4cd01842de79fb..07c46c95ebf0d6 100644
--- a/builtin/worktree.c
+++ b/builtin/worktree.c
@@ -1,6 +1,7 @@
#include "builtin.h"
#include "abspath.h"
#include "advice.h"
+#include "gvfs.h"
#include "checkout.h"
#include "config.h"
#include "copy.h"
@@ -1404,6 +1405,13 @@ int cmd_worktree(int ac, const char **av, const char *prefix)
git_config(git_worktree_config, NULL);
+ /*
+ * git-worktree is special-cased to work in Scalar repositories
+ * even when they use the GVFS Protocol.
+ */
+ if (core_gvfs & GVFS_USE_VIRTUAL_FILESYSTEM)
+ die("'git %s' is not supported on a GVFS repo", "worktree");
+
if (!prefix)
prefix = "";
diff --git a/cache-tree.c b/cache-tree.c
index 641427ed410af3..e81571a16826a4 100644
--- a/cache-tree.c
+++ b/cache-tree.c
@@ -1,6 +1,7 @@
#include "git-compat-util.h"
#include "environment.h"
#include "hex.h"
+#include "gvfs.h"
#include "lockfile.h"
#include "tree.h"
#include "tree-walk.h"
@@ -230,7 +231,7 @@ static void discard_unused_subtrees(struct cache_tree *it)
}
}
-int cache_tree_fully_valid(struct cache_tree *it)
+static int cache_tree_fully_valid_1(struct cache_tree *it)
{
int i;
if (!it)
@@ -238,7 +239,7 @@ int cache_tree_fully_valid(struct cache_tree *it)
if (it->entry_count < 0 || !repo_has_object_file(the_repository, &it->oid))
return 0;
for (i = 0; i < it->subtree_nr; i++) {
- if (!cache_tree_fully_valid(it->down[i]->cache_tree))
+ if (!cache_tree_fully_valid_1(it->down[i]->cache_tree))
return 0;
}
return 1;
@@ -249,6 +250,17 @@ static int must_check_existence(const struct cache_entry *ce)
return !(repo_has_promisor_remote(the_repository) && ce_skip_worktree(ce));
}
+int cache_tree_fully_valid(struct cache_tree *it)
+{
+ int result;
+
+ trace2_region_enter("cache_tree", "fully_valid", NULL);
+ result = cache_tree_fully_valid_1(it);
+ trace2_region_leave("cache_tree", "fully_valid", NULL);
+
+ return result;
+}
+
static int update_one(struct cache_tree *it,
struct cache_entry **cache,
int entries,
@@ -258,7 +270,8 @@ static int update_one(struct cache_tree *it,
int flags)
{
struct strbuf buffer;
- int missing_ok = flags & WRITE_TREE_MISSING_OK;
+ int missing_ok = gvfs_config_is_set(GVFS_MISSING_OK) ?
+ WRITE_TREE_MISSING_OK : (flags & WRITE_TREE_MISSING_OK);
int dryrun = flags & WRITE_TREE_DRY_RUN;
int repair = flags & WRITE_TREE_REPAIR;
int to_invalidate = 0;
@@ -427,7 +440,29 @@ static int update_one(struct cache_tree *it,
continue;
strbuf_grow(&buffer, entlen + 100);
- strbuf_addf(&buffer, "%o %.*s%c", mode, entlen, path + baselen, '\0');
+
+ switch (mode) {
+ case 0100644:
+ strbuf_add(&buffer, "100644 ", 7);
+ break;
+ case 0100664:
+ strbuf_add(&buffer, "100664 ", 7);
+ break;
+ case 0100755:
+ strbuf_add(&buffer, "100755 ", 7);
+ break;
+ case 0120000:
+ strbuf_add(&buffer, "120000 ", 7);
+ break;
+ case 0160000:
+ strbuf_add(&buffer, "160000 ", 7);
+ break;
+ default:
+ strbuf_addf(&buffer, "%o ", mode);
+ break;
+ }
+ strbuf_add(&buffer, path + baselen, entlen);
+ strbuf_addch(&buffer, '\0');
strbuf_add(&buffer, oid->hash, the_hash_algo->rawsz);
#if DEBUG_CACHE_TREE
diff --git a/commit.c b/commit.c
index b3223478bc2a3a..7bb48ce61bc626 100644
--- a/commit.c
+++ b/commit.c
@@ -1,4 +1,5 @@
#include "git-compat-util.h"
+#include "gvfs.h"
#include "tag.h"
#include "commit.h"
#include "commit-graph.h"
@@ -560,13 +561,17 @@ int repo_parse_commit_internal(struct repository *r,
.sizep = &size,
.contentp = &buffer,
};
+ int ret;
/*
* Git does not support partial clones that exclude commits, so set
* OBJECT_INFO_SKIP_FETCH_OBJECT to fail fast when an object is missing.
*/
int flags = OBJECT_INFO_LOOKUP_REPLACE | OBJECT_INFO_SKIP_FETCH_OBJECT |
- OBJECT_INFO_DIE_IF_CORRUPT;
- int ret;
+ OBJECT_INFO_DIE_IF_CORRUPT;
+
+ /* But the GVFS Protocol _does_ support missing commits! */
+ if (gvfs_config_is_set(GVFS_MISSING_OK))
+ flags ^= OBJECT_INFO_SKIP_FETCH_OBJECT;
if (!item)
return -1;
diff --git a/compat/mingw.c b/compat/mingw.c
index 8b7b526d9f6fa6..380fb0107000a6 100644
--- a/compat/mingw.c
+++ b/compat/mingw.c
@@ -4021,6 +4021,8 @@ int wmain(int argc, const wchar_t **wargv)
SetConsoleCtrlHandler(handle_ctrl_c, TRUE);
+ trace2_initialize_clock();
+
maybe_redirect_std_handles();
adjust_symlink_flags();
fsync_object_files = 1;
diff --git a/config.c b/config.c
index d5b2a9b5099687..680c970da1496e 100644
--- a/config.c
+++ b/config.c
@@ -9,6 +9,7 @@
#include "abspath.h"
#include "advice.h"
#include "date.h"
+#include "gvfs.h"
#include "branch.h"
#include "config.h"
#include "convert.h"
@@ -39,6 +40,7 @@
#include "worktree.h"
#include "ws.h"
#include "write-or-die.h"
+#include "transport.h"
struct config_source {
struct config_source *prev;
@@ -1776,8 +1778,22 @@ int git_default_core_config(const char *var, const char *value,
return 0;
}
+ if (!strcmp(var, "core.gvfs")) {
+ gvfs_load_config_value(value);
+ return 0;
+ }
+
+ if (!strcmp(var, "core.usegvfshelper")) {
+ core_use_gvfs_helper = git_config_bool(var, value);
+ return 0;
+ }
+
if (!strcmp(var, "core.sparsecheckout")) {
- core_apply_sparse_checkout = git_config_bool(var, value);
+ /* virtual file system relies on the sparse checkout logic so force it on */
+ if (core_virtualfilesystem)
+ core_apply_sparse_checkout = 1;
+ else
+ core_apply_sparse_checkout = git_config_bool(var, value);
return 0;
}
@@ -1801,6 +1817,11 @@ int git_default_core_config(const char *var, const char *value,
return 0;
}
+ if (!strcmp(var, "core.virtualizeobjects")) {
+ core_virtualize_objects = git_config_bool(var, value);
+ return 0;
+ }
+
/* Add other config variables here and to Documentation/config.txt. */
return platform_core_config(var, value, ctx, cb);
}
@@ -1904,6 +1925,35 @@ static int git_default_mailmap_config(const char *var, const char *value)
return 0;
}
+static int git_default_gvfs_config(const char *var, const char *value)
+{
+ if (!strcmp(var, "gvfs.cache-server")) {
+ const char *v2 = NULL;
+
+ if (!git_config_string(&v2, var, value) && v2 && *v2)
+ gvfs_cache_server_url = transport_anonymize_url(v2);
+ free((char*)v2);
+ return 0;
+ }
+
+ if (!strcmp(var, "gvfs.sharedcache") && value && *value) {
+ strbuf_setlen(&gvfs_shared_cache_pathname, 0);
+ strbuf_addstr(&gvfs_shared_cache_pathname, value);
+ if (strbuf_normalize_path(&gvfs_shared_cache_pathname) < 0) {
+ /*
+ * Pretend it wasn't set. This will cause us to
+ * fallback to ".git/objects" effectively.
+ */
+ strbuf_release(&gvfs_shared_cache_pathname);
+ return 0;
+ }
+ strbuf_trim_trailing_dir_sep(&gvfs_shared_cache_pathname);
+ return 0;
+ }
+
+ return 0;
+}
+
int git_default_config(const char *var, const char *value,
const struct config_context *ctx, void *cb)
{
@@ -1954,6 +2004,9 @@ int git_default_config(const char *var, const char *value,
if (starts_with(var, "sparse."))
return git_default_sparse_config(var, value);
+ if (starts_with(var, "gvfs."))
+ return git_default_gvfs_config(var, value);
+
/* Add other config variables here and to Documentation/config.txt. */
return 0;
}
@@ -2908,6 +2961,44 @@ int git_config_get_max_percent_split_change(void)
return -1; /* default value */
}
+int git_config_get_virtualfilesystem(void)
+{
+ /* Run only once. */
+ static int virtual_filesystem_result = -1;
+ if (virtual_filesystem_result >= 0)
+ return virtual_filesystem_result;
+
+ if (git_config_get_pathname("core.virtualfilesystem", &core_virtualfilesystem))
+ core_virtualfilesystem = getenv("GIT_VIRTUALFILESYSTEM_TEST");
+
+ if (core_virtualfilesystem && !*core_virtualfilesystem)
+ core_virtualfilesystem = NULL;
+
+ if (core_virtualfilesystem) {
+ /*
+ * Some git commands spawn helpers and redirect the index to a different
+ * location. These include "difftool -d" and the sequencer
+ * (i.e. `git rebase -i`, `git cherry-pick` and `git revert`) and others.
+ * In those instances we don't want to update their temporary index with
+ * our virtualization data.
+ */
+ char *default_index_file = xstrfmt("%s/%s", the_repository->gitdir, "index");
+ int should_run_hook = !strcmp(default_index_file, the_repository->index_file);
+
+ free(default_index_file);
+ if (should_run_hook) {
+ /* virtual file system relies on the sparse checkout logic so force it on */
+ core_apply_sparse_checkout = 1;
+ virtual_filesystem_result = 1;
+ return 1;
+ }
+ core_virtualfilesystem = NULL;
+ }
+
+ virtual_filesystem_result = 0;
+ return 0;
+}
+
int git_config_get_index_threads(int *dest)
{
int is_bool, val;
@@ -3324,6 +3415,7 @@ int git_config_set_multivar_in_file_gently(const char *config_filename,
const char *value_pattern,
unsigned flags)
{
+ static unsigned long timeout_ms = ULONG_MAX;
int fd = -1, in_fd = -1;
int ret;
struct lock_file lock = LOCK_INIT;
@@ -3342,11 +3434,16 @@ int git_config_set_multivar_in_file_gently(const char *config_filename,
if (!config_filename)
config_filename = filename_buf = git_pathdup("config");
+ if ((long)timeout_ms < 0 &&
+ git_config_get_ulong("core.configWriteLockTimeoutMS", &timeout_ms))
+ timeout_ms = 0;
+
/*
* The lock serves a purpose in addition to locking: the new
* contents of .git/config will be written into it.
*/
- fd = hold_lock_file_for_update(&lock, config_filename, 0);
+ fd = hold_lock_file_for_update_timeout(&lock, config_filename, 0,
+ timeout_ms);
if (fd < 0) {
error_errno(_("could not lock config file %s"), config_filename);
ret = CONFIG_NO_LOCK;
diff --git a/config.h b/config.h
index eaac1e00f80a5e..c54a3b6ab05acf 100644
--- a/config.h
+++ b/config.h
@@ -703,6 +703,7 @@ int git_config_get_pathname(const char *key, const char **dest);
int git_config_get_index_threads(int *dest);
int git_config_get_split_index(void);
int git_config_get_max_percent_split_change(void);
+int git_config_get_virtualfilesystem(void);
/* This dies if the configured or default date is in the future */
int git_config_get_expiry(const char *key, const char **output);
diff --git a/connected.c b/connected.c
index 8f89376dbcf30c..12b9f49cdfa801 100644
--- a/connected.c
+++ b/connected.c
@@ -1,6 +1,8 @@
#include "git-compat-util.h"
+#include "environment.h"
#include "gettext.h"
#include "hex.h"
+#include "gvfs.h"
#include "object-store-ll.h"
#include "run-command.h"
#include "sigchain.h"
@@ -32,6 +34,26 @@ int check_connected(oid_iterate_fn fn, void *cb_data,
struct transport *transport;
size_t base_len;
+ /*
+ * Running a virtual file system there will be objects that are
+ * missing locally and we don't want to download a bunch of
+ * commits, trees, and blobs just to make sure everything is
+	 * reachable locally so this option will skip reachability
+ * checks below that use rev-list. This will stop the check
+ * before uploadpack runs to determine if there is anything to
+ * fetch. Returning zero for the first check will also prevent the
+ * uploadpack from happening. It will also skip the check after
+	 * the fetch is finished to make sure all the objects were
+ * downloaded in the pack file. This will allow the fetch to
+ * run and get all the latest tip commit ids for all the branches
+ * in the fetch but not pull down commits, trees, or blobs via
+ * upload pack.
+ */
+ if (gvfs_config_is_set(GVFS_FETCH_SKIP_REACHABILITY_AND_UPLOADPACK))
+ return 0;
+ if (core_virtualize_objects)
+ return 0;
+
if (!opt)
opt = &defaults;
transport = opt->transport;
diff --git a/contrib/buildsystems/CMakeLists.txt b/contrib/buildsystems/CMakeLists.txt
index a0efeb3d2c7b9e..d0d620ed740980 100644
--- a/contrib/buildsystems/CMakeLists.txt
+++ b/contrib/buildsystems/CMakeLists.txt
@@ -648,7 +648,7 @@ if(NOT CURL_FOUND)
add_compile_definitions(NO_CURL)
message(WARNING "git-http-push and git-http-fetch will not be built")
else()
- list(APPEND PROGRAMS_BUILT git-http-fetch git-http-push git-imap-send git-remote-http)
+ list(APPEND PROGRAMS_BUILT git-http-fetch git-http-push git-imap-send git-remote-http git-gvfs-helper)
if(CURL_VERSION_STRING VERSION_GREATER_EQUAL 7.34.0)
add_compile_definitions(USE_CURL_FOR_IMAP_SEND)
endif()
@@ -798,7 +798,7 @@ target_link_libraries(git-sh-i18n--envsubst common-main)
add_executable(git-shell ${CMAKE_SOURCE_DIR}/shell.c)
target_link_libraries(git-shell common-main)
-add_executable(scalar ${CMAKE_SOURCE_DIR}/scalar.c)
+add_executable(scalar ${CMAKE_SOURCE_DIR}/scalar.c ${CMAKE_SOURCE_DIR}/json-parser.c)
target_link_libraries(scalar common-main)
if(CURL_FOUND)
@@ -817,6 +817,9 @@ if(CURL_FOUND)
add_executable(git-http-push ${CMAKE_SOURCE_DIR}/http-push.c)
target_link_libraries(git-http-push http_obj common-main ${CURL_LIBRARIES} ${EXPAT_LIBRARIES})
endif()
+
+ add_executable(git-gvfs-helper ${CMAKE_SOURCE_DIR}/gvfs-helper.c)
+ target_link_libraries(git-gvfs-helper http_obj common-main ${CURL_LIBRARIES} )
endif()
parse_makefile_for_executables(git_builtin_extra "BUILT_INS")
@@ -1026,6 +1029,20 @@ set(wrapper_scripts
set(wrapper_test_scripts
test-fake-ssh test-tool)
+if(CURL_FOUND)
+ list(APPEND wrapper_test_scripts test-gvfs-protocol)
+
+ add_executable(test-gvfs-protocol ${CMAKE_SOURCE_DIR}/t/helper/test-gvfs-protocol.c)
+ target_link_libraries(test-gvfs-protocol common-main)
+
+ if(MSVC)
+ set_target_properties(test-gvfs-protocol
+ PROPERTIES RUNTIME_OUTPUT_DIRECTORY_DEBUG ${CMAKE_BINARY_DIR}/t/helper)
+ set_target_properties(test-gvfs-protocol
+ PROPERTIES RUNTIME_OUTPUT_DIRECTORY_RELEASE ${CMAKE_BINARY_DIR}/t/helper)
+ endif()
+endif()
+
foreach(script ${wrapper_scripts})
file(STRINGS ${CMAKE_SOURCE_DIR}/wrap-for-bin.sh content NEWLINE_CONSUME)
diff --git a/contrib/completion/git-completion.bash b/contrib/completion/git-completion.bash
index 133ec92bfae721..9dbdf483b81e30 100644
--- a/contrib/completion/git-completion.bash
+++ b/contrib/completion/git-completion.bash
@@ -1675,7 +1675,7 @@ _git_clone ()
esac
}
-__git_untracked_file_modes="all no normal"
+__git_untracked_file_modes="all no normal complete"
_git_commit ()
{
diff --git a/contrib/long-running-read-object/example.pl b/contrib/long-running-read-object/example.pl
new file mode 100644
index 00000000000000..b8f37f836a813c
--- /dev/null
+++ b/contrib/long-running-read-object/example.pl
@@ -0,0 +1,114 @@
+#!/usr/bin/perl
+#
+# Example implementation for the Git read-object protocol version 1
+# See Documentation/technical/read-object-protocol.txt
+#
+# Allows you to test the ability for blobs to be pulled from a host git repo
+# "on demand." Called when git needs a blob it couldn't find locally due to
+# a lazy clone that only cloned the commits and trees.
+#
+# A lazy clone can be simulated via the following commands from the host repo
+# you wish to create a lazy clone of:
+#
+# cd /host_repo
+# git rev-parse HEAD
+# git init /guest_repo
+# git cat-file --batch-check --batch-all-objects | grep -v 'blob' |
+# cut -d' ' -f1 | git pack-objects /guest_repo/.git/objects/pack/noblobs
+# cd /guest_repo
+# git config core.virtualizeobjects true
+# git reset --hard
+#
+# Please note, this sample is a minimal skeleton. No proper error handling
+# was implemented.
+#
+
+use strict;
+use warnings;
+
+#
+# Point $DIR to the folder where your host git repo is located so we can pull
+# missing objects from it
+#
+my $DIR = "/host_repo/.git/";
+
+sub packet_bin_read {
+ my $buffer;
+ my $bytes_read = read STDIN, $buffer, 4;
+ if ( $bytes_read == 0 ) {
+
+ # EOF - Git stopped talking to us!
+ exit();
+ }
+ elsif ( $bytes_read != 4 ) {
+ die "invalid packet: '$buffer'";
+ }
+ my $pkt_size = hex($buffer);
+ if ( $pkt_size == 0 ) {
+ return ( 1, "" );
+ }
+ elsif ( $pkt_size > 4 ) {
+ my $content_size = $pkt_size - 4;
+ $bytes_read = read STDIN, $buffer, $content_size;
+ if ( $bytes_read != $content_size ) {
+ die "invalid packet ($content_size bytes expected; $bytes_read bytes read)";
+ }
+ return ( 0, $buffer );
+ }
+ else {
+ die "invalid packet size: $pkt_size";
+ }
+}
+
+sub packet_txt_read {
+ my ( $res, $buf ) = packet_bin_read();
+ unless ( $buf =~ s/\n$// ) {
+ die "A non-binary line MUST be terminated by an LF.";
+ }
+ return ( $res, $buf );
+}
+
+sub packet_bin_write {
+ my $buf = shift;
+ print STDOUT sprintf( "%04x", length($buf) + 4 );
+ print STDOUT $buf;
+ STDOUT->flush();
+}
+
+sub packet_txt_write {
+ packet_bin_write( $_[0] . "\n" );
+}
+
+sub packet_flush {
+ print STDOUT sprintf( "%04x", 0 );
+ STDOUT->flush();
+}
+
+( packet_txt_read() eq ( 0, "git-read-object-client" ) ) || die "bad initialize";
+( packet_txt_read() eq ( 0, "version=1" ) ) || die "bad version";
+( packet_bin_read() eq ( 1, "" ) ) || die "bad version end";
+
+packet_txt_write("git-read-object-server");
+packet_txt_write("version=1");
+packet_flush();
+
+( packet_txt_read() eq ( 0, "capability=get" ) ) || die "bad capability";
+( packet_bin_read() eq ( 1, "" ) ) || die "bad capability end";
+
+packet_txt_write("capability=get");
+packet_flush();
+
+while (1) {
+ my ($command) = packet_txt_read() =~ /^command=([^=]+)$/;
+
+ if ( $command eq "get" ) {
+ my ($sha1) = packet_txt_read() =~ /^sha1=([0-9a-f]{40})$/;
+ packet_bin_read();
+
+ system ('git --git-dir="' . $DIR . '" cat-file blob ' . $sha1 . ' | git -c core.virtualizeobjects=false hash-object -w --stdin >/dev/null 2>&1');
+ packet_txt_write(($?) ? "status=error" : "status=success");
+ packet_flush();
+ } else {
+ die "bad command '$command'";
+ }
+}
diff --git a/contrib/scalar/docs/faq.md b/contrib/scalar/docs/faq.md
new file mode 100644
index 00000000000000..a14f78a996d5d5
--- /dev/null
+++ b/contrib/scalar/docs/faq.md
@@ -0,0 +1,51 @@
+Frequently Asked Questions
+==========================
+
+Using Scalar
+------------
+
+### I don't want a sparse clone, I want every file after I clone!
+
+Run `scalar clone --full-clone <url>` to initialize your repo to include
+every file. You can switch to a sparse-checkout later by running
+`git sparse-checkout init --cone`.
+
+### I already cloned without `--full-clone`. How do I get everything?
+
+Run `git sparse-checkout disable`.
+
+Scalar Design Decisions
+-----------------------
+
+There may be many design decisions within Scalar that are confusing at first
+glance. Some of them may cause friction when you use Scalar with your existing
+repos and existing habits.
+
+> Scalar has the most benefit when users design repositories
+> with efficient patterns.
+
+For example: Scalar uses the sparse-checkout feature to limit the size of the
+working directory within a large monorepo. It is designed to work efficiently
+with monorepos that are highly componentized, allowing most developers to
+need many fewer files in their daily work.
+
+### Why does `scalar clone` create a `/src` folder?
+
+Scalar uses a file system watcher to keep track of changes under this `src` folder.
+Any activity in this folder is assumed to be important to Git operations. By
+creating the `src` folder, we are making it easy for your build system to
+create output folders outside the `src` directory. We commonly see systems
+create folders for build outputs and package downloads. Scalar itself creates
+these folders during its builds.
+
+Your build system may create build artifacts such as `.obj` or `.lib` files
+next to your source code. These are commonly "hidden" from Git using
+`.gitignore` files. Having such artifacts in your source tree creates
+additional work for Git because it needs to look at these files and match them
+against the `.gitignore` patterns.
+
+By following the `src` pattern Scalar tries to establish and placing your build
+intermediates and outputs parallel with the `src` folder and not inside it,
+you can help optimize Git command performance for developers in the repository
+by limiting the number of files Git needs to consider for many common
+operations.
diff --git a/contrib/scalar/docs/getting-started.md b/contrib/scalar/docs/getting-started.md
new file mode 100644
index 00000000000000..d5125330320d2c
--- /dev/null
+++ b/contrib/scalar/docs/getting-started.md
@@ -0,0 +1,109 @@
+Getting Started
+===============
+
+Registering existing Git repos
+------------------------------
+
+To add a repository to the list of registered repos, run `scalar register [<path>]`.
+If `<path>` is not provided, then the "current repository" is discovered from
+the working directory by scanning the parent paths for a path containing a `.git`
+folder, possibly inside a `src` folder.
+
+To see which repositories are currently tracked by the service, run
+`scalar list`.
+
+Run `scalar unregister [<path>]` to remove the repo from this list.
+
+Creating a new Scalar clone
+---------------------------------------------------
+
+The `clone` verb creates a local enlistment of a remote repository using the
+partial clone feature available e.g. on GitHub, or using the
+[GVFS protocol](https://github.com/microsoft/VFSForGit/blob/HEAD/Protocol.md),
+such as Azure Repos.
+
+```
+scalar clone [options] <url> [<dir>]
+```
+
+Create a local copy of the repository at `<url>`. If specified, create the `<dir>`
+directory and place the repository there. Otherwise, the last section of the `<url>`
+will be used for `<dir>`.
+
+At the end, the repo is located at `<dir>/src`. By default, the sparse-checkout
+feature is enabled and the only files present are those in the root of your
+Git repository. Use `git sparse-checkout set` to expand the set of directories
+you want to see, or `git sparse-checkout disable` to expand to all files. You
+can explore the subdirectories outside your sparse-checkout specification using
+`git ls-tree HEAD`.
+
+### Sparse Repo Mode
+
+By default, Scalar reduces your working directory to only the files at the
+root of the repository. You need to add the folders you care about to build up
+to your working set.
+
+* `scalar clone <url>`
+ * Please choose the **Clone with HTTPS** option in the `Clone Repository` dialog in Azure Repos, not **Clone with SSH**.
+* `cd <dir>\src`
+* At this point, your `src` directory only contains files that appear in your root
+ tree. No folders are populated.
+* Set the directory list for your sparse-checkout using:
+  1. `git sparse-checkout set <dir1> <dir2> ...`
+ 2. `git sparse-checkout set --stdin < dir-list.txt`
+* Run git commands as you normally would.
+* To fully populate your working directory, run `git sparse-checkout disable`.
+
+If instead you want to start with all files on-disk, you can clone with the
+`--full-clone` option. To enable sparse-checkout after the fact, run
+`git sparse-checkout init --cone`. This will initialize your sparse-checkout
+patterns to only match the files at root.
+
+If you are unfamiliar with what directories are available in the repository,
+then you can run `git ls-tree -d --name-only HEAD` to discover the directories
+at root, or `git ls-tree -d --name-only HEAD <path>` to discover the directories
+in `<path>`.
+
+### Options
+
+These options allow a user to customize their initial enlistment.
+
+* `--full-clone`: If specified, do not initialize the sparse-checkout feature.
+ All files will be present in your `src` directory. This behaves very similar
+ to a Git partial clone in that blobs are downloaded on demand. However, it
+ will use the GVFS protocol to download all Git objects.
+
+* `--cache-server-url=<url>`: If specified, set the intended cache server to
+  the specified `<url>`. All object queries will use the GVFS protocol to this
+  `<url>` instead of the origin remote. If the remote supplies a list of
+ cache servers via the `/gvfs/config` endpoint, then the `clone` command
+ will select a nearby cache server from that list.
+
+* `--branch=<ref>`: Specify the branch to checkout after clone.
+
+* `--local-cache-path=<path>`: Use this option to override the path for the
+  local Scalar cache. If not specified, then Scalar will select a default
+  path to share objects with your other enlistments. On Windows, this path
+  is a subdirectory of `<Volume>:\.scalarCache\`. On Mac, this path is a
+ subdirectory of `~/.scalarCache/`. The default cache path is recommended so
+ multiple enlistments of the same remote repository share objects on the
+ same device.
+
+### Advanced Options
+
+The options below are not intended for use by a typical user. These are
+usually used by build machines to create a temporary enlistment that
+operates on a single commit.
+
+* `--single-branch`: Use this option to only download metadata for the branch
+ that will be checked out. This is helpful for build machines that target
+ a remote with many branches. Any `git fetch` commands after the clone will
+ still ask for all branches.
+
+Removing a Scalar Clone
+-----------------------
+
+Since the `scalar clone` command sets up a file-system watcher (when available),
+that watcher could prevent deleting the enlistment. Run `scalar delete <path>`
+from outside of your enlistment to unregister the enlistment from the filesystem
+watcher and delete the enlistment at `<path>`.
diff --git a/contrib/scalar/docs/index.md b/contrib/scalar/docs/index.md
new file mode 100644
index 00000000000000..4f56e2b0ebbac6
--- /dev/null
+++ b/contrib/scalar/docs/index.md
@@ -0,0 +1,54 @@
+Scalar: Enabling Git at Scale
+=============================
+
+Scalar is a tool that helps Git scale to some of the largest Git repositories.
+It achieves this by enabling some advanced Git features, such as:
+
+* *Partial clone:* reduces time to get a working repository by not
+ downloading all Git objects right away.
+
+* *Background prefetch:* downloads Git object data from all remotes every
+ hour, reducing the amount of time for foreground `git fetch` calls.
+
+* *Sparse-checkout:* limits the size of your working directory.
+
+* *File system monitor:* tracks the recently modified files and eliminates
+ the need for Git to scan the entire worktree.
+
+* *Commit-graph:* accelerates commit walks and reachability calculations,
+ speeding up commands like `git log`.
+
+* *Multi-pack-index:* enables fast object lookups across many pack-files.
+
+* *Incremental repack:* Repacks the packed Git data into fewer pack-files
+ without disrupting concurrent commands by using the multi-pack-index.
+
+By running `scalar register` in any Git repo, Scalar will automatically enable
+these features for that repo (except partial clone) and start running suggested
+maintenance in the background using
+[the `git maintenance` feature](https://git-scm.com/docs/git-maintenance).
+
+Repos cloned with the `scalar clone` command use partial clone or the
+[GVFS protocol](https://github.com/microsoft/VFSForGit/blob/HEAD/Protocol.md)
+to significantly reduce the amount of data required to get started
+using a repository. By delaying all blob downloads until they are required,
+Scalar allows you to work with very large repositories quickly. The GVFS
+protocol allows a network of _cache servers_ to serve objects with lower
+latency and higher throughput. The cache servers also reduce load on the
+central server.
+
+Documentation
+-------------
+
+* [Getting Started](getting-started.md): Get started with Scalar.
+ Includes `scalar register`, `scalar unregister`, `scalar clone`, and
+ `scalar delete`.
+
+* [Troubleshooting](troubleshooting.md):
+ Collect diagnostic information or update custom settings. Includes
+ `scalar diagnose` and `scalar cache-server`.
+
+* [The Philosophy of Scalar](philosophy.md): Why does Scalar work the way
+ it does, and how do we make decisions about its future?
+
+* [Frequently Asked Questions](faq.md)
diff --git a/contrib/scalar/docs/philosophy.md b/contrib/scalar/docs/philosophy.md
new file mode 100644
index 00000000000000..e3dfa025a2504c
--- /dev/null
+++ b/contrib/scalar/docs/philosophy.md
@@ -0,0 +1,71 @@
+The Philosophy of Scalar
+========================
+
+The team building Scalar has **opinions** about Git performance. Scalar
+takes out the guesswork by automatically configuring your Git repositories
+to take advantage of the latest and greatest features. It is difficult to
+say that these are the absolute best settings for every repository, but
+these settings do work for some of the largest repositories in the world.
+
+Scalar intends to do very little more than the standard Git client. We
+actively implement new features into Git instead of Scalar, then update
+Scalar only to configure those new settings. In particular, we ported
+features like background maintenance to Git to make Scalar simpler and
+make Git more powerful.
+
+Scalar ships inside [a custom version of Git][microsoft-git], but we are
+working to make it available in other forks of Git. The only feature
+that is not intended to ever reach the standard Git client is Scalar's use
+of [the GVFS Protocol][gvfs-protocol], which is essentially an older
+version of [Git's partial clone feature](https://github.blog/2020-12-21-get-up-to-speed-with-partial-clone-and-shallow-clone/)
+that was available first in Azure Repos. Services such as GitHub support
+only partial clone instead of the GVFS protocol because that is the
+standard adopted by the Git project. If your hosting service supports
+partial clone, then we absolutely recommend it as a way to greatly speed
+up your clone and fetch times and to reduce how much disk space your Git
+repository requires. Scalar will help with this!
+
+If you don't use the GVFS Protocol, then most of the value of Scalar can
+be found in the core Git client. However, most of the advanced features
+that really optimize Git's performance are off by default for compatibility
+reasons. To really take advantage of Git's latest and greatest features,
+you need to study the [`git config` documentation](https://git-scm.com/docs/git-config)
+and regularly read [the Git release notes](https://github.com/git/git/tree/master/Documentation/RelNotes).
+Even if you do all that work and customize your Git settings on your machines,
+you likely will want to share those settings with other team members.
+Or, you can just use Scalar!
+
+Using `scalar register` on an existing Git repository will give you these
+benefits:
+
+* Additional compression of your `.git/index` file.
+* Hourly background `git fetch` operations, keeping you in-sync with your
+ remotes.
+* Advanced data structures, such as the `commit-graph` and `multi-pack-index`
+ are updated automatically in the background.
+* If using macOS or Windows, then Scalar configures Git's builtin File System
+ Monitor, providing faster commands such as `git status` or `git add`.
+
+Additionally, if you use `scalar clone` to create a new repository, then
+you will automatically get these benefits:
+
+* Use Git's partial clone feature to only download the files you need for
+ your current checkout.
+* Use Git's [sparse-checkout feature][sparse-checkout] to minimize the
+ number of files required in your working directory.
+ [Read more about sparse-checkout here.][sparse-checkout-blog]
+* Create the Git repository inside `<path>/src` to make it easy to
+  place build artifacts outside of the Git repository, such as in
+  `<path>/bin` or `<path>/packages`.
+
+We also admit that these **opinions** can always be improved! If you have
+an idea of how to improve our setup, consider
+[creating an issue](https://github.com/microsoft/scalar/issues/new) or
+contributing a pull request! Some [existing](https://github.com/microsoft/scalar/issues/382)
+[issues](https://github.com/microsoft/scalar/issues/388) have already
+improved our configuration settings and roadmap!
+
+[gvfs-protocol]: https://github.com/microsoft/VFSForGit/blob/HEAD/Protocol.md
+[microsoft-git]: https://github.com/microsoft/git
+[sparse-checkout]: https://git-scm.com/docs/git-sparse-checkout
+[sparse-checkout-blog]: https://github.blog/2020-01-17-bring-your-monorepo-down-to-size-with-sparse-checkout/
diff --git a/contrib/scalar/docs/troubleshooting.md b/contrib/scalar/docs/troubleshooting.md
new file mode 100644
index 00000000000000..c54d2438f22523
--- /dev/null
+++ b/contrib/scalar/docs/troubleshooting.md
@@ -0,0 +1,40 @@
+Troubleshooting
+===============
+
+Diagnosing Issues
+-----------------
+
+The `scalar diagnose` command collects logs and config details for the current
+repository. The resulting zip file helps root-cause issues.
+
+When run inside your repository, this command creates a zip file containing
+several important files for that repository. This includes:
+
+* Configuration files from your `.git` folder, such as the `config` file,
+ `index`, `hooks`, and `refs`.
+
+* A summary of your Git object database, including the number of loose objects
+ and the names and sizes of pack-files.
+
+As the `diagnose` command completes, it provides the path of the resulting
+zip file. This zip can be attached to bug reports to make the analysis easier.
+
+Modifying Configuration Values
+------------------------------
+
+The Scalar-specific configuration is only available for repos using the
+GVFS protocol.
+
+### Cache Server URL
+
+When using an enlistment cloned with `scalar clone` and the GVFS protocol,
+you will have a value called the cache server URL. Cache servers are a feature
+of the GVFS protocol to provide low-latency access to the on-demand object
+requests. This modifies the `gvfs.cache-server` setting in your local Git config
+file.
+
+Run `scalar cache-server --get` to see the current cache server.
+
+Run `scalar cache-server --list` to see the available cache server URLs.
+
+Run `scalar cache-server --set=<url>` to set your cache server to `<url>`.
diff --git a/convert.c b/convert.c
index a8870baff36a4a..e3b6c52011efab 100644
--- a/convert.c
+++ b/convert.c
@@ -1,5 +1,6 @@
#include "git-compat-util.h"
#include "advice.h"
+#include "gvfs.h"
#include "config.h"
#include "convert.h"
#include "copy.h"
@@ -555,6 +556,9 @@ static int crlf_to_git(struct index_state *istate,
if (!buf)
return 1;
+ if (gvfs_config_is_set(GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS))
+ die("CRLF conversions not supported when running under GVFS");
+
/* only grow if not in place */
if (strbuf_avail(buf) + buf->len < len)
strbuf_grow(buf, len - buf->len);
@@ -594,6 +598,9 @@ static int crlf_to_worktree(const char *src, size_t len, struct strbuf *buf,
if (!will_convert_lf_to_crlf(&stats, crlf_action))
return 0;
+ if (gvfs_config_is_set(GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS))
+ die("CRLF conversions not supported when running under GVFS");
+
/* are we "faking" in place editing ? */
if (src == buf->buf)
to_free = strbuf_detach(buf, NULL);
@@ -703,6 +710,9 @@ static int apply_single_file_filter(const char *path, const char *src, size_t le
struct async async;
struct filter_params params;
+ if (gvfs_config_is_set(GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS))
+ die("Filter \"%s\" not supported when running under GVFS", cmd);
+
memset(&async, 0, sizeof(async));
async.proc = filter_buffer_or_fd;
async.data = ¶ms;
@@ -1116,6 +1126,9 @@ static int ident_to_git(const char *src, size_t len,
if (!buf)
return 1;
+ if (gvfs_config_is_set(GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS))
+ die("ident conversions not supported when running under GVFS");
+
/* only grow if not in place */
if (strbuf_avail(buf) + buf->len < len)
strbuf_grow(buf, len - buf->len);
@@ -1163,6 +1176,9 @@ static int ident_to_worktree(const char *src, size_t len,
if (!cnt)
return 0;
+ if (gvfs_config_is_set(GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS))
+ die("ident conversions not supported when running under GVFS");
+
/* are we "faking" in place editing ? */
if (src == buf->buf)
to_free = strbuf_detach(buf, NULL);
@@ -1612,6 +1628,9 @@ static int lf_to_crlf_filter_fn(struct stream_filter *filter,
size_t count, o = 0;
struct lf_to_crlf_filter *lf_to_crlf = (struct lf_to_crlf_filter *)filter;
+ if (gvfs_config_is_set(GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS))
+ die("CRLF conversions not supported when running under GVFS");
+
/*
* We may be holding onto the CR to see if it is followed by a
* LF, in which case we would need to go to the main loop.
@@ -1856,6 +1875,9 @@ static int ident_filter_fn(struct stream_filter *filter,
struct ident_filter *ident = (struct ident_filter *)filter;
static const char head[] = "$Id";
+ if (gvfs_config_is_set(GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS))
+ die("ident conversions not supported when running under GVFS");
+
if (!input) {
/* drain upon eof */
switch (ident->state) {
diff --git a/credential.c b/credential.c
index d6647541634f38..807c2fdea60cea 100644
--- a/credential.c
+++ b/credential.c
@@ -11,6 +11,8 @@
#include "strbuf.h"
#include "urlmatch.h"
#include "git-compat-util.h"
+#include "trace2.h"
+#include "repository.h"
void credential_init(struct credential *c)
{
@@ -198,14 +200,36 @@ static char *credential_ask_one(const char *what, struct credential *c,
return xstrdup(r);
}
-static void credential_getpass(struct credential *c)
+static int credential_getpass(struct credential *c)
{
+ int interactive;
+ char *value;
+ if (!git_config_get_maybe_bool("credential.interactive", &interactive) &&
+ !interactive) {
+ trace2_data_intmax("credential", the_repository,
+ "interactive/skipped", 1);
+ return -1;
+ }
+ if (!git_config_get_string("credential.interactive", &value)) {
+ int same = !strcmp(value, "never");
+ free(value);
+ if (same) {
+ trace2_data_intmax("credential", the_repository,
+ "interactive/skipped", 1);
+ return -1;
+ }
+ }
+
+ trace2_region_enter("credential", "interactive", the_repository);
if (!c->username)
c->username = credential_ask_one("Username", c,
PROMPT_ASKPASS|PROMPT_ECHO);
if (!c->password)
c->password = credential_ask_one("Password", c,
PROMPT_ASKPASS);
+ trace2_region_leave("credential", "interactive", the_repository);
+
+ return 0;
}
int credential_read(struct credential *c, FILE *fp)
@@ -312,6 +336,8 @@ static int run_credential_helper(struct credential *c,
else
helper.no_stdout = 1;
+ helper.trace2_child_class = "cred";
+
if (start_command(&helper) < 0)
return -1;
@@ -381,8 +407,8 @@ void credential_fill(struct credential *c)
c->helpers.items[i].string);
}
- credential_getpass(c);
- if (!c->username && !c->password)
+ if (credential_getpass(c) ||
+ (!c->username && !c->password))
die("unable to get password from user");
}
diff --git a/diagnose.c b/diagnose.c
index 8430064000bcba..e5114a3c9adc01 100644
--- a/diagnose.c
+++ b/diagnose.c
@@ -11,6 +11,7 @@
#include "packfile.h"
#include "parse-options.h"
#include "write-or-die.h"
+#include "config.h"
struct archive_dir {
const char *path;
@@ -107,6 +108,39 @@ static unsigned char get_dtype(struct dirent *e, struct strbuf *path)
return dtype;
}
+static void dir_stats(struct strbuf *buf, const char *path)
+{
+ DIR *dir = opendir(path);
+ struct dirent *e;
+ struct stat e_stat;
+ struct strbuf file_path = STRBUF_INIT;
+ size_t base_path_len;
+
+ if (!dir)
+ return;
+
+ strbuf_addstr(buf, "Contents of ");
+ strbuf_add_absolute_path(buf, path);
+ strbuf_addstr(buf, ":\n");
+
+ strbuf_add_absolute_path(&file_path, path);
+ strbuf_addch(&file_path, '/');
+ base_path_len = file_path.len;
+
+ while ((e = readdir(dir)) != NULL)
+ if (!is_dot_or_dotdot(e->d_name) && e->d_type == DT_REG) {
+ strbuf_setlen(&file_path, base_path_len);
+ strbuf_addstr(&file_path, e->d_name);
+ if (!stat(file_path.buf, &e_stat))
+ strbuf_addf(buf, "%-70s %16"PRIuMAX"\n",
+ e->d_name,
+ (uintmax_t)e_stat.st_size);
+ }
+
+ strbuf_release(&file_path);
+ closedir(dir);
+}
+
static int count_files(struct strbuf *path)
{
DIR *dir = opendir(path->buf);
@@ -219,7 +253,8 @@ int create_diagnostics_archive(struct strbuf *zip_path, enum diagnose_mode mode)
struct strvec archiver_args = STRVEC_INIT;
char **argv_copy = NULL;
int stdout_fd = -1, archiver_fd = -1;
- struct strbuf buf = STRBUF_INIT;
+ char *cache_server_url = NULL, *shared_cache = NULL;
+ struct strbuf buf = STRBUF_INIT, path = STRBUF_INIT;
int res, i;
struct archive_dir archive_dirs[] = {
{ ".git", 0 },
@@ -254,6 +289,13 @@ int create_diagnostics_archive(struct strbuf *zip_path, enum diagnose_mode mode)
get_version_info(&buf, 1);
strbuf_addf(&buf, "Repository root: %s\n", the_repository->worktree);
+
+ git_config_get_string("gvfs.cache-server", &cache_server_url);
+ git_config_get_string("gvfs.sharedCache", &shared_cache);
+ strbuf_addf(&buf, "Cache Server: %s\nLocal Cache: %s\n\n",
+ cache_server_url ? cache_server_url : "None",
+ shared_cache ? shared_cache : "None");
+
get_disk_info(&buf);
write_or_die(stdout_fd, buf.buf, buf.len);
strvec_pushf(&archiver_args,
@@ -284,6 +326,52 @@ int create_diagnostics_archive(struct strbuf *zip_path, enum diagnose_mode mode)
}
}
+ if (shared_cache) {
+ size_t path_len;
+
+ strbuf_reset(&buf);
+ strbuf_addf(&path, "%s/pack", shared_cache);
+ strbuf_reset(&buf);
+ strbuf_addstr(&buf, "--add-virtual-file=packs-cached.txt:");
+ dir_stats(&buf, path.buf);
+ strvec_push(&archiver_args, buf.buf);
+
+ strbuf_reset(&buf);
+ strbuf_addstr(&buf, "--add-virtual-file=objects-cached.txt:");
+ loose_objs_stats(&buf, shared_cache);
+ strvec_push(&archiver_args, buf.buf);
+
+ strbuf_reset(&path);
+ strbuf_addf(&path, "%s/info", shared_cache);
+ path_len = path.len;
+
+ if (is_directory(path.buf)) {
+ DIR *dir = opendir(path.buf);
+ struct dirent *e;
+
+ while ((e = readdir(dir))) {
+ if (!strcmp(".", e->d_name) || !strcmp("..", e->d_name))
+ continue;
+ if (e->d_type == DT_DIR)
+ continue;
+
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "--add-virtual-file=info/%s:", e->d_name);
+
+ strbuf_setlen(&path, path_len);
+ strbuf_addch(&path, '/');
+ strbuf_addstr(&path, e->d_name);
+
+ if (strbuf_read_file(&buf, path.buf, 0) < 0) {
+ res = error_errno(_("could not read '%s'"), path.buf);
+ goto diagnose_cleanup;
+ }
+ strvec_push(&archiver_args, buf.buf);
+ }
+ closedir(dir);
+ }
+ }
+
strvec_pushl(&archiver_args, "--prefix=",
oid_to_hex(the_hash_algo->empty_tree), "--", NULL);
@@ -297,10 +385,13 @@ int create_diagnostics_archive(struct strbuf *zip_path, enum diagnose_mode mode)
goto diagnose_cleanup;
}
- fprintf(stderr, "\n"
- "Diagnostics complete.\n"
- "All of the gathered info is captured in '%s'\n",
- zip_path->buf);
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "\n"
+ "Diagnostics complete.\n"
+ "All of the gathered info is captured in '%s'\n",
+ zip_path->buf);
+ write_or_die(stdout_fd, buf.buf, buf.len);
+ write_or_die(2, buf.buf, buf.len);
diagnose_cleanup:
if (archiver_fd >= 0) {
@@ -311,6 +402,8 @@ int create_diagnostics_archive(struct strbuf *zip_path, enum diagnose_mode mode)
free(argv_copy);
strvec_clear(&archiver_args);
strbuf_release(&buf);
+ free(cache_server_url);
+ free(shared_cache);
return res;
}
diff --git a/diff.c b/diff.c
index ee3eb629e3dc5e..9797ad009ceb68 100644
--- a/diff.c
+++ b/diff.c
@@ -3994,6 +3994,13 @@ static int reuse_worktree_file(struct index_state *istate,
if (!FAST_WORKING_DIRECTORY && !want_file && has_object_pack(oid))
return 0;
+ /*
+ * If this path does not match our sparse-checkout definition,
+ * then the file will not be in the working directory.
+ */
+ if (!path_in_sparse_checkout(name, istate))
+ return 0;
+
/*
* Similarly, if we'd have to convert the file contents anyway, that
* makes the optimization not worthwhile.
diff --git a/dir.c b/dir.c
index 25130bcb90d41e..2f354a509ac00b 100644
--- a/dir.c
+++ b/dir.c
@@ -7,6 +7,7 @@
*/
#include "git-compat-util.h"
#include "abspath.h"
+#include "virtualfilesystem.h"
#include "config.h"
#include "convert.h"
#include "dir.h"
@@ -1432,6 +1433,17 @@ enum pattern_match_result path_matches_pattern_list(
int result = NOT_MATCHED;
size_t slash_pos;
+ /*
+ * The virtual file system data is used to prevent git from traversing
+ * any part of the tree that is not in the virtual file system. Return
+ * 1 to exclude the entry if it is not found in the virtual file system,
+ * else fall through to the regular excludes logic as it may further exclude.
+ */
+ if (*dtype == DT_UNKNOWN)
+ *dtype = resolve_dtype(DT_UNKNOWN, istate, pathname, pathlen);
+ if (is_excluded_from_virtualfilesystem(pathname, pathlen, *dtype) > 0)
+ return 1;
+
if (!pl->use_cone_patterns) {
pattern = last_matching_pattern_from_list(pathname, pathlen, basename,
dtype, pl, istate);
@@ -1521,6 +1533,13 @@ static int path_in_sparse_checkout_1(const char *path,
enum pattern_match_result match = UNDECIDED;
const char *end, *slash;
+ /*
+ * When using a virtual filesystem, there aren't really patterns
+ * to follow, but be extra careful to skip this check.
+ */
+ if (core_virtualfilesystem)
+ return 1;
+
/*
* We default to accepting a path if the path is empty, there are no
* patterns, or the patterns are of the wrong type.
@@ -1776,8 +1795,20 @@ struct path_pattern *last_matching_pattern(struct dir_struct *dir,
int is_excluded(struct dir_struct *dir, struct index_state *istate,
const char *pathname, int *dtype_p)
{
- struct path_pattern *pattern =
- last_matching_pattern(dir, istate, pathname, dtype_p);
+ struct path_pattern *pattern;
+
+ /*
+ * The virtual file system data is used to prevent git from traversing
+ * any part of the tree that is not in the virtual file system. Return
+ * 1 to exclude the entry if it is not found in the virtual file system,
+ * else fall through to the regular excludes logic as it may further exclude.
+ */
+ if (*dtype_p == DT_UNKNOWN)
+ *dtype_p = resolve_dtype(DT_UNKNOWN, istate, pathname, strlen(pathname));
+ if (is_excluded_from_virtualfilesystem(pathname, strlen(pathname), *dtype_p) > 0)
+ return 1;
+
+ pattern = last_matching_pattern(dir, istate, pathname, dtype_p);
if (pattern)
return pattern->flags & PATTERN_FLAG_NEGATIVE ? 0 : 1;
return 0;
@@ -2363,6 +2394,8 @@ static enum path_treatment treat_path(struct dir_struct *dir,
ignore_case);
if (dtype != DT_DIR && has_path_in_index)
return path_none;
+ if (is_excluded_from_virtualfilesystem(path->buf, path->len, dtype) > 0)
+ return path_excluded;
/*
* When we are looking at a directory P in the working tree,
@@ -2567,6 +2600,8 @@ static void add_path_to_appropriate_result_list(struct dir_struct *dir,
/* add the path to the appropriate result list */
switch (state) {
case path_excluded:
+ if (is_excluded_from_virtualfilesystem(path->buf, path->len, DT_DIR) > 0)
+ break;
if (dir->flags & DIR_SHOW_IGNORED)
dir_add_name(dir, istate, path->buf, path->len);
else if ((dir->flags & DIR_SHOW_IGNORED_TOO) ||
@@ -3114,6 +3149,8 @@ static int cmp_icase(char a, char b)
{
if (a == b)
return 0;
+ if (is_dir_sep(a))
+ return is_dir_sep(b) ? 0 : -1;
if (ignore_case)
return toupper(a) - toupper(b);
return a - b;
diff --git a/environment.c b/environment.c
index f98d76f08047f1..26e60579b329b8 100644
--- a/environment.c
+++ b/environment.c
@@ -77,9 +77,12 @@ int grafts_keep_true_parents;
int core_apply_sparse_checkout;
int core_sparse_checkout_cone;
int sparse_expect_files_outside_of_patterns;
+int core_gvfs;
+const char *core_virtualfilesystem;
int merge_log_config = -1;
int precomposed_unicode = -1; /* see probe_utf8_pathname_composition() */
unsigned long pack_size_limit_cfg;
+int core_virtualize_objects;
enum log_refs_config log_all_ref_updates = LOG_REFS_UNSET;
#ifndef PROTECT_HFS_DEFAULT
@@ -91,6 +94,9 @@ int protect_hfs = PROTECT_HFS_DEFAULT;
#define PROTECT_NTFS_DEFAULT 1
#endif
int protect_ntfs = PROTECT_NTFS_DEFAULT;
+int core_use_gvfs_helper;
+const char *gvfs_cache_server_url;
+struct strbuf gvfs_shared_cache_pathname = STRBUF_INIT;
/*
* The character that begins a commented line in user-editable file
diff --git a/environment.h b/environment.h
index c5377473c68339..88c635dd1f696f 100644
--- a/environment.h
+++ b/environment.h
@@ -11,6 +11,8 @@ struct strvec;
extern char comment_line_char;
extern int auto_comment_line_char;
+extern int core_virtualize_objects;
+
/*
* Wrapper of getenv() that returns a strdup value. This value is kept
* in argv to be freed later.
@@ -144,9 +146,14 @@ int get_shared_repository(void);
void reset_shared_repository(void);
extern int core_preload_index;
+extern const char *core_virtualfilesystem;
+extern int core_gvfs;
extern int precomposed_unicode;
extern int protect_hfs;
extern int protect_ntfs;
+extern int core_use_gvfs_helper;
+extern const char *gvfs_cache_server_url;
+extern struct strbuf gvfs_shared_cache_pathname;
extern int core_apply_sparse_checkout;
extern int core_sparse_checkout_cone;
diff --git a/git.c b/git.c
index c67e44dd82d2e4..56442ed3d1c988 100644
--- a/git.c
+++ b/git.c
@@ -1,4 +1,5 @@
#include "builtin.h"
+#include "gvfs.h"
#include "config.h"
#include "environment.h"
#include "exec-cmd.h"
@@ -14,6 +15,8 @@
#include "shallow.h"
#include "trace.h"
#include "trace2.h"
+#include "dir.h"
+#include "hook.h"
#define RUN_SETUP (1<<0)
#define RUN_SETUP_GENTLY (1<<1)
@@ -25,6 +28,7 @@
#define NEED_WORK_TREE (1<<3)
#define DELAY_PAGER_CONFIG (1<<4)
#define NO_PARSEOPT (1<<5) /* parse-options is not used */
+#define BLOCK_ON_GVFS_REPO (1<<6) /* command not allowed in GVFS repos */
struct cmd_struct {
const char *cmd;
@@ -425,6 +429,68 @@ static int handle_alias(int *argcp, const char ***argv)
return ret;
}
+/* Runs pre/post-command hook */
+static struct strvec sargv = STRVEC_INIT;
+static int run_post_hook = 0;
+static int exit_code = -1;
+
+static int run_pre_command_hook(const char **argv)
+{
+ char *lock;
+ int ret = 0;
+ struct run_hooks_opt opt = RUN_HOOKS_OPT_INIT;
+
+ /*
+ * Ensure the global pre/post command hook is only called for
+ * the outer command and not when git is called recursively
+ * or spawns multiple commands (like with the alias command)
+ */
+ lock = getenv("COMMAND_HOOK_LOCK");
+ if (lock && !strcmp(lock, "true"))
+ return 0;
+ setenv("COMMAND_HOOK_LOCK", "true", 1);
+
+ /* call the hook proc */
+ strvec_pushv(&sargv, argv);
+ strvec_pushf(&sargv, "--git-pid=%"PRIuMAX, (uintmax_t)getpid());
+ strvec_pushv(&opt.args, sargv.v);
+ ret = run_hooks_opt("pre-command", &opt);
+
+ if (!ret)
+ run_post_hook = 1;
+ return ret;
+}
+
+static int run_post_command_hook(void)
+{
+ char *lock;
+ int ret = 0;
+ struct run_hooks_opt opt = RUN_HOOKS_OPT_INIT;
+
+ /*
+ * Only run post_command if pre_command succeeded in this process
+ */
+ if (!run_post_hook)
+ return 0;
+ lock = getenv("COMMAND_HOOK_LOCK");
+ if (!lock || strcmp(lock, "true"))
+ return 0;
+
+ strvec_pushv(&opt.args, sargv.v);
+ strvec_pushf(&opt.args, "--exit_code=%u", exit_code);
+ ret = run_hooks_opt("post-command", &opt);
+
+ run_post_hook = 0;
+ strvec_clear(&sargv);
+ setenv("COMMAND_HOOK_LOCK", "false", 1);
+ return ret;
+}
+
+static void post_command_hook_atexit(void)
+{
+ run_post_command_hook();
+}
+
static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
{
int status, help;
@@ -460,18 +526,26 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
if (!help && p->option & NEED_WORK_TREE)
setup_work_tree();
+ if (!help && p->option & BLOCK_ON_GVFS_REPO && gvfs_config_is_set(GVFS_BLOCK_COMMANDS))
+ die("'git %s' is not supported on a GVFS repo", p->cmd);
+
+ if (run_pre_command_hook(argv))
+ die("pre-command hook aborted command");
+
trace_argv_printf(argv, "trace: built-in: git");
trace2_cmd_name(p->cmd);
trace2_cmd_list_config();
trace2_cmd_list_env_vars();
validate_cache_entries(the_repository->index);
- status = p->fn(argc, argv, prefix);
+ exit_code = status = p->fn(argc, argv, prefix);
validate_cache_entries(the_repository->index);
if (status)
return status;
+ run_post_command_hook();
+
/* Somebody closed stdout? */
if (fstat(fileno(stdout), &st))
return 0;
@@ -539,7 +613,7 @@ static struct cmd_struct commands[] = {
{ "for-each-ref", cmd_for_each_ref, RUN_SETUP },
{ "for-each-repo", cmd_for_each_repo, RUN_SETUP_GENTLY },
{ "format-patch", cmd_format_patch, RUN_SETUP },
- { "fsck", cmd_fsck, RUN_SETUP },
+ { "fsck", cmd_fsck, RUN_SETUP | BLOCK_ON_GVFS_REPO},
{ "fsck-objects", cmd_fsck, RUN_SETUP },
{ "fsmonitor--daemon", cmd_fsmonitor__daemon, RUN_SETUP },
{ "gc", cmd_gc, RUN_SETUP },
@@ -580,7 +654,7 @@ static struct cmd_struct commands[] = {
{ "pack-refs", cmd_pack_refs, RUN_SETUP },
{ "patch-id", cmd_patch_id, RUN_SETUP_GENTLY | NO_PARSEOPT },
{ "pickaxe", cmd_blame, RUN_SETUP },
- { "prune", cmd_prune, RUN_SETUP },
+ { "prune", cmd_prune, RUN_SETUP | BLOCK_ON_GVFS_REPO},
{ "prune-packed", cmd_prune_packed, RUN_SETUP },
{ "pull", cmd_pull, RUN_SETUP | NEED_WORK_TREE },
{ "push", cmd_push, RUN_SETUP },
@@ -592,7 +666,7 @@ static struct cmd_struct commands[] = {
{ "remote", cmd_remote, RUN_SETUP },
{ "remote-ext", cmd_remote_ext, NO_PARSEOPT },
{ "remote-fd", cmd_remote_fd, NO_PARSEOPT },
- { "repack", cmd_repack, RUN_SETUP },
+ { "repack", cmd_repack, RUN_SETUP | BLOCK_ON_GVFS_REPO },
{ "replace", cmd_replace, RUN_SETUP },
{ "rerere", cmd_rerere, RUN_SETUP },
{ "reset", cmd_reset, RUN_SETUP },
@@ -612,13 +686,14 @@ static struct cmd_struct commands[] = {
{ "stash", cmd_stash, RUN_SETUP | NEED_WORK_TREE },
{ "status", cmd_status, RUN_SETUP | NEED_WORK_TREE },
{ "stripspace", cmd_stripspace },
- { "submodule--helper", cmd_submodule__helper, RUN_SETUP },
+ { "submodule--helper", cmd_submodule__helper, RUN_SETUP | BLOCK_ON_GVFS_REPO },
{ "switch", cmd_switch, RUN_SETUP | NEED_WORK_TREE },
{ "symbolic-ref", cmd_symbolic_ref, RUN_SETUP },
{ "tag", cmd_tag, RUN_SETUP | DELAY_PAGER_CONFIG },
{ "unpack-file", cmd_unpack_file, RUN_SETUP | NO_PARSEOPT },
{ "unpack-objects", cmd_unpack_objects, RUN_SETUP | NO_PARSEOPT },
{ "update-index", cmd_update_index, RUN_SETUP },
+ { "update-microsoft-git", cmd_update_microsoft_git },
{ "update-ref", cmd_update_ref, RUN_SETUP },
{ "update-server-info", cmd_update_server_info, RUN_SETUP },
{ "upload-archive", cmd_upload_archive, NO_PARSEOPT },
@@ -748,13 +823,16 @@ static void execv_dashed_external(const char **argv)
*/
trace_argv_printf(cmd.args.v, "trace: exec:");
+ if (run_pre_command_hook(cmd.args.v))
+ die("pre-command hook aborted command");
+
/*
* If we fail because the command is not found, it is
* OK to return. Otherwise, we just pass along the status code,
* or our usual generic code if we were not even able to exec
* the program.
*/
- status = run_command(&cmd);
+ exit_code = status = run_command(&cmd);
/*
* If the child process ran and we are now going to exit, emit a
@@ -765,6 +843,8 @@ static void execv_dashed_external(const char **argv)
exit(status);
else if (errno != ENOENT)
exit(128);
+
+ run_post_command_hook();
}
static int run_argv(int *argcp, const char ***argv)
@@ -872,6 +952,7 @@ int cmd_main(int argc, const char **argv)
}
trace_command_performance(argv);
+ atexit(post_command_hook_atexit);
/*
* "git-xxxx" is the same as "git xxxx", but we obviously:
@@ -897,10 +978,14 @@ int cmd_main(int argc, const char **argv)
if (!argc) {
/* The user didn't specify a command; give them help */
commit_pager_choice();
+ if (run_pre_command_hook(argv))
+ die("pre-command hook aborted command");
printf(_("usage: %s\n\n"), git_usage_string);
list_common_cmds_help();
printf("\n%s\n", _(git_more_info_string));
- exit(1);
+ exit_code = 1;
+ run_post_command_hook();
+ exit(exit_code);
}
if (!strcmp("--version", argv[0]) || !strcmp("-v", argv[0]))
diff --git a/gvfs-helper-client.c b/gvfs-helper-client.c
new file mode 100644
index 00000000000000..c44def7d912e8e
--- /dev/null
+++ b/gvfs-helper-client.c
@@ -0,0 +1,570 @@
+#include "git-compat-util.h"
+#include "environment.h"
+#include "hex.h"
+#include "strvec.h"
+#include "trace2.h"
+#include "oidset.h"
+#include "object.h"
+#include "object-store.h"
+#include "gvfs-helper-client.h"
+#include "sub-process.h"
+#include "sigchain.h"
+#include "pkt-line.h"
+#include "quote.h"
+#include "packfile.h"
+
+static struct oidset gh_client__oidset_queued = OIDSET_INIT;
+static unsigned long gh_client__oidset_count;
+
+struct gh_server__process {
+ struct subprocess_entry subprocess; /* must be first */
+ unsigned int supported_capabilities;
+};
+
+static int gh_server__subprocess_map_initialized;
+static struct hashmap gh_server__subprocess_map;
+static struct object_directory *gh_client__chosen_odb;
+
+/*
+ * The "objects" capability has verbs: "get" and "post" and "prefetch".
+ */
+#define CAP_OBJECTS (1u<<1)
+#define CAP_OBJECTS_NAME "objects"
+
+#define CAP_OBJECTS__VERB_GET1_NAME "get"
+#define CAP_OBJECTS__VERB_POST_NAME "post"
+#define CAP_OBJECTS__VERB_PREFETCH_NAME "prefetch"
+
+static int gh_client__start_fn(struct subprocess_entry *subprocess)
+{
+ static int versions[] = {1, 0};
+ static struct subprocess_capability capabilities[] = {
+ { CAP_OBJECTS_NAME, CAP_OBJECTS },
+ { NULL, 0 }
+ };
+
+ struct gh_server__process *entry = (struct gh_server__process *)subprocess;
+
+ return subprocess_handshake(subprocess, "gvfs-helper", versions,
+ NULL, capabilities,
+ &entry->supported_capabilities);
+}
+
+/*
+ * Send the queued OIDs in the OIDSET to gvfs-helper for it to
+ * fetch from the cache-server or main Git server using "/gvfs/objects"
+ * POST semantics.
+ *
+ * objects.post LF
+ * (<oid> LF)*
+ * <flush>
+ *
+ */
+static int gh_client__send__objects_post(struct child_process *process)
+{
+ struct oidset_iter iter;
+ struct object_id *oid;
+ int err;
+
+ /*
+ * We assume that all of the packet_ routines call error()
+ * so that we don't have to.
+ */
+
+ err = packet_write_fmt_gently(
+ process->in,
+ (CAP_OBJECTS_NAME "." CAP_OBJECTS__VERB_POST_NAME "\n"));
+ if (err)
+ return err;
+
+ oidset_iter_init(&gh_client__oidset_queued, &iter);
+ while ((oid = oidset_iter_next(&iter))) {
+ err = packet_write_fmt_gently(process->in, "%s\n",
+ oid_to_hex(oid));
+ if (err)
+ return err;
+ }
+
+ err = packet_flush_gently(process->in);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/*
+ * Send the given OID to gvfs-helper for it to fetch from the
+ * cache-server or main Git server using "/gvfs/objects" GET
+ * semantics.
+ *
+ * This ignores any queued OIDs.
+ *
+ * objects.get LF
+ * <oid> LF
+ * <flush>
+ *
+ */
+static int gh_client__send__objects_get(struct child_process *process,
+ const struct object_id *oid)
+{
+ int err;
+
+ /*
+ * We assume that all of the packet_ routines call error()
+ * so that we don't have to.
+ */
+
+ err = packet_write_fmt_gently(
+ process->in,
+ (CAP_OBJECTS_NAME "." CAP_OBJECTS__VERB_GET1_NAME "\n"));
+ if (err)
+ return err;
+
+ err = packet_write_fmt_gently(process->in, "%s\n",
+ oid_to_hex(oid));
+ if (err)
+ return err;
+
+ err = packet_flush_gently(process->in);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/*
+ * Send a request to gvfs-helper to prefetch packfiles from either the
+ * cache-server or the main Git server using "/gvfs/prefetch".
+ *
+ * objects.prefetch LF
+ * [<seconds-since-epoch> LF]
+ * <flush>
+ */
+static int gh_client__send__objects_prefetch(struct child_process *process,
+ timestamp_t seconds_since_epoch)
+{
+ int err;
+
+ /*
+ * We assume that all of the packet_ routines call error()
+ * so that we don't have to.
+ */
+
+ err = packet_write_fmt_gently(
+ process->in,
+ (CAP_OBJECTS_NAME "." CAP_OBJECTS__VERB_PREFETCH_NAME "\n"));
+ if (err)
+ return err;
+
+ if (seconds_since_epoch) {
+ err = packet_write_fmt_gently(process->in, "%" PRItime "\n",
+ seconds_since_epoch);
+ if (err)
+ return err;
+ }
+
+ err = packet_flush_gently(process->in);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/*
+ * Update the loose object cache to include the newly created
+ * object.
+ */
+static void gh_client__update_loose_cache(const char *line)
+{
+ const char *v1_oid;
+ struct object_id oid;
+
+ if (!skip_prefix(line, "loose ", &v1_oid))
+ BUG("update_loose_cache: invalid line '%s'", line);
+
+ if (get_oid_hex(v1_oid, &oid))
+ BUG("update_loose_cache: invalid line '%s'", line);
+
+ odb_loose_cache_add_new_oid(gh_client__chosen_odb, &oid);
+}
+
+/*
+ * Update the packed-git list to include the newly created packfile.
+ */
+static void gh_client__update_packed_git(const char *line)
+{
+ struct strbuf path = STRBUF_INIT;
+ const char *v1_filename;
+ struct packed_git *p;
+ int is_local;
+
+ if (!skip_prefix(line, "packfile ", &v1_filename))
+ BUG("update_packed_git: invalid line '%s'", line);
+
+ /*
+ * ODB[0] is the local .git/objects. All others are alternates.
+ */
+ is_local = (gh_client__chosen_odb == the_repository->objects->odb);
+
+ strbuf_addf(&path, "%s/pack/%s",
+ gh_client__chosen_odb->path, v1_filename);
+ strbuf_strip_suffix(&path, ".pack");
+ strbuf_addstr(&path, ".idx");
+
+ p = add_packed_git(path.buf, path.len, is_local);
+ if (p)
+ install_packed_git_and_mru(the_repository, p);
+}
+
+/*
+ * CAP_OBJECTS verbs return the same format response:
+ *
+ *    <odb>
+ *    <data>*
+ *    <status>
+ *    <flush>
+ *
+ * Where:
+ *
+ * <odb>      ::= odb SP <directory> LF
+ *
+ * <data>     ::= <packfile> / <loose>
+ *
+ * <packfile> ::= packfile SP <filename> LF
+ *
+ * <loose>    ::= loose SP <oid> LF
+ *
+ * <status>   ::= ok LF
+ *            / partial LF
+ *            / error SP <message> LF
+ *
+ * Note that `gvfs-helper` controls how/if it chunks the request when
+ * it talks to the cache-server and/or main Git server. So it is
+ * possible for us to receive many packfiles and/or loose objects *AND
+ * THEN* get a hard network error or a 404 on an individual object.
+ *
+ * If we get a partial result, we can let the caller try to continue
+ * -- for example, maybe an immediate request for a tree object was
+ * grouped with a queued request for a blob. The tree-walk *might* be
+ * able to continue and let the 404 blob be handled later.
+ */
+static int gh_client__objects__receive_response(
+ struct child_process *process,
+ enum gh_client__created *p_ghc,
+ int *p_nr_loose, int *p_nr_packfile)
+{
+ enum gh_client__created ghc = GHC__CREATED__NOTHING;
+ const char *v1;
+ char *line;
+ int len;
+ int nr_loose = 0;
+ int nr_packfile = 0;
+ int err = 0;
+
+ while (1) {
+ /*
+ * Warning: packet_read_line_gently() calls die()
+ * despite the _gently moniker.
+ */
+ len = packet_read_line_gently(process->out, NULL, &line);
+ if ((len < 0) || !line)
+ break;
+
+ if (starts_with(line, "odb")) {
+ /* trust that this matches what we expect */
+ }
+
+ else if (starts_with(line, "packfile")) {
+ gh_client__update_packed_git(line);
+ ghc |= GHC__CREATED__PACKFILE;
+ nr_packfile++;
+ }
+
+ else if (starts_with(line, "loose")) {
+ gh_client__update_loose_cache(line);
+ ghc |= GHC__CREATED__LOOSE;
+ nr_loose++;
+ }
+
+ else if (starts_with(line, "ok"))
+ ;
+ else if (starts_with(line, "partial"))
+ ;
+ else if (skip_prefix(line, "error ", &v1)) {
+ error("gvfs-helper error: '%s'", v1);
+ err = -1;
+ }
+ }
+
+ *p_ghc = ghc;
+ *p_nr_loose = nr_loose;
+ *p_nr_packfile = nr_packfile;
+
+ return err;
+}
+
+/*
+ * Select the preferred ODB for fetching missing objects.
+ * This should be the alternate with the same directory
+ * name as set in `gvfs.sharedCache`.
+ *
+ * Fallback to .git/objects if necessary.
+ */
+static void gh_client__choose_odb(void)
+{
+ struct object_directory *odb;
+
+ if (gh_client__chosen_odb)
+ return;
+
+ prepare_alt_odb(the_repository);
+ gh_client__chosen_odb = the_repository->objects->odb;
+
+ if (!gvfs_shared_cache_pathname.len)
+ return;
+
+ for (odb = the_repository->objects->odb->next; odb; odb = odb->next) {
+ if (!strcmp(odb->path, gvfs_shared_cache_pathname.buf)) {
+ gh_client__chosen_odb = odb;
+ return;
+ }
+ }
+}
+
+static struct gh_server__process *gh_client__find_long_running_process(
+ unsigned int cap_needed)
+{
+ struct gh_server__process *entry;
+ struct strvec argv = STRVEC_INIT;
+ struct strbuf quoted = STRBUF_INIT;
+
+ gh_client__choose_odb();
+
+ /*
+ * TODO decide what defaults we want.
+ */
+ strvec_push(&argv, "gvfs-helper");
+ strvec_push(&argv, "--fallback");
+ strvec_push(&argv, "--cache-server=trust");
+ strvec_pushf(&argv, "--shared-cache=%s",
+ gh_client__chosen_odb->path);
+ strvec_push(&argv, "server");
+
+ sq_quote_argv_pretty(&quoted, argv.v);
+
+ /*
+ * Find an existing long-running process with the above command
+ * line -or- create a new long-running process for this and
+ * subsequent requests.
+ */
+ if (!gh_server__subprocess_map_initialized) {
+ gh_server__subprocess_map_initialized = 1;
+ hashmap_init(&gh_server__subprocess_map,
+ (hashmap_cmp_fn)cmd2process_cmp, NULL, 0);
+ entry = NULL;
+ } else
+ entry = (struct gh_server__process *)subprocess_find_entry(
+ &gh_server__subprocess_map, quoted.buf);
+
+ if (!entry) {
+ entry = xmalloc(sizeof(*entry));
+ entry->supported_capabilities = 0;
+
+ if (subprocess_start_strvec(&gh_server__subprocess_map,
+ &entry->subprocess, 1,
+ &argv, gh_client__start_fn))
+ FREE_AND_NULL(entry);
+ }
+
+ if (entry &&
+ (entry->supported_capabilities & cap_needed) != cap_needed) {
+ error("gvfs-helper: does not support needed capabilities");
+ subprocess_stop(&gh_server__subprocess_map,
+ (struct subprocess_entry *)entry);
+ FREE_AND_NULL(entry);
+ }
+
+ strvec_clear(&argv);
+ strbuf_release(&quoted);
+
+ return entry;
+}
+
+void gh_client__queue_oid(const struct object_id *oid)
+{
+ /*
+ * Keep this trace as a printf only, so that it goes to the
+ * perf log, but not the event log. It is useful for interactive
+ * debugging, but generates way too much (unuseful) noise for the
+ * database.
+ */
+ if (trace2_is_enabled())
+ trace2_printf("gh_client__queue_oid: %s", oid_to_hex(oid));
+
+ if (!oidset_insert(&gh_client__oidset_queued, oid))
+ gh_client__oidset_count++;
+}
+
+/*
+ * This routine should actually take a "const struct oid_array *"
+ * rather than the component parts, but fetch_objects() uses
+ * this model (because of the call in sha1-file.c).
+ */
+void gh_client__queue_oid_array(const struct object_id *oids, int oid_nr)
+{
+ int k;
+
+ for (k = 0; k < oid_nr; k++)
+ gh_client__queue_oid(&oids[k]);
+}
+
+/*
+ * Bulk fetch all of the queued OIDs in the OIDSET.
+ */
+int gh_client__drain_queue(enum gh_client__created *p_ghc)
+{
+ struct gh_server__process *entry;
+ struct child_process *process;
+ int nr_loose = 0;
+ int nr_packfile = 0;
+ int err = 0;
+
+ *p_ghc = GHC__CREATED__NOTHING;
+
+ if (!gh_client__oidset_count)
+ return 0;
+
+ entry = gh_client__find_long_running_process(CAP_OBJECTS);
+ if (!entry)
+ return -1;
+
+ trace2_region_enter("gh-client", "objects/post", the_repository);
+
+ process = &entry->subprocess.process;
+
+ sigchain_push(SIGPIPE, SIG_IGN);
+
+ err = gh_client__send__objects_post(process);
+ if (!err)
+ err = gh_client__objects__receive_response(
+ process, p_ghc, &nr_loose, &nr_packfile);
+
+ sigchain_pop(SIGPIPE);
+
+ if (err) {
+ subprocess_stop(&gh_server__subprocess_map,
+ (struct subprocess_entry *)entry);
+ FREE_AND_NULL(entry);
+ }
+
+ trace2_data_intmax("gh-client", the_repository,
+ "objects/post/nr_objects", gh_client__oidset_count);
+ trace2_region_leave("gh-client", "objects/post", the_repository);
+
+ oidset_clear(&gh_client__oidset_queued);
+ gh_client__oidset_count = 0;
+
+ return err;
+}
+
+/*
+ * Get exactly 1 object immediately.
+ * Ignore any queued objects.
+ */
+int gh_client__get_immediate(const struct object_id *oid,
+ enum gh_client__created *p_ghc)
+{
+ struct gh_server__process *entry;
+ struct child_process *process;
+ int nr_loose = 0;
+ int nr_packfile = 0;
+ int err = 0;
+
+ /*
+ * Keep this trace as a printf only, so that it goes to the
+ * perf log, but not the event log. It is useful for interactive
+ * debugging, but generates way too much (unuseful) noise for the
+ * database.
+ */
+ if (trace2_is_enabled())
+ trace2_printf("gh_client__get_immediate: %s", oid_to_hex(oid));
+
+ entry = gh_client__find_long_running_process(CAP_OBJECTS);
+ if (!entry)
+ return -1;
+
+ trace2_region_enter("gh-client", "objects/get", the_repository);
+
+ process = &entry->subprocess.process;
+
+ sigchain_push(SIGPIPE, SIG_IGN);
+
+ err = gh_client__send__objects_get(process, oid);
+ if (!err)
+ err = gh_client__objects__receive_response(
+ process, p_ghc, &nr_loose, &nr_packfile);
+
+ sigchain_pop(SIGPIPE);
+
+ if (err) {
+ subprocess_stop(&gh_server__subprocess_map,
+ (struct subprocess_entry *)entry);
+ FREE_AND_NULL(entry);
+ }
+
+ trace2_region_leave("gh-client", "objects/get", the_repository);
+
+ return err;
+}
+
+/*
+ * Ask gvfs-helper to prefetch commits-and-trees packfiles since a
+ * given timestamp.
+ *
+ * If seconds_since_epoch is zero, gvfs-helper will scan the ODB for
+ * the last received prefetch and ask for ones newer than that.
+ */
+int gh_client__prefetch(timestamp_t seconds_since_epoch,
+ int *nr_packfiles_received)
+{
+ struct gh_server__process *entry;
+ struct child_process *process;
+ enum gh_client__created ghc;
+ int nr_loose = 0;
+ int nr_packfile = 0;
+ int err = 0;
+
+ entry = gh_client__find_long_running_process(CAP_OBJECTS);
+ if (!entry)
+ return -1;
+
+ trace2_region_enter("gh-client", "objects/prefetch", the_repository);
+ trace2_data_intmax("gh-client", the_repository, "prefetch/since",
+ seconds_since_epoch);
+
+ process = &entry->subprocess.process;
+
+ sigchain_push(SIGPIPE, SIG_IGN);
+
+ err = gh_client__send__objects_prefetch(process, seconds_since_epoch);
+ if (!err)
+ err = gh_client__objects__receive_response(
+ process, &ghc, &nr_loose, &nr_packfile);
+
+ sigchain_pop(SIGPIPE);
+
+ if (err) {
+ subprocess_stop(&gh_server__subprocess_map,
+ (struct subprocess_entry *)entry);
+ FREE_AND_NULL(entry);
+ }
+
+ trace2_data_intmax("gh-client", the_repository,
+ "prefetch/packfile_count", nr_packfile);
+ trace2_region_leave("gh-client", "objects/prefetch", the_repository);
+
+ if (nr_packfiles_received)
+ *nr_packfiles_received = nr_packfile;
+
+ return err;
+}
diff --git a/gvfs-helper-client.h b/gvfs-helper-client.h
new file mode 100644
index 00000000000000..7692534ecda54c
--- /dev/null
+++ b/gvfs-helper-client.h
@@ -0,0 +1,87 @@
+#ifndef GVFS_HELPER_CLIENT_H
+#define GVFS_HELPER_CLIENT_H
+
+struct repository;
+struct commit;
+struct object_id;
+
+enum gh_client__created {
+ /*
+ * The _get_ operation did not create anything. It doesn't
+ * matter if `gvfs-helper` had errors or not -- just that
+ * nothing was created.
+ */
+ GHC__CREATED__NOTHING = 0,
+
+ /*
+ * The _get_ operation created one or more packfiles.
+ */
+ GHC__CREATED__PACKFILE = 1<<1,
+
+ /*
+ * The _get_ operation created one or more loose objects.
+ * (Not necessarily for the individual OID you requested.)
+ */
+ GHC__CREATED__LOOSE = 1<<2,
+
+ /*
+ * The _get_ operation created one or more packfiles *and*
+ * one or more loose objects.
+ */
+ GHC__CREATED__PACKFILE_AND_LOOSE = (GHC__CREATED__PACKFILE |
+ GHC__CREATED__LOOSE),
+};
+
+/*
+ * Ask `gvfs-helper server` to immediately fetch a single object
+ * using "/gvfs/objects" GET semantics.
+ *
+ * A long-running background process is used to make subsequent
+ * requests more efficient.
+ *
+ * A loose object will be created in the shared-cache ODB and
+ * in-memory cache updated.
+ */
+int gh_client__get_immediate(const struct object_id *oid,
+ enum gh_client__created *p_ghc);
+
+/*
+ * Queue this OID for a future fetch using `gvfs-helper server`.
+ * It does not wait.
+ *
+ * Callers should not rely on the queued object being on disk until
+ * the queue has been drained.
+ */
+void gh_client__queue_oid(const struct object_id *oid);
+void gh_client__queue_oid_array(const struct object_id *oids, int oid_nr);
+
+/*
+ * Ask `gvfs-helper server` to fetch the set of queued OIDs using
+ * "/gvfs/objects" POST semantics.
+ *
+ * A long-running background process is used to make subsequent
+ * requests more efficient.
+ *
+ * One or more packfiles will be created in the shared-cache ODB.
+ */
+int gh_client__drain_queue(enum gh_client__created *p_ghc);
+
+/*
+ * Ask `gvfs-helper server` to fetch any "prefetch packs"
+ * available on the server more recent than the requested time.
+ *
+ * If seconds_since_epoch is zero, gvfs-helper will scan the ODB for
+ * the last received prefetch and ask for ones newer than that.
+ *
+ * A long-running background process is used to make subsequent requests
+ * (either prefetch or regular immediate/queued requests) more efficient.
+ *
+ * One or more packfiles will be created in the shared-cache ODB.
+ *
+ * Returns 0 on success, -1 on error. Optionally also returns the
+ * number of prefetch packs received.
+ */
+int gh_client__prefetch(timestamp_t seconds_since_epoch,
+ int *nr_packfiles_received);
+
+#endif /* GVFS_HELPER_CLIENT_H */
diff --git a/gvfs-helper.c b/gvfs-helper.c
new file mode 100644
index 00000000000000..35e7ec5634ee1d
--- /dev/null
+++ b/gvfs-helper.c
@@ -0,0 +1,4226 @@
+// TODO Write a man page. Here are some notes for dogfooding.
+// TODO
+//
+// Usage: git gvfs-helper [<main_options>] <sub-command> [<sub-command_options>]
+//
+// <main_options>:
+//
+// --remote=<remote-name> // defaults to "origin"
+//
+// --fallback // boolean. defaults to off
+//
+// When a fetch from the cache-server fails, automatically
+// fallback to the main Git server. This option has no effect
+// if no cache-server is defined.
+//
+// --cache-server=<use> // one of { disable, trust, verify, error }