-rw-r--r--  .github/dependabot.yaml | 22
-rw-r--r--  .github/workflows/cifuzz.yml | 35
-rw-r--r--  .github/workflows/codeql.yml | 6
-rw-r--r--  .github/workflows/git.yml | 10
-rw-r--r--  .github/workflows/pr-validation.yml | 30
-rw-r--r--  .github/workflows/stale-issues-bot.yaml | 31
-rw-r--r--  .github/workflows/test.yml | 10
-rw-r--r--  .gitignore | 1
-rw-r--r--  COMPATIBILITY.md | 344
-rw-r--r--  Makefile | 9
-rw-r--r--  _examples/sha256/main.go | 2
-rw-r--r--  cli/go-git/go.mod | 32
-rw-r--r--  cli/go-git/go.sum | 133
-rw-r--r--  cli/go-git/main.go | 1
-rw-r--r--  cli/go-git/update_server_info.go | 34
-rw-r--r--  config/branch.go | 2
-rw-r--r--  config/config.go | 15
-rw-r--r--  go.mod | 39
-rw-r--r--  go.sum | 84
-rw-r--r--  internal/reference/sort.go | 14
-rw-r--r--  internal/revision/parser_test.go | 9
-rw-r--r--  options.go | 18
-rw-r--r--  oss-fuzz.sh | 35
-rw-r--r--  plumbing/filemode/filemode.go | 2
-rw-r--r--  plumbing/format/commitgraph/commitgraph.go | 6
-rw-r--r--  plumbing/format/commitgraph/doc.go | 113
-rw-r--r--  plumbing/format/commitgraph/encoder.go | 18
-rw-r--r--  plumbing/format/commitgraph/file.go | 41
-rw-r--r--  plumbing/format/commitgraph/memory.go | 6
-rw-r--r--  plumbing/format/commitgraph/v2/chain.go | 100
-rw-r--r--  plumbing/format/commitgraph/v2/chain_test.go | 100
-rw-r--r--  plumbing/format/commitgraph/v2/chunk.go | 49
-rw-r--r--  plumbing/format/commitgraph/v2/commitgraph.go | 57
-rw-r--r--  plumbing/format/commitgraph/v2/commitgraph_test.go | 200
-rw-r--r--  plumbing/format/commitgraph/v2/doc.go | 106
-rw-r--r--  plumbing/format/commitgraph/v2/encoder.go | 250
-rw-r--r--  plumbing/format/commitgraph/v2/file.go | 412
-rw-r--r--  plumbing/format/commitgraph/v2/memory.go | 107
-rw-r--r--  plumbing/format/config/decoder_test.go | 11
-rw-r--r--  plumbing/format/packfile/delta_test.go | 12
-rw-r--r--  plumbing/format/packfile/diff_delta.go | 5
-rw-r--r--  plumbing/format/packfile/parser.go | 195
-rw-r--r--  plumbing/format/packfile/patch_delta.go | 289
-rw-r--r--  plumbing/format/pktline/encoder.go | 4
-rw-r--r--  plumbing/format/pktline/error.go | 51
-rw-r--r--  plumbing/format/pktline/error_test.go | 68
-rw-r--r--  plumbing/format/pktline/scanner.go | 12
-rw-r--r--  plumbing/hash/hash.go | 2
-rw-r--r--  plumbing/object.go | 2
-rw-r--r--  plumbing/object/commit.go | 61
-rw-r--r--  plumbing/object/commit_test.go | 51
-rw-r--r--  plumbing/object/commitgraph/commitnode.go | 200
-rw-r--r--  plumbing/object/commitgraph/commitnode_graph.go | 271
-rw-r--r--  plumbing/object/commitgraph/commitnode_object.go | 187
-rw-r--r--  plumbing/object/commitgraph/commitnode_test.go | 301
-rw-r--r--  plumbing/object/commitgraph/commitnode_walker_author_order.go | 61
-rw-r--r--  plumbing/object/commitgraph/commitnode_walker_ctime.go | 211
-rw-r--r--  plumbing/object/commitgraph/commitnode_walker_date_order.go | 41
-rw-r--r--  plumbing/object/commitgraph/commitnode_walker_helper.go | 164
-rw-r--r--  plumbing/object/commitgraph/commitnode_walker_test.go | 187
-rw-r--r--  plumbing/object/commitgraph/commitnode_walker_topo_order.go | 161
-rw-r--r--  plumbing/object/patch.go | 4
-rw-r--r--  plumbing/object/patch_stats_test.go | 54
-rw-r--r--  plumbing/object/signature_test.go | 7
-rw-r--r--  plumbing/object/tree_test.go | 17
-rw-r--r--  plumbing/protocol/packp/common.go | 5
-rw-r--r--  plumbing/protocol/packp/gitproto.go | 120
-rw-r--r--  plumbing/protocol/packp/gitproto_test.go | 99
-rw-r--r--  plumbing/protocol/packp/srvresp.go | 12
-rw-r--r--  plumbing/protocol/packp/srvresp_test.go | 27
-rw-r--r--  plumbing/protocol/packp/ulreq_decode.go | 2
-rw-r--r--  plumbing/protocol/packp/ulreq_decode_test.go | 2
-rw-r--r--  plumbing/protocol/packp/uppackresp_test.go | 12
-rw-r--r--  plumbing/reference.go | 89
-rw-r--r--  plumbing/reference_test.go | 59
-rw-r--r--  plumbing/serverinfo/serverinfo.go | 94
-rw-r--r--  plumbing/serverinfo/serverinfo_test.go | 185
-rw-r--r--  plumbing/storer/object.go | 1
-rw-r--r--  plumbing/storer/object_test.go | 4
-rw-r--r--  plumbing/transport/common.go | 2
-rw-r--r--  plumbing/transport/common_test.go | 7
-rw-r--r--  plumbing/transport/file/client.go | 3
-rw-r--r--  plumbing/transport/file/common_test.go | 4
-rw-r--r--  plumbing/transport/git/common.go | 26
-rw-r--r--  plumbing/transport/git/common_test.go | 20
-rw-r--r--  plumbing/transport/http/common.go | 18
-rw-r--r--  plumbing/transport/http/common_test.go | 19
-rw-r--r--  plumbing/transport/http/receive_pack.go | 1
-rw-r--r--  plumbing/transport/http/upload_pack.go | 1
-rw-r--r--  plumbing/transport/internal/common/common.go | 98
-rw-r--r--  plumbing/transport/internal/common/common_test.go | 101
-rw-r--r--  plumbing/transport/internal/common/mocks.go | 46
-rw-r--r--  plumbing/transport/server/server.go | 2
-rw-r--r--  plumbing/transport/ssh/common.go | 14
-rw-r--r--  plumbing/transport/ssh/common_test.go | 45
-rw-r--r--  remote.go | 41
-rw-r--r--  remote_test.go | 143
-rw-r--r--  repository.go | 60
-rw-r--r--  repository_test.go | 184
-rw-r--r--  storage/filesystem/dotgit/dotgit.go | 97
-rw-r--r--  storage/filesystem/dotgit/dotgit_test.go | 162
-rw-r--r--  storage/filesystem/object.go | 13
-rw-r--r--  storage/filesystem/storage.go | 9
-rw-r--r--  storage/memory/storage.go | 4
-rw-r--r--  storage/transactional/object.go | 4
-rw-r--r--  utils/binary/read.go | 2
-rw-r--r--  utils/ioutil/common.go | 12
-rw-r--r--  utils/ioutil/pipe.go | 9
-rw-r--r--  utils/ioutil/pipe_js.go | 9
-rw-r--r--  utils/merkletrie/difftree.go | 2
-rw-r--r--  utils/merkletrie/filesystem/node.go | 4
-rw-r--r--  utils/merkletrie/filesystem/node_test.go | 26
-rw-r--r--  utils/merkletrie/internal/fsnoder/file.go | 2
-rw-r--r--  utils/trace/trace.go | 55
-rw-r--r--  utils/trace/trace_test.go | 95
-rw-r--r--  worktree.go | 152
-rw-r--r--  worktree_commit.go | 2
-rw-r--r--  worktree_status.go | 9
-rw-r--r--  worktree_test.go | 269
119 files changed, 6232 insertions, 1363 deletions
diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml
new file mode 100644
index 0000000..403f428
--- /dev/null
+++ b/.github/dependabot.yaml
@@ -0,0 +1,22 @@
+version: 2
+updates:
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ schedule:
+ interval: "weekly"
+ commit-message:
+ prefix: "build"
+
+ - package-ecosystem: "gomod"
+ directory: "/"
+ schedule:
+ interval: "daily"
+ commit-message:
+ prefix: "build"
+
+ - package-ecosystem: "gomod"
+ directory: "/cli/go-git"
+ schedule:
+ interval: "daily"
+ commit-message:
+ prefix: "build"
diff --git a/.github/workflows/cifuzz.yml b/.github/workflows/cifuzz.yml
new file mode 100644
index 0000000..a930876
--- /dev/null
+++ b/.github/workflows/cifuzz.yml
@@ -0,0 +1,35 @@
+name: CIFuzz
+on: [pull_request]
+permissions: {}
+jobs:
+ Fuzzing:
+ runs-on: ubuntu-latest
+ permissions:
+ security-events: write
+ steps:
+ - name: Build Fuzzers
+ id: build
+ uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@master
+ with:
+ oss-fuzz-project-name: 'go-git'
+ language: go
+ - name: Run Fuzzers
+ uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@master
+ with:
+ oss-fuzz-project-name: 'go-git'
+ language: go
+ fuzz-seconds: 300
+ output-sarif: true
+ - name: Upload Crash
+ uses: actions/upload-artifact@v4
+ if: failure() && steps.build.outcome == 'success'
+ with:
+ name: artifacts
+ path: ./out/artifacts
+ - name: Upload Sarif
+ if: always() && steps.build.outcome == 'success'
+ uses: github/codeql-action/upload-sarif@v3
+ with:
+ # Path to SARIF file relative to the root of the repository
+ sarif_file: cifuzz-sarif/results.sarif
+ checkout_path: cifuzz-sarif
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index fbb867c..920fc3e 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -24,11 +24,11 @@ jobs:
steps:
- name: Checkout code
- uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
+ uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
- uses: github/codeql-action/init@29b1f65c5e92e24fe6b6647da1eaabe529cec70f # v2.3.3
+ uses: github/codeql-action/init@03e7845b7bfcd5e7fb63d1ae8c61b0e791134fab # v2.22.11
with:
languages: ${{ matrix.language }}
# xref: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
@@ -39,6 +39,6 @@ jobs:
run: go build ./...
- name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@29b1f65c5e92e24fe6b6647da1eaabe529cec70f # v2.3.3
+ uses: github/codeql-action/analyze@03e7845b7bfcd5e7fb63d1ae8c61b0e791134fab # v2.22.11
with:
category: "/language:${{matrix.language}}"
diff --git a/.github/workflows/git.yml b/.github/workflows/git.yml
index 60cfa12..6e0ebb6 100644
--- a/.github/workflows/git.yml
+++ b/.github/workflows/git.yml
@@ -16,13 +16,13 @@ jobs:
GIT_DIST_PATH: .git-dist/${{ matrix.git[0] }}
steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
- name: Install Go
- uses: actions/setup-go@v3
+ uses: actions/setup-go@v5
with:
- go-version: 1.20.x
-
- - name: Checkout code
- uses: actions/checkout@v3
+ go-version: 1.21.x
- name: Install build dependencies
run: sudo apt-get update && sudo apt-get install gettext libcurl4-openssl-dev
diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml
new file mode 100644
index 0000000..d7b1150
--- /dev/null
+++ b/.github/workflows/pr-validation.yml
@@ -0,0 +1,30 @@
+name: 'PR Validation'
+
+on:
+ pull_request:
+ types:
+ - opened
+ - edited
+ - reopened
+ - synchronize
+
+permissions:
+ contents: read
+
+jobs:
+ check-commit-message:
+ name: Check Commit Messages
+ runs-on: ubuntu-latest
+ steps:
+ - name: Check Package Prefix
+ uses: gsactions/commit-message-checker@v2
+ with:
+ pattern: '^(\*|git|plumbing|utils|config|_examples|internal|storage|cli|build): .+'
+ error: |
+ Commit message(s) does not align with contribution acceptance criteria.
+
+ Refer to https://github.com/go-git/go-git/blob/master/CONTRIBUTING.md#format-of-the-commit-message for more information.
+ excludeDescription: 'true'
+ excludeTitle: 'true'
+ checkAllCommitMessages: 'true'
+ accessToken: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/stale-issues-bot.yaml b/.github/workflows/stale-issues-bot.yaml
new file mode 100644
index 0000000..11b86ae
--- /dev/null
+++ b/.github/workflows/stale-issues-bot.yaml
@@ -0,0 +1,31 @@
+name: "stale issues bot"
+on:
+ schedule:
+ - cron: "0 7 * * *"
+
+permissions:
+ issues: write
+ pull-requests: write
+
+jobs:
+ stale-bot:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/stale@v9
+ with:
+ ascending: true
+ operations-per-run: 30
+ days-before-stale: 90
+ days-before-close: 30
+ stale-issue-label: stale
+ stale-pr-label: stale
+ exempt-issue-labels: no-autoclose
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+ stale-issue-message: |
+ To help us keep things tidy and focus on the active tasks, we've introduced a stale bot to spot issues/PRs that haven't had any activity in a while.
+
+ This particular issue hasn't had any updates or activity in the past 90 days, so it's been labeled as 'stale'. If it remains inactive for the next 30 days, it'll be automatically closed.
+
+ We understand everyone's busy, but if this issue is still important to you, please feel free to add a comment or make an update to keep it active.
+
+ Thanks for your understanding and cooperation!
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index ce5872d..f94d3e7 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -8,18 +8,18 @@ jobs:
strategy:
fail-fast: false
matrix:
- go-version: [1.19.x, 1.20.x]
+ go-version: [1.19.x, 1.20.x, 1.21.x]
platform: [ubuntu-latest, macos-latest, windows-latest]
runs-on: ${{ matrix.platform }}
steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
- name: Install Go
- uses: actions/setup-go@v3
+ uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
-
- - name: Checkout code
- uses: actions/checkout@v3
- name: Configure known hosts
if: matrix.platform != 'ubuntu-latest'
diff --git a/.gitignore b/.gitignore
index 361133d..b7f2c58 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,3 +4,4 @@ coverage.txt
profile.out
.tmp/
.git-dist/
+.vscode
diff --git a/COMPATIBILITY.md b/COMPATIBILITY.md
index afd4f03..c1f280d 100644
--- a/COMPATIBILITY.md
+++ b/COMPATIBILITY.md
@@ -5,229 +5,229 @@ compatibility status with go-git.
## Getting and creating repositories
-| Feature | Sub-feature | Status | Notes | Examples |
-|---|---|---|---|---|
-| `init` | | ✅ | | |
-| `init` | `--bare` | ✅ | | |
-| `init` | `--template` <br/> `--separate-git-dir` <br/> `--shared` | ❌ | | |
-| `clone` | | ✅ | | - [PlainClone](_examples/clone/main.go) |
-| `clone` | Authentication: <br/> - none <br/> - access token <br/> - username + password <br/> - ssh | ✅ | | - [clone ssh](_examples/clone/auth/ssh/main.go) <br/> - [clone access token](_examples/clone/auth/basic/access_token/main.go) <br/> - [clone user + password](_examples/clone/auth/basic/username_password/main.go) |
-| `clone` | `--progress` <br/> `--single-branch` <br/> `--depth` <br/> `--origin` <br/> `--recurse-submodules` | ✅ | | - [recurse submodules](_examples/clone/main.go) <br/> - [progress](_examples/progress/main.go) |
+| Feature | Sub-feature | Status | Notes | Examples |
+| ------- | ------------------------------------------------------------------------------------------------------------------ | ------ | ----- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `init` | | ✅ | | |
+| `init` | `--bare` | ✅ | | |
+| `init` | `--template` <br/> `--separate-git-dir` <br/> `--shared` | ❌ | | |
+| `clone` | | ✅ | | - [PlainClone](_examples/clone/main.go) |
+| `clone` | Authentication: <br/> - none <br/> - access token <br/> - username + password <br/> - ssh | ✅ | | - [clone ssh](_examples/clone/auth/ssh/main.go) <br/> - [clone access token](_examples/clone/auth/basic/access_token/main.go) <br/> - [clone user + password](_examples/clone/auth/basic/username_password/main.go) |
+| `clone` | `--progress` <br/> `--single-branch` <br/> `--depth` <br/> `--origin` <br/> `--recurse-submodules` <br/>`--shared` | ✅ | | - [recurse submodules](_examples/clone/main.go) <br/> - [progress](_examples/progress/main.go) |
## Basic snapshotting
-| Feature | Sub-feature | Status | Notes | Examples |
-|---|---|---|---|---|
-| `add` | | ✅ | Plain add is supported. Any other flags aren't supported | |
-| `status` | | ✅ | | |
-| `commit` | | ✅ | | - [commit](_examples/commit/main.go) |
-| `reset` | | ✅ | | |
-| `rm` | | ✅ | | |
-| `mv` | | ✅ | | |
+| Feature | Sub-feature | Status | Notes | Examples |
+| -------- | ----------- | ------ | -------------------------------------------------------- | ------------------------------------ |
+| `add` | | ✅ | Plain add is supported. Any other flags aren't supported | |
+| `status` | | ✅ | | |
+| `commit` | | ✅ | | - [commit](_examples/commit/main.go) |
+| `reset` | | ✅ | | |
+| `rm` | | ✅ | | |
+| `mv` | | ✅ | | |
## Branching and merging
-| Feature | Sub-feature | Status | Notes | Examples |
-|---|---|---|---|---|
-| `branch` | | ✅ | | - [branch](_examples/branch/main.go) |
-| `checkout` | | ✅ | Basic usages of checkout are supported. | - [checkout](_examples/checkout/main.go) |
-| `merge` | | ❌ | | |
-| `mergetool` | | ❌ | | |
-| `stash` | | ❌ | | |
-| `tag` | | ✅ | | - [tag](_examples/tag/main.go) <br/> - [tag create and push](_examples/tag-create-push/main.go) |
+| Feature | Sub-feature | Status | Notes | Examples |
+| ----------- | ----------- | ------ | --------------------------------------- | ----------------------------------------------------------------------------------------------- |
+| `branch` | | ✅ | | - [branch](_examples/branch/main.go) |
+| `checkout` | | ✅ | Basic usages of checkout are supported. | - [checkout](_examples/checkout/main.go) |
+| `merge` | | ❌ | | |
+| `mergetool` | | ❌ | | |
+| `stash` | | ❌ | | |
+| `tag` | | ✅ | | - [tag](_examples/tag/main.go) <br/> - [tag create and push](_examples/tag-create-push/main.go) |
## Sharing and updating projects
-| Feature | Sub-feature | Status | Notes | Examples |
-|---|---|---|---|---|
-| `fetch` | | ✅ | | |
-| `pull` | | ✅ | Only supports merges where the merge can be resolved as a fast-forward. | - [pull](_examples/pull/main.go) |
-| `push` | | ✅ | | - [push](_examples/push/main.go) |
-| `remote` | | ✅ | | - [remotes](_examples/remotes/main.go) |
-| `submodule` | | ✅ | | - [submodule](_examples/submodule/main.go) |
-| `submodule` | deinit | ❌ | | |
+| Feature | Sub-feature | Status | Notes | Examples |
+| ----------- | ----------- | ------ | ----------------------------------------------------------------------- | ------------------------------------------ |
+| `fetch` | | ✅ | | |
+| `pull` | | ✅ | Only supports merges where the merge can be resolved as a fast-forward. | - [pull](_examples/pull/main.go) |
+| `push` | | ✅ | | - [push](_examples/push/main.go) |
+| `remote` | | ✅ | | - [remotes](_examples/remotes/main.go) |
+| `submodule` | | ✅ | | - [submodule](_examples/submodule/main.go) |
+| `submodule` | deinit | ❌ | | |
## Inspection and comparison
-| Feature | Sub-feature | Status | Notes | Examples |
-|---|---|---|---|---|
-| `show` | | ✅ | | |
-| `log` | | ✅ | | - [log](_examples/log/main.go) |
-| `shortlog` | | (see log) | | |
-| `describe` | | ❌ | | |
+| Feature | Sub-feature | Status | Notes | Examples |
+| ---------- | ----------- | --------- | ----- | ------------------------------ |
+| `show` | | ✅ | | |
+| `log` | | ✅ | | - [log](_examples/log/main.go) |
+| `shortlog` | | (see log) | | |
+| `describe` | | ❌ | | |
## Patching
-| Feature | Sub-feature | Status | Notes | Examples |
-|---|---|---|---|---|
-| `apply` | | ❌ | | |
-| `cherry-pick` | | ❌ | | |
-| `diff` | | ✅ | Patch object with UnifiedDiff output representation. | |
-| `rebase` | | ❌ | | |
-| `revert` | | ❌ | | |
+| Feature | Sub-feature | Status | Notes | Examples |
+| ------------- | ----------- | ------ | ---------------------------------------------------- | -------- |
+| `apply` | | ❌ | | |
+| `cherry-pick` | | ❌ | | |
+| `diff` | | ✅ | Patch object with UnifiedDiff output representation. | |
+| `rebase` | | ❌ | | |
+| `revert` | | ❌ | | |
## Debugging
-| Feature | Sub-feature | Status | Notes | Examples |
-|---|---|---|---|---|
-| `bisect` | | ❌ | | |
-| `blame` | | ✅ | | - [blame](_examples/blame/main.go) |
-| `grep` | | ✅ | | |
+| Feature | Sub-feature | Status | Notes | Examples |
+| -------- | ----------- | ------ | ----- | ---------------------------------- |
+| `bisect` | | ❌ | | |
+| `blame` | | ✅ | | - [blame](_examples/blame/main.go) |
+| `grep` | | ✅ | | |
## Email
-| Feature | Sub-feature | Status | Notes | Examples |
-|---|---|---|---|---|
-| `am` | | ❌ | | |
-| `apply` | | ❌ | | |
-| `format-patch` | | ❌ | | |
-| `send-email` | | ❌ | | |
-| `request-pull` | | ❌ | | |
+| Feature | Sub-feature | Status | Notes | Examples |
+| -------------- | ----------- | ------ | ----- | -------- |
+| `am` | | ❌ | | |
+| `apply` | | ❌ | | |
+| `format-patch` | | ❌ | | |
+| `send-email` | | ❌ | | |
+| `request-pull` | | ❌ | | |
## External systems
-| Feature | Sub-feature | Status | Notes | Examples |
-|---|---|---|---|---|
-| `svn` | | ❌ | | |
-| `fast-import` | | ❌ | | |
-| `lfs` | | ❌ | | |
+| Feature | Sub-feature | Status | Notes | Examples |
+| ------------- | ----------- | ------ | ----- | -------- |
+| `svn` | | ❌ | | |
+| `fast-import` | | ❌ | | |
+| `lfs` | | ❌ | | |
## Administration
-| Feature | Sub-feature | Status | Notes | Examples |
-|---|---|---|---|---|
-| `clean` | | ✅ | | |
-| `gc` | | ❌ | | |
-| `fsck` | | ❌ | | |
-| `reflog` | | ❌ | | |
-| `filter-branch` | | ❌ | | |
-| `instaweb` | | ❌ | | |
-| `archive` | | ❌ | | |
-| `bundle` | | ❌ | | |
-| `prune` | | ❌ | | |
-| `repack` | | ❌ | | |
+| Feature | Sub-feature | Status | Notes | Examples |
+| --------------- | ----------- | ------ | ----- | -------- |
+| `clean` | | ✅ | | |
+| `gc` | | ❌ | | |
+| `fsck` | | ❌ | | |
+| `reflog` | | ❌ | | |
+| `filter-branch` | | ❌ | | |
+| `instaweb` | | ❌ | | |
+| `archive` | | ❌ | | |
+| `bundle` | | ❌ | | |
+| `prune` | | ❌ | | |
+| `repack` | | ❌ | | |
## Server admin
-| Feature | Sub-feature | Status | Notes | Examples |
-|---|---|---|---|---|
-| `daemon` | | ❌ | | |
-| `update-server-info` | | ❌ | | |
+| Feature | Sub-feature | Status | Notes | Examples |
+| -------------------- | ----------- | ------ | ----- | ----------------------------------------- |
+| `daemon` | | ❌ | | |
+| `update-server-info` | | ✅ | | [cli](./cli/go-git/update_server_info.go) |
## Advanced
-| Feature | Sub-feature | Status | Notes | Examples |
-|---|---|---|---|---|
-| `notes` | | ❌ | | |
-| `replace` | | ❌ | | |
-| `worktree` | | ❌ | | |
-| `annotate` | | (see blame) | | |
+| Feature | Sub-feature | Status | Notes | Examples |
+| ---------- | ----------- | ----------- | ----- | -------- |
+| `notes` | | ❌ | | |
+| `replace` | | ❌ | | |
+| `worktree` | | ❌ | | |
+| `annotate` | | (see blame) | | |
## GPG
-| Feature | Sub-feature | Status | Notes | Examples |
-|---|---|---|---|---|
-| `git-verify-commit` | | ✅ | | |
-| `git-verify-tag` | | ✅ | | |
+| Feature | Sub-feature | Status | Notes | Examples |
+| ------------------- | ----------- | ------ | ----- | -------- |
+| `git-verify-commit` | | ✅ | | |
+| `git-verify-tag` | | ✅ | | |
## Plumbing commands
-| Feature | Sub-feature | Status | Notes | Examples |
-|---|---|---|---|---|
-| `cat-file` | | ✅ | | |
-| `check-ignore` | | ❌ | | |
-| `commit-tree` | | ❌ | | |
-| `count-objects` | | ❌ | | |
-| `diff-index` | | ❌ | | |
-| `for-each-ref` | | ✅ | | |
-| `hash-object` | | ✅ | | |
-| `ls-files` | | ✅ | | |
-| `ls-remote` | | ✅ | | - [ls-remote](_examples/ls-remote/main.go) |
-| `merge-base` | `--independent` <br/> `--is-ancestor` | ⚠️ (partial) | Calculates the merge-base only between two commits. | - [merge-base](_examples/merge_base/main.go) |
-| `merge-base` | `--fork-point` <br/> `--octopus` | ❌ | | |
-| `read-tree` | | ❌ | | |
-| `rev-list` | | ✅ | | |
-| `rev-parse` | | ❌ | | |
-| `show-ref` | | ✅ | | |
-| `symbolic-ref` | | ✅ | | |
-| `update-index` | | ❌ | | |
-| `update-ref` | | ❌ | | |
-| `verify-pack` | | ❌ | | |
-| `write-tree` | | ❌ | | |
+| Feature | Sub-feature | Status | Notes | Examples |
+| --------------- | ------------------------------------- | ------------ | --------------------------------------------------- | -------------------------------------------- |
+| `cat-file` | | ✅ | | |
+| `check-ignore` | | ❌ | | |
+| `commit-tree` | | ❌ | | |
+| `count-objects` | | ❌ | | |
+| `diff-index` | | ❌ | | |
+| `for-each-ref` | | ✅ | | |
+| `hash-object` | | ✅ | | |
+| `ls-files` | | ✅ | | |
+| `ls-remote` | | ✅ | | - [ls-remote](_examples/ls-remote/main.go) |
+| `merge-base` | `--independent` <br/> `--is-ancestor` | ⚠️ (partial) | Calculates the merge-base only between two commits. | - [merge-base](_examples/merge_base/main.go) |
+| `merge-base` | `--fork-point` <br/> `--octopus` | ❌ | | |
+| `read-tree` | | ❌ | | |
+| `rev-list` | | ✅ | | |
+| `rev-parse` | | ❌ | | |
+| `show-ref` | | ✅ | | |
+| `symbolic-ref` | | ✅ | | |
+| `update-index` | | ❌ | | |
+| `update-ref` | | ❌ | | |
+| `verify-pack` | | ❌ | | |
+| `write-tree` | | ❌ | | |
## Indexes and Git Protocols
-| Feature | Version | Status | Notes |
-|---|---|---|---|
-| index | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ❌ | |
-| index | [v2](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ✅ | |
-| index | [v3](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ❌ | |
-| pack-protocol | [v1](https://github.com/git/git/blob/master/Documentation/gitprotocol-pack.txt) | ✅ | |
-| pack-protocol | [v2](https://github.com/git/git/blob/master/Documentation/gitprotocol-v2.txt) | ❌ | |
-| multi-pack-index | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | |
-| pack-*.rev files | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | |
-| pack-*.mtimes files | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | |
-| cruft packs | | ❌ | |
+| Feature | Version | Status | Notes |
+| -------------------- | ------------------------------------------------------------------------------- | ------ | ----- |
+| index | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ❌ | |
+| index | [v2](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ✅ | |
+| index | [v3](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ❌ | |
+| pack-protocol | [v1](https://github.com/git/git/blob/master/Documentation/gitprotocol-pack.txt) | ✅ | |
+| pack-protocol | [v2](https://github.com/git/git/blob/master/Documentation/gitprotocol-v2.txt) | ❌ | |
+| multi-pack-index | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | |
+| pack-\*.rev files | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | |
+| pack-\*.mtimes files | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | |
+| cruft packs | | ❌ | |
## Capabilities
-| Feature | Status | Notes |
-|---|---|---|
-| `multi_ack` | ❌ | |
-| `multi_ack_detailed` | ❌ | |
-| `no-done` | ❌ | |
-| `thin-pack` | ❌ | |
-| `side-band` | ⚠️ (partial) | |
-| `side-band-64k` | ⚠️ (partial) | |
-| `ofs-delta` | ✅ | |
-| `agent` | ✅ | |
-| `object-format` | ❌ | |
-| `symref` | ✅ | |
-| `shallow` | ✅ | |
-| `deepen-since` | ✅ | |
-| `deepen-not` | ❌ | |
-| `deepen-relative` | ❌ | |
-| `no-progress` | ✅ | |
-| `include-tag` | ✅ | |
-| `report-status` | ✅ | |
-| `report-status-v2` | ❌ | |
-| `delete-refs` | ✅ | |
-| `quiet` | ❌ | |
-| `atomic` | ✅ | |
-| `push-options` | ✅ | |
-| `allow-tip-sha1-in-want` | ✅ | |
-| `allow-reachable-sha1-in-want` | ❌ | |
-| `push-cert=<nonce>` | ❌ | |
-| `filter` | ❌ | |
-| `session-id=<session id>` | ❌ | |
+| Feature | Status | Notes |
+| ------------------------------ | ------------ | ----- |
+| `multi_ack` | ❌ | |
+| `multi_ack_detailed` | ❌ | |
+| `no-done` | ❌ | |
+| `thin-pack` | ❌ | |
+| `side-band` | ⚠️ (partial) | |
+| `side-band-64k` | ⚠️ (partial) | |
+| `ofs-delta` | ✅ | |
+| `agent` | ✅ | |
+| `object-format` | ❌ | |
+| `symref` | ✅ | |
+| `shallow` | ✅ | |
+| `deepen-since` | ✅ | |
+| `deepen-not` | ❌ | |
+| `deepen-relative` | ❌ | |
+| `no-progress` | ✅ | |
+| `include-tag` | ✅ | |
+| `report-status` | ✅ | |
+| `report-status-v2` | ❌ | |
+| `delete-refs` | ✅ | |
+| `quiet` | ❌ | |
+| `atomic` | ✅ | |
+| `push-options` | ✅ | |
+| `allow-tip-sha1-in-want` | ✅ | |
+| `allow-reachable-sha1-in-want` | ❌ | |
+| `push-cert=<nonce>` | ❌ | |
+| `filter` | ❌ | |
+| `session-id=<session id>` | ❌ | |
## Transport Schemes
-| Scheme | Status | Notes | Examples |
-|---|---|---|---|
-| `http(s)://` (dumb) | ❌ | | |
-| `http(s)://` (smart) | ✅ | | |
-| `git://` | ✅ | | |
-| `ssh://` | ✅ | | |
-| `file://` | ⚠️ (partial) | Warning: this is not pure Golang. This shells out to the `git` binary. | |
-| Custom | ✅ | All existing schemes can be replaced by custom implementations. | - [custom_http](_examples/custom_http/main.go) |
+| Scheme | Status | Notes | Examples |
+| -------------------- | ------------ | ---------------------------------------------------------------------- | ---------------------------------------------- |
+| `http(s)://` (dumb) | ❌ | | |
+| `http(s)://` (smart) | ✅ | | |
+| `git://` | ✅ | | |
+| `ssh://` | ✅ | | |
+| `file://` | ⚠️ (partial) | Warning: this is not pure Golang. This shells out to the `git` binary. | |
+| Custom | ✅ | All existing schemes can be replaced by custom implementations. | - [custom_http](_examples/custom_http/main.go) |
## SHA256
-| Feature | Sub-feature | Status | Notes | Examples |
-|---|---|---|---|---|
-| `init` | | ✅ | Requires building with tag sha256. | - [init](_examples/sha256/main.go) |
-| `commit` | | ✅ | Requires building with tag sha256. | - [commit](_examples/sha256/main.go) |
-| `pull` | | ❌ | | |
-| `fetch` | | ❌ | | |
-| `push` | | ❌ | | |
+| Feature | Sub-feature | Status | Notes | Examples |
+| -------- | ----------- | ------ | ---------------------------------- | ------------------------------------ |
+| `init` | | ✅ | Requires building with tag sha256. | - [init](_examples/sha256/main.go) |
+| `commit` | | ✅ | Requires building with tag sha256. | - [commit](_examples/sha256/main.go) |
+| `pull` | | ❌ | | |
+| `fetch` | | ❌ | | |
+| `push` | | ❌ | | |
## Other features
-| Feature | Sub-feature | Status | Notes | Examples |
-|---|---|---|---|---|
-| `config` | `--local` | ✅ | Read and write per-repository (`.git/config`). | |
-| `config` | `--global` <br/> `--system` | ✅ | Read-only. | |
-| `gitignore` | | ✅ | | |
-| `gitattributes` | | ✅ | | |
-| `git-worktree` | | ❌ | Multiple worktrees are not supported. | |
+| Feature | Sub-feature | Status | Notes | Examples |
+| --------------- | --------------------------- | ------ | ---------------------------------------------- | -------- |
+| `config` | `--local` | ✅ | Read and write per-repository (`.git/config`). | |
+| `config` | `--global` <br/> `--system` | ✅ | Read-only. | |
+| `gitignore` | | ✅ | | |
+| `gitattributes` | | ✅ | | |
+| `git-worktree` | | ❌ | Multiple worktrees are not supported. | |
diff --git a/Makefile b/Makefile
index 66adc8c..1e10396 100644
--- a/Makefile
+++ b/Makefile
@@ -42,3 +42,12 @@ test-coverage:
clean:
rm -rf $(GIT_DIST_PATH)
+
+fuzz:
+ @go test -fuzz=FuzzParser $(PWD)/internal/revision
+ @go test -fuzz=FuzzDecoder $(PWD)/plumbing/format/config
+ @go test -fuzz=FuzzPatchDelta $(PWD)/plumbing/format/packfile
+ @go test -fuzz=FuzzParseSignedBytes $(PWD)/plumbing/object
+ @go test -fuzz=FuzzDecode $(PWD)/plumbing/object
+ @go test -fuzz=FuzzDecoder $(PWD)/plumbing/protocol/packp
+ @go test -fuzz=FuzzNewEndpoint $(PWD)/plumbing/transport
diff --git a/_examples/sha256/main.go b/_examples/sha256/main.go
index e1772d2..0392772 100644
--- a/_examples/sha256/main.go
+++ b/_examples/sha256/main.go
@@ -15,7 +15,7 @@ import (
// This example requires building with the sha256 tag for it to work:
// go run -tags sha256 main.go /tmp/repository
-// Basic example of how to initialise a repository using sha256 as the hashing algorithmn.
+// Basic example of how to initialise a repository using sha256 as the hashing algorithm.
func main() {
CheckArgs("<directory>")
directory := os.Args[1]
diff --git a/cli/go-git/go.mod b/cli/go-git/go.mod
new file mode 100644
index 0000000..33f5f24
--- /dev/null
+++ b/cli/go-git/go.mod
@@ -0,0 +1,32 @@
+module github.com/go-git/go-git/cli/go-git
+
+go 1.19
+
+require (
+ github.com/go-git/go-git/v5 v5.11.0
+ github.com/jessevdk/go-flags v1.5.0
+)
+
+require (
+ dario.cat/mergo v1.0.0 // indirect
+ github.com/Microsoft/go-winio v0.6.1 // indirect
+ github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect
+ github.com/cloudflare/circl v1.3.7 // indirect
+ github.com/cyphar/filepath-securejoin v0.2.4 // indirect
+ github.com/emirpasic/gods v1.18.1 // indirect
+ github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
+ github.com/go-git/go-billy/v5 v5.5.0 // indirect
+ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+ github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
+ github.com/kevinburke/ssh_config v1.2.0 // indirect
+ github.com/pjbgf/sha1cd v0.3.0 // indirect
+ github.com/sergi/go-diff v1.1.0 // indirect
+ github.com/skeema/knownhosts v1.2.1 // indirect
+ github.com/xanzy/ssh-agent v0.3.3 // indirect
+ golang.org/x/crypto v0.17.0 // indirect
+ golang.org/x/mod v0.12.0 // indirect
+ golang.org/x/net v0.19.0 // indirect
+ golang.org/x/sys v0.15.0 // indirect
+ golang.org/x/tools v0.13.0 // indirect
+ gopkg.in/warnings.v0 v0.1.2 // indirect
+)
diff --git a/cli/go-git/go.sum b/cli/go-git/go.sum
new file mode 100644
index 0000000..42324f5
--- /dev/null
+++ b/cli/go-git/go.sum
@@ -0,0 +1,133 @@
+dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
+dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
+github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
+github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
+github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
+github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg=
+github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
+github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
+github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
+github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
+github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
+github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
+github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
+github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU=
+github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
+github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
+github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY=
+github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
+github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
+github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
+github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
+github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
+github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4=
+github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
+github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
+github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc=
+github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
+github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
+github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
+github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
+github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
+github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
+github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ=
+github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
+github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
+golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
+golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
+golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
+golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
+golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
+golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
+golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ=
+golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
+gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/cli/go-git/main.go b/cli/go-git/main.go
index 97b8c3e..0a5ad2c 100644
--- a/cli/go-git/main.go
+++ b/cli/go-git/main.go
@@ -22,6 +22,7 @@ func main() {
}
parser := flags.NewNamedParser(bin, flags.Default)
+ parser.AddCommand("update-server-info", "", "", &CmdUpdateServerInfo{})
parser.AddCommand("receive-pack", "", "", &CmdReceivePack{})
parser.AddCommand("upload-pack", "", "", &CmdUploadPack{})
parser.AddCommand("version", "Show the version information.", "", &CmdVersion{})
diff --git a/cli/go-git/update_server_info.go b/cli/go-git/update_server_info.go
new file mode 100644
index 0000000..a7f3e3e
--- /dev/null
+++ b/cli/go-git/update_server_info.go
@@ -0,0 +1,34 @@
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/go-git/go-git/v5"
+ "github.com/go-git/go-git/v5/plumbing/serverinfo"
+ "github.com/go-git/go-git/v5/storage/filesystem"
+)
+
+// CmdUpdateServerInfo command updates the server info files in the repository.
+// This is used by git http transport (dumb) to generate a list of available
+// refs for the repository. See:
+// https://git-scm.com/docs/git-update-server-info
+type CmdUpdateServerInfo struct {
+ cmd
+}
+
+// Usage returns the usage of the command.
+func (CmdUpdateServerInfo) Usage() string {
+ return fmt.Sprintf("within a git repository run: %s", os.Args[0])
+}
+
+// Execute runs the command.
+func (c *CmdUpdateServerInfo) Execute(args []string) error {
+ r, err := git.PlainOpen(".")
+ if err != nil {
+ return err
+ }
+
+ fs := r.Storer.(*filesystem.Storage).Filesystem()
+ return serverinfo.UpdateServerInfo(r.Storer, fs)
+}
diff --git a/config/branch.go b/config/branch.go
index 652270a..db2cb49 100644
--- a/config/branch.go
+++ b/config/branch.go
@@ -54,7 +54,7 @@ func (b *Branch) Validate() error {
return errBranchInvalidRebase
}
- return nil
+ return plumbing.NewBranchReferenceName(b.Name).Validate()
}
func (b *Branch) marshal() *format.Subsection {
diff --git a/config/config.go b/config/config.go
index 82af12d..6d41c15 100644
--- a/config/config.go
+++ b/config/config.go
@@ -13,6 +13,7 @@ import (
"github.com/go-git/go-billy/v5/osfs"
"github.com/go-git/go-git/v5/internal/url"
+ "github.com/go-git/go-git/v5/plumbing"
format "github.com/go-git/go-git/v5/plumbing/format/config"
)
@@ -63,9 +64,9 @@ type Config struct {
}
User struct {
- // Name is the personal name of the author and the commiter of a commit.
+ // Name is the personal name of the author and the committer of a commit.
Name string
- // Email is the email of the author and the commiter of a commit.
+ // Email is the email of the author and the committer of a commit.
Email string
}
@@ -77,9 +78,9 @@ type Config struct {
}
Committer struct {
- // Name is the personal name of the commiter of a commit.
+ // Name is the personal name of the committer of a commit.
Name string
- // Email is the email of the the commiter of a commit.
+ // Email is the email of the committer of a commit.
Email string
}
@@ -157,8 +158,8 @@ func ReadConfig(r io.Reader) (*Config, error) {
}
// LoadConfig loads a config file from a given scope. The returned Config,
-// contains exclusively information fom the given scope. If couldn't find a
-// config file to the given scope, a empty one is returned.
+// contains exclusively information from the given scope. If it couldn't find a
+// config file to the given scope, an empty one is returned.
func LoadConfig(scope Scope) (*Config, error) {
if scope == LocalScope {
return nil, fmt.Errorf("LocalScope should be read from the a ConfigStorer")
@@ -614,7 +615,7 @@ func (c *RemoteConfig) Validate() error {
c.Fetch = []RefSpec{RefSpec(fmt.Sprintf(DefaultFetchRefSpec, c.Name))}
}
- return nil
+ return plumbing.NewRemoteHEADReferenceName(c.Name).Validate()
}
func (c *RemoteConfig) unmarshal(s *format.Subsection) error {
diff --git a/go.mod b/go.mod
index ea47145..70107e0 100644
--- a/go.mod
+++ b/go.mod
@@ -1,43 +1,46 @@
module github.com/go-git/go-git/v5
// go-git supports the last 3 stable Go versions.
-go 1.18
+go 1.19
require (
dario.cat/mergo v1.0.0
- github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95
- github.com/acomagu/bufpipe v1.0.4
+ github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5
- github.com/elazarl/goproxy v0.0.0-20221015165544-a0805db90819
+ github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a
github.com/emirpasic/gods v1.18.1
- github.com/gliderlabs/ssh v0.3.5
+ github.com/gliderlabs/ssh v0.3.6
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376
- github.com/go-git/go-billy/v5 v5.4.1
- github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20230305113008-0c11038e723f
+ github.com/go-git/go-billy/v5 v5.5.0
+ github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da
- github.com/google/go-cmp v0.5.9
+ github.com/google/go-cmp v0.6.0
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99
- github.com/jessevdk/go-flags v1.5.0
github.com/kevinburke/ssh_config v1.2.0
github.com/pjbgf/sha1cd v0.3.0
github.com/sergi/go-diff v1.1.0
- github.com/skeema/knownhosts v1.2.0
+ github.com/skeema/knownhosts v1.2.1
+ github.com/stretchr/testify v1.8.4
github.com/xanzy/ssh-agent v0.3.3
- golang.org/x/crypto v0.11.0
- golang.org/x/net v0.12.0
- golang.org/x/sys v0.10.0
- golang.org/x/text v0.11.0
+ golang.org/x/crypto v0.18.0
+ golang.org/x/net v0.20.0
+ golang.org/x/sys v0.16.0
+ golang.org/x/text v0.14.0
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
)
require (
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be // indirect
- github.com/cloudflare/circl v1.3.3 // indirect
+ github.com/cloudflare/circl v1.3.7 // indirect
+ github.com/cyphar/filepath-securejoin v0.2.4 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
- github.com/rogpeppe/go-internal v1.9.0 // indirect
- golang.org/x/mod v0.8.0 // indirect
- golang.org/x/tools v0.6.0 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/rogpeppe/go-internal v1.11.0 // indirect
+ golang.org/x/mod v0.12.0 // indirect
+ golang.org/x/tools v0.13.0 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/go.sum b/go.sum
index 375a22a..b11a6ef 100644
--- a/go.sum
+++ b/go.sum
@@ -3,44 +3,42 @@ dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
-github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 h1:KLq8BE0KwCL+mmXnjLWEAOYO+2l2AE4YMmqG1ZpZHBs=
-github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
-github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ=
-github.com/acomagu/bufpipe v1.0.4/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4=
+github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg=
+github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
-github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs=
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
+github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
+github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
+github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/elazarl/goproxy v0.0.0-20221015165544-a0805db90819 h1:RIB4cRk+lBqKK3Oy0r2gRX4ui7tuhiZq2SuTtTCi0/0=
-github.com/elazarl/goproxy v0.0.0-20221015165544-a0805db90819/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
+github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU=
+github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2 h1:dWB6v3RcOy03t/bUadywsbyrQwCqZeNIEX6M1OtSZOM=
github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8=
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
-github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY=
-github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4=
+github.com/gliderlabs/ssh v0.3.6 h1:ZzjlDa05TcFRICb3anf/dSPN3ewz1Zx6CMLPWgkm3b8=
+github.com/gliderlabs/ssh v0.3.6/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
-github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
-github.com/go-git/go-billy/v5 v5.4.1 h1:Uwp5tDRkPr+l/TnbHOQzp+tmJfLceOlbVucgpTz8ix4=
-github.com/go-git/go-billy/v5 v5.4.1/go.mod h1:vjbugF6Fz7JIflbVpl1hJsGjSHNltrSw45YK/ukIvQg=
-github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20230305113008-0c11038e723f h1:Pz0DHeFij3XFhoBRGUDPzSJ+w2UcK5/0JvF8DRI58r8=
-github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20230305113008-0c11038e723f/go.mod h1:8LHG1a3SRW71ettAD/jW13h8c6AqjVSeL11RAdgaqpo=
+github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
+github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
+github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
+github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
-github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc=
-github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
@@ -51,9 +49,7 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A=
-github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA=
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
@@ -62,72 +58,67 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc=
-github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
+github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/skeema/knownhosts v1.2.0 h1:h9r9cf0+u7wSE+M183ZtMGgOJKiL96brpaz5ekfJCpM=
-github.com/skeema/knownhosts v1.2.0/go.mod h1:g4fPeYpque7P0xefxtGzV81ihjC8sX2IqpAoNkjxbMo=
+github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ=
+github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
-golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
-golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
+golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
+golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
+golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
-golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
-golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
+golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
+golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
-golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
+golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
-golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
+golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -135,23 +126,22 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
-golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
-golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ=
+golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/internal/reference/sort.go b/internal/reference/sort.go
new file mode 100644
index 0000000..726edbd
--- /dev/null
+++ b/internal/reference/sort.go
@@ -0,0 +1,14 @@
+package reference
+
+import (
+ "sort"
+
+ "github.com/go-git/go-git/v5/plumbing"
+)
+
+// Sort sorts the references by name to ensure a consistent order.
+func Sort(refs []*plumbing.Reference) {
+ sort.Slice(refs, func(i, j int) bool {
+ return refs[i].Name() < refs[j].Name()
+ })
+}
diff --git a/internal/revision/parser_test.go b/internal/revision/parser_test.go
index 3a77b2f..1eb3861 100644
--- a/internal/revision/parser_test.go
+++ b/internal/revision/parser_test.go
@@ -3,6 +3,7 @@ package revision
import (
"bytes"
"regexp"
+ "testing"
"time"
. "gopkg.in/check.v1"
@@ -397,3 +398,11 @@ func (s *ParserSuite) TestParseRefWithInvalidName(c *C) {
c.Assert(err, DeepEquals, e)
}
}
+
+func FuzzParser(f *testing.F) {
+
+ f.Fuzz(func(t *testing.T, input string) {
+ parser := NewParser(bytes.NewBufferString(input))
+ parser.Parse()
+ })
+}
diff --git a/options.go b/options.go
index 757bdc8..e748b91 100644
--- a/options.go
+++ b/options.go
@@ -78,6 +78,15 @@ type CloneOptions struct {
CABundle []byte
// ProxyOptions provides info required for connecting to a proxy.
ProxyOptions transport.ProxyOptions
+ // When the repository to clone is on the local machine, instead of
+ // using hard links, automatically set up .git/objects/info/alternates
+ // to share the objects with the source repository.
+ // The resulting repository starts out without any objects of its own.
+ // NOTE: this is a possibly dangerous operation; do not use it unless
+ // you understand what it does.
+ //
+ // [Reference]: https://git-scm.com/docs/git-clone#Documentation/git-clone.txt---shared
+ Shared bool
}
// Validate validates the fields and sets the default values.
@@ -315,9 +324,9 @@ var (
// CheckoutOptions describes how a checkout operation should be performed.
type CheckoutOptions struct {
- // Hash is the hash of the commit to be checked out. If used, HEAD will be
- // in detached mode. If Create is not used, Branch and Hash are mutually
- // exclusive.
+ // Hash is the hash of a commit or tag to be checked out. If used, HEAD
+ // will be in detached mode. If Create is not used, Branch and Hash are
+ // mutually exclusive.
Hash plumbing.Hash
// Branch to be checked out, if Branch and Hash are empty is set to `master`.
Branch plumbing.ReferenceName
@@ -737,6 +746,9 @@ type PlainOpenOptions struct {
func (o *PlainOpenOptions) Validate() error { return nil }
type PlainInitOptions struct {
+ InitOptions
+ // Determines if the repository will have a worktree (non-bare) or not (bare).
+ Bare bool
ObjectFormat formatcfg.ObjectFormat
}
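
As a quick illustration of the new Shared option (a hedged sketch, not part of the patch): cloning from a local path through the existing PlainClone entry point. Both paths below are hypothetical.

	package main

	import (
		"log"

		git "github.com/go-git/go-git/v5"
	)

	func main() {
		// Shared only makes sense for local sources: instead of copying (or hard
		// linking) objects, the clone references the source repository's object
		// database via .git/objects/info/alternates.
		_, err := git.PlainClone("/tmp/shared-clone", false, &git.CloneOptions{
			URL:    "/path/to/source/repo",
			Shared: true,
		})
		if err != nil {
			log.Fatal(err)
		}
	}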
diff --git a/oss-fuzz.sh b/oss-fuzz.sh
new file mode 100644
index 0000000..885548f
--- /dev/null
+++ b/oss-fuzz.sh
@@ -0,0 +1,35 @@
+#!/bin/bash -eu
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+################################################################################
+
+
+go mod download
+go get github.com/AdamKorcz/go-118-fuzz-build/testing
+
+if [ "$SANITIZER" != "coverage" ]; then
+ sed -i '/func (s \*DecoderSuite) TestDecode(/,/^}/ s/^/\/\//' plumbing/format/config/decoder_test.go
+ sed -n '35,$p' plumbing/format/packfile/common_test.go >> plumbing/format/packfile/delta_test.go
+ sed -n '20,53p' plumbing/object/object_test.go >> plumbing/object/tree_test.go
+ sed -i 's|func Test|// func Test|' plumbing/transport/common_test.go
+fi
+
+compile_native_go_fuzzer $(pwd)/internal/revision FuzzParser fuzz_parser
+compile_native_go_fuzzer $(pwd)/plumbing/format/config FuzzDecoder fuzz_decoder_config
+compile_native_go_fuzzer $(pwd)/plumbing/format/packfile FuzzPatchDelta fuzz_patch_delta
+compile_native_go_fuzzer $(pwd)/plumbing/object FuzzParseSignedBytes fuzz_parse_signed_bytes
+compile_native_go_fuzzer $(pwd)/plumbing/object FuzzDecode fuzz_decode
+compile_native_go_fuzzer $(pwd)/plumbing/protocol/packp FuzzDecoder fuzz_decoder_packp
+compile_native_go_fuzzer $(pwd)/plumbing/transport FuzzNewEndpoint fuzz_new_endpoint
diff --git a/plumbing/filemode/filemode.go b/plumbing/filemode/filemode.go
index b848a97..ea1a457 100644
--- a/plumbing/filemode/filemode.go
+++ b/plumbing/filemode/filemode.go
@@ -133,7 +133,7 @@ func (m FileMode) IsMalformed() bool {
m != Submodule
}
-// String returns the FileMode as a string in the standatd git format,
+// String returns the FileMode as a string in the standard git format,
// this is, an octal number padded with ceros to 7 digits. Malformed
// modes are printed in that same format, for easier debugging.
//
diff --git a/plumbing/format/commitgraph/commitgraph.go b/plumbing/format/commitgraph/commitgraph.go
index 3d59323..e772d26 100644
--- a/plumbing/format/commitgraph/commitgraph.go
+++ b/plumbing/format/commitgraph/commitgraph.go
@@ -8,6 +8,9 @@ import (
// CommitData is a reduced representation of Commit as presented in the commit graph
// file. It is merely useful as an optimization for walking the commit graphs.
+//
+// Deprecated: This package uses the wrong types for Generation and Index in CommitData.
+// Use the v2 package instead.
type CommitData struct {
// TreeHash is the hash of the root tree of the commit.
TreeHash plumbing.Hash
@@ -24,6 +27,9 @@ type CommitData struct {
// Index represents a representation of commit graph that allows indexed
// access to the nodes using commit object hash
+//
+// Deprecated: This package uses the wrong types for Generation and Index in CommitData.
+// Use the v2 package instead.
type Index interface {
// GetIndexByHash gets the index in the commit graph from commit hash, if available
GetIndexByHash(h plumbing.Hash) (int, error)
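
To make the deprecation notes concrete, a hedged sketch of what moving to the v2 package looks like at a call site; the helper names here are illustrative, and the intended difference is only the index type (plain int in v1, uint32 plus an io.Closer in the v2 Index defined later in this patch).

	package example

	import (
		"github.com/go-git/go-git/v5/plumbing"
		"github.com/go-git/go-git/v5/plumbing/format/commitgraph"
		v2 "github.com/go-git/go-git/v5/plumbing/format/commitgraph/v2"
	)

	// lookupV1 uses the deprecated package: graph positions are plain ints.
	func lookupV1(idx commitgraph.Index, h plumbing.Hash) (*commitgraph.CommitData, error) {
		i, err := idx.GetIndexByHash(h)
		if err != nil {
			return nil, err
		}
		return idx.GetCommitDataByIndex(i)
	}

	// lookupV2 uses the v2 package: positions are uint32 and the caller is
	// responsible for closing the Index when done with it.
	func lookupV2(idx v2.Index, h plumbing.Hash) (*v2.CommitData, error) {
		i, err := idx.GetIndexByHash(h)
		if err != nil {
			return nil, err
		}
		return idx.GetCommitDataByIndex(i)
	}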
diff --git a/plumbing/format/commitgraph/doc.go b/plumbing/format/commitgraph/doc.go
index 41cd8b1..c320e18 100644
--- a/plumbing/format/commitgraph/doc.go
+++ b/plumbing/format/commitgraph/doc.go
@@ -1,23 +1,26 @@
// Package commitgraph implements encoding and decoding of commit-graph files.
//
+// Deprecated: This package uses the wrong types for Generation and Index in CommitData.
+// Use the v2 package instead.
+//
// Git commit graph format
// =======================
//
// The Git commit graph stores a list of commit OIDs and some associated
// metadata, including:
//
-// - The generation number of the commit. Commits with no parents have
-// generation number 1; commits with parents have generation number
-// one more than the maximum generation number of its parents. We
-// reserve zero as special, and can be used to mark a generation
-// number invalid or as "not computed".
+// - The generation number of the commit. Commits with no parents have
+// generation number 1; commits with parents have generation number
+// one more than the maximum generation number of its parents. We
+// reserve zero as special, and can be used to mark a generation
+// number invalid or as "not computed".
//
// - The root tree OID.
//
// - The commit date.
//
-// - The parents of the commit, stored using positional references within
-// the graph file.
+// - The parents of the commit, stored using positional references within
+// the graph file.
//
// These positional references are stored as unsigned 32-bit integers
// corresponding to the array position within the list of commit OIDs. Due
@@ -35,68 +38,68 @@
//
// HEADER:
//
-// 4-byte signature:
-// The signature is: {'C', 'G', 'P', 'H'}
+// 4-byte signature:
+// The signature is: {'C', 'G', 'P', 'H'}
//
-// 1-byte version number:
-// Currently, the only valid version is 1.
+// 1-byte version number:
+// Currently, the only valid version is 1.
//
-// 1-byte Hash Version (1 = SHA-1)
-// We infer the hash length (H) from this value.
+// 1-byte Hash Version (1 = SHA-1)
+// We infer the hash length (H) from this value.
//
-// 1-byte number (C) of "chunks"
+// 1-byte number (C) of "chunks"
//
-// 1-byte (reserved for later use)
-// Current clients should ignore this value.
+// 1-byte (reserved for later use)
+// Current clients should ignore this value.
//
// CHUNK LOOKUP:
//
-// (C + 1) * 12 bytes listing the table of contents for the chunks:
-// First 4 bytes describe the chunk id. Value 0 is a terminating label.
-// Other 8 bytes provide the byte-offset in current file for chunk to
-// start. (Chunks are ordered contiguously in the file, so you can infer
-// the length using the next chunk position if necessary.) Each chunk
-// ID appears at most once.
+// (C + 1) * 12 bytes listing the table of contents for the chunks:
+// First 4 bytes describe the chunk id. Value 0 is a terminating label.
+// Other 8 bytes provide the byte-offset in current file for chunk to
+// start. (Chunks are ordered contiguously in the file, so you can infer
+// the length using the next chunk position if necessary.) Each chunk
+// ID appears at most once.
//
-// The remaining data in the body is described one chunk at a time, and
-// these chunks may be given in any order. Chunks are required unless
-// otherwise specified.
+// The remaining data in the body is described one chunk at a time, and
+// these chunks may be given in any order. Chunks are required unless
+// otherwise specified.
//
// CHUNK DATA:
//
-// OID Fanout (ID: {'O', 'I', 'D', 'F'}) (256 * 4 bytes)
-// The ith entry, F[i], stores the number of OIDs with first
-// byte at most i. Thus F[255] stores the total
-// number of commits (N).
-//
-// OID Lookup (ID: {'O', 'I', 'D', 'L'}) (N * H bytes)
-// The OIDs for all commits in the graph, sorted in ascending order.
-//
-// Commit Data (ID: {'C', 'D', 'A', 'T' }) (N * (H + 16) bytes)
-// * The first H bytes are for the OID of the root tree.
-// * The next 8 bytes are for the positions of the first two parents
-// of the ith commit. Stores value 0x7000000 if no parent in that
-// position. If there are more than two parents, the second value
-// has its most-significant bit on and the other bits store an array
-// position into the Extra Edge List chunk.
-// * The next 8 bytes store the generation number of the commit and
-// the commit time in seconds since EPOCH. The generation number
-// uses the higher 30 bits of the first 4 bytes, while the commit
-// time uses the 32 bits of the second 4 bytes, along with the lowest
-// 2 bits of the lowest byte, storing the 33rd and 34th bit of the
-// commit time.
-//
-// Extra Edge List (ID: {'E', 'D', 'G', 'E'}) [Optional]
-// This list of 4-byte values store the second through nth parents for
-// all octopus merges. The second parent value in the commit data stores
-// an array position within this list along with the most-significant bit
-// on. Starting at that array position, iterate through this list of commit
-// positions for the parents until reaching a value with the most-significant
-// bit on. The other bits correspond to the position of the last parent.
+// OID Fanout (ID: {'O', 'I', 'D', 'F'}) (256 * 4 bytes)
+// The ith entry, F[i], stores the number of OIDs with first
+// byte at most i. Thus F[255] stores the total
+// number of commits (N).
+//
+// OID Lookup (ID: {'O', 'I', 'D', 'L'}) (N * H bytes)
+// The OIDs for all commits in the graph, sorted in ascending order.
+//
+// Commit Data (ID: {'C', 'D', 'A', 'T' }) (N * (H + 16) bytes)
+// * The first H bytes are for the OID of the root tree.
+// * The next 8 bytes are for the positions of the first two parents
+// of the ith commit. Stores value 0x7000000 if no parent in that
+// position. If there are more than two parents, the second value
+// has its most-significant bit on and the other bits store an array
+// position into the Extra Edge List chunk.
+// * The next 8 bytes store the generation number of the commit and
+// the commit time in seconds since EPOCH. The generation number
+// uses the higher 30 bits of the first 4 bytes, while the commit
+// time uses the 32 bits of the second 4 bytes, along with the lowest
+// 2 bits of the lowest byte, storing the 33rd and 34th bit of the
+// commit time.
+//
+// Extra Edge List (ID: {'E', 'D', 'G', 'E'}) [Optional]
+// This list of 4-byte values store the second through nth parents for
+// all octopus merges. The second parent value in the commit data stores
+// an array position within this list along with the most-significant bit
+// on. Starting at that array position, iterate through this list of commit
+// positions for the parents until reaching a value with the most-significant
+// bit on. The other bits correspond to the position of the last parent.
//
// TRAILER:
//
-// H-byte HASH-checksum of all of the above.
+// H-byte HASH-checksum of all of the above.
//
// Source:
// https://raw.githubusercontent.com/git/git/master/Documentation/technical/commit-graph-format.txt
diff --git a/plumbing/format/commitgraph/encoder.go b/plumbing/format/commitgraph/encoder.go
index f61025b..3176353 100644
--- a/plumbing/format/commitgraph/encoder.go
+++ b/plumbing/format/commitgraph/encoder.go
@@ -1,6 +1,7 @@
package commitgraph
import (
+ "crypto"
"io"
"github.com/go-git/go-git/v5/plumbing"
@@ -9,12 +10,18 @@ import (
)
// Encoder writes MemoryIndex structs to an output stream.
+//
+// Deprecated: This package uses the wrong types for Generation and Index in CommitData.
+// Use the v2 package instead.
type Encoder struct {
io.Writer
hash hash.Hash
}
// NewEncoder returns a new stream encoder that writes to w.
+//
+// Deprecated: This package uses the wrong types for Generation and Index in CommitData.
+// Use the v2 package instead.
func NewEncoder(w io.Writer) *Encoder {
h := hash.New(hash.CryptoType)
mw := io.MultiWriter(w, h)
@@ -22,6 +29,9 @@ func NewEncoder(w io.Writer) *Encoder {
}
// Encode writes an index into the commit-graph file
+//
+// Deprecated: This package uses the wrong types for Generation and Index in CommitData.
+// Use the v2 package instead.
func (e *Encoder) Encode(idx Index) error {
// Get all the hashes in the input index
hashes := idx.Hashes()
@@ -30,7 +40,7 @@ func (e *Encoder) Encode(idx Index) error {
hashToIndex, fanout, extraEdgesCount := e.prepare(idx, hashes)
chunkSignatures := [][]byte{oidFanoutSignature, oidLookupSignature, commitDataSignature}
- chunkSizes := []uint64{4 * 256, uint64(len(hashes)) * hash.Size, uint64(len(hashes)) * 36}
+ chunkSizes := []uint64{4 * 256, uint64(len(hashes)) * hash.Size, uint64(len(hashes)) * (hash.Size + commitDataSize)}
if extraEdgesCount > 0 {
chunkSignatures = append(chunkSignatures, extraEdgeListSignature)
chunkSizes = append(chunkSizes, uint64(extraEdgesCount)*4)
@@ -88,7 +98,11 @@ func (e *Encoder) prepare(idx Index, hashes []plumbing.Hash) (hashToIndex map[pl
func (e *Encoder) encodeFileHeader(chunkCount int) (err error) {
if _, err = e.Write(commitFileSignature); err == nil {
- _, err = e.Write([]byte{1, 1, byte(chunkCount), 0})
+ version := byte(1)
+ if hash.CryptoType == crypto.SHA256 {
+ version = byte(2)
+ }
+ _, err = e.Write([]byte{1, version, byte(chunkCount), 0})
}
return
}
diff --git a/plumbing/format/commitgraph/file.go b/plumbing/format/commitgraph/file.go
index 1d25238..ef8fb34 100644
--- a/plumbing/format/commitgraph/file.go
+++ b/plumbing/format/commitgraph/file.go
@@ -2,15 +2,20 @@ package commitgraph
import (
"bytes"
+ "crypto"
encbin "encoding/binary"
"errors"
"io"
"time"
"github.com/go-git/go-git/v5/plumbing"
+ "github.com/go-git/go-git/v5/plumbing/hash"
"github.com/go-git/go-git/v5/utils/binary"
)
+// Deprecated: This package uses the wrong types for Generation and Index in CommitData.
+// Use the v2 package instead.
+
var (
// ErrUnsupportedVersion is returned by OpenFileIndex when the commit graph
// file version is not supported.
@@ -36,6 +41,8 @@ var (
parentLast = uint32(0x80000000)
)
+const commitDataSize = 16
+
type fileIndex struct {
reader io.ReaderAt
fanout [256]int
@@ -47,6 +54,9 @@ type fileIndex struct {
// OpenFileIndex opens a serialized commit graph file in the format described at
// https://github.com/git/git/blob/master/Documentation/technical/commit-graph-format.txt
+//
+// Deprecated: This package uses the wrong types for Generation and Index in CommitData.
+// Use the v2 package instead.
func OpenFileIndex(reader io.ReaderAt) (Index, error) {
fi := &fileIndex{reader: reader}
@@ -65,7 +75,7 @@ func OpenFileIndex(reader io.ReaderAt) (Index, error) {
func (fi *fileIndex) verifyFileHeader() error {
// Verify file signature
- var signature = make([]byte, 4)
+ signature := make([]byte, 4)
if _, err := fi.reader.ReadAt(signature, 0); err != nil {
return err
}
@@ -74,22 +84,31 @@ func (fi *fileIndex) verifyFileHeader() error {
}
// Read and verify the file header
- var header = make([]byte, 4)
+ header := make([]byte, 4)
if _, err := fi.reader.ReadAt(header, 4); err != nil {
return err
}
if header[0] != 1 {
return ErrUnsupportedVersion
}
- if header[1] != 1 {
- return ErrUnsupportedHash
+ if hash.CryptoType == crypto.SHA1 {
+ if header[1] != 1 {
+ return ErrUnsupportedVersion
+ }
+ } else if hash.CryptoType == crypto.SHA256 {
+ if header[1] != 2 {
+ return ErrUnsupportedVersion
+ }
+ } else {
+ // Unknown hash type
+ return ErrUnsupportedVersion
}
return nil
}
func (fi *fileIndex) readChunkHeaders() error {
- var chunkID = make([]byte, 4)
+ chunkID := make([]byte, 4)
for i := 0; ; i++ {
chunkHeader := io.NewSectionReader(fi.reader, 8+(int64(i)*12), 12)
if _, err := io.ReadAtLeast(chunkHeader, chunkID, 4); err != nil {
@@ -148,7 +167,7 @@ func (fi *fileIndex) GetIndexByHash(h plumbing.Hash) (int, error) {
high := fi.fanout[h[0]]
for low < high {
mid := (low + high) >> 1
- offset := fi.oidLookupOffset + int64(mid)*20
+ offset := fi.oidLookupOffset + int64(mid)*hash.Size
if _, err := fi.reader.ReadAt(oid[:], offset); err != nil {
return 0, err
}
@@ -170,8 +189,8 @@ func (fi *fileIndex) GetCommitDataByIndex(idx int) (*CommitData, error) {
return nil, plumbing.ErrObjectNotFound
}
- offset := fi.commitDataOffset + int64(idx)*36
- commitDataReader := io.NewSectionReader(fi.reader, offset, 36)
+ offset := fi.commitDataOffset + int64(idx)*(hash.Size+commitDataSize)
+ commitDataReader := io.NewSectionReader(fi.reader, offset, hash.Size+commitDataSize)
treeHash, err := binary.ReadHash(commitDataReader)
if err != nil {
@@ -237,7 +256,7 @@ func (fi *fileIndex) getHashesFromIndexes(indexes []int) ([]plumbing.Hash, error
return nil, ErrMalformedCommitGraphFile
}
- offset := fi.oidLookupOffset + int64(idx)*20
+ offset := fi.oidLookupOffset + int64(idx)*hash.Size
if _, err := fi.reader.ReadAt(hashes[i][:], offset); err != nil {
return nil, err
}
@@ -250,8 +269,8 @@ func (fi *fileIndex) getHashesFromIndexes(indexes []int) ([]plumbing.Hash, error
func (fi *fileIndex) Hashes() []plumbing.Hash {
hashes := make([]plumbing.Hash, fi.fanout[0xff])
for i := 0; i < fi.fanout[0xff]; i++ {
- offset := fi.oidLookupOffset + int64(i)*20
- if n, err := fi.reader.ReadAt(hashes[i][:], offset); err != nil || n < 20 {
+ offset := fi.oidLookupOffset + int64(i)*hash.Size
+ if n, err := fi.reader.ReadAt(hashes[i][:], offset); err != nil || n < hash.Size {
return nil
}
}
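
The constants behind those offset changes are easy to sanity-check by hand; a throwaway sketch (not part of the patch) showing how the record sizes fall out of the hash length once the hard-coded 20 and 36 are gone:

	package main

	import "fmt"

	func main() {
		// Per the format description: 8 bytes of parent positions plus 8 bytes
		// of generation number / commit time follow the root tree OID.
		const commitDataSize = 16

		for _, hashSize := range []int64{20, 32} { // SHA-1, SHA-256
			record := hashSize + commitDataSize
			fmt.Printf("H=%d: OID lookup stride=%d bytes, commit data record=%d bytes\n",
				hashSize, hashSize, record)
		}
	}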
diff --git a/plumbing/format/commitgraph/memory.go b/plumbing/format/commitgraph/memory.go
index b24ce36..06415e5 100644
--- a/plumbing/format/commitgraph/memory.go
+++ b/plumbing/format/commitgraph/memory.go
@@ -6,12 +6,18 @@ import (
// MemoryIndex provides a way to build the commit-graph in memory
// for later encoding to file.
+//
+// Deprecated: This package uses the wrong types for Generation and Index in CommitData.
+// Use the v2 package instead.
type MemoryIndex struct {
commitData []*CommitData
indexMap map[plumbing.Hash]int
}
// NewMemoryIndex creates in-memory commit graph representation
+//
+// Deprecated: This package uses the wrong types for Generation and Index in CommitData.
+// Use the v2 package instead.
func NewMemoryIndex() *MemoryIndex {
return &MemoryIndex{
indexMap: make(map[plumbing.Hash]int),
diff --git a/plumbing/format/commitgraph/v2/chain.go b/plumbing/format/commitgraph/v2/chain.go
new file mode 100644
index 0000000..8da60d0
--- /dev/null
+++ b/plumbing/format/commitgraph/v2/chain.go
@@ -0,0 +1,100 @@
+package v2
+
+import (
+ "bufio"
+ "io"
+ "path"
+
+ "github.com/go-git/go-billy/v5"
+ "github.com/go-git/go-git/v5/plumbing"
+)
+
+// OpenChainFile reads a commit chain file and returns a slice of the hashes within it.
+//
+// Commit-Graph chains are described at https://git-scm.com/docs/commit-graph
+// and are newline-separated lists of graph file hashes, oldest to newest.
+//
+// This function simply reads the file and returns the hashes as a slice.
+func OpenChainFile(r io.Reader) ([]string, error) {
+ if r == nil {
+ return nil, io.ErrUnexpectedEOF
+ }
+ bufRd := bufio.NewReader(r)
+ chain := make([]string, 0, 8)
+ for {
+ line, err := bufRd.ReadSlice('\n')
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return nil, err
+ }
+
+ hashStr := string(line[:len(line)-1])
+ if !plumbing.IsHash(hashStr) {
+ return nil, ErrMalformedCommitGraphFile
+ }
+ chain = append(chain, hashStr)
+ }
+ return chain, nil
+}
+
+// OpenChainOrFileIndex expects a billy.Filesystem representing a .git directory.
+// It will first attempt to read a commit-graph index file, before trying to read a
+// commit-graph chain file and its index files. If neither is present, an error is returned.
+// Otherwise an Index will be returned.
+//
+// See: https://git-scm.com/docs/commit-graph
+func OpenChainOrFileIndex(fs billy.Filesystem) (Index, error) {
+ file, err := fs.Open(path.Join("objects", "info", "commit-graph"))
+ if err != nil {
+ // try to open a chain file
+ return OpenChainIndex(fs)
+ }
+
+ index, err := OpenFileIndex(file)
+ if err != nil {
+ // Ignore any file closing errors and return the error from OpenFileIndex instead
+ _ = file.Close()
+ return nil, err
+ }
+ return index, nil
+}
+
+// OpenChainIndex expects a billy.Filesystem representing a .git directory.
+// It will read a commit-graph chain file and return a coalesced index.
+// If the chain file or a graph in that chain is not present, an error is returned.
+//
+// See: https://git-scm.com/docs/commit-graph
+func OpenChainIndex(fs billy.Filesystem) (Index, error) {
+ chainFile, err := fs.Open(path.Join("objects", "info", "commit-graphs", "commit-graph-chain"))
+ if err != nil {
+ return nil, err
+ }
+
+ chain, err := OpenChainFile(chainFile)
+ _ = chainFile.Close()
+ if err != nil {
+ return nil, err
+ }
+
+ var index Index
+ for _, hash := range chain {
+
+ file, err := fs.Open(path.Join("objects", "info", "commit-graphs", "graph-"+hash+".graph"))
+ if err != nil {
+ // Ignore all other file closing errors and return the error from opening the last file in the graph
+ _ = index.Close()
+ return nil, err
+ }
+
+ index, err = OpenFileIndexWithParent(file, index)
+ if err != nil {
+ // Ignore file closing errors and return the error from OpenFileIndex instead
+ _ = index.Close()
+ return nil, err
+ }
+ }
+
+ return index, nil
+}
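
A hedged usage sketch for the chain support above: go-billy's osfs package (already a dependency of this module) can supply the billy.Filesystem that OpenChainOrFileIndex expects. The .git path is hypothetical.

	package main

	import (
		"fmt"
		"log"

		"github.com/go-git/go-billy/v5/osfs"
		v2 "github.com/go-git/go-git/v5/plumbing/format/commitgraph/v2"
	)

	func main() {
		// Point osfs at a .git directory; OpenChainOrFileIndex tries the single
		// objects/info/commit-graph file first and falls back to the chain.
		dotgit := osfs.New("/path/to/repo/.git")

		index, err := v2.OpenChainOrFileIndex(dotgit)
		if err != nil {
			log.Fatal(err)
		}
		defer index.Close()

		fmt.Println("commits in graph:", len(index.Hashes()))
	}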
diff --git a/plumbing/format/commitgraph/v2/chain_test.go b/plumbing/format/commitgraph/v2/chain_test.go
new file mode 100644
index 0000000..32ffd69
--- /dev/null
+++ b/plumbing/format/commitgraph/v2/chain_test.go
@@ -0,0 +1,100 @@
+package v2_test
+
+import (
+ "bytes"
+ "crypto"
+ "strings"
+
+ commitgraph "github.com/go-git/go-git/v5/plumbing/format/commitgraph/v2"
+ "github.com/go-git/go-git/v5/plumbing/hash"
+
+ . "gopkg.in/check.v1"
+)
+
+func (s *CommitgraphSuite) TestOpenChainFile(c *C) {
+ sha1Data := []string{
+ "c336d16298a017486c4164c40f8acb28afe64e84",
+ "31eae7b619d166c366bf5df4991f04ba8cebea0a",
+ "b977a025ca21e3b5ca123d8093bd7917694f6da7",
+ "d2a38b4a5965d529566566640519d03d2bd10f6c",
+ "35b585759cbf29f8ec428ef89da20705d59f99ec",
+ "c2bbf9fe8009b22d0f390f3c8c3f13937067590f",
+ "fc9f0643b21cfe571046e27e0c4565f3a1ee96c8",
+ "c088fd6a7e1a38e9d5a9815265cb575bb08d08ff",
+ "5fddbeb678bd2c36c5e5c891ab8f2b143ced5baf",
+ "5d7303c49ac984a9fec60523f2d5297682e16646",
+ }
+
+ sha256Data := []string{
+ "b9efda7160f2647e0974ca623f8a8f8e25fb6944f1b8f78f4db1bf07932de8eb",
+ "7095c59f8bf46e12c21d2d9da344cfe383fae18d26f3ae4d4ab7b71e3d0ddfae",
+ "25a395cb62f7656294e40a001ee19fefcdf3013d265dfcf4b744cd2549891dec",
+ "7fbd564813a82227507d9dd70f1fd21fc1f180223cd3f42e0c3090c9a8b6a7d0",
+ "aa95db1db2df91bd7200a892dd1c03bc2704c4793400d016b3ca08c148b0f7c1",
+ "2176988184b570565dc33823a02f474ad59f667a0e971c86063a7fea64776a87",
+ "d0afc0e64171140eb7902110f807a1beaa38a603d4312fd4bd14a5db2784ba62",
+ "2822136f60bfc58bbd9d624cc19fbef9f0fc0efe2a61729242e1e5f9b77fa3d0",
+ "6f207b5c43463af96bc38c43b0bf45275fa327e656a8bba8e7fc55c5ab6870d8",
+ "6cf33782619b6ff0af9c081e46323f423f8b49bf3d043887c0549bef47d60f55",
+ "60ea0753d2d4e828983528294be3f57e2a3ba37df4f59e3236133c9e2b17afc5",
+ "6b3c9f4ba5092e0807774097953ec6e9f58e8371d775bd8738a0fa98d728ba3d",
+ "c97cab8564054e30515dbe67dda4e14638aabf17b3f042d18dc8461cd098b362",
+ "9f7ece76fd2c9dae08e75176347efffc1446ad74af66004dd34680edb205dfb5",
+ "23e7a7e481b00571b63c2a7d0432f9733dd85d18a9841a3d7b96743100da5824",
+ "e684b1253fa8eb6572f35bab2fd3b6efecabf8472ede43497cd9c171973cc341",
+ "8b9f04080b0c40f7ad2a6bb5e5296cd6c06e730dffce87a0375ae7bd0f85f86e",
+ "384a745f3b14edc89526a98b96b3247b2b548541c755aadee7664352ed7f12ae",
+ "b68c8a82cd5b839917e1058570a0408819b81d16dbab81db118cc8dfc3def044",
+ "fbaf04f1a401335be57e172f4326102c658d857fde6cf2bc987520d11fc99770",
+ "57acf2aa5ac736337b120c951536c8a2b2cb23a4f0f198e86f3433370fa63105",
+ "dd7fcba4c13b6ced0b6190cdb5861adcd08446a92d67f7ec0f02f9533e09bbb0",
+ "744ef481c9b13ebd3b6e43d7e9ba25f7c7a5c8e453e6f0d50f5d71aae1591689",
+ "2c573142f1edd52b64dcd42a9c3b0ca5c9c615f757d80d25bfb02ff3eb2257e2",
+ "ea65cc58ef8520cd0335de4318a0d3b3a1ac257b7e9f82e12483fa3bce6cc0cd",
+ "1dfa626ff1523b82e21a4c29476edcdc9a89842f3c7181f63a28cd4f46cc9923",
+ "aa1153e71af836121e6f6cc716cf64880c19221d8dc367ff42359de1b8ef30e9",
+ "a7c6ec6f6569e22d2fa6e8281639d27c59b633ea00ad8ef27a43171cc985fbda",
+ "627b706d63d2cfd5a388deeaa76655ef09146fe492ee17cb0043578cef9c2800",
+ "d40eaf091ef8357b734d1047a552436eaf057d99a0c6f2068b097c324099d360",
+ "87f0ef81641da4fd3438dcaae4819f0c92a0ade54e262b21f9ded4575ff3f234",
+ "3a00a29e08d29454b5197662f70ccab5699b0ce8c85af7fbf511b8915d97cfd0",
+ }
+
+ goodShas := sha1Data
+ badShas := sha256Data
+ if hash.CryptoType == crypto.SHA256 {
+ goodShas = sha256Data
+ badShas = sha1Data
+ }
+ chainData := strings.Join(goodShas, "\n") + "\n"
+
+ chainReader := strings.NewReader(chainData)
+
+ chain, err := commitgraph.OpenChainFile(chainReader)
+ c.Assert(err, IsNil)
+ c.Assert(goodShas, DeepEquals, chain)
+
+ // Test with bad shas
+ chainData = strings.Join(badShas, "\n") + "\n"
+
+ chainReader = strings.NewReader(chainData)
+
+ chain, err = commitgraph.OpenChainFile(chainReader)
+ c.Assert(err, Equals, commitgraph.ErrMalformedCommitGraphFile)
+ c.Assert(chain, IsNil)
+
+ // Test with empty file
+ emptyChainReader := bytes.NewReader(nil)
+
+ chain, err = commitgraph.OpenChainFile(emptyChainReader)
+ c.Assert(err, IsNil)
+ c.Assert(chain, DeepEquals, []string{})
+
+ // Test with file containing only newlines
+ newlineChainData := []byte("\n\n\n")
+ newlineChainReader := bytes.NewReader(newlineChainData)
+
+ chain, err = commitgraph.OpenChainFile(newlineChainReader)
+ c.Assert(err, Equals, commitgraph.ErrMalformedCommitGraphFile)
+ c.Assert(chain, IsNil)
+}
diff --git a/plumbing/format/commitgraph/v2/chunk.go b/plumbing/format/commitgraph/v2/chunk.go
new file mode 100644
index 0000000..11f4d31
--- /dev/null
+++ b/plumbing/format/commitgraph/v2/chunk.go
@@ -0,0 +1,49 @@
+package v2
+
+import "bytes"
+
+const (
+ szChunkSig = 4 // Length of a chunk signature
+ chunkSigOffset = 4 // Offset of each chunk signature in chunkSignatures
+)
+
+// chunkSignatures contains the coalesced byte signatures for each chunk type.
+// The order of the signatures must match the order of the ChunkType constants.
+// (When adding new chunk types you must avoid introducing ambiguity, and you may need to add padding separators to this list or reorder these signatures.)
+// (i.e. it would not be possible to add a new chunk type with the signature "IDFO" without some reordering or the addition of separators.)
+var chunkSignatures = []byte("OIDFOIDLCDATGDA2GDO2EDGEBIDXBDATBASE\000\000\000\000")
+
+// ChunkType represents the type of a chunk in the commit graph file.
+type ChunkType int
+
+const (
+ OIDFanoutChunk ChunkType = iota // "OIDF"
+ OIDLookupChunk // "OIDL"
+ CommitDataChunk // "CDAT"
+ GenerationDataChunk // "GDA2"
+ GenerationDataOverflowChunk // "GDO2"
+ ExtraEdgeListChunk // "EDGE"
+ BloomFilterIndexChunk // "BIDX"
+ BloomFilterDataChunk // "BDAT"
+ BaseGraphsListChunk // "BASE"
+ ZeroChunk // "\000\000\000\000"
+)
+const lenChunks = int(ZeroChunk) // ZeroChunk is not a valid chunk type, but it is used to determine the length of the chunk type list.
+
+// Signature returns the byte signature for the chunk type.
+func (ct ChunkType) Signature() []byte {
+ if ct >= BaseGraphsListChunk || ct < 0 { // not a valid chunk type just return ZeroChunk
+ return chunkSignatures[ZeroChunk*chunkSigOffset : ZeroChunk*chunkSigOffset+szChunkSig]
+ }
+
+ return chunkSignatures[ct*chunkSigOffset : ct*chunkSigOffset+szChunkSig]
+}
+
+// ChunkTypeFromBytes returns the chunk type for the given byte signature.
+func ChunkTypeFromBytes(b []byte) (ChunkType, bool) {
+ idx := bytes.Index(chunkSignatures, b)
+ if idx == -1 || idx%chunkSigOffset != 0 { // not found, or not aligned at chunkSigOffset
+ return -1, false
+ }
+ return ChunkType(idx / chunkSigOffset), true
+}
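
A small, self-contained sketch of the signature round trip that the coalesced table makes possible; it uses only the two exported helpers defined above.

	package main

	import (
		"fmt"

		v2 "github.com/go-git/go-git/v5/plumbing/format/commitgraph/v2"
	)

	func main() {
		// Chunk type -> 4-byte signature ("CDAT"), read straight out of the table.
		sig := v2.CommitDataChunk.Signature()

		// Signature -> chunk type, via an aligned lookup in the same table.
		ct, ok := v2.ChunkTypeFromBytes(sig)

		fmt.Println(string(sig), ok, ct == v2.CommitDataChunk) // CDAT true true
	}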
diff --git a/plumbing/format/commitgraph/v2/commitgraph.go b/plumbing/format/commitgraph/v2/commitgraph.go
new file mode 100644
index 0000000..9c89cd9
--- /dev/null
+++ b/plumbing/format/commitgraph/v2/commitgraph.go
@@ -0,0 +1,57 @@
+package v2
+
+import (
+ "io"
+ "math"
+ "time"
+
+ "github.com/go-git/go-git/v5/plumbing"
+)
+
+// CommitData is a reduced representation of Commit as presented in the commit graph
+// file. It is merely useful as an optimization for walking the commit graphs.
+type CommitData struct {
+ // TreeHash is the hash of the root tree of the commit.
+ TreeHash plumbing.Hash
+ // ParentIndexes are the indexes of the parent commits of the commit.
+ ParentIndexes []uint32
+ // ParentHashes are the hashes of the parent commits of the commit.
+ ParentHashes []plumbing.Hash
+ // Generation number is the pre-computed generation in the commit graph
+ // or zero if not available.
+ Generation uint64
+ // GenerationV2 stores the corrected commit date for the commits
+ // It combines the contents of the GDA2 and GDO2 sections of the commit-graph
+ // with the commit time portion of the CDAT section.
+ GenerationV2 uint64
+ // When is the timestamp of the commit.
+ When time.Time
+}
+
+// GenerationV2Data returns the corrected commit date offset for the commit,
+// i.e. GenerationV2 (the corrected commit date) minus the commit time.
+func (c *CommitData) GenerationV2Data() uint64 {
+ if c.GenerationV2 == 0 || c.GenerationV2 == math.MaxUint64 {
+ return 0
+ }
+ return c.GenerationV2 - uint64(c.When.Unix())
+}
+
+// Index represents a representation of commit graph that allows indexed
+// access to the nodes using commit object hash
+type Index interface {
+ // GetIndexByHash gets the index in the commit graph from commit hash, if available
+ GetIndexByHash(h plumbing.Hash) (uint32, error)
+ // GetHashByIndex gets the hash given an index in the commit graph
+ GetHashByIndex(i uint32) (plumbing.Hash, error)
+ // GetCommitDataByIndex gets the commit data from the commit graph using the
+ // index obtained from a child node, if available
+ GetCommitDataByIndex(i uint32) (*CommitData, error)
+ // Hashes returns all the hashes that are available in the index
+ Hashes() []plumbing.Hash
+ // HasGenerationV2 returns true if the commit graph has the corrected commit date data
+ HasGenerationV2() bool
+ // MaximumNumberOfHashes returns the maximum number of hashes within the index
+ MaximumNumberOfHashes() uint32
+
+ io.Closer
+}
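
To make the GenerationV2 arithmetic concrete, a tiny sketch with a made-up commit time: GenerationV2 holds the corrected commit date, so GenerationV2Data recovers the offset relative to the commit time.

	package main

	import (
		"fmt"
		"time"

		v2 "github.com/go-git/go-git/v5/plumbing/format/commitgraph/v2"
	)

	func main() {
		when := time.Unix(1700000000, 0) // hypothetical commit time

		cd := v2.CommitData{
			When:         when,
			GenerationV2: uint64(when.Unix()) + 42, // corrected commit date: 42s later
		}

		// The offset form is what the encoder stores in the generation data chunk.
		fmt.Println(cd.GenerationV2Data()) // 42
	}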
diff --git a/plumbing/format/commitgraph/v2/commitgraph_test.go b/plumbing/format/commitgraph/v2/commitgraph_test.go
new file mode 100644
index 0000000..1278405
--- /dev/null
+++ b/plumbing/format/commitgraph/v2/commitgraph_test.go
@@ -0,0 +1,200 @@
+package v2_test
+
+import (
+ "os"
+ "testing"
+
+ "github.com/go-git/go-billy/v5"
+ "github.com/go-git/go-billy/v5/util"
+ "github.com/go-git/go-git/v5/plumbing"
+ "github.com/go-git/go-git/v5/plumbing/cache"
+ commitgraph "github.com/go-git/go-git/v5/plumbing/format/commitgraph/v2"
+ "github.com/go-git/go-git/v5/plumbing/format/packfile"
+ "github.com/go-git/go-git/v5/plumbing/object"
+ "github.com/go-git/go-git/v5/storage/filesystem"
+
+ fixtures "github.com/go-git/go-git-fixtures/v4"
+ . "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type CommitgraphSuite struct {
+ fixtures.Suite
+}
+
+var _ = Suite(&CommitgraphSuite{})
+
+func testReadIndex(c *C, fs billy.Filesystem, path string) commitgraph.Index {
+ reader, err := fs.Open(path)
+ c.Assert(err, IsNil)
+ index, err := commitgraph.OpenFileIndex(reader)
+ c.Assert(err, IsNil)
+ c.Assert(index, NotNil)
+ return index
+}
+
+func testDecodeHelper(c *C, index commitgraph.Index) {
+ // Root commit
+ nodeIndex, err := index.GetIndexByHash(plumbing.NewHash("347c91919944a68e9413581a1bc15519550a3afe"))
+ c.Assert(err, IsNil)
+ commitData, err := index.GetCommitDataByIndex(nodeIndex)
+ c.Assert(err, IsNil)
+ c.Assert(len(commitData.ParentIndexes), Equals, 0)
+ c.Assert(len(commitData.ParentHashes), Equals, 0)
+
+ // Regular commit
+ nodeIndex, err = index.GetIndexByHash(plumbing.NewHash("e713b52d7e13807e87a002e812041f248db3f643"))
+ c.Assert(err, IsNil)
+ commitData, err = index.GetCommitDataByIndex(nodeIndex)
+ c.Assert(err, IsNil)
+ c.Assert(len(commitData.ParentIndexes), Equals, 1)
+ c.Assert(len(commitData.ParentHashes), Equals, 1)
+ c.Assert(commitData.ParentHashes[0].String(), Equals, "347c91919944a68e9413581a1bc15519550a3afe")
+
+ // Merge commit
+ nodeIndex, err = index.GetIndexByHash(plumbing.NewHash("b29328491a0682c259bcce28741eac71f3499f7d"))
+ c.Assert(err, IsNil)
+ commitData, err = index.GetCommitDataByIndex(nodeIndex)
+ c.Assert(err, IsNil)
+ c.Assert(len(commitData.ParentIndexes), Equals, 2)
+ c.Assert(len(commitData.ParentHashes), Equals, 2)
+ c.Assert(commitData.ParentHashes[0].String(), Equals, "e713b52d7e13807e87a002e812041f248db3f643")
+ c.Assert(commitData.ParentHashes[1].String(), Equals, "03d2c021ff68954cf3ef0a36825e194a4b98f981")
+
+ // Octopus merge commit
+ nodeIndex, err = index.GetIndexByHash(plumbing.NewHash("6f6c5d2be7852c782be1dd13e36496dd7ad39560"))
+ c.Assert(err, IsNil)
+ commitData, err = index.GetCommitDataByIndex(nodeIndex)
+ c.Assert(err, IsNil)
+ c.Assert(len(commitData.ParentIndexes), Equals, 3)
+ c.Assert(len(commitData.ParentHashes), Equals, 3)
+ c.Assert(commitData.ParentHashes[0].String(), Equals, "ce275064ad67d51e99f026084e20827901a8361c")
+ c.Assert(commitData.ParentHashes[1].String(), Equals, "bb13916df33ed23004c3ce9ed3b8487528e655c1")
+ c.Assert(commitData.ParentHashes[2].String(), Equals, "a45273fe2d63300e1962a9e26a6b15c276cd7082")
+
+ // Check all hashes
+ hashes := index.Hashes()
+ c.Assert(len(hashes), Equals, 11)
+ c.Assert(hashes[0].String(), Equals, "03d2c021ff68954cf3ef0a36825e194a4b98f981")
+ c.Assert(hashes[10].String(), Equals, "e713b52d7e13807e87a002e812041f248db3f643")
+}
+
+func (s *CommitgraphSuite) TestDecodeMultiChain(c *C) {
+ fixtures.ByTag("commit-graph-chain-2").Test(c, func(f *fixtures.Fixture) {
+ dotgit := f.DotGit()
+ index, err := commitgraph.OpenChainOrFileIndex(dotgit)
+ c.Assert(err, IsNil)
+ defer index.Close()
+ storer := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault())
+ p := f.Packfile()
+ defer p.Close()
+ packfile.UpdateObjectStorage(storer, p)
+
+ for idx, hash := range index.Hashes() {
+ idx2, err := index.GetIndexByHash(hash)
+ c.Assert(err, IsNil)
+ c.Assert(idx2, Equals, uint32(idx))
+ hash2, err := index.GetHashByIndex(idx2)
+ c.Assert(err, IsNil)
+ c.Assert(hash2.String(), Equals, hash.String())
+
+ commitData, err := index.GetCommitDataByIndex(uint32(idx))
+ c.Assert(err, IsNil)
+ commit, err := object.GetCommit(storer, hash)
+ c.Assert(err, IsNil)
+
+ for i, parent := range commit.ParentHashes {
+ c.Assert(hash.String()+":"+parent.String(), Equals, hash.String()+":"+commitData.ParentHashes[i].String())
+ }
+ }
+ })
+}
+
+func (s *CommitgraphSuite) TestDecode(c *C) {
+ fixtures.ByTag("commit-graph").Test(c, func(f *fixtures.Fixture) {
+ dotgit := f.DotGit()
+ index := testReadIndex(c, dotgit, dotgit.Join("objects", "info", "commit-graph"))
+ defer index.Close()
+ testDecodeHelper(c, index)
+ })
+}
+
+func (s *CommitgraphSuite) TestDecodeChain(c *C) {
+ fixtures.ByTag("commit-graph").Test(c, func(f *fixtures.Fixture) {
+ dotgit := f.DotGit()
+ index, err := commitgraph.OpenChainOrFileIndex(dotgit)
+ c.Assert(err, IsNil)
+ defer index.Close()
+ testDecodeHelper(c, index)
+ })
+
+ fixtures.ByTag("commit-graph-chain").Test(c, func(f *fixtures.Fixture) {
+ dotgit := f.DotGit()
+ index, err := commitgraph.OpenChainOrFileIndex(dotgit)
+ c.Assert(err, IsNil)
+ defer index.Close()
+ testDecodeHelper(c, index)
+ })
+}
+
+func (s *CommitgraphSuite) TestReencode(c *C) {
+ fixtures.ByTag("commit-graph").Test(c, func(f *fixtures.Fixture) {
+ dotgit := f.DotGit()
+
+ reader, err := dotgit.Open(dotgit.Join("objects", "info", "commit-graph"))
+ c.Assert(err, IsNil)
+ defer reader.Close()
+ index, err := commitgraph.OpenFileIndex(reader)
+ c.Assert(err, IsNil)
+ defer index.Close()
+
+ writer, err := util.TempFile(dotgit, "", "commit-graph")
+ c.Assert(err, IsNil)
+ tmpName := writer.Name()
+ defer os.Remove(tmpName)
+
+ encoder := commitgraph.NewEncoder(writer)
+ err = encoder.Encode(index)
+ c.Assert(err, IsNil)
+ writer.Close()
+
+ tmpIndex := testReadIndex(c, dotgit, tmpName)
+ defer tmpIndex.Close()
+ testDecodeHelper(c, tmpIndex)
+ })
+}
+
+func (s *CommitgraphSuite) TestReencodeInMemory(c *C) {
+ fixtures.ByTag("commit-graph").Test(c, func(f *fixtures.Fixture) {
+ dotgit := f.DotGit()
+
+ reader, err := dotgit.Open(dotgit.Join("objects", "info", "commit-graph"))
+ c.Assert(err, IsNil)
+ index, err := commitgraph.OpenFileIndex(reader)
+ c.Assert(err, IsNil)
+
+ memoryIndex := commitgraph.NewMemoryIndex()
+ defer memoryIndex.Close()
+ for i, hash := range index.Hashes() {
+ commitData, err := index.GetCommitDataByIndex(uint32(i))
+ c.Assert(err, IsNil)
+ memoryIndex.Add(hash, commitData)
+ }
+ index.Close()
+
+ writer, err := util.TempFile(dotgit, "", "commit-graph")
+ c.Assert(err, IsNil)
+ tmpName := writer.Name()
+ defer os.Remove(tmpName)
+
+ encoder := commitgraph.NewEncoder(writer)
+ err = encoder.Encode(memoryIndex)
+ c.Assert(err, IsNil)
+ writer.Close()
+
+ tmpIndex := testReadIndex(c, dotgit, tmpName)
+ defer tmpIndex.Close()
+ testDecodeHelper(c, tmpIndex)
+ })
+}
diff --git a/plumbing/format/commitgraph/v2/doc.go b/plumbing/format/commitgraph/v2/doc.go
new file mode 100644
index 0000000..157621d
--- /dev/null
+++ b/plumbing/format/commitgraph/v2/doc.go
@@ -0,0 +1,106 @@
+// Package v2 implements encoding and decoding of commit-graph files.
+//
+// This package was created to work around the issues of the incorrect types in
+// the commitgraph package.
+//
+// Git commit graph format
+// =======================
+//
+// The Git commit graph stores a list of commit OIDs and some associated
+// metadata, including:
+//
+// - The generation number of the commit. Commits with no parents have
+// generation number 1; commits with parents have generation number
+// one more than the maximum generation number of its parents. We
+// reserve zero as special, and can be used to mark a generation
+// number invalid or as "not computed".
+//
+// - The root tree OID.
+//
+// - The commit date.
+//
+// - The parents of the commit, stored using positional references within
+// the graph file.
+//
+// These positional references are stored as unsigned 32-bit integers
+// corresponding to the array position within the list of commit OIDs. Due
+// to some special constants we use to track parents, we can store at most
+// (1 << 30) + (1 << 29) + (1 << 28) - 1 (around 1.8 billion) commits.
+//
+// == Commit graph files have the following format:
+//
+// In order to allow extensions that add extra data to the graph, we organize
+// the body into "chunks" and provide a binary lookup table at the beginning
+// of the body. The header includes certain values, such as number of chunks
+// and hash type.
+//
+// All 4-byte numbers are in network order.
+//
+// HEADER:
+//
+// 4-byte signature:
+// The signature is: {'C', 'G', 'P', 'H'}
+//
+// 1-byte version number:
+// Currently, the only valid version is 1.
+//
+// 1-byte Hash Version (1 = SHA-1)
+// We infer the hash length (H) from this value.
+//
+// 1-byte number (C) of "chunks"
+//
+// 1-byte (reserved for later use)
+// Current clients should ignore this value.
+//
+// CHUNK LOOKUP:
+//
+// (C + 1) * 12 bytes listing the table of contents for the chunks:
+// First 4 bytes describe the chunk id. Value 0 is a terminating label.
+// Other 8 bytes provide the byte-offset in current file for chunk to
+// start. (Chunks are ordered contiguously in the file, so you can infer
+// the length using the next chunk position if necessary.) Each chunk
+// ID appears at most once.
+//
+// The remaining data in the body is described one chunk at a time, and
+// these chunks may be given in any order. Chunks are required unless
+// otherwise specified.
+//
+// CHUNK DATA:
+//
+// OID Fanout (ID: {'O', 'I', 'D', 'F'}) (256 * 4 bytes)
+// The ith entry, F[i], stores the number of OIDs with first
+// byte at most i. Thus F[255] stores the total
+// number of commits (N).
+//
+// OID Lookup (ID: {'O', 'I', 'D', 'L'}) (N * H bytes)
+// The OIDs for all commits in the graph, sorted in ascending order.
+//
+// Commit Data (ID: {'C', 'D', 'A', 'T' }) (N * (H + 16) bytes)
+// * The first H bytes are for the OID of the root tree.
+// * The next 8 bytes are for the positions of the first two parents
+// of the ith commit. Stores value 0x7000000 if no parent in that
+// position. If there are more than two parents, the second value
+// has its most-significant bit on and the other bits store an array
+// position into the Extra Edge List chunk.
+// * The next 8 bytes store the generation number of the commit and
+// the commit time in seconds since EPOCH. The generation number
+// uses the higher 30 bits of the first 4 bytes, while the commit
+// time uses the 32 bits of the second 4 bytes, along with the lowest
+// 2 bits of the lowest byte, storing the 33rd and 34th bit of the
+// commit time.
+//
+// Extra Edge List (ID: {'E', 'D', 'G', 'E'}) [Optional]
+// This list of 4-byte values store the second through nth parents for
+// all octopus merges. The second parent value in the commit data stores
+// an array position within this list along with the most-significant bit
+// on. Starting at that array position, iterate through this list of commit
+// positions for the parents until reaching a value with the most-significant
+// bit on. The other bits correspond to the position of the last parent.
+//
+// TRAILER:
+//
+// H-byte HASH-checksum of all of the above.
+//
+// Source:
+// https://raw.githubusercontent.com/git/git/master/Documentation/technical/commit-graph-format.txt
+package v2
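
The fixed-size header described above is simple enough to decode by hand; a standalone sketch (using only the standard library, independent of this package's own readers) that validates the signature and pulls out the version, hash version, and chunk count:

	package main

	import (
		"bytes"
		"fmt"
		"io"
		"log"
		"strings"
	)

	// readHeader decodes the 8-byte header: "CGPH", 1-byte version, 1-byte hash
	// version, 1-byte chunk count (C), and one reserved byte.
	func readHeader(r io.ReaderAt) (version, hashVersion, chunkCount byte, err error) {
		header := make([]byte, 8)
		if _, err = io.ReadFull(io.NewSectionReader(r, 0, 8), header); err != nil {
			return 0, 0, 0, err
		}
		if !bytes.Equal(header[:4], []byte("CGPH")) {
			return 0, 0, 0, fmt.Errorf("not a commit-graph file")
		}
		return header[4], header[5], header[6], nil
	}

	func main() {
		// Fabricated header bytes: version 1, SHA-1 (hash version 1), 3 chunks.
		v, h, c, err := readHeader(strings.NewReader("CGPH\x01\x01\x03\x00"))
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(v, h, c) // 1 1 3
	}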
diff --git a/plumbing/format/commitgraph/v2/encoder.go b/plumbing/format/commitgraph/v2/encoder.go
new file mode 100644
index 0000000..b79bc77
--- /dev/null
+++ b/plumbing/format/commitgraph/v2/encoder.go
@@ -0,0 +1,250 @@
+package v2
+
+import (
+ "crypto"
+ "io"
+ "math"
+
+ "github.com/go-git/go-git/v5/plumbing"
+ "github.com/go-git/go-git/v5/plumbing/hash"
+ "github.com/go-git/go-git/v5/utils/binary"
+)
+
+// Encoder writes MemoryIndex structs to an output stream.
+type Encoder struct {
+ io.Writer
+ hash hash.Hash
+}
+
+// NewEncoder returns a new stream encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+ h := hash.New(hash.CryptoType)
+ mw := io.MultiWriter(w, h)
+ return &Encoder{mw, h}
+}
+
+// Encode writes an index into the commit-graph file
+func (e *Encoder) Encode(idx Index) error {
+ // Get all the hashes in the input index
+ hashes := idx.Hashes()
+
+ // Sort the input and prepare helper structures we'll need for encoding
+ hashToIndex, fanout, extraEdgesCount, generationV2OverflowCount := e.prepare(idx, hashes)
+
+ chunkSignatures := [][]byte{OIDFanoutChunk.Signature(), OIDLookupChunk.Signature(), CommitDataChunk.Signature()}
+ chunkSizes := []uint64{szUint32 * lenFanout, uint64(len(hashes)) * hash.Size, uint64(len(hashes)) * (hash.Size + szCommitData)}
+ if extraEdgesCount > 0 {
+ chunkSignatures = append(chunkSignatures, ExtraEdgeListChunk.Signature())
+ chunkSizes = append(chunkSizes, uint64(extraEdgesCount)*szUint32)
+ }
+ if idx.HasGenerationV2() {
+ chunkSignatures = append(chunkSignatures, GenerationDataChunk.Signature())
+ chunkSizes = append(chunkSizes, uint64(len(hashes))*szUint32)
+ if generationV2OverflowCount > 0 {
+ chunkSignatures = append(chunkSignatures, GenerationDataOverflowChunk.Signature())
+ chunkSizes = append(chunkSizes, uint64(generationV2OverflowCount)*szUint64)
+ }
+ }
+
+ if err := e.encodeFileHeader(len(chunkSignatures)); err != nil {
+ return err
+ }
+ if err := e.encodeChunkHeaders(chunkSignatures, chunkSizes); err != nil {
+ return err
+ }
+ if err := e.encodeFanout(fanout); err != nil {
+ return err
+ }
+ if err := e.encodeOidLookup(hashes); err != nil {
+ return err
+ }
+
+ extraEdges, generationV2Data, err := e.encodeCommitData(hashes, hashToIndex, idx)
+ if err != nil {
+ return err
+ }
+ if err = e.encodeExtraEdges(extraEdges); err != nil {
+ return err
+ }
+ if idx.HasGenerationV2() {
+ overflows, err := e.encodeGenerationV2Data(generationV2Data)
+ if err != nil {
+ return err
+ }
+ if err = e.encodeGenerationV2Overflow(overflows); err != nil {
+ return err
+ }
+ }
+
+ return e.encodeChecksum()
+}
+
+func (e *Encoder) prepare(idx Index, hashes []plumbing.Hash) (hashToIndex map[plumbing.Hash]uint32, fanout []uint32, extraEdgesCount uint32, generationV2OverflowCount uint32) {
+ // Sort the hashes and build our index
+ plumbing.HashesSort(hashes)
+ hashToIndex = make(map[plumbing.Hash]uint32)
+ fanout = make([]uint32, lenFanout)
+ for i, hash := range hashes {
+ hashToIndex[hash] = uint32(i)
+ fanout[hash[0]]++
+ }
+
+ // Convert the fanout to cumulative values
+ for i := 1; i < lenFanout; i++ {
+ fanout[i] += fanout[i-1]
+ }
+
+ hasGenerationV2 := idx.HasGenerationV2()
+
+ // Find out if we will need extra edge table
+ for i := 0; i < len(hashes); i++ {
+ v, _ := idx.GetCommitDataByIndex(uint32(i))
+ if len(v.ParentHashes) > 2 {
+ extraEdgesCount += uint32(len(v.ParentHashes) - 1)
+ }
+ if hasGenerationV2 && v.GenerationV2Data() > math.MaxUint32 {
+ generationV2OverflowCount++
+ }
+ }
+
+ return
+}
+
+func (e *Encoder) encodeFileHeader(chunkCount int) (err error) {
+ if _, err = e.Write(commitFileSignature); err == nil {
+ version := byte(1)
+ if hash.CryptoType == crypto.SHA256 {
+ version = byte(2)
+ }
+ _, err = e.Write([]byte{1, version, byte(chunkCount), 0})
+ }
+ return
+}
+
+func (e *Encoder) encodeChunkHeaders(chunkSignatures [][]byte, chunkSizes []uint64) (err error) {
+ // 8 bytes of file header, 12 bytes for each chunk header and 12 bytes for the terminator
+ offset := uint64(szSignature + szHeader + (len(chunkSignatures)+1)*(szChunkSig+szUint64))
+ for i, signature := range chunkSignatures {
+ if _, err = e.Write(signature); err == nil {
+ err = binary.WriteUint64(e, offset)
+ }
+ if err != nil {
+ return
+ }
+ offset += chunkSizes[i]
+ }
+ if _, err = e.Write(ZeroChunk.Signature()); err == nil {
+ err = binary.WriteUint64(e, offset)
+ }
+ return
+}
+
+func (e *Encoder) encodeFanout(fanout []uint32) (err error) {
+ for i := 0; i <= 0xff; i++ {
+ if err = binary.WriteUint32(e, fanout[i]); err != nil {
+ return
+ }
+ }
+ return
+}
+
+func (e *Encoder) encodeOidLookup(hashes []plumbing.Hash) (err error) {
+ for _, hash := range hashes {
+ if _, err = e.Write(hash[:]); err != nil {
+ return err
+ }
+ }
+ return
+}
+
+func (e *Encoder) encodeCommitData(hashes []plumbing.Hash, hashToIndex map[plumbing.Hash]uint32, idx Index) (extraEdges []uint32, generationV2Data []uint64, err error) {
+ if idx.HasGenerationV2() {
+ generationV2Data = make([]uint64, 0, len(hashes))
+ }
+ for _, hash := range hashes {
+ origIndex, _ := idx.GetIndexByHash(hash)
+ commitData, _ := idx.GetCommitDataByIndex(origIndex)
+ if _, err = e.Write(commitData.TreeHash[:]); err != nil {
+ return
+ }
+
+ var parent1, parent2 uint32
+ if len(commitData.ParentHashes) == 0 {
+ parent1 = parentNone
+ parent2 = parentNone
+ } else if len(commitData.ParentHashes) == 1 {
+ parent1 = hashToIndex[commitData.ParentHashes[0]]
+ parent2 = parentNone
+ } else if len(commitData.ParentHashes) == 2 {
+ parent1 = hashToIndex[commitData.ParentHashes[0]]
+ parent2 = hashToIndex[commitData.ParentHashes[1]]
+ } else if len(commitData.ParentHashes) > 2 {
+ parent1 = hashToIndex[commitData.ParentHashes[0]]
+ parent2 = uint32(len(extraEdges)) | parentOctopusUsed
+ for _, parentHash := range commitData.ParentHashes[1:] {
+ extraEdges = append(extraEdges, hashToIndex[parentHash])
+ }
+ extraEdges[len(extraEdges)-1] |= parentLast
+ }
+
+ if err = binary.WriteUint32(e, parent1); err == nil {
+ err = binary.WriteUint32(e, parent2)
+ }
+ if err != nil {
+ return
+ }
+
+ unixTime := uint64(commitData.When.Unix())
+ unixTime |= uint64(commitData.Generation) << 34
+ if err = binary.WriteUint64(e, unixTime); err != nil {
+ return
+ }
+ if generationV2Data != nil {
+ generationV2Data = append(generationV2Data, commitData.GenerationV2Data())
+ }
+ }
+ return
+}
+
+func (e *Encoder) encodeExtraEdges(extraEdges []uint32) (err error) {
+ for _, parent := range extraEdges {
+ if err = binary.WriteUint32(e, parent); err != nil {
+ return
+ }
+ }
+ return
+}
+
+func (e *Encoder) encodeGenerationV2Data(generationV2Data []uint64) (overflows []uint64, err error) {
+ head := 0
+ for _, data := range generationV2Data {
+ if data >= 0x80000000 {
+ // overflow
+ if err = binary.WriteUint32(e, uint32(head)|0x80000000); err != nil {
+ return nil, err
+ }
+ generationV2Data[head] = data
+ head++
+ continue
+ }
+ if err = binary.WriteUint32(e, uint32(data)); err != nil {
+ return nil, err
+ }
+ }
+
+ return generationV2Data[:head], nil
+}
+
+func (e *Encoder) encodeGenerationV2Overflow(overflows []uint64) (err error) {
+ for _, overflow := range overflows {
+ if err = binary.WriteUint64(e, overflow); err != nil {
+ return
+ }
+ }
+ return
+}
+
+func (e *Encoder) encodeChecksum() error {
+ _, err := e.Write(e.hash.Sum(nil)[:hash.Size])
+ return err
+}
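A hedged sketch of how this encoder might be driven end to end, using the MemoryIndex added later in this change (memory.go). The hashes are placeholders, error handling is abbreviated, and the CommitData fields are the ones this change defines.

package main

import (
	"os"
	"time"

	"github.com/go-git/go-git/v5/plumbing"
	commitgraph "github.com/go-git/go-git/v5/plumbing/format/commitgraph/v2"
)

func main() {
	idx := commitgraph.NewMemoryIndex()

	// Placeholder root commit with no parents.
	idx.Add(plumbing.NewHash("0000000000000000000000000000000000000001"), &commitgraph.CommitData{
		TreeHash:   plumbing.NewHash("0000000000000000000000000000000000000002"),
		Generation: 1,
		When:       time.Unix(1700000000, 0),
	})

	f, err := os.Create("commit-graph")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	if err := commitgraph.NewEncoder(f).Encode(idx); err != nil {
		panic(err)
	}
}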
diff --git a/plumbing/format/commitgraph/v2/file.go b/plumbing/format/commitgraph/v2/file.go
new file mode 100644
index 0000000..c5f61e4
--- /dev/null
+++ b/plumbing/format/commitgraph/v2/file.go
@@ -0,0 +1,412 @@
+package v2
+
+import (
+ "bytes"
+ "crypto"
+ encbin "encoding/binary"
+ "errors"
+ "io"
+ "time"
+
+ "github.com/go-git/go-git/v5/plumbing"
+ "github.com/go-git/go-git/v5/plumbing/hash"
+ "github.com/go-git/go-git/v5/utils/binary"
+)
+
+var (
+ // ErrUnsupportedVersion is returned by OpenFileIndex when the commit graph
+ // file version is not supported.
+ ErrUnsupportedVersion = errors.New("unsupported version")
+ // ErrUnsupportedHash is returned by OpenFileIndex when the commit graph
+ // hash function is not supported. Currently only SHA-1 and SHA-256 are
+ // defined and supported.
+ ErrUnsupportedHash = errors.New("unsupported hash algorithm")
+ // ErrMalformedCommitGraphFile is returned by OpenFileIndex when the commit
+ // graph file is corrupted.
+ ErrMalformedCommitGraphFile = errors.New("malformed commit graph file")
+
+ commitFileSignature = []byte{'C', 'G', 'P', 'H'}
+
+ parentNone = uint32(0x70000000)
+ parentOctopusUsed = uint32(0x80000000)
+ parentOctopusMask = uint32(0x7fffffff)
+ parentLast = uint32(0x80000000)
+)
+
+const (
+ szUint32 = 4
+ szUint64 = 8
+
+ szSignature = 4
+ szHeader = 4
+ szCommitData = 2*szUint32 + szUint64
+
+ lenFanout = 256
+)
+
+type fileIndex struct {
+ reader ReaderAtCloser
+ fanout [lenFanout]uint32
+ offsets [lenChunks]int64
+ parent Index
+ hasGenerationV2 bool
+ minimumNumberOfHashes uint32
+}
+
+// ReaderAtCloser is an interface that combines io.ReaderAt and io.Closer.
+type ReaderAtCloser interface {
+ io.ReaderAt
+ io.Closer
+}
+
+// OpenFileIndex opens a serialized commit graph file in the format described at
+// https://github.com/git/git/blob/master/Documentation/technical/commit-graph-format.txt
+func OpenFileIndex(reader ReaderAtCloser) (Index, error) {
+ return OpenFileIndexWithParent(reader, nil)
+}
+
+// OpenFileIndexWithParent opens a serialized commit graph file in the format described at
+// https://github.com/git/git/blob/master/Documentation/technical/commit-graph-format.txt
+func OpenFileIndexWithParent(reader ReaderAtCloser, parent Index) (Index, error) {
+ if reader == nil {
+ return nil, io.ErrUnexpectedEOF
+ }
+ fi := &fileIndex{reader: reader, parent: parent}
+
+ if err := fi.verifyFileHeader(); err != nil {
+ return nil, err
+ }
+ if err := fi.readChunkHeaders(); err != nil {
+ return nil, err
+ }
+ if err := fi.readFanout(); err != nil {
+ return nil, err
+ }
+
+ fi.hasGenerationV2 = fi.offsets[GenerationDataChunk] > 0
+ if fi.parent != nil {
+ fi.hasGenerationV2 = fi.hasGenerationV2 && fi.parent.HasGenerationV2()
+ }
+
+ if fi.parent != nil {
+ fi.minimumNumberOfHashes = fi.parent.MaximumNumberOfHashes()
+ }
+
+ return fi, nil
+}
+
+// Close closes the underlying reader and the parent index if it exists.
+func (fi *fileIndex) Close() (err error) {
+ if fi.parent != nil {
+ defer func() {
+ parentErr := fi.parent.Close()
+ // only report the error from the parent if there is no error from the reader
+ if err == nil {
+ err = parentErr
+ }
+ }()
+ }
+ err = fi.reader.Close()
+ return
+}
+
+func (fi *fileIndex) verifyFileHeader() error {
+ // Verify file signature
+ signature := make([]byte, szSignature)
+ if _, err := fi.reader.ReadAt(signature, 0); err != nil {
+ return err
+ }
+ if !bytes.Equal(signature, commitFileSignature) {
+ return ErrMalformedCommitGraphFile
+ }
+
+ // Read and verify the file header
+ header := make([]byte, szHeader)
+ if _, err := fi.reader.ReadAt(header, szHeader); err != nil {
+ return err
+ }
+ if header[0] != 1 {
+ return ErrUnsupportedVersion
+ }
+ if !(hash.CryptoType == crypto.SHA1 && header[1] == 1) &&
+ !(hash.CryptoType == crypto.SHA256 && header[1] == 2) {
+ // Unknown hash type / unsupported hash type
+ return ErrUnsupportedHash
+ }
+
+ return nil
+}
+
+func (fi *fileIndex) readChunkHeaders() error {
+ // The chunk table is a list of 4-byte chunk signatures and uint64 offsets into the file
+ chunkID := make([]byte, szChunkSig)
+ for i := 0; ; i++ {
+ chunkHeader := io.NewSectionReader(fi.reader, szSignature+szHeader+(int64(i)*(szChunkSig+szUint64)), szChunkSig+szUint64)
+ if _, err := io.ReadAtLeast(chunkHeader, chunkID, szChunkSig); err != nil {
+ return err
+ }
+ chunkOffset, err := binary.ReadUint64(chunkHeader)
+ if err != nil {
+ return err
+ }
+
+ chunkType, ok := ChunkTypeFromBytes(chunkID)
+ if !ok {
+ continue
+ }
+ if chunkType == ZeroChunk || int(chunkType) >= len(fi.offsets) {
+ break
+ }
+ fi.offsets[chunkType] = int64(chunkOffset)
+ }
+
+ if fi.offsets[OIDFanoutChunk] <= 0 || fi.offsets[OIDLookupChunk] <= 0 || fi.offsets[CommitDataChunk] <= 0 {
+ return ErrMalformedCommitGraphFile
+ }
+
+ return nil
+}
+
+func (fi *fileIndex) readFanout() error {
+ // The Fanout table is a 256 entry table of the number (as uint32) of OIDs with first byte at most i.
+ // Thus F[255] stores the total number of commits (N)
+ fanoutReader := io.NewSectionReader(fi.reader, fi.offsets[OIDFanoutChunk], lenFanout*szUint32)
+ for i := 0; i < 256; i++ {
+ fanoutValue, err := binary.ReadUint32(fanoutReader)
+ if err != nil {
+ return err
+ }
+ if fanoutValue > 0x7fffffff {
+ return ErrMalformedCommitGraphFile
+ }
+ fi.fanout[i] = fanoutValue
+ }
+ return nil
+}
+
+// GetIndexByHash looks up the provided hash in the commit-graph fanout and returns the index of the commit data for the given hash.
+func (fi *fileIndex) GetIndexByHash(h plumbing.Hash) (uint32, error) {
+ var oid plumbing.Hash
+
+ // Find the hash in the oid lookup table
+ var low uint32
+ if h[0] == 0 {
+ low = 0
+ } else {
+ low = fi.fanout[h[0]-1]
+ }
+ high := fi.fanout[h[0]]
+ for low < high {
+ mid := (low + high) >> 1
+ offset := fi.offsets[OIDLookupChunk] + int64(mid)*hash.Size
+ if _, err := fi.reader.ReadAt(oid[:], offset); err != nil {
+ return 0, err
+ }
+ cmp := bytes.Compare(h[:], oid[:])
+ if cmp < 0 {
+ high = mid
+ } else if cmp == 0 {
+ return mid + fi.minimumNumberOfHashes, nil
+ } else {
+ low = mid + 1
+ }
+ }
+
+ if fi.parent != nil {
+ idx, err := fi.parent.GetIndexByHash(h)
+ if err != nil {
+ return 0, err
+ }
+ return idx, nil
+ }
+
+ return 0, plumbing.ErrObjectNotFound
+}
+
+// GetCommitDataByIndex returns the commit data for the given index in the commit-graph.
+func (fi *fileIndex) GetCommitDataByIndex(idx uint32) (*CommitData, error) {
+ if idx < fi.minimumNumberOfHashes {
+ if fi.parent != nil {
+ data, err := fi.parent.GetCommitDataByIndex(idx)
+ if err != nil {
+ return nil, err
+ }
+ return data, nil
+ }
+
+ return nil, plumbing.ErrObjectNotFound
+ }
+ idx -= fi.minimumNumberOfHashes
+ if idx >= fi.fanout[0xff] {
+ return nil, plumbing.ErrObjectNotFound
+ }
+
+ offset := fi.offsets[CommitDataChunk] + int64(idx)*(hash.Size+szCommitData)
+ commitDataReader := io.NewSectionReader(fi.reader, offset, hash.Size+szCommitData)
+
+ treeHash, err := binary.ReadHash(commitDataReader)
+ if err != nil {
+ return nil, err
+ }
+ parent1, err := binary.ReadUint32(commitDataReader)
+ if err != nil {
+ return nil, err
+ }
+ parent2, err := binary.ReadUint32(commitDataReader)
+ if err != nil {
+ return nil, err
+ }
+ genAndTime, err := binary.ReadUint64(commitDataReader)
+ if err != nil {
+ return nil, err
+ }
+
+ var parentIndexes []uint32
+ if parent2&parentOctopusUsed == parentOctopusUsed {
+ // Octopus merge - Look-up the extra parents from the extra edge list
+ // The extra edge list is a list of uint32s, each of which is an index into the Commit Data table, terminated by an index with the most significant bit on.
+ parentIndexes = []uint32{parent1 & parentOctopusMask}
+ offset := fi.offsets[ExtraEdgeListChunk] + szUint32*int64(parent2&parentOctopusMask)
+ buf := make([]byte, szUint32)
+ for {
+ _, err := fi.reader.ReadAt(buf, offset)
+ if err != nil {
+ return nil, err
+ }
+
+ parent := encbin.BigEndian.Uint32(buf)
+ offset += szUint32
+ parentIndexes = append(parentIndexes, parent&parentOctopusMask)
+ if parent&parentLast == parentLast {
+ break
+ }
+ }
+ } else if parent2 != parentNone {
+ parentIndexes = []uint32{parent1 & parentOctopusMask, parent2 & parentOctopusMask}
+ } else if parent1 != parentNone {
+ parentIndexes = []uint32{parent1 & parentOctopusMask}
+ }
+
+ parentHashes, err := fi.getHashesFromIndexes(parentIndexes)
+ if err != nil {
+ return nil, err
+ }
+
+ generationV2 := uint64(0)
+
+ if fi.hasGenerationV2 {
+ // set the GenerationV2 result to the commit time
+ generationV2 = uint64(genAndTime & 0x3FFFFFFFF)
+
+ // Next read the generation (offset) data from the generation data chunk
+ offset := fi.offsets[GenerationDataChunk] + int64(idx)*szUint32
+ buf := make([]byte, szUint32)
+ if _, err := fi.reader.ReadAt(buf, offset); err != nil {
+ return nil, err
+ }
+ genV2Data := encbin.BigEndian.Uint32(buf)
+
+ // check if the data is an overflow that needs to be looked up in the overflow chunk
+ if genV2Data&0x80000000 > 0 {
+ // Overflow
+ offset := fi.offsets[GenerationDataOverflowChunk] + int64(genV2Data&0x7fffffff)*szUint64
+ buf := make([]byte, 8)
+ if _, err := fi.reader.ReadAt(buf, offset); err != nil {
+ return nil, err
+ }
+
+ generationV2 += encbin.BigEndian.Uint64(buf)
+ } else {
+ generationV2 += uint64(genV2Data)
+ }
+ }
+
+ return &CommitData{
+ TreeHash: treeHash,
+ ParentIndexes: parentIndexes,
+ ParentHashes: parentHashes,
+ Generation: genAndTime >> 34,
+ GenerationV2: generationV2,
+ When: time.Unix(int64(genAndTime&0x3FFFFFFFF), 0),
+ }, nil
+}
+
+// GetHashByIndex looks up the hash for the given index in the commit-graph.
+func (fi *fileIndex) GetHashByIndex(idx uint32) (found plumbing.Hash, err error) {
+ if idx < fi.minimumNumberOfHashes {
+ if fi.parent != nil {
+ return fi.parent.GetHashByIndex(idx)
+ }
+ return found, ErrMalformedCommitGraphFile
+ }
+ idx -= fi.minimumNumberOfHashes
+ if idx >= fi.fanout[0xff] {
+ return found, ErrMalformedCommitGraphFile
+ }
+
+ offset := fi.offsets[OIDLookupChunk] + int64(idx)*hash.Size
+ if _, err := fi.reader.ReadAt(found[:], offset); err != nil {
+ return found, err
+ }
+
+ return found, nil
+}
+
+func (fi *fileIndex) getHashesFromIndexes(indexes []uint32) ([]plumbing.Hash, error) {
+ hashes := make([]plumbing.Hash, len(indexes))
+
+ for i, idx := range indexes {
+ if idx < fi.minimumNumberOfHashes {
+ if fi.parent != nil {
+ hash, err := fi.parent.GetHashByIndex(idx)
+ if err != nil {
+ return nil, err
+ }
+ hashes[i] = hash
+ continue
+ }
+
+ return nil, ErrMalformedCommitGraphFile
+ }
+
+ idx -= fi.minimumNumberOfHashes
+ if idx >= fi.fanout[0xff] {
+ return nil, ErrMalformedCommitGraphFile
+ }
+
+ offset := fi.offsets[OIDLookupChunk] + int64(idx)*hash.Size
+ if _, err := fi.reader.ReadAt(hashes[i][:], offset); err != nil {
+ return nil, err
+ }
+ }
+
+ return hashes, nil
+}
+
+// Hashes returns all the hashes that are available in the index.
+func (fi *fileIndex) Hashes() []plumbing.Hash {
+ hashes := make([]plumbing.Hash, fi.fanout[0xff]+fi.minimumNumberOfHashes)
+ for i := uint32(0); i < fi.minimumNumberOfHashes; i++ {
+ hash, err := fi.parent.GetHashByIndex(i)
+ if err != nil {
+ return nil
+ }
+ hashes[i] = hash
+ }
+
+ for i := uint32(0); i < fi.fanout[0xff]; i++ {
+ offset := fi.offsets[OIDLookupChunk] + int64(i)*hash.Size
+ if n, err := fi.reader.ReadAt(hashes[i+fi.minimumNumberOfHashes][:], offset); err != nil || n < hash.Size {
+ return nil
+ }
+ }
+ return hashes
+}
+
+func (fi *fileIndex) HasGenerationV2() bool {
+ return fi.hasGenerationV2
+}
+
+func (fi *fileIndex) MaximumNumberOfHashes() uint32 {
+ return fi.minimumNumberOfHashes + fi.fanout[0xff]
+}
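And the matching reader side, again as a hedged sketch: the path is the usual location git writes the graph to, the hash is a placeholder, and it is assumed the Index interface exposes Close alongside the lookup methods implemented by fileIndex above.

package main

import (
	"fmt"
	"os"

	"github.com/go-git/go-git/v5/plumbing"
	commitgraph "github.com/go-git/go-git/v5/plumbing/format/commitgraph/v2"
)

func main() {
	f, err := os.Open(".git/objects/info/commit-graph")
	if err != nil {
		panic(err)
	}

	idx, err := commitgraph.OpenFileIndex(f)
	if err != nil {
		panic(err)
	}
	defer idx.Close()

	h := plumbing.NewHash("0000000000000000000000000000000000000001") // placeholder
	i, err := idx.GetIndexByHash(h)
	if err != nil {
		panic(err) // plumbing.ErrObjectNotFound when the commit is not in the graph
	}

	data, err := idx.GetCommitDataByIndex(i)
	if err != nil {
		panic(err)
	}
	fmt.Println(data.TreeHash, data.Generation, data.When)
}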
diff --git a/plumbing/format/commitgraph/v2/memory.go b/plumbing/format/commitgraph/v2/memory.go
new file mode 100644
index 0000000..8de0c5f
--- /dev/null
+++ b/plumbing/format/commitgraph/v2/memory.go
@@ -0,0 +1,107 @@
+package v2
+
+import (
+ "math"
+
+ "github.com/go-git/go-git/v5/plumbing"
+)
+
+// MemoryIndex provides a way to build the commit-graph in memory
+// for later encoding to file.
+type MemoryIndex struct {
+ commitData []commitData
+ indexMap map[plumbing.Hash]uint32
+ hasGenerationV2 bool
+}
+
+type commitData struct {
+ Hash plumbing.Hash
+ *CommitData
+}
+
+// NewMemoryIndex creates an in-memory commit graph representation
+func NewMemoryIndex() *MemoryIndex {
+ return &MemoryIndex{
+ indexMap: make(map[plumbing.Hash]uint32),
+ hasGenerationV2: true,
+ }
+}
+
+// GetIndexByHash gets the index in the commit graph from commit hash, if available
+func (mi *MemoryIndex) GetIndexByHash(h plumbing.Hash) (uint32, error) {
+ i, ok := mi.indexMap[h]
+ if ok {
+ return i, nil
+ }
+
+ return 0, plumbing.ErrObjectNotFound
+}
+
+// GetHashByIndex gets the hash given an index in the commit graph
+func (mi *MemoryIndex) GetHashByIndex(i uint32) (plumbing.Hash, error) {
+ if i >= uint32(len(mi.commitData)) {
+ return plumbing.ZeroHash, plumbing.ErrObjectNotFound
+ }
+
+ return mi.commitData[i].Hash, nil
+}
+
+// GetCommitDataByIndex gets the commit node from the commit graph using index
+// obtained from child node, if available
+func (mi *MemoryIndex) GetCommitDataByIndex(i uint32) (*CommitData, error) {
+ if i >= uint32(len(mi.commitData)) {
+ return nil, plumbing.ErrObjectNotFound
+ }
+
+ commitData := mi.commitData[i]
+
+ // Map parent hashes to parent indexes
+ if commitData.ParentIndexes == nil {
+ parentIndexes := make([]uint32, len(commitData.ParentHashes))
+ for i, parentHash := range commitData.ParentHashes {
+ var err error
+ if parentIndexes[i], err = mi.GetIndexByHash(parentHash); err != nil {
+ return nil, err
+ }
+ }
+ commitData.ParentIndexes = parentIndexes
+ }
+
+ return commitData.CommitData, nil
+}
+
+// Hashes returns all the hashes that are available in the index
+func (mi *MemoryIndex) Hashes() []plumbing.Hash {
+ hashes := make([]plumbing.Hash, 0, len(mi.indexMap))
+ for k := range mi.indexMap {
+ hashes = append(hashes, k)
+ }
+ return hashes
+}
+
+// Add adds a new node to the memory index
+func (mi *MemoryIndex) Add(hash plumbing.Hash, data *CommitData) {
+ // The parent indexes are calculated lazily in GetCommitDataByIndex
+ // which allows adding nodes out of order as long as all parents
+ // are eventually resolved
+ data.ParentIndexes = nil
+ mi.indexMap[hash] = uint32(len(mi.commitData))
+ mi.commitData = append(mi.commitData, commitData{Hash: hash, CommitData: data})
+ if data.GenerationV2 == math.MaxUint64 { // if GenerationV2 is not available reset it to zero
+ data.GenerationV2 = 0
+ }
+ mi.hasGenerationV2 = mi.hasGenerationV2 && data.GenerationV2 != 0
+}
+
+func (mi *MemoryIndex) HasGenerationV2() bool {
+ return mi.hasGenerationV2
+}
+
+// Close closes the index
+func (mi *MemoryIndex) Close() error {
+ return nil
+}
+
+func (mi *MemoryIndex) MaximumNumberOfHashes() uint32 {
+ return uint32(len(mi.indexMap))
+}
diff --git a/plumbing/format/config/decoder_test.go b/plumbing/format/config/decoder_test.go
index 0a8e92c..6283f5e 100644
--- a/plumbing/format/config/decoder_test.go
+++ b/plumbing/format/config/decoder_test.go
@@ -2,6 +2,7 @@ package config
import (
"bytes"
+ "testing"
. "gopkg.in/check.v1"
)
@@ -91,3 +92,13 @@ func decodeFails(c *C, text string) {
err := d.Decode(cfg)
c.Assert(err, NotNil)
}
+
+func FuzzDecoder(f *testing.F) {
+
+ f.Fuzz(func(t *testing.T, input []byte) {
+
+ d := NewDecoder(bytes.NewReader(input))
+ cfg := &Config{}
+ d.Decode(cfg)
+ })
+}
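As an aside, this native Go fuzz target can be exercised locally with the standard toolchain (Go 1.18 or later), for example with go test -run '^$' -fuzz '^FuzzDecoder$' ./plumbing/format/config; the same applies to the other fuzz targets added in this change.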
diff --git a/plumbing/format/packfile/delta_test.go b/plumbing/format/packfile/delta_test.go
index e8f5ea6..9417e55 100644
--- a/plumbing/format/packfile/delta_test.go
+++ b/plumbing/format/packfile/delta_test.go
@@ -4,6 +4,7 @@ import (
"bytes"
"io"
"math/rand"
+ "testing"
"github.com/go-git/go-git/v5/plumbing"
. "gopkg.in/check.v1"
@@ -176,3 +177,14 @@ func (s *DeltaSuite) TestMaxCopySizeDeltaReader(c *C) {
c.Assert(err, IsNil)
c.Assert(result, DeepEquals, targetBuf)
}
+
+func FuzzPatchDelta(f *testing.F) {
+
+ f.Fuzz(func(t *testing.T, input []byte) {
+
+ base := input[:len(input)/2]
+ delta := input[len(input)/2:]
+
+ PatchDelta(base, delta)
+ })
+}
diff --git a/plumbing/format/packfile/diff_delta.go b/plumbing/format/packfile/diff_delta.go
index 2c7a335..8898e58 100644
--- a/plumbing/format/packfile/diff_delta.go
+++ b/plumbing/format/packfile/diff_delta.go
@@ -17,8 +17,11 @@ const (
s = 16
// https://github.com/git/git/blob/f7466e94375b3be27f229c78873f0acf8301c0a5/diff-delta.c#L428
- // Max size of a copy operation (64KB)
+ // Max size of a copy operation (64KB).
maxCopySize = 64 * 1024
+
+ // Min size of a copy operation.
+ minCopySize = 4
)
// GetDelta returns an EncodedObject of type OFSDeltaObject. Base and Target object,
diff --git a/plumbing/format/packfile/parser.go b/plumbing/format/packfile/parser.go
index edbc0e7..62f1d13 100644
--- a/plumbing/format/packfile/parser.go
+++ b/plumbing/format/packfile/parser.go
@@ -3,6 +3,7 @@ package packfile
import (
"bytes"
"errors"
+ "fmt"
"io"
"github.com/go-git/go-git/v5/plumbing"
@@ -174,13 +175,25 @@ func (p *Parser) init() error {
return nil
}
+type objectHeaderWriter func(typ plumbing.ObjectType, sz int64) error
+
+type lazyObjectWriter interface {
+ // LazyWriter enables an object to be lazily written.
+ // It returns:
+ // - w: a writer to receive the object's content.
+ // - lwh: a func to write the object header.
+ // - err: any error from the initial writer creation process.
+ //
+ // Note that if the object header is not written BEFORE the writer
+ // is used, this will result in an invalid object.
+ LazyWriter() (w io.WriteCloser, lwh objectHeaderWriter, err error)
+}
+
func (p *Parser) indexObjects() error {
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
for i := uint32(0); i < p.count; i++ {
- buf.Reset()
-
oh, err := p.scanner.NextObjectHeader()
if err != nil {
return err
@@ -220,21 +233,60 @@ func (p *Parser) indexObjects() error {
ota = newBaseObject(oh.Offset, oh.Length, t)
}
- buf.Grow(int(oh.Length))
- _, crc, err := p.scanner.NextObject(buf)
+ hasher := plumbing.NewHasher(oh.Type, oh.Length)
+ writers := []io.Writer{hasher}
+ var obj *plumbing.MemoryObject
+
+ // Lazy writing is only available for non-delta objects.
+ if p.storage != nil && !delta {
+ // When a storage is set and supports lazy writing,
+ // use that instead of creating a memory object.
+ if low, ok := p.storage.(lazyObjectWriter); ok {
+ ow, lwh, err := low.LazyWriter()
+ if err != nil {
+ return err
+ }
+
+ if err = lwh(oh.Type, oh.Length); err != nil {
+ return err
+ }
+
+ defer ow.Close()
+ writers = append(writers, ow)
+ } else {
+ obj = new(plumbing.MemoryObject)
+ obj.SetSize(oh.Length)
+ obj.SetType(oh.Type)
+
+ writers = append(writers, obj)
+ }
+ }
+ if delta && !p.scanner.IsSeekable {
+ buf.Reset()
+ buf.Grow(int(oh.Length))
+ writers = append(writers, buf)
+ }
+
+ mw := io.MultiWriter(writers...)
+
+ _, crc, err := p.scanner.NextObject(mw)
if err != nil {
return err
}
+ // Non-delta objects need to be added to the storage. This
+ // is only required when lazy writing is not supported.
+ if obj != nil {
+ if _, err := p.storage.SetEncodedObject(obj); err != nil {
+ return err
+ }
+ }
+
ota.Crc32 = crc
ota.Length = oh.Length
- data := buf.Bytes()
if !delta {
- sha1, err := getSHA1(ota.Type, data)
- if err != nil {
- return err
- }
+ sha1 := hasher.Sum()
// Move children of placeholder parent into actual parent, in case this
// was a non-external delta reference.
@@ -249,20 +301,8 @@ func (p *Parser) indexObjects() error {
p.oiByHash[ota.SHA1] = ota
}
- if p.storage != nil && !delta {
- obj := new(plumbing.MemoryObject)
- obj.SetSize(oh.Length)
- obj.SetType(oh.Type)
- if _, err := obj.Write(data); err != nil {
- return err
- }
-
- if _, err := p.storage.SetEncodedObject(obj); err != nil {
- return err
- }
- }
-
if delta && !p.scanner.IsSeekable {
+ data := buf.Bytes()
p.deltas[oh.Offset] = make([]byte, len(data))
copy(p.deltas[oh.Offset], data)
}
@@ -280,23 +320,29 @@ func (p *Parser) resolveDeltas() error {
for _, obj := range p.oi {
buf.Reset()
+ buf.Grow(int(obj.Length))
err := p.get(obj, buf)
if err != nil {
return err
}
- content := buf.Bytes()
if err := p.onInflatedObjectHeader(obj.Type, obj.Length, obj.Offset); err != nil {
return err
}
- if err := p.onInflatedObjectContent(obj.SHA1, obj.Offset, obj.Crc32, content); err != nil {
+ if err := p.onInflatedObjectContent(obj.SHA1, obj.Offset, obj.Crc32, nil); err != nil {
return err
}
if !obj.IsDelta() && len(obj.Children) > 0 {
+ // Dealing with an io.ReaderAt object means we can
+ // create it once and reuse it across all children.
+ r := bytes.NewReader(buf.Bytes())
for _, child := range obj.Children {
- if err := p.resolveObject(io.Discard, child, content); err != nil {
+ // Even though we are discarding the output, we still need to read it
+ // so that the scanner can advance to the next object, and the SHA1 can be
+ // calculated.
+ if err := p.resolveObject(io.Discard, child, r); err != nil {
return err
}
p.resolveExternalRef(child)
@@ -361,13 +407,13 @@ func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) {
if o.DiskType.IsDelta() {
b := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(b)
+ buf.Grow(int(o.Length))
err := p.get(o.Parent, b)
if err != nil {
return err
}
- base := b.Bytes()
- err = p.resolveObject(buf, o, base)
+ err = p.resolveObject(buf, o, bytes.NewReader(b.Bytes()))
if err != nil {
return err
}
@@ -378,6 +424,13 @@ func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) {
}
}
+ // If the scanner is seekable, caching this data into
+ // memory by offset seems wasteful.
+ // There is a trade-off to be considered here in terms
+ // of execution time vs memory consumption.
+ //
+ // TODO: improve seekable execution time, so that we can
+ // skip this cache.
if len(o.Children) > 0 {
data := make([]byte, buf.Len())
copy(data, buf.Bytes())
@@ -386,10 +439,25 @@ func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) {
return nil
}
+// resolveObject resolves an object from base, using information
+// provided by o.
+//
+// This call has the side-effect of changing field values
+// from the object info o:
+// - Type: OFSDeltaObject may become the target type (e.g. Blob).
+// - Size: The size may be updated with the target size.
+// - Hash: Zero hashes will be calculated as part of the object
+// resolution. Hence why this process can't be avoided even when w
+// is io.Discard.
+//
+// base must be an io.ReaderAt, which is a requirement of
+// patchDeltaWriter. The main reason is that resolving a delta object may
+// require moving back and forth within base, which is not supported by
+// io.Reader.
func (p *Parser) resolveObject(
w io.Writer,
o *objectInfo,
- base []byte,
+ base io.ReaderAt,
) error {
if !o.DiskType.IsDelta() {
return nil
@@ -400,26 +468,46 @@ func (p *Parser) resolveObject(
if err != nil {
return err
}
- data := buf.Bytes()
- data, err = applyPatchBase(o, data, base)
+ writers := []io.Writer{w}
+ var obj *plumbing.MemoryObject
+ var lwh objectHeaderWriter
+
+ if p.storage != nil {
+ if low, ok := p.storage.(lazyObjectWriter); ok {
+ ow, wh, err := low.LazyWriter()
+ if err != nil {
+ return err
+ }
+ lwh = wh
+
+ defer ow.Close()
+ writers = append(writers, ow)
+ } else {
+ obj = new(plumbing.MemoryObject)
+ ow, err := obj.Writer()
+ if err != nil {
+ return err
+ }
+
+ writers = append(writers, ow)
+ }
+ }
+
+ mw := io.MultiWriter(writers...)
+
+ err = applyPatchBase(o, base, buf, mw, lwh)
if err != nil {
return err
}
- if p.storage != nil {
- obj := new(plumbing.MemoryObject)
- obj.SetSize(o.Size())
+ if obj != nil {
obj.SetType(o.Type)
- if _, err := obj.Write(data); err != nil {
- return err
- }
-
+ obj.SetSize(o.Size()) // Size here is correct as it was populated by applyPatchBase.
if _, err := p.storage.SetEncodedObject(obj); err != nil {
return err
}
}
- _, err = w.Write(data)
return err
}
@@ -443,24 +531,31 @@ func (p *Parser) readData(w io.Writer, o *objectInfo) error {
return nil
}
-func applyPatchBase(ota *objectInfo, data, base []byte) ([]byte, error) {
- patched, err := PatchDelta(base, data)
- if err != nil {
- return nil, err
+// applyPatchBase applies the patch to target.
+//
+// Note that ota will be updated based on the description in resolveObject.
+func applyPatchBase(ota *objectInfo, base io.ReaderAt, delta io.Reader, target io.Writer, wh objectHeaderWriter) error {
+ if target == nil {
+ return fmt.Errorf("cannot apply patch against nil target")
}
+ typ := ota.Type
if ota.SHA1 == plumbing.ZeroHash {
- ota.Type = ota.Parent.Type
- sha1, err := getSHA1(ota.Type, patched)
- if err != nil {
- return nil, err
- }
+ typ = ota.Parent.Type
+ }
+
+ sz, h, err := patchDeltaWriter(target, base, delta, typ, wh)
+ if err != nil {
+ return err
+ }
- ota.SHA1 = sha1
- ota.Length = int64(len(patched))
+ if ota.SHA1 == plumbing.ZeroHash {
+ ota.Type = typ
+ ota.Length = int64(sz)
+ ota.SHA1 = h
}
- return patched, nil
+ return nil
}
func getSHA1(t plumbing.ObjectType, data []byte) (plumbing.Hash, error) {
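The lazyObjectWriter extension point above is only an interface; storages opt into it by implementing LazyWriter. A minimal sketch of an implementation is shown below. It is illustrative only and is placed in the packfile package for simplicity, since lazyObjectWriter and objectHeaderWriter are unexported; the type names are hypothetical.

package packfile

import (
	"bytes"
	"io"

	"github.com/go-git/go-git/v5/plumbing"
)

// sketchStorage records the object header and buffers the content,
// honouring the contract that the header is written before any content.
type sketchStorage struct {
	typ  plumbing.ObjectType
	size int64
	buf  bytes.Buffer
}

type sketchWriteCloser struct{ io.Writer }

func (sketchWriteCloser) Close() error { return nil }

func (s *sketchStorage) LazyWriter() (io.WriteCloser, objectHeaderWriter, error) {
	header := func(typ plumbing.ObjectType, sz int64) error {
		s.typ, s.size = typ, sz // header first, content afterwards
		return nil
	}
	return sketchWriteCloser{&s.buf}, header, nil
}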
diff --git a/plumbing/format/packfile/patch_delta.go b/plumbing/format/packfile/patch_delta.go
index f00562d..960769c 100644
--- a/plumbing/format/packfile/patch_delta.go
+++ b/plumbing/format/packfile/patch_delta.go
@@ -4,6 +4,7 @@ import (
"bufio"
"bytes"
"errors"
+ "fmt"
"io"
"math"
@@ -17,7 +18,33 @@ import (
// and https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js
// for details about the delta format.
-const deltaSizeMin = 4
+var (
+ ErrInvalidDelta = errors.New("invalid delta")
+ ErrDeltaCmd = errors.New("wrong delta command")
+)
+
+const (
+ payload = 0x7f // 0111 1111
+ continuation = 0x80 // 1000 0000
+)
+
+type offset struct {
+ mask byte
+ shift uint
+}
+
+var offsets = []offset{
+ {mask: 0x01, shift: 0},
+ {mask: 0x02, shift: 8},
+ {mask: 0x04, shift: 16},
+ {mask: 0x08, shift: 24},
+}
+
+var sizes = []offset{
+ {mask: 0x10, shift: 0},
+ {mask: 0x20, shift: 8},
+ {mask: 0x40, shift: 16},
+}
// ApplyDelta writes to target the result of applying the modification deltas in delta to base.
func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) (err error) {
@@ -58,11 +85,6 @@ func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) (err error) {
return err
}
-var (
- ErrInvalidDelta = errors.New("invalid delta")
- ErrDeltaCmd = errors.New("wrong delta command")
-)
-
// PatchDelta returns the result of applying the modification deltas in delta to src.
// An error will be returned if delta is corrupted (ErrDeltaLen) or an action command
// is not copy from source or copy from delta (ErrDeltaCmd).
@@ -120,7 +142,8 @@ func ReaderFromDelta(base plumbing.EncodedObject, deltaRC io.Reader) (io.ReadClo
return
}
- if isCopyFromSrc(cmd) {
+ switch {
+ case isCopyFromSrc(cmd):
offset, err := decodeOffsetByteReader(cmd, deltaBuf)
if err != nil {
_ = dstWr.CloseWithError(err)
@@ -173,7 +196,8 @@ func ReaderFromDelta(base plumbing.EncodedObject, deltaRC io.Reader) (io.ReadClo
}
remainingTargetSz -= sz
basePos += sz
- } else if isCopyFromDelta(cmd) {
+
+ case isCopyFromDelta(cmd):
sz := uint(cmd) // cmd is the size itself
if invalidSize(sz, targetSz) {
_ = dstWr.CloseWithError(ErrInvalidDelta)
@@ -185,10 +209,12 @@ func ReaderFromDelta(base plumbing.EncodedObject, deltaRC io.Reader) (io.ReadClo
}
remainingTargetSz -= sz
- } else {
+
+ default:
_ = dstWr.CloseWithError(ErrDeltaCmd)
return
}
+
if remainingTargetSz <= 0 {
_ = dstWr.Close()
return
@@ -200,7 +226,7 @@ func ReaderFromDelta(base plumbing.EncodedObject, deltaRC io.Reader) (io.ReadClo
}
func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
- if len(delta) < deltaSizeMin {
+ if len(delta) < minCopySize {
return ErrInvalidDelta
}
@@ -221,7 +247,9 @@ func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
cmd = delta[0]
delta = delta[1:]
- if isCopyFromSrc(cmd) {
+
+ switch {
+ case isCopyFromSrc(cmd):
var offset, sz uint
var err error
offset, delta, err = decodeOffset(cmd, delta)
@@ -240,7 +268,8 @@ func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
}
dst.Write(src[offset : offset+sz])
remainingTargetSz -= sz
- } else if isCopyFromDelta(cmd) {
+
+ case isCopyFromDelta(cmd):
sz := uint(cmd) // cmd is the size itself
if invalidSize(sz, targetSz) {
return ErrInvalidDelta
@@ -253,7 +282,8 @@ func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
dst.Write(delta[0:sz])
remainingTargetSz -= sz
delta = delta[sz:]
- } else {
+
+ default:
return ErrDeltaCmd
}
@@ -265,6 +295,107 @@ func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
return nil
}
+func patchDeltaWriter(dst io.Writer, base io.ReaderAt, delta io.Reader,
+ typ plumbing.ObjectType, writeHeader objectHeaderWriter) (uint, plumbing.Hash, error) {
+ deltaBuf := bufio.NewReaderSize(delta, 1024)
+ srcSz, err := decodeLEB128ByteReader(deltaBuf)
+ if err != nil {
+ if err == io.EOF {
+ return 0, plumbing.ZeroHash, ErrInvalidDelta
+ }
+ return 0, plumbing.ZeroHash, err
+ }
+
+ if r, ok := base.(*bytes.Reader); ok && srcSz != uint(r.Size()) {
+ return 0, plumbing.ZeroHash, ErrInvalidDelta
+ }
+
+ targetSz, err := decodeLEB128ByteReader(deltaBuf)
+ if err != nil {
+ if err == io.EOF {
+ return 0, plumbing.ZeroHash, ErrInvalidDelta
+ }
+ return 0, plumbing.ZeroHash, err
+ }
+
+ // If the header still needs to be written, the caller will provide
+ // an objectHeaderWriter. This seems to be the case when
+ // dealing with thin-packs.
+ if writeHeader != nil {
+ err = writeHeader(typ, int64(targetSz))
+ if err != nil {
+ return 0, plumbing.ZeroHash, fmt.Errorf("could not lazy write header: %w", err)
+ }
+ }
+
+ remainingTargetSz := targetSz
+
+ hasher := plumbing.NewHasher(typ, int64(targetSz))
+ mw := io.MultiWriter(dst, hasher)
+
+ bufp := sync.GetByteSlice()
+ defer sync.PutByteSlice(bufp)
+
+ sr := io.NewSectionReader(base, int64(0), int64(srcSz))
+ // Keep both the io.LimitedReader types, so we can reset N.
+ baselr := io.LimitReader(sr, 0).(*io.LimitedReader)
+ deltalr := io.LimitReader(deltaBuf, 0).(*io.LimitedReader)
+
+ for {
+ buf := *bufp
+ cmd, err := deltaBuf.ReadByte()
+ if err == io.EOF {
+ return 0, plumbing.ZeroHash, ErrInvalidDelta
+ }
+ if err != nil {
+ return 0, plumbing.ZeroHash, err
+ }
+
+ if isCopyFromSrc(cmd) {
+ offset, err := decodeOffsetByteReader(cmd, deltaBuf)
+ if err != nil {
+ return 0, plumbing.ZeroHash, err
+ }
+ sz, err := decodeSizeByteReader(cmd, deltaBuf)
+ if err != nil {
+ return 0, plumbing.ZeroHash, err
+ }
+
+ if invalidSize(sz, targetSz) ||
+ invalidOffsetSize(offset, sz, srcSz) {
+ return 0, plumbing.ZeroHash, ErrInvalidDelta
+ }
+
+ if _, err := sr.Seek(int64(offset), io.SeekStart); err != nil {
+ return 0, plumbing.ZeroHash, err
+ }
+ baselr.N = int64(sz)
+ if _, err := io.CopyBuffer(mw, baselr, buf); err != nil {
+ return 0, plumbing.ZeroHash, err
+ }
+ remainingTargetSz -= sz
+ } else if isCopyFromDelta(cmd) {
+ sz := uint(cmd) // cmd is the size itself
+ if invalidSize(sz, targetSz) {
+ return 0, plumbing.ZeroHash, ErrInvalidDelta
+ }
+ deltalr.N = int64(sz)
+ if _, err := io.CopyBuffer(mw, deltalr, buf); err != nil {
+ return 0, plumbing.ZeroHash, err
+ }
+
+ remainingTargetSz -= sz
+ } else {
+ return 0, plumbing.ZeroHash, ErrDeltaCmd
+ }
+ if remainingTargetSz <= 0 {
+ break
+ }
+ }
+
+ return targetSz, hasher.Sum(), nil
+}
+
// Decodes a number encoded as an unsigned LEB128 at the start of some
// binary data and returns the decoded number and the rest of the
// stream.
@@ -306,48 +437,24 @@ func decodeLEB128ByteReader(input io.ByteReader) (uint, error) {
return num, nil
}
-const (
- payload = 0x7f // 0111 1111
- continuation = 0x80 // 1000 0000
-)
-
func isCopyFromSrc(cmd byte) bool {
- return (cmd & 0x80) != 0
+ return (cmd & continuation) != 0
}
func isCopyFromDelta(cmd byte) bool {
- return (cmd&0x80) == 0 && cmd != 0
+ return (cmd&continuation) == 0 && cmd != 0
}
func decodeOffsetByteReader(cmd byte, delta io.ByteReader) (uint, error) {
var offset uint
- if (cmd & 0x01) != 0 {
- next, err := delta.ReadByte()
- if err != nil {
- return 0, err
- }
- offset = uint(next)
- }
- if (cmd & 0x02) != 0 {
- next, err := delta.ReadByte()
- if err != nil {
- return 0, err
- }
- offset |= uint(next) << 8
- }
- if (cmd & 0x04) != 0 {
- next, err := delta.ReadByte()
- if err != nil {
- return 0, err
- }
- offset |= uint(next) << 16
- }
- if (cmd & 0x08) != 0 {
- next, err := delta.ReadByte()
- if err != nil {
- return 0, err
+ for _, o := range offsets {
+ if (cmd & o.mask) != 0 {
+ next, err := delta.ReadByte()
+ if err != nil {
+ return 0, err
+ }
+ offset |= uint(next) << o.shift
}
- offset |= uint(next) << 24
}
return offset, nil
@@ -355,33 +462,14 @@ func decodeOffsetByteReader(cmd byte, delta io.ByteReader) (uint, error) {
func decodeOffset(cmd byte, delta []byte) (uint, []byte, error) {
var offset uint
- if (cmd & 0x01) != 0 {
- if len(delta) == 0 {
- return 0, nil, ErrInvalidDelta
- }
- offset = uint(delta[0])
- delta = delta[1:]
- }
- if (cmd & 0x02) != 0 {
- if len(delta) == 0 {
- return 0, nil, ErrInvalidDelta
- }
- offset |= uint(delta[0]) << 8
- delta = delta[1:]
- }
- if (cmd & 0x04) != 0 {
- if len(delta) == 0 {
- return 0, nil, ErrInvalidDelta
- }
- offset |= uint(delta[0]) << 16
- delta = delta[1:]
- }
- if (cmd & 0x08) != 0 {
- if len(delta) == 0 {
- return 0, nil, ErrInvalidDelta
+ for _, o := range offsets {
+ if (cmd & o.mask) != 0 {
+ if len(delta) == 0 {
+ return 0, nil, ErrInvalidDelta
+ }
+ offset |= uint(delta[0]) << o.shift
+ delta = delta[1:]
}
- offset |= uint(delta[0]) << 24
- delta = delta[1:]
}
return offset, delta, nil
@@ -389,29 +477,18 @@ func decodeOffset(cmd byte, delta []byte) (uint, []byte, error) {
func decodeSizeByteReader(cmd byte, delta io.ByteReader) (uint, error) {
var sz uint
- if (cmd & 0x10) != 0 {
- next, err := delta.ReadByte()
- if err != nil {
- return 0, err
- }
- sz = uint(next)
- }
- if (cmd & 0x20) != 0 {
- next, err := delta.ReadByte()
- if err != nil {
- return 0, err
- }
- sz |= uint(next) << 8
- }
- if (cmd & 0x40) != 0 {
- next, err := delta.ReadByte()
- if err != nil {
- return 0, err
+ for _, s := range sizes {
+ if (cmd & s.mask) != 0 {
+ next, err := delta.ReadByte()
+ if err != nil {
+ return 0, err
+ }
+ sz |= uint(next) << s.shift
}
- sz |= uint(next) << 16
}
+
if sz == 0 {
- sz = 0x10000
+ sz = maxCopySize
}
return sz, nil
@@ -419,29 +496,17 @@ func decodeSizeByteReader(cmd byte, delta io.ByteReader) (uint, error) {
func decodeSize(cmd byte, delta []byte) (uint, []byte, error) {
var sz uint
- if (cmd & 0x10) != 0 {
- if len(delta) == 0 {
- return 0, nil, ErrInvalidDelta
- }
- sz = uint(delta[0])
- delta = delta[1:]
- }
- if (cmd & 0x20) != 0 {
- if len(delta) == 0 {
- return 0, nil, ErrInvalidDelta
- }
- sz |= uint(delta[0]) << 8
- delta = delta[1:]
- }
- if (cmd & 0x40) != 0 {
- if len(delta) == 0 {
- return 0, nil, ErrInvalidDelta
+ for _, s := range sizes {
+ if (cmd & s.mask) != 0 {
+ if len(delta) == 0 {
+ return 0, nil, ErrInvalidDelta
+ }
+ sz |= uint(delta[0]) << s.shift
+ delta = delta[1:]
}
- sz |= uint(delta[0]) << 16
- delta = delta[1:]
}
if sz == 0 {
- sz = 0x10000
+ sz = maxCopySize
}
return sz, delta, nil
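To make the table-driven command decoding concrete, here is a small hedged walkthrough of one copy-from-base command, using the same masks and shifts as the offsets and sizes tables above (a standalone illustration, not package code):

package main

import "fmt"

func main() {
	// Command 0x91 = 1001 0001: the high bit marks copy-from-base,
	// bit 0x01 means one offset byte follows, bit 0x10 means one size byte follows.
	delta := []byte{0x91, 0x2a, 0x05}

	cmd := delta[0]
	rest := delta[1:]

	var offset, size uint
	if cmd&0x01 != 0 { // offsets[0]: mask 0x01, shift 0
		offset |= uint(rest[0])
		rest = rest[1:]
	}
	if cmd&0x10 != 0 { // sizes[0]: mask 0x10, shift 0
		size |= uint(rest[0])
	}
	if size == 0 {
		size = 64 * 1024 // a zero size means maxCopySize (64KB)
	}

	fmt.Println(offset, size) // 42 5: copy 5 bytes from base offset 42
}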
diff --git a/plumbing/format/pktline/encoder.go b/plumbing/format/pktline/encoder.go
index 6d40979..b6144fa 100644
--- a/plumbing/format/pktline/encoder.go
+++ b/plumbing/format/pktline/encoder.go
@@ -7,6 +7,8 @@ import (
"errors"
"fmt"
"io"
+
+ "github.com/go-git/go-git/v5/utils/trace"
)
// An Encoder writes pkt-lines to an output stream.
@@ -43,6 +45,7 @@ func NewEncoder(w io.Writer) *Encoder {
// Flush encodes a flush-pkt to the output stream.
func (e *Encoder) Flush() error {
+ defer trace.Packet.Print("packet: > 0000")
_, err := e.w.Write(FlushPkt)
return err
}
@@ -70,6 +73,7 @@ func (e *Encoder) encodeLine(p []byte) error {
}
n := len(p) + 4
+ defer trace.Packet.Printf("packet: > %04x %s", n, p)
if _, err := e.w.Write(asciiHex16(n)); err != nil {
return err
}
diff --git a/plumbing/format/pktline/error.go b/plumbing/format/pktline/error.go
new file mode 100644
index 0000000..2c0e5a7
--- /dev/null
+++ b/plumbing/format/pktline/error.go
@@ -0,0 +1,51 @@
+package pktline
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "strings"
+)
+
+var (
+ // ErrInvalidErrorLine is returned by Decode when the packet line is not an
+ // error line.
+ ErrInvalidErrorLine = errors.New("expected an error-line")
+
+ errPrefix = []byte("ERR ")
+)
+
+// ErrorLine is a packet line that contains an error message.
+// Once this packet is sent by client or server, the data transfer process is
+// terminated.
+// See https://git-scm.com/docs/pack-protocol#_pkt_line_format
+type ErrorLine struct {
+ Text string
+}
+
+// Error implements the error interface.
+func (e *ErrorLine) Error() string {
+ return e.Text
+}
+
+// Encode encodes the ErrorLine into a packet line.
+func (e *ErrorLine) Encode(w io.Writer) error {
+ p := NewEncoder(w)
+ return p.Encodef("%s%s\n", string(errPrefix), e.Text)
+}
+
+// Decode decodes a packet line into an ErrorLine.
+func (e *ErrorLine) Decode(r io.Reader) error {
+ s := NewScanner(r)
+ if !s.Scan() {
+ return s.Err()
+ }
+
+ line := s.Bytes()
+ if !bytes.HasPrefix(line, errPrefix) {
+ return ErrInvalidErrorLine
+ }
+
+ e.Text = strings.TrimSpace(string(line[4:]))
+ return nil
+}
diff --git a/plumbing/format/pktline/error_test.go b/plumbing/format/pktline/error_test.go
new file mode 100644
index 0000000..3cffd20
--- /dev/null
+++ b/plumbing/format/pktline/error_test.go
@@ -0,0 +1,68 @@
+package pktline
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "testing"
+)
+
+func TestEncodeEmptyErrorLine(t *testing.T) {
+ e := &ErrorLine{}
+ err := e.Encode(io.Discard)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestEncodeErrorLine(t *testing.T) {
+ e := &ErrorLine{
+ Text: "something",
+ }
+ var buf bytes.Buffer
+ err := e.Encode(&buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if buf.String() != "0012ERR something\n" {
+ t.Fatalf("unexpected encoded error line: %q", buf.String())
+ }
+}
+
+func TestDecodeEmptyErrorLine(t *testing.T) {
+ var buf bytes.Buffer
+ e := &ErrorLine{}
+ err := e.Decode(&buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if e.Text != "" {
+ t.Fatalf("unexpected error line: %q", e.Text)
+ }
+}
+
+func TestDecodeErrorLine(t *testing.T) {
+ var buf bytes.Buffer
+ buf.WriteString("000eERR foobar")
+ var e *ErrorLine
+ err := e.Decode(&buf)
+ if !errors.As(err, &e) {
+ t.Fatalf("expected error line, got: %T: %v", err, err)
+ }
+ if e.Text != "foobar" {
+ t.Fatalf("unexpected error line: %q", e.Text)
+ }
+}
+
+func TestDecodeErrorLineLn(t *testing.T) {
+ var buf bytes.Buffer
+ buf.WriteString("000fERR foobar\n")
+ var e *ErrorLine
+ err := e.Decode(&buf)
+ if !errors.As(err, &e) {
+ t.Fatalf("expected error line, got: %T: %v", err, err)
+ }
+ if e.Text != "foobar" {
+ t.Fatalf("unexpected error line: %q", e.Text)
+ }
+}
diff --git a/plumbing/format/pktline/scanner.go b/plumbing/format/pktline/scanner.go
index 99aab46..fbb137d 100644
--- a/plumbing/format/pktline/scanner.go
+++ b/plumbing/format/pktline/scanner.go
@@ -1,8 +1,12 @@
package pktline
import (
+ "bytes"
"errors"
"io"
+ "strings"
+
+ "github.com/go-git/go-git/v5/utils/trace"
)
const (
@@ -65,6 +69,14 @@ func (s *Scanner) Scan() bool {
return false
}
s.payload = s.payload[:l]
+ trace.Packet.Printf("packet: < %04x %s", l, s.payload)
+
+ if bytes.HasPrefix(s.payload, errPrefix) {
+ s.err = &ErrorLine{
+ Text: strings.TrimSpace(string(s.payload[4:])),
+ }
+ return false
+ }
return true
}
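With this change the scanner surfaces server-side ERR packets as an *ErrorLine. A hedged sketch of what a caller observes:

package main

import (
	"errors"
	"fmt"
	"strings"

	"github.com/go-git/go-git/v5/plumbing/format/pktline"
)

func main() {
	// "000eERR foobar" is a 14-byte pkt-line whose payload starts with "ERR ".
	s := pktline.NewScanner(strings.NewReader("000eERR foobar"))

	if !s.Scan() {
		var e *pktline.ErrorLine
		if errors.As(s.Err(), &e) {
			fmt.Println("server error:", e.Text) // server error: foobar
		}
	}
}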
diff --git a/plumbing/hash/hash.go b/plumbing/hash/hash.go
index 82d1856..8609848 100644
--- a/plumbing/hash/hash.go
+++ b/plumbing/hash/hash.go
@@ -24,7 +24,7 @@ func reset() {
algos[crypto.SHA256] = crypto.SHA256.New
}
-// RegisterHash allows for the hash algorithm used to be overriden.
+// RegisterHash allows for the hash algorithm used to be overridden.
// This ensures the hash selection for go-git must be explicit, when
// overriding the default value.
func RegisterHash(h crypto.Hash, f func() hash.Hash) error {
diff --git a/plumbing/object.go b/plumbing/object.go
index 2655dee..3ee9de9 100644
--- a/plumbing/object.go
+++ b/plumbing/object.go
@@ -82,7 +82,7 @@ func (t ObjectType) Valid() bool {
return t >= CommitObject && t <= REFDeltaObject
}
-// IsDelta returns true for any ObjectTyoe that represents a delta (i.e.
+// IsDelta returns true for any ObjectType that represents a delta (i.e.
// REFDeltaObject or OFSDeltaObject).
func (t ObjectType) IsDelta() bool {
return t == REFDeltaObject || t == OFSDeltaObject
diff --git a/plumbing/object/commit.go b/plumbing/object/commit.go
index 8a0f35c..ceed5d0 100644
--- a/plumbing/object/commit.go
+++ b/plumbing/object/commit.go
@@ -17,14 +17,25 @@ import (
)
const (
- beginpgp string = "-----BEGIN PGP SIGNATURE-----"
- endpgp string = "-----END PGP SIGNATURE-----"
- headerpgp string = "gpgsig"
+ beginpgp string = "-----BEGIN PGP SIGNATURE-----"
+ endpgp string = "-----END PGP SIGNATURE-----"
+ headerpgp string = "gpgsig"
+ headerencoding string = "encoding"
+
+ // https://github.com/git/git/blob/bcb6cae2966cc407ca1afc77413b3ef11103c175/Documentation/gitformat-signature.txt#L153
+ // When a merge commit is created from a signed tag, the tag is embedded in
+ // the commit with the "mergetag" header.
+ headermergetag string = "mergetag"
+
+ defaultUtf8CommitMesageEncoding MessageEncoding = "UTF-8"
)
// Hash represents the hash of an object
type Hash plumbing.Hash
+// MessageEncoding represents the encoding of a commit
+type MessageEncoding string
+
// Commit points to a single tree, marking it as what the project looked like
// at a certain point in time. It contains meta-information about that point
// in time, such as a timestamp, the author of the changes since the last
@@ -38,6 +49,9 @@ type Commit struct {
// Committer is the one performing the commit, might be different from
// Author.
Committer Signature
+ // MergeTag is the embedded tag object when a merge commit is created by
+ // merging a signed tag.
+ MergeTag string
// PGPSignature is the PGP signature of the commit.
PGPSignature string
// Message is the commit message, contains arbitrary text.
@@ -46,6 +60,8 @@ type Commit struct {
TreeHash plumbing.Hash
// ParentHashes are the hashes of the parent commits of the commit.
ParentHashes []plumbing.Hash
+ // Encoding is the encoding of the commit.
+ Encoding MessageEncoding
s storer.EncodedObjectStorer
}
@@ -173,6 +189,7 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
}
c.Hash = o.Hash()
+ c.Encoding = defaultUtf8CommitMesageEncoding
reader, err := o.Reader()
if err != nil {
@@ -184,6 +201,7 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
defer sync.PutBufioReader(r)
var message bool
+ var mergetag bool
var pgpsig bool
var msgbuf bytes.Buffer
for {
@@ -192,6 +210,16 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
return err
}
+ if mergetag {
+ if len(line) > 0 && line[0] == ' ' {
+ line = bytes.TrimLeft(line, " ")
+ c.MergeTag += string(line)
+ continue
+ } else {
+ mergetag = false
+ }
+ }
+
if pgpsig {
if len(line) > 0 && line[0] == ' ' {
line = bytes.TrimLeft(line, " ")
@@ -225,6 +253,11 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
c.Author.Decode(data)
case "committer":
c.Committer.Decode(data)
+ case headermergetag:
+ c.MergeTag += string(data) + "\n"
+ mergetag = true
+ case headerencoding:
+ c.Encoding = MessageEncoding(data)
case headerpgp:
c.PGPSignature += string(data) + "\n"
pgpsig = true
@@ -286,6 +319,28 @@ func (c *Commit) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
return err
}
+ if c.MergeTag != "" {
+ if _, err = fmt.Fprint(w, "\n"+headermergetag+" "); err != nil {
+ return err
+ }
+
+ // Split tag information lines and re-write with a left padding and
+ // newline. Use join for this so it's clear that a newline should not be
+ // added after this section. The newline will be added either as part of
+ // the PGP signature or the commit message.
+ mergetag := strings.TrimSuffix(c.MergeTag, "\n")
+ lines := strings.Split(mergetag, "\n")
+ if _, err = fmt.Fprint(w, strings.Join(lines, "\n ")); err != nil {
+ return err
+ }
+ }
+
+ if string(c.Encoding) != "" && c.Encoding != defaultUtf8CommitMesageEncoding {
+ if _, err = fmt.Fprintf(w, "\n%s %s", headerencoding, c.Encoding); err != nil {
+ return err
+ }
+ }
+
if c.PGPSignature != "" && includeSig {
if _, err = fmt.Fprint(w, "\n"+headerpgp+" "); err != nil {
return err
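As a hedged illustration of the new encoding support (hashes and identities are placeholders), encoding a commit whose Encoding differs from the UTF-8 default emits an extra header line:

package main

import (
	"fmt"
	"io"
	"time"

	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/object"
)

func main() {
	when := time.Unix(1136239445, 0)
	c := &object.Commit{
		Author:    object.Signature{Name: "Foo", Email: "foo@example.local", When: when},
		Committer: object.Signature{Name: "Bar", Email: "bar@example.local", When: when},
		Message:   "Message\n",
		TreeHash:  plumbing.NewHash("f000000000000000000000000000000000000001"),
		Encoding:  object.MessageEncoding("ISO-8859-1"),
	}

	obj := &plumbing.MemoryObject{}
	if err := c.Encode(obj); err != nil {
		panic(err)
	}

	r, _ := obj.Reader()
	raw, _ := io.ReadAll(r)
	// The serialized body now contains an "encoding ISO-8859-1" line between
	// the committer header and the message.
	fmt.Printf("%s", raw)
}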
diff --git a/plumbing/object/commit_test.go b/plumbing/object/commit_test.go
index 4b0f6b4..3e1fe1b 100644
--- a/plumbing/object/commit_test.go
+++ b/plumbing/object/commit_test.go
@@ -3,6 +3,7 @@ package object
import (
"bytes"
"context"
+ "fmt"
"io"
"strings"
"time"
@@ -197,6 +198,27 @@ func (s *SuiteCommit) TestPatchContext_ToNil(c *C) {
}
func (s *SuiteCommit) TestCommitEncodeDecodeIdempotent(c *C) {
+ pgpsignature := `-----BEGIN PGP SIGNATURE-----
+
+iQEcBAABAgAGBQJTZbQlAAoJEF0+sviABDDrZbQH/09PfE51KPVPlanr6q1v4/Ut
+LQxfojUWiLQdg2ESJItkcuweYg+kc3HCyFejeDIBw9dpXt00rY26p05qrpnG+85b
+hM1/PswpPLuBSr+oCIDj5GMC2r2iEKsfv2fJbNW8iWAXVLoWZRF8B0MfqX/YTMbm
+ecorc4iXzQu7tupRihslbNkfvfciMnSDeSvzCpWAHl7h8Wj6hhqePmLm9lAYqnKp
+8S5B/1SSQuEAjRZgI4IexpZoeKGVDptPHxLLS38fozsyi0QyDyzEgJxcJQVMXxVi
+RUysgqjcpT8+iQM1PblGfHR4XAhuOqN5Fx06PSaFZhqvWFezJ28/CLyX5q+oIVk=
+=EFTF
+-----END PGP SIGNATURE-----
+`
+
+ tag := fmt.Sprintf(`object f000000000000000000000000000000000000000
+type commit
+tag change
+tagger Foo <foo@example.local> 1695827841 -0400
+
+change
+%s
+`, pgpsignature)
+
ts, err := time.Parse(time.RFC3339, "2006-01-02T15:04:05-07:00")
c.Assert(err, IsNil)
commits := []*Commit{
@@ -206,6 +228,7 @@ func (s *SuiteCommit) TestCommitEncodeDecodeIdempotent(c *C) {
Message: "Message\n\nFoo\nBar\nWith trailing blank lines\n\n",
TreeHash: plumbing.NewHash("f000000000000000000000000000000000000001"),
ParentHashes: []plumbing.Hash{plumbing.NewHash("f000000000000000000000000000000000000002")},
+ Encoding: defaultUtf8CommitMesageEncoding,
},
{
Author: Signature{Name: "Foo", Email: "foo@example.local", When: ts},
@@ -218,6 +241,32 @@ func (s *SuiteCommit) TestCommitEncodeDecodeIdempotent(c *C) {
plumbing.NewHash("f000000000000000000000000000000000000006"),
plumbing.NewHash("f000000000000000000000000000000000000007"),
},
+ Encoding: MessageEncoding("ISO-8859-1"),
+ },
+ {
+ Author: Signature{Name: "Foo", Email: "foo@example.local", When: ts},
+ Committer: Signature{Name: "Bar", Email: "bar@example.local", When: ts},
+ Message: "Testing mergetag\n\nHere, commit is not signed",
+ TreeHash: plumbing.NewHash("f000000000000000000000000000000000000001"),
+ ParentHashes: []plumbing.Hash{
+ plumbing.NewHash("f000000000000000000000000000000000000002"),
+ plumbing.NewHash("f000000000000000000000000000000000000003"),
+ },
+ MergeTag: tag,
+ Encoding: defaultUtf8CommitMesageEncoding,
+ },
+ {
+ Author: Signature{Name: "Foo", Email: "foo@example.local", When: ts},
+ Committer: Signature{Name: "Bar", Email: "bar@example.local", When: ts},
+ Message: "Testing mergetag\n\nHere, commit is also signed",
+ TreeHash: plumbing.NewHash("f000000000000000000000000000000000000001"),
+ ParentHashes: []plumbing.Hash{
+ plumbing.NewHash("f000000000000000000000000000000000000002"),
+ plumbing.NewHash("f000000000000000000000000000000000000003"),
+ },
+ MergeTag: tag,
+ PGPSignature: pgpsignature,
+ Encoding: defaultUtf8CommitMesageEncoding,
},
}
for _, commit := range commits {
@@ -485,7 +534,7 @@ func (s *SuiteCommit) TestMalformedHeader(c *C) {
}
func (s *SuiteCommit) TestEncodeWithoutSignature(c *C) {
- //Similar to TestString since no signature
+ // Similar to TestString since no signature
encoded := &plumbing.MemoryObject{}
err := s.Commit.EncodeWithoutSignature(encoded)
c.Assert(err, IsNil)
diff --git a/plumbing/object/commitgraph/commitnode.go b/plumbing/object/commitgraph/commitnode.go
index 7abc58b..47227d4 100644
--- a/plumbing/object/commitgraph/commitnode.go
+++ b/plumbing/object/commitgraph/commitnode.go
@@ -1,98 +1,102 @@
-package commitgraph
-
-import (
- "io"
- "time"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/object"
- "github.com/go-git/go-git/v5/plumbing/storer"
-)
-
-// CommitNode is generic interface encapsulating a lightweight commit object retrieved
-// from CommitNodeIndex
-type CommitNode interface {
- // ID returns the Commit object id referenced by the commit graph node.
- ID() plumbing.Hash
- // Tree returns the Tree referenced by the commit graph node.
- Tree() (*object.Tree, error)
- // CommitTime returns the Commiter.When time of the Commit referenced by the commit graph node.
- CommitTime() time.Time
- // NumParents returns the number of parents in a commit.
- NumParents() int
- // ParentNodes return a CommitNodeIter for parents of specified node.
- ParentNodes() CommitNodeIter
- // ParentNode returns the ith parent of a commit.
- ParentNode(i int) (CommitNode, error)
- // ParentHashes returns hashes of the parent commits for a specified node
- ParentHashes() []plumbing.Hash
- // Generation returns the generation of the commit for reachability analysis.
- // Objects with newer generation are not reachable from objects of older generation.
- Generation() uint64
- // Commit returns the full commit object from the node
- Commit() (*object.Commit, error)
-}
-
-// CommitNodeIndex is generic interface encapsulating an index of CommitNode objects
-type CommitNodeIndex interface {
- // Get returns a commit node from a commit hash
- Get(hash plumbing.Hash) (CommitNode, error)
-}
-
-// CommitNodeIter is a generic closable interface for iterating over commit nodes.
-type CommitNodeIter interface {
- Next() (CommitNode, error)
- ForEach(func(CommitNode) error) error
- Close()
-}
-
-// parentCommitNodeIter provides an iterator for parent commits from associated CommitNodeIndex.
-type parentCommitNodeIter struct {
- node CommitNode
- i int
-}
-
-func newParentgraphCommitNodeIter(node CommitNode) CommitNodeIter {
- return &parentCommitNodeIter{node, 0}
-}
-
-// Next moves the iterator to the next commit and returns a pointer to it. If
-// there are no more commits, it returns io.EOF.
-func (iter *parentCommitNodeIter) Next() (CommitNode, error) {
- obj, err := iter.node.ParentNode(iter.i)
- if err == object.ErrParentNotFound {
- return nil, io.EOF
- }
- if err == nil {
- iter.i++
- }
-
- return obj, err
-}
-
-// ForEach call the cb function for each commit contained on this iter until
-// an error appends or the end of the iter is reached. If ErrStop is sent
-// the iteration is stopped but no error is returned. The iterator is closed.
-func (iter *parentCommitNodeIter) ForEach(cb func(CommitNode) error) error {
- for {
- obj, err := iter.Next()
- if err != nil {
- if err == io.EOF {
- return nil
- }
-
- return err
- }
-
- if err := cb(obj); err != nil {
- if err == storer.ErrStop {
- return nil
- }
-
- return err
- }
- }
-}
-
-func (iter *parentCommitNodeIter) Close() {
-}
+package commitgraph
+
+import (
+ "io"
+ "time"
+
+ "github.com/go-git/go-git/v5/plumbing"
+ "github.com/go-git/go-git/v5/plumbing/object"
+ "github.com/go-git/go-git/v5/plumbing/storer"
+)
+
+// CommitNode is a generic interface encapsulating a lightweight commit object retrieved
+// from CommitNodeIndex
+type CommitNode interface {
+ // ID returns the Commit object id referenced by the commit graph node.
+ ID() plumbing.Hash
+ // Tree returns the Tree referenced by the commit graph node.
+ Tree() (*object.Tree, error)
+ // CommitTime returns the Committer.When time of the Commit referenced by the commit graph node.
+ CommitTime() time.Time
+ // NumParents returns the number of parents in a commit.
+ NumParents() int
+	// ParentNodes returns a CommitNodeIter for the parents of the specified node.
+ ParentNodes() CommitNodeIter
+ // ParentNode returns the ith parent of a commit.
+ ParentNode(i int) (CommitNode, error)
+	// ParentHashes returns the hashes of the parent commits for the specified node.
+ ParentHashes() []plumbing.Hash
+ // Generation returns the generation of the commit for reachability analysis.
+ // Objects with newer generation are not reachable from objects of older generation.
+ Generation() uint64
+	// GenerationV2 returns the corrected commit date for the commit.
+	// It combines the contents of the GDA2 and GDO2 sections of the commit-graph
+	// with the commit time portion of the CDAT section.
+ GenerationV2() uint64
+ // Commit returns the full commit object from the node
+ Commit() (*object.Commit, error)
+}
+
+// CommitNodeIndex is a generic interface encapsulating an index of CommitNode objects.
+type CommitNodeIndex interface {
+ // Get returns a commit node from a commit hash
+ Get(hash plumbing.Hash) (CommitNode, error)
+}
+
+// CommitNodeIter is a generic closable interface for iterating over commit nodes.
+type CommitNodeIter interface {
+ Next() (CommitNode, error)
+ ForEach(func(CommitNode) error) error
+ Close()
+}
+
+// parentCommitNodeIter provides an iterator for parent commits from associated CommitNodeIndex.
+type parentCommitNodeIter struct {
+ node CommitNode
+ i int
+}
+
+func newParentgraphCommitNodeIter(node CommitNode) CommitNodeIter {
+ return &parentCommitNodeIter{node, 0}
+}
+
+// Next moves the iterator to the next commit and returns a pointer to it. If
+// there are no more commits, it returns io.EOF.
+func (iter *parentCommitNodeIter) Next() (CommitNode, error) {
+ obj, err := iter.node.ParentNode(iter.i)
+ if err == object.ErrParentNotFound {
+ return nil, io.EOF
+ }
+ if err == nil {
+ iter.i++
+ }
+
+ return obj, err
+}
+
+// ForEach calls the cb function for each commit contained in this iter until
+// an error happens or the end of the iter is reached. If ErrStop is returned,
+// the iteration is stopped but no error is returned. The iterator is closed.
+func (iter *parentCommitNodeIter) ForEach(cb func(CommitNode) error) error {
+ for {
+ obj, err := iter.Next()
+ if err != nil {
+ if err == io.EOF {
+ return nil
+ }
+
+ return err
+ }
+
+ if err := cb(obj); err != nil {
+ if err == storer.ErrStop {
+ return nil
+ }
+
+ return err
+ }
+ }
+}
+
+func (iter *parentCommitNodeIter) Close() {
+}
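
A minimal sketch (Go) of how CommitNode and CommitNodeIndex are typically consumed; the index is assumed to come from NewObjectCommitNodeIndex or NewGraphCommitNodeIndex introduced below, and the hash is supplied by the caller:

package example

import (
    "fmt"

    "github.com/go-git/go-git/v5/plumbing"
    "github.com/go-git/go-git/v5/plumbing/object/commitgraph"
)

// printParents resolves a commit through the given index and lists its parents.
func printParents(nodeIndex commitgraph.CommitNodeIndex, hash plumbing.Hash) error {
    node, err := nodeIndex.Get(hash)
    if err != nil {
        return err
    }

    fmt.Println(node.ID(), node.CommitTime())

    return node.ParentNodes().ForEach(func(parent commitgraph.CommitNode) error {
        fmt.Println("  parent:", parent.ID())
        return nil
    })
}
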
diff --git a/plumbing/object/commitgraph/commitnode_graph.go b/plumbing/object/commitgraph/commitnode_graph.go
index 8e5d4e3..0f51e3b 100644
--- a/plumbing/object/commitgraph/commitnode_graph.go
+++ b/plumbing/object/commitgraph/commitnode_graph.go
@@ -1,131 +1,140 @@
-package commitgraph
-
-import (
- "fmt"
- "time"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/format/commitgraph"
- "github.com/go-git/go-git/v5/plumbing/object"
- "github.com/go-git/go-git/v5/plumbing/storer"
-)
-
-// graphCommitNode is a reduced representation of Commit as presented in the commit
-// graph file (commitgraph.Node). It is merely useful as an optimization for walking
-// the commit graphs.
-//
-// graphCommitNode implements the CommitNode interface.
-type graphCommitNode struct {
- // Hash for the Commit object
- hash plumbing.Hash
- // Index of the node in the commit graph file
- index int
-
- commitData *commitgraph.CommitData
- gci *graphCommitNodeIndex
-}
-
-// graphCommitNodeIndex is an index that can load CommitNode objects from both the commit
-// graph files and the object store.
-//
-// graphCommitNodeIndex implements the CommitNodeIndex interface
-type graphCommitNodeIndex struct {
- commitGraph commitgraph.Index
- s storer.EncodedObjectStorer
-}
-
-// NewGraphCommitNodeIndex returns CommitNodeIndex implementation that uses commit-graph
-// files as backing storage and falls back to object storage when necessary
-func NewGraphCommitNodeIndex(commitGraph commitgraph.Index, s storer.EncodedObjectStorer) CommitNodeIndex {
- return &graphCommitNodeIndex{commitGraph, s}
-}
-
-func (gci *graphCommitNodeIndex) Get(hash plumbing.Hash) (CommitNode, error) {
- // Check the commit graph first
- parentIndex, err := gci.commitGraph.GetIndexByHash(hash)
- if err == nil {
- parent, err := gci.commitGraph.GetCommitDataByIndex(parentIndex)
- if err != nil {
- return nil, err
- }
-
- return &graphCommitNode{
- hash: hash,
- index: parentIndex,
- commitData: parent,
- gci: gci,
- }, nil
- }
-
- // Fallback to loading full commit object
- commit, err := object.GetCommit(gci.s, hash)
- if err != nil {
- return nil, err
- }
-
- return &objectCommitNode{
- nodeIndex: gci,
- commit: commit,
- }, nil
-}
-
-func (c *graphCommitNode) ID() plumbing.Hash {
- return c.hash
-}
-
-func (c *graphCommitNode) Tree() (*object.Tree, error) {
- return object.GetTree(c.gci.s, c.commitData.TreeHash)
-}
-
-func (c *graphCommitNode) CommitTime() time.Time {
- return c.commitData.When
-}
-
-func (c *graphCommitNode) NumParents() int {
- return len(c.commitData.ParentIndexes)
-}
-
-func (c *graphCommitNode) ParentNodes() CommitNodeIter {
- return newParentgraphCommitNodeIter(c)
-}
-
-func (c *graphCommitNode) ParentNode(i int) (CommitNode, error) {
- if i < 0 || i >= len(c.commitData.ParentIndexes) {
- return nil, object.ErrParentNotFound
- }
-
- parent, err := c.gci.commitGraph.GetCommitDataByIndex(c.commitData.ParentIndexes[i])
- if err != nil {
- return nil, err
- }
-
- return &graphCommitNode{
- hash: c.commitData.ParentHashes[i],
- index: c.commitData.ParentIndexes[i],
- commitData: parent,
- gci: c.gci,
- }, nil
-}
-
-func (c *graphCommitNode) ParentHashes() []plumbing.Hash {
- return c.commitData.ParentHashes
-}
-
-func (c *graphCommitNode) Generation() uint64 {
- // If the commit-graph file was generated with older Git version that
- // set the generation to zero for every commit the generation assumption
- // is still valid. It is just less useful.
- return uint64(c.commitData.Generation)
-}
-
-func (c *graphCommitNode) Commit() (*object.Commit, error) {
- return object.GetCommit(c.gci.s, c.hash)
-}
-
-func (c *graphCommitNode) String() string {
- return fmt.Sprintf(
- "%s %s\nDate: %s",
- plumbing.CommitObject, c.ID(),
- c.CommitTime().Format(object.DateFormat),
- )
-}
+package commitgraph
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/go-git/go-git/v5/plumbing"
+ commitgraph "github.com/go-git/go-git/v5/plumbing/format/commitgraph/v2"
+ "github.com/go-git/go-git/v5/plumbing/object"
+ "github.com/go-git/go-git/v5/plumbing/storer"
+)
+
+// graphCommitNode is a reduced representation of Commit as presented in the commit
+// graph file (commitgraph.CommitData). It is useful mainly as an optimization for
+// walking the commit graphs.
+//
+// graphCommitNode implements the CommitNode interface.
+type graphCommitNode struct {
+ // Hash for the Commit object
+ hash plumbing.Hash
+ // Index of the node in the commit graph file
+ index uint32
+
+ commitData *commitgraph.CommitData
+ gci *graphCommitNodeIndex
+}
+
+// graphCommitNodeIndex is an index that can load CommitNode objects from both the commit
+// graph files and the object store.
+//
+// graphCommitNodeIndex implements the CommitNodeIndex interface
+type graphCommitNodeIndex struct {
+ commitGraph commitgraph.Index
+ s storer.EncodedObjectStorer
+}
+
+// NewGraphCommitNodeIndex returns a CommitNodeIndex implementation that uses commit-graph
+// files as backing storage and falls back to object storage when necessary.
+func NewGraphCommitNodeIndex(commitGraph commitgraph.Index, s storer.EncodedObjectStorer) CommitNodeIndex {
+ return &graphCommitNodeIndex{commitGraph, s}
+}
+
+func (gci *graphCommitNodeIndex) Get(hash plumbing.Hash) (CommitNode, error) {
+ if gci.commitGraph != nil {
+ // Check the commit graph first
+ parentIndex, err := gci.commitGraph.GetIndexByHash(hash)
+ if err == nil {
+ parent, err := gci.commitGraph.GetCommitDataByIndex(parentIndex)
+ if err != nil {
+ return nil, err
+ }
+
+ return &graphCommitNode{
+ hash: hash,
+ index: parentIndex,
+ commitData: parent,
+ gci: gci,
+ }, nil
+ }
+ }
+
+ // Fallback to loading full commit object
+ commit, err := object.GetCommit(gci.s, hash)
+ if err != nil {
+ return nil, err
+ }
+
+ return &objectCommitNode{
+ nodeIndex: gci,
+ commit: commit,
+ }, nil
+}
+
+func (c *graphCommitNode) ID() plumbing.Hash {
+ return c.hash
+}
+
+func (c *graphCommitNode) Tree() (*object.Tree, error) {
+ return object.GetTree(c.gci.s, c.commitData.TreeHash)
+}
+
+func (c *graphCommitNode) CommitTime() time.Time {
+ return c.commitData.When
+}
+
+func (c *graphCommitNode) NumParents() int {
+ return len(c.commitData.ParentIndexes)
+}
+
+func (c *graphCommitNode) ParentNodes() CommitNodeIter {
+ return newParentgraphCommitNodeIter(c)
+}
+
+func (c *graphCommitNode) ParentNode(i int) (CommitNode, error) {
+ if i < 0 || i >= len(c.commitData.ParentIndexes) {
+ return nil, object.ErrParentNotFound
+ }
+
+ parent, err := c.gci.commitGraph.GetCommitDataByIndex(c.commitData.ParentIndexes[i])
+ if err != nil {
+ return nil, err
+ }
+
+ return &graphCommitNode{
+ hash: c.commitData.ParentHashes[i],
+ index: c.commitData.ParentIndexes[i],
+ commitData: parent,
+ gci: c.gci,
+ }, nil
+}
+
+func (c *graphCommitNode) ParentHashes() []plumbing.Hash {
+ return c.commitData.ParentHashes
+}
+
+func (c *graphCommitNode) Generation() uint64 {
+	// If the commit-graph file was generated with an older Git version that
+	// set the generation to zero for every commit, the generation assumption
+	// is still valid. It is just less useful.
+ return c.commitData.Generation
+}
+
+func (c *graphCommitNode) GenerationV2() uint64 {
+	// If the commit-graph file was generated by a Git version that did not
+	// compute corrected commit dates, GenerationV2 is zero for every commit
+	// and callers fall back to Generation and the commit time.
+ return c.commitData.GenerationV2
+}
+
+func (c *graphCommitNode) Commit() (*object.Commit, error) {
+ return object.GetCommit(c.gci.s, c.hash)
+}
+
+func (c *graphCommitNode) String() string {
+ return fmt.Sprintf(
+ "%s %s\nDate: %s",
+ plumbing.CommitObject, c.ID(),
+ c.CommitTime().Format(object.DateFormat),
+ )
+}
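
A sketch of wiring the graph-backed index against a .git filesystem, assuming a billy.Filesystem for the .git directory; OpenChainOrFileIndex is the chain-aware opener exercised by the tests later in this patch, and commits missing from the graph are served by the object-storage fallback in Get:

package example

import (
    "github.com/go-git/go-billy/v5"
    "github.com/go-git/go-git/v5/plumbing/cache"
    commitgraph "github.com/go-git/go-git/v5/plumbing/format/commitgraph/v2"
    commitgraphobj "github.com/go-git/go-git/v5/plumbing/object/commitgraph"
    "github.com/go-git/go-git/v5/storage/filesystem"
)

// newNodeIndex opens the repository's commit-graph (single file or chain) and
// wraps it in a CommitNodeIndex; if no commit-graph is present it falls back
// to an index backed purely by the object store.
func newNodeIndex(dotgit billy.Filesystem) (commitgraphobj.CommitNodeIndex, error) {
    s := filesystem.NewStorage(dotgit, cache.NewObjectLRUDefault())

    fileIndex, err := commitgraph.OpenChainOrFileIndex(s.Filesystem())
    if err != nil {
        // No usable commit-graph: object storage only.
        return commitgraphobj.NewObjectCommitNodeIndex(s), nil
    }

    return commitgraphobj.NewGraphCommitNodeIndex(fileIndex, s), nil
}
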
diff --git a/plumbing/object/commitgraph/commitnode_object.go b/plumbing/object/commitgraph/commitnode_object.go
index bdf8cb7..7256bed 100644
--- a/plumbing/object/commitgraph/commitnode_object.go
+++ b/plumbing/object/commitgraph/commitnode_object.go
@@ -1,90 +1,97 @@
-package commitgraph
-
-import (
- "math"
- "time"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/object"
- "github.com/go-git/go-git/v5/plumbing/storer"
-)
-
-// objectCommitNode is a representation of Commit as presented in the GIT object format.
-//
-// objectCommitNode implements the CommitNode interface.
-type objectCommitNode struct {
- nodeIndex CommitNodeIndex
- commit *object.Commit
-}
-
-// NewObjectCommitNodeIndex returns CommitNodeIndex implementation that uses
-// only object storage to load the nodes
-func NewObjectCommitNodeIndex(s storer.EncodedObjectStorer) CommitNodeIndex {
- return &objectCommitNodeIndex{s}
-}
-
-func (oci *objectCommitNodeIndex) Get(hash plumbing.Hash) (CommitNode, error) {
- commit, err := object.GetCommit(oci.s, hash)
- if err != nil {
- return nil, err
- }
-
- return &objectCommitNode{
- nodeIndex: oci,
- commit: commit,
- }, nil
-}
-
-// objectCommitNodeIndex is an index that can load CommitNode objects only from the
-// object store.
-//
-// objectCommitNodeIndex implements the CommitNodeIndex interface
-type objectCommitNodeIndex struct {
- s storer.EncodedObjectStorer
-}
-
-func (c *objectCommitNode) CommitTime() time.Time {
- return c.commit.Committer.When
-}
-
-func (c *objectCommitNode) ID() plumbing.Hash {
- return c.commit.ID()
-}
-
-func (c *objectCommitNode) Tree() (*object.Tree, error) {
- return c.commit.Tree()
-}
-
-func (c *objectCommitNode) NumParents() int {
- return c.commit.NumParents()
-}
-
-func (c *objectCommitNode) ParentNodes() CommitNodeIter {
- return newParentgraphCommitNodeIter(c)
-}
-
-func (c *objectCommitNode) ParentNode(i int) (CommitNode, error) {
- if i < 0 || i >= len(c.commit.ParentHashes) {
- return nil, object.ErrParentNotFound
- }
-
- // Note: It's necessary to go through CommitNodeIndex here to ensure
- // that if the commit-graph file covers only part of the history we
- // start using it when that part is reached.
- return c.nodeIndex.Get(c.commit.ParentHashes[i])
-}
-
-func (c *objectCommitNode) ParentHashes() []plumbing.Hash {
- return c.commit.ParentHashes
-}
-
-func (c *objectCommitNode) Generation() uint64 {
- // Commit nodes representing objects outside of the commit graph can never
- // be reached by objects from the commit-graph thus we return the highest
- // possible value.
- return math.MaxUint64
-}
-
-func (c *objectCommitNode) Commit() (*object.Commit, error) {
- return c.commit, nil
-}
+package commitgraph
+
+import (
+ "math"
+ "time"
+
+ "github.com/go-git/go-git/v5/plumbing"
+ "github.com/go-git/go-git/v5/plumbing/object"
+ "github.com/go-git/go-git/v5/plumbing/storer"
+)
+
+// objectCommitNode is a representation of Commit as presented in the Git object format.
+//
+// objectCommitNode implements the CommitNode interface.
+type objectCommitNode struct {
+ nodeIndex CommitNodeIndex
+ commit *object.Commit
+}
+
+// NewObjectCommitNodeIndex returns a CommitNodeIndex implementation that uses
+// only the object storage to load the nodes.
+func NewObjectCommitNodeIndex(s storer.EncodedObjectStorer) CommitNodeIndex {
+ return &objectCommitNodeIndex{s}
+}
+
+func (oci *objectCommitNodeIndex) Get(hash plumbing.Hash) (CommitNode, error) {
+ commit, err := object.GetCommit(oci.s, hash)
+ if err != nil {
+ return nil, err
+ }
+
+ return &objectCommitNode{
+ nodeIndex: oci,
+ commit: commit,
+ }, nil
+}
+
+// objectCommitNodeIndex is an index that can load CommitNode objects only from the
+// object store.
+//
+// objectCommitNodeIndex implements the CommitNodeIndex interface
+type objectCommitNodeIndex struct {
+ s storer.EncodedObjectStorer
+}
+
+func (c *objectCommitNode) CommitTime() time.Time {
+ return c.commit.Committer.When
+}
+
+func (c *objectCommitNode) ID() plumbing.Hash {
+ return c.commit.ID()
+}
+
+func (c *objectCommitNode) Tree() (*object.Tree, error) {
+ return c.commit.Tree()
+}
+
+func (c *objectCommitNode) NumParents() int {
+ return c.commit.NumParents()
+}
+
+func (c *objectCommitNode) ParentNodes() CommitNodeIter {
+ return newParentgraphCommitNodeIter(c)
+}
+
+func (c *objectCommitNode) ParentNode(i int) (CommitNode, error) {
+ if i < 0 || i >= len(c.commit.ParentHashes) {
+ return nil, object.ErrParentNotFound
+ }
+
+ // Note: It's necessary to go through CommitNodeIndex here to ensure
+ // that if the commit-graph file covers only part of the history we
+ // start using it when that part is reached.
+ return c.nodeIndex.Get(c.commit.ParentHashes[i])
+}
+
+func (c *objectCommitNode) ParentHashes() []plumbing.Hash {
+ return c.commit.ParentHashes
+}
+
+func (c *objectCommitNode) Generation() uint64 {
+	// Commit nodes representing objects outside of the commit graph can never
+	// be reached by objects from the commit-graph, thus we return the highest
+	// possible value.
+ return math.MaxUint64
+}
+
+func (c *objectCommitNode) GenerationV2() uint64 {
+	// Commit nodes representing objects outside of the commit graph can never
+	// be reached by objects from the commit-graph, thus we return the highest
+	// possible value.
+ return math.MaxUint64
+}
+
+func (c *objectCommitNode) Commit() (*object.Commit, error) {
+ return c.commit, nil
+}
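
Because both Generation and GenerationV2 return math.MaxUint64 for nodes loaded from the object store, a caller can tell whether a node came from the commit-graph file; a small sketch, with inCommitGraph being an illustrative helper name:

package example

import (
    "math"

    "github.com/go-git/go-git/v5/plumbing/object/commitgraph"
)

// inCommitGraph reports whether the node was resolved from a commit-graph
// file rather than from a parsed commit object, using the MaxUint64 sentinel.
func inCommitGraph(n commitgraph.CommitNode) bool {
    return n.Generation() != math.MaxUint64
}
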
diff --git a/plumbing/object/commitgraph/commitnode_test.go b/plumbing/object/commitgraph/commitnode_test.go
index 6c9a643..441ff6f 100644
--- a/plumbing/object/commitgraph/commitnode_test.go
+++ b/plumbing/object/commitgraph/commitnode_test.go
@@ -1,148 +1,153 @@
-package commitgraph
-
-import (
- "path"
- "testing"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/cache"
- "github.com/go-git/go-git/v5/plumbing/format/commitgraph"
- "github.com/go-git/go-git/v5/plumbing/format/packfile"
- "github.com/go-git/go-git/v5/storage/filesystem"
-
- fixtures "github.com/go-git/go-git-fixtures/v4"
- . "gopkg.in/check.v1"
-)
-
-func Test(t *testing.T) { TestingT(t) }
-
-type CommitNodeSuite struct {
- fixtures.Suite
-}
-
-var _ = Suite(&CommitNodeSuite{})
-
-func unpackRepositry(f *fixtures.Fixture) *filesystem.Storage {
- storer := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault())
- p := f.Packfile()
- defer p.Close()
- packfile.UpdateObjectStorage(storer, p)
- return storer
-}
-
-func testWalker(c *C, nodeIndex CommitNodeIndex) {
- head, err := nodeIndex.Get(plumbing.NewHash("b9d69064b190e7aedccf84731ca1d917871f8a1c"))
- c.Assert(err, IsNil)
-
- iter := NewCommitNodeIterCTime(
- head,
- nil,
- nil,
- )
-
- var commits []CommitNode
- iter.ForEach(func(c CommitNode) error {
- commits = append(commits, c)
- return nil
- })
-
- c.Assert(commits, HasLen, 9)
-
- expected := []string{
- "b9d69064b190e7aedccf84731ca1d917871f8a1c",
- "6f6c5d2be7852c782be1dd13e36496dd7ad39560",
- "a45273fe2d63300e1962a9e26a6b15c276cd7082",
- "c0edf780dd0da6a65a7a49a86032fcf8a0c2d467",
- "bb13916df33ed23004c3ce9ed3b8487528e655c1",
- "03d2c021ff68954cf3ef0a36825e194a4b98f981",
- "ce275064ad67d51e99f026084e20827901a8361c",
- "e713b52d7e13807e87a002e812041f248db3f643",
- "347c91919944a68e9413581a1bc15519550a3afe",
- }
- for i, commit := range commits {
- c.Assert(commit.ID().String(), Equals, expected[i])
- }
-}
-
-func testParents(c *C, nodeIndex CommitNodeIndex) {
- merge3, err := nodeIndex.Get(plumbing.NewHash("6f6c5d2be7852c782be1dd13e36496dd7ad39560"))
- c.Assert(err, IsNil)
-
- var parents []CommitNode
- merge3.ParentNodes().ForEach(func(c CommitNode) error {
- parents = append(parents, c)
- return nil
- })
-
- c.Assert(parents, HasLen, 3)
-
- expected := []string{
- "ce275064ad67d51e99f026084e20827901a8361c",
- "bb13916df33ed23004c3ce9ed3b8487528e655c1",
- "a45273fe2d63300e1962a9e26a6b15c276cd7082",
- }
- for i, parent := range parents {
- c.Assert(parent.ID().String(), Equals, expected[i])
- }
-}
-
-func testCommitAndTree(c *C, nodeIndex CommitNodeIndex) {
- merge3node, err := nodeIndex.Get(plumbing.NewHash("6f6c5d2be7852c782be1dd13e36496dd7ad39560"))
- c.Assert(err, IsNil)
- merge3commit, err := merge3node.Commit()
- c.Assert(err, IsNil)
- c.Assert(merge3node.ID().String(), Equals, merge3commit.ID().String())
- tree, err := merge3node.Tree()
- c.Assert(err, IsNil)
- c.Assert(tree.ID().String(), Equals, merge3commit.TreeHash.String())
-}
-
-func (s *CommitNodeSuite) TestObjectGraph(c *C) {
- f := fixtures.ByTag("commit-graph").One()
- storer := unpackRepositry(f)
-
- nodeIndex := NewObjectCommitNodeIndex(storer)
- testWalker(c, nodeIndex)
- testParents(c, nodeIndex)
- testCommitAndTree(c, nodeIndex)
-}
-
-func (s *CommitNodeSuite) TestCommitGraph(c *C) {
- f := fixtures.ByTag("commit-graph").One()
- storer := unpackRepositry(f)
- reader, err := storer.Filesystem().Open(path.Join("objects", "info", "commit-graph"))
- c.Assert(err, IsNil)
- defer reader.Close()
- index, err := commitgraph.OpenFileIndex(reader)
- c.Assert(err, IsNil)
-
- nodeIndex := NewGraphCommitNodeIndex(index, storer)
- testWalker(c, nodeIndex)
- testParents(c, nodeIndex)
- testCommitAndTree(c, nodeIndex)
-}
-
-func (s *CommitNodeSuite) TestMixedGraph(c *C) {
- f := fixtures.ByTag("commit-graph").One()
- storer := unpackRepositry(f)
-
- // Take the commit-graph file and copy it to memory index without the last commit
- reader, err := storer.Filesystem().Open(path.Join("objects", "info", "commit-graph"))
- c.Assert(err, IsNil)
- defer reader.Close()
- fileIndex, err := commitgraph.OpenFileIndex(reader)
- c.Assert(err, IsNil)
- memoryIndex := commitgraph.NewMemoryIndex()
- for i, hash := range fileIndex.Hashes() {
- if hash.String() != "b9d69064b190e7aedccf84731ca1d917871f8a1c" {
- node, err := fileIndex.GetCommitDataByIndex(i)
- c.Assert(err, IsNil)
- memoryIndex.Add(hash, node)
- }
- }
-
- nodeIndex := NewGraphCommitNodeIndex(memoryIndex, storer)
- testWalker(c, nodeIndex)
- testParents(c, nodeIndex)
- testCommitAndTree(c, nodeIndex)
-}
+package commitgraph
+
+import (
+ "path"
+ "testing"
+
+ "github.com/go-git/go-git/v5/plumbing"
+ "github.com/go-git/go-git/v5/plumbing/cache"
+ commitgraph "github.com/go-git/go-git/v5/plumbing/format/commitgraph/v2"
+ "github.com/go-git/go-git/v5/plumbing/format/packfile"
+ "github.com/go-git/go-git/v5/storage/filesystem"
+
+ fixtures "github.com/go-git/go-git-fixtures/v4"
+ . "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type CommitNodeSuite struct {
+ fixtures.Suite
+}
+
+var _ = Suite(&CommitNodeSuite{})
+
+func unpackRepository(f *fixtures.Fixture) *filesystem.Storage {
+ storer := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault())
+ p := f.Packfile()
+ defer p.Close()
+ packfile.UpdateObjectStorage(storer, p)
+ return storer
+}
+
+func testWalker(c *C, nodeIndex CommitNodeIndex) {
+ head, err := nodeIndex.Get(plumbing.NewHash("b9d69064b190e7aedccf84731ca1d917871f8a1c"))
+ c.Assert(err, IsNil)
+
+ iter := NewCommitNodeIterCTime(
+ head,
+ nil,
+ nil,
+ )
+
+ var commits []CommitNode
+ iter.ForEach(func(c CommitNode) error {
+ commits = append(commits, c)
+ return nil
+ })
+
+ c.Assert(commits, HasLen, 9)
+
+ expected := []string{
+ "b9d69064b190e7aedccf84731ca1d917871f8a1c",
+ "6f6c5d2be7852c782be1dd13e36496dd7ad39560",
+ "a45273fe2d63300e1962a9e26a6b15c276cd7082",
+ "c0edf780dd0da6a65a7a49a86032fcf8a0c2d467",
+ "bb13916df33ed23004c3ce9ed3b8487528e655c1",
+ "03d2c021ff68954cf3ef0a36825e194a4b98f981",
+ "ce275064ad67d51e99f026084e20827901a8361c",
+ "e713b52d7e13807e87a002e812041f248db3f643",
+ "347c91919944a68e9413581a1bc15519550a3afe",
+ }
+ for i, commit := range commits {
+ c.Assert(commit.ID().String(), Equals, expected[i])
+ }
+}
+
+func testParents(c *C, nodeIndex CommitNodeIndex) {
+ merge3, err := nodeIndex.Get(plumbing.NewHash("6f6c5d2be7852c782be1dd13e36496dd7ad39560"))
+ c.Assert(err, IsNil)
+
+ var parents []CommitNode
+ merge3.ParentNodes().ForEach(func(c CommitNode) error {
+ parents = append(parents, c)
+ return nil
+ })
+
+ c.Assert(parents, HasLen, 3)
+
+ expected := []string{
+ "ce275064ad67d51e99f026084e20827901a8361c",
+ "bb13916df33ed23004c3ce9ed3b8487528e655c1",
+ "a45273fe2d63300e1962a9e26a6b15c276cd7082",
+ }
+ for i, parent := range parents {
+ c.Assert(parent.ID().String(), Equals, expected[i])
+ }
+}
+
+func testCommitAndTree(c *C, nodeIndex CommitNodeIndex) {
+ merge3node, err := nodeIndex.Get(plumbing.NewHash("6f6c5d2be7852c782be1dd13e36496dd7ad39560"))
+ c.Assert(err, IsNil)
+ merge3commit, err := merge3node.Commit()
+ c.Assert(err, IsNil)
+ c.Assert(merge3node.ID().String(), Equals, merge3commit.ID().String())
+ tree, err := merge3node.Tree()
+ c.Assert(err, IsNil)
+ c.Assert(tree.ID().String(), Equals, merge3commit.TreeHash.String())
+}
+
+func (s *CommitNodeSuite) TestObjectGraph(c *C) {
+ f := fixtures.ByTag("commit-graph").One()
+ storer := unpackRepository(f)
+
+ nodeIndex := NewObjectCommitNodeIndex(storer)
+ testWalker(c, nodeIndex)
+ testParents(c, nodeIndex)
+ testCommitAndTree(c, nodeIndex)
+}
+
+func (s *CommitNodeSuite) TestCommitGraph(c *C) {
+ f := fixtures.ByTag("commit-graph").One()
+ storer := unpackRepository(f)
+ reader, err := storer.Filesystem().Open(path.Join("objects", "info", "commit-graph"))
+ c.Assert(err, IsNil)
+ defer reader.Close()
+ index, err := commitgraph.OpenFileIndex(reader)
+ c.Assert(err, IsNil)
+ defer index.Close()
+
+ nodeIndex := NewGraphCommitNodeIndex(index, storer)
+ testWalker(c, nodeIndex)
+ testParents(c, nodeIndex)
+ testCommitAndTree(c, nodeIndex)
+}
+
+func (s *CommitNodeSuite) TestMixedGraph(c *C) {
+ f := fixtures.ByTag("commit-graph").One()
+ storer := unpackRepository(f)
+
+ // Take the commit-graph file and copy it to memory index without the last commit
+ reader, err := storer.Filesystem().Open(path.Join("objects", "info", "commit-graph"))
+ c.Assert(err, IsNil)
+ defer reader.Close()
+ fileIndex, err := commitgraph.OpenFileIndex(reader)
+ c.Assert(err, IsNil)
+ defer fileIndex.Close()
+
+ memoryIndex := commitgraph.NewMemoryIndex()
+ defer memoryIndex.Close()
+
+ for i, hash := range fileIndex.Hashes() {
+ if hash.String() != "b9d69064b190e7aedccf84731ca1d917871f8a1c" {
+ node, err := fileIndex.GetCommitDataByIndex(uint32(i))
+ c.Assert(err, IsNil)
+ memoryIndex.Add(hash, node)
+ }
+ }
+
+ nodeIndex := NewGraphCommitNodeIndex(memoryIndex, storer)
+ testWalker(c, nodeIndex)
+ testParents(c, nodeIndex)
+ testCommitAndTree(c, nodeIndex)
+}
diff --git a/plumbing/object/commitgraph/commitnode_walker_author_order.go b/plumbing/object/commitgraph/commitnode_walker_author_order.go
new file mode 100644
index 0000000..f5b23cc
--- /dev/null
+++ b/plumbing/object/commitgraph/commitnode_walker_author_order.go
@@ -0,0 +1,61 @@
+package commitgraph
+
+import (
+ "github.com/go-git/go-git/v5/plumbing"
+
+ "github.com/emirpasic/gods/trees/binaryheap"
+)
+
+// NewCommitNodeIterAuthorDateOrder returns a CommitNodeIter that walks the commit history,
+// starting at the given commit and visiting its parents in author date order, but with the
+// constraint that no parent is emitted before its children are emitted.
+//
+// This matches `git log --author-date-order`.
+//
+// This ordering requires the full commit objects to be loaded into memory, so it is
+// likely to be slower than the other orderings.
+func NewCommitNodeIterAuthorDateOrder(c CommitNode,
+ seenExternal map[plumbing.Hash]bool,
+ ignore []plumbing.Hash,
+) CommitNodeIter {
+ seen := make(map[plumbing.Hash]struct{})
+ for _, h := range ignore {
+ seen[h] = struct{}{}
+ }
+ for h, ext := range seenExternal {
+ if ext {
+ seen[h] = struct{}{}
+ }
+ }
+ inCounts := make(map[plumbing.Hash]int)
+
+ exploreHeap := &commitNodeHeap{binaryheap.NewWith(generationAndDateOrderComparator)}
+ exploreHeap.Push(c)
+
+ visitHeap := &commitNodeHeap{binaryheap.NewWith(func(left, right interface{}) int {
+ leftCommit, err := left.(CommitNode).Commit()
+ if err != nil {
+ return -1
+ }
+ rightCommit, err := right.(CommitNode).Commit()
+ if err != nil {
+ return -1
+ }
+
+ switch {
+ case rightCommit.Author.When.Before(leftCommit.Author.When):
+ return -1
+ case leftCommit.Author.When.Before(rightCommit.Author.When):
+ return 1
+ }
+ return 0
+ })}
+ visitHeap.Push(c)
+
+ return &commitNodeIteratorTopological{
+ exploreStack: exploreHeap,
+ visitStack: visitHeap,
+ inCounts: inCounts,
+ ignore: seen,
+ }
+}
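
A usage sketch; this ordering resolves full commit objects to read author times, which is why it is slower than the committer-time and generation based walkers. logAuthorDateOrder is an illustrative helper name:

package example

import (
    "fmt"

    "github.com/go-git/go-git/v5/plumbing/object/commitgraph"
)

// logAuthorDateOrder walks history from head in author date order, the
// rough equivalent of `git log --author-date-order`.
func logAuthorDateOrder(head commitgraph.CommitNode) error {
    iter := commitgraph.NewCommitNodeIterAuthorDateOrder(head, nil, nil)
    defer iter.Close()

    return iter.ForEach(func(n commitgraph.CommitNode) error {
        fmt.Println(n.ID())
        return nil
    })
}
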
diff --git a/plumbing/object/commitgraph/commitnode_walker_ctime.go b/plumbing/object/commitgraph/commitnode_walker_ctime.go
index 281f10b..3ab9e6e 100644
--- a/plumbing/object/commitgraph/commitnode_walker_ctime.go
+++ b/plumbing/object/commitgraph/commitnode_walker_ctime.go
@@ -1,105 +1,106 @@
-package commitgraph
-
-import (
- "io"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/storer"
-
- "github.com/emirpasic/gods/trees/binaryheap"
-)
-
-type commitNodeIteratorByCTime struct {
- heap *binaryheap.Heap
- seenExternal map[plumbing.Hash]bool
- seen map[plumbing.Hash]bool
-}
-
-// NewCommitNodeIterCTime returns a CommitNodeIter that walks the commit history,
-// starting at the given commit and visiting its parents while preserving Committer Time order.
-// this appears to be the closest order to `git log`
-// The given callback will be called for each visited commit. Each commit will
-// be visited only once. If the callback returns an error, walking will stop
-// and will return the error. Other errors might be returned if the history
-// cannot be traversed (e.g. missing objects). Ignore allows to skip some
-// commits from being iterated.
-func NewCommitNodeIterCTime(
- c CommitNode,
- seenExternal map[plumbing.Hash]bool,
- ignore []plumbing.Hash,
-) CommitNodeIter {
- seen := make(map[plumbing.Hash]bool)
- for _, h := range ignore {
- seen[h] = true
- }
-
- heap := binaryheap.NewWith(func(a, b interface{}) int {
- if a.(CommitNode).CommitTime().Before(b.(CommitNode).CommitTime()) {
- return 1
- }
- return -1
- })
-
- heap.Push(c)
-
- return &commitNodeIteratorByCTime{
- heap: heap,
- seenExternal: seenExternal,
- seen: seen,
- }
-}
-
-func (w *commitNodeIteratorByCTime) Next() (CommitNode, error) {
- var c CommitNode
- for {
- cIn, ok := w.heap.Pop()
- if !ok {
- return nil, io.EOF
- }
- c = cIn.(CommitNode)
- cID := c.ID()
-
- if w.seen[cID] || w.seenExternal[cID] {
- continue
- }
-
- w.seen[cID] = true
-
- for i, h := range c.ParentHashes() {
- if w.seen[h] || w.seenExternal[h] {
- continue
- }
- pc, err := c.ParentNode(i)
- if err != nil {
- return nil, err
- }
- w.heap.Push(pc)
- }
-
- return c, nil
- }
-}
-
-func (w *commitNodeIteratorByCTime) ForEach(cb func(CommitNode) error) error {
- for {
- c, err := w.Next()
- if err == io.EOF {
- break
- }
- if err != nil {
- return err
- }
-
- err = cb(c)
- if err == storer.ErrStop {
- break
- }
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (w *commitNodeIteratorByCTime) Close() {}
+package commitgraph
+
+import (
+ "io"
+
+ "github.com/go-git/go-git/v5/plumbing"
+ "github.com/go-git/go-git/v5/plumbing/storer"
+
+ "github.com/emirpasic/gods/trees/binaryheap"
+)
+
+type commitNodeIteratorByCTime struct {
+ heap *binaryheap.Heap
+ seenExternal map[plumbing.Hash]bool
+ seen map[plumbing.Hash]bool
+}
+
+// NewCommitNodeIterCTime returns a CommitNodeIter that walks the commit history,
+// starting at the given commit and visiting its parents while preserving Committer Time order.
+// This is close to the order of `git log`, but it does not guarantee topological order and
+// will occasionally order commits incorrectly.
+// The given callback will be called for each visited commit. Each commit will
+// be visited only once. If the callback returns an error, walking will stop
+// and will return the error. Other errors might be returned if the history
+// cannot be traversed (e.g. missing objects). The ignore list allows some
+// commits to be skipped during iteration.
+func NewCommitNodeIterCTime(
+ c CommitNode,
+ seenExternal map[plumbing.Hash]bool,
+ ignore []plumbing.Hash,
+) CommitNodeIter {
+ seen := make(map[plumbing.Hash]bool)
+ for _, h := range ignore {
+ seen[h] = true
+ }
+
+ heap := binaryheap.NewWith(func(a, b interface{}) int {
+ if a.(CommitNode).CommitTime().Before(b.(CommitNode).CommitTime()) {
+ return 1
+ }
+ return -1
+ })
+
+ heap.Push(c)
+
+ return &commitNodeIteratorByCTime{
+ heap: heap,
+ seenExternal: seenExternal,
+ seen: seen,
+ }
+}
+
+func (w *commitNodeIteratorByCTime) Next() (CommitNode, error) {
+ var c CommitNode
+ for {
+ cIn, ok := w.heap.Pop()
+ if !ok {
+ return nil, io.EOF
+ }
+ c = cIn.(CommitNode)
+ cID := c.ID()
+
+ if w.seen[cID] || w.seenExternal[cID] {
+ continue
+ }
+
+ w.seen[cID] = true
+
+ for i, h := range c.ParentHashes() {
+ if w.seen[h] || w.seenExternal[h] {
+ continue
+ }
+ pc, err := c.ParentNode(i)
+ if err != nil {
+ return nil, err
+ }
+ w.heap.Push(pc)
+ }
+
+ return c, nil
+ }
+}
+
+func (w *commitNodeIteratorByCTime) ForEach(cb func(CommitNode) error) error {
+ for {
+ c, err := w.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return err
+ }
+
+ err = cb(c)
+ if err == storer.ErrStop {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (w *commitNodeIteratorByCTime) Close() {}
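
A usage sketch with the ignore list populated; boundary commits are marked as seen up front, so neither they nor history reachable only through them is emitted. logSince is an illustrative helper name:

package example

import (
    "fmt"

    "github.com/go-git/go-git/v5/plumbing"
    "github.com/go-git/go-git/v5/plumbing/object/commitgraph"
)

// logSince walks history from head in committer-time order, skipping the
// given boundary commits and anything reachable only through them.
func logSince(head commitgraph.CommitNode, boundary []plumbing.Hash) error {
    iter := commitgraph.NewCommitNodeIterCTime(head, nil, boundary)
    defer iter.Close()

    return iter.ForEach(func(n commitgraph.CommitNode) error {
        fmt.Println(n.CommitTime().Format("2006-01-02"), n.ID())
        return nil
    })
}
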
diff --git a/plumbing/object/commitgraph/commitnode_walker_date_order.go b/plumbing/object/commitgraph/commitnode_walker_date_order.go
new file mode 100644
index 0000000..659a4fa
--- /dev/null
+++ b/plumbing/object/commitgraph/commitnode_walker_date_order.go
@@ -0,0 +1,41 @@
+package commitgraph
+
+import (
+ "github.com/go-git/go-git/v5/plumbing"
+
+ "github.com/emirpasic/gods/trees/binaryheap"
+)
+
+// NewCommitNodeIterDateOrder returns a CommitNodeIter that walks the commit history,
+// starting at the given commit and visiting its parents in Committer Time and Generation order,
+// but with the constraint that no parent is emitted before its children are emitted.
+//
+// This matches `git log --date-order`
+func NewCommitNodeIterDateOrder(c CommitNode,
+ seenExternal map[plumbing.Hash]bool,
+ ignore []plumbing.Hash,
+) CommitNodeIter {
+ seen := make(map[plumbing.Hash]struct{})
+ for _, h := range ignore {
+ seen[h] = struct{}{}
+ }
+ for h, ext := range seenExternal {
+ if ext {
+ seen[h] = struct{}{}
+ }
+ }
+ inCounts := make(map[plumbing.Hash]int)
+
+ exploreHeap := &commitNodeHeap{binaryheap.NewWith(generationAndDateOrderComparator)}
+ exploreHeap.Push(c)
+
+ visitHeap := &commitNodeHeap{binaryheap.NewWith(generationAndDateOrderComparator)}
+ visitHeap.Push(c)
+
+ return &commitNodeIteratorTopological{
+ exploreStack: exploreHeap,
+ visitStack: visitHeap,
+ inCounts: inCounts,
+ ignore: seen,
+ }
+}
diff --git a/plumbing/object/commitgraph/commitnode_walker_helper.go b/plumbing/object/commitgraph/commitnode_walker_helper.go
new file mode 100644
index 0000000..c54f6ca
--- /dev/null
+++ b/plumbing/object/commitgraph/commitnode_walker_helper.go
@@ -0,0 +1,164 @@
+package commitgraph
+
+import (
+ "math"
+
+ "github.com/go-git/go-git/v5/plumbing"
+
+ "github.com/emirpasic/gods/trees/binaryheap"
+)
+
+// commitNodeStackable represents a common interface between heaps and stacks
+type commitNodeStackable interface {
+ Push(c CommitNode)
+ Pop() (CommitNode, bool)
+ Peek() (CommitNode, bool)
+ Size() int
+}
+
+// commitNodeLifo is a stack implementation using an underlying slice
+type commitNodeLifo struct {
+ l []CommitNode
+}
+
+// Push pushes a new CommitNode to the stack
+func (l *commitNodeLifo) Push(c CommitNode) {
+ l.l = append(l.l, c)
+}
+
+// Pop pops the most recently added CommitNode from the stack
+func (l *commitNodeLifo) Pop() (CommitNode, bool) {
+ if len(l.l) == 0 {
+ return nil, false
+ }
+ c := l.l[len(l.l)-1]
+ l.l = l.l[:len(l.l)-1]
+ return c, true
+}
+
+// Peek returns the most recently added CommitNode from the stack without removing it
+func (l *commitNodeLifo) Peek() (CommitNode, bool) {
+ if len(l.l) == 0 {
+ return nil, false
+ }
+ return l.l[len(l.l)-1], true
+}
+
+// Size returns the number of CommitNodes in the stack
+func (l *commitNodeLifo) Size() int {
+ return len(l.l)
+}
+
+// commitNodeHeap is a stack implementation using an underlying binary heap
+type commitNodeHeap struct {
+ *binaryheap.Heap
+}
+
+// Push pushes a new CommitNode to the heap
+func (h *commitNodeHeap) Push(c CommitNode) {
+ h.Heap.Push(c)
+}
+
+// Pop removes top element on heap and returns it, or nil if heap is empty.
+// Second return parameter is true, unless the heap was empty and there was nothing to pop.
+func (h *commitNodeHeap) Pop() (CommitNode, bool) {
+ c, ok := h.Heap.Pop()
+ if !ok {
+ return nil, false
+ }
+ return c.(CommitNode), true
+}
+
+// Peek returns top element on the heap without removing it, or nil if heap is empty.
+// Second return parameter is true, unless the heap was empty and there was nothing to peek.
+func (h *commitNodeHeap) Peek() (CommitNode, bool) {
+ c, ok := h.Heap.Peek()
+ if !ok {
+ return nil, false
+ }
+ return c.(CommitNode), true
+}
+
+// Size returns number of elements within the heap.
+func (h *commitNodeHeap) Size() int {
+ return h.Heap.Size()
+}
+
+// generationAndDateOrderComparator compares two CommitNode objects based on their generation and commit time.
+// If the left CommitNode object is in a higher generation or is newer than the right one, it returns -1.
+// If the left CommitNode object is in a lower generation or is older than the right one, it returns 1.
+// If the two CommitNode objects have the same commit time and generation, it returns 0.
+func generationAndDateOrderComparator(left, right interface{}) int {
+ leftCommit := left.(CommitNode)
+ rightCommit := right.(CommitNode)
+
+ // if GenerationV2 is MaxUint64, then the node is not in the graph
+ if leftCommit.GenerationV2() == math.MaxUint64 {
+ if rightCommit.GenerationV2() == math.MaxUint64 {
+ switch {
+ case rightCommit.CommitTime().Before(leftCommit.CommitTime()):
+ return -1
+ case leftCommit.CommitTime().Before(rightCommit.CommitTime()):
+ return 1
+ }
+ return 0
+ }
+ // left is not in the graph, but right is, so it is newer than the right
+ return -1
+ }
+
+	if rightCommit.GenerationV2() == math.MaxUint64 {
+		// right is not in the graph, but left is, so the right is treated as newer
+		return 1
+ }
+
+ if leftCommit.GenerationV2() == 0 || rightCommit.GenerationV2() == 0 {
+ // We need to assess generation and date
+ if leftCommit.Generation() < rightCommit.Generation() {
+ return 1
+ }
+ if leftCommit.Generation() > rightCommit.Generation() {
+ return -1
+ }
+ switch {
+ case rightCommit.CommitTime().Before(leftCommit.CommitTime()):
+ return -1
+ case leftCommit.CommitTime().Before(rightCommit.CommitTime()):
+ return 1
+ }
+ return 0
+ }
+
+ if leftCommit.GenerationV2() < rightCommit.GenerationV2() {
+ return 1
+ }
+ if leftCommit.GenerationV2() > rightCommit.GenerationV2() {
+ return -1
+ }
+
+ return 0
+}
+
+// composeIgnores composes the ignore list with the provided seenExternal list
+func composeIgnores(ignore []plumbing.Hash, seenExternal map[plumbing.Hash]bool) map[plumbing.Hash]struct{} {
+ if len(ignore) == 0 {
+ seen := make(map[plumbing.Hash]struct{})
+ for h, ext := range seenExternal {
+ if ext {
+ seen[h] = struct{}{}
+ }
+ }
+ return seen
+ }
+
+ seen := make(map[plumbing.Hash]struct{})
+ for _, h := range ignore {
+ seen[h] = struct{}{}
+ }
+ for h, ext := range seenExternal {
+ if ext {
+ seen[h] = struct{}{}
+ }
+ }
+ return seen
+}
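
An in-package sketch of how the walkers consume this comparator through commitNodeHeap: nodes pop newest first, by corrected commit date when available, and by generation plus committer time otherwise. drainNewestFirst is a hypothetical helper, not part of the package:

package commitgraph

import "github.com/emirpasic/gods/trees/binaryheap"

// drainNewestFirst is a hypothetical helper: it pushes all nodes onto a heap
// ordered by generationAndDateOrderComparator and pops them newest first.
func drainNewestFirst(nodes []CommitNode) []CommitNode {
    h := &commitNodeHeap{binaryheap.NewWith(generationAndDateOrderComparator)}
    for _, n := range nodes {
        h.Push(n)
    }

    out := make([]CommitNode, 0, len(nodes))
    for {
        n, ok := h.Pop()
        if !ok {
            return out
        }
        out = append(out, n)
    }
}
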
diff --git a/plumbing/object/commitgraph/commitnode_walker_test.go b/plumbing/object/commitgraph/commitnode_walker_test.go
new file mode 100644
index 0000000..1e09c0b
--- /dev/null
+++ b/plumbing/object/commitgraph/commitnode_walker_test.go
@@ -0,0 +1,187 @@
+package commitgraph
+
+import (
+ "strings"
+
+ "github.com/go-git/go-git/v5/plumbing"
+ commitgraph "github.com/go-git/go-git/v5/plumbing/format/commitgraph/v2"
+
+ fixtures "github.com/go-git/go-git-fixtures/v4"
+ . "gopkg.in/check.v1"
+)
+
+func (s *CommitNodeSuite) TestCommitNodeIter(c *C) {
+ f := fixtures.ByTag("commit-graph-chain-2").One()
+
+ storer := unpackRepository(f)
+
+ index, err := commitgraph.OpenChainOrFileIndex(storer.Filesystem())
+ c.Assert(err, IsNil)
+
+ nodeIndex := NewGraphCommitNodeIndex(index, storer)
+
+ head, err := nodeIndex.Get(plumbing.NewHash("ec6f456c0e8c7058a29611429965aa05c190b54b"))
+ c.Assert(err, IsNil)
+
+ testTopoOrder(c, head)
+ testDateOrder(c, head)
+ testAuthorDateOrder(c, head)
+}
+
+func testTopoOrder(c *C, head CommitNode) {
+ iter := NewCommitNodeIterTopoOrder(
+ head,
+ nil,
+ nil,
+ )
+
+ var commits []string
+ iter.ForEach(func(c CommitNode) error {
+ commits = append(commits, c.ID().String())
+ return nil
+ })
+ c.Assert(commits, DeepEquals, strings.Split(`ec6f456c0e8c7058a29611429965aa05c190b54b
+d82f291cde9987322c8a0c81a325e1ba6159684c
+3048d280d2d5b258d9e582a226ff4bbed34fd5c9
+27aa8cdd2431068606741a589383c02c149ea625
+fa058d42fa3bc53f39108a56dad67157169b2191
+6c629843a1750a27c9af01ed2985f362f619c47a
+d10a0e7c1f340a6cfc14540a5f8c508ce7e2eabf
+d0a18ccd8eea3bdabc76d6dc5420af1ea30aae9f
+cf2874632223220e0445abf0a7806dc772c0b37a
+758ac33217f092bfcded4ad4774954ac054c9609
+214e1dca024fb6da5ed65564d2de734df5dc2127
+70923099e61fa33f0bc5256d2f938fa44c4df10e
+bcaa1ac5644b16f1febb72f31e204720b7bb8934
+e1d8866ffa78fa16d2f39b0ba5344a7269ee5371
+2275fa7d0c75d20103f90b0e1616937d5a9fc5e6
+bdd9a92789d4a86b20a8d3df462df373f41acf23
+b359f11ea09e642695edcd114b463da4395b10c1
+6f43e8933ba3c04072d5d104acc6118aac3e52ee
+ccafe8bd5f9dbfb8b98b0da03ced29608dcfdeec
+939814f341fdd5d35e81a3845a33c4fedb19d2d2
+5f5ad88bf2babe506f927d64d2b7a1e1493dc2ae
+a2014124ca3b3f9ff28fbab0a83ce3c71bf4622e
+77906b653c3eb8a1cd5bd7254e161c00c6086d83
+465cba710284204f9851854587c2887c247222db
+b9471b13256703d3f5eb88b280b4a16ce325ec1b
+62925030859646daeeaf5a4d386a0c41e00dda8a
+5f56aea0ca8b74215a5b982bca32236e1e28c76b
+23148841baa5dbce48f6adcb7ddf83dcd97debb3
+c336d16298a017486c4164c40f8acb28afe64e84
+31eae7b619d166c366bf5df4991f04ba8cebea0a
+d2a38b4a5965d529566566640519d03d2bd10f6c
+b977a025ca21e3b5ca123d8093bd7917694f6da7
+35b585759cbf29f8ec428ef89da20705d59f99ec
+c2bbf9fe8009b22d0f390f3c8c3f13937067590f
+fc9f0643b21cfe571046e27e0c4565f3a1ee96c8
+c088fd6a7e1a38e9d5a9815265cb575bb08d08ff
+5fddbeb678bd2c36c5e5c891ab8f2b143ced5baf
+5d7303c49ac984a9fec60523f2d5297682e16646`, "\n"))
+}
+
+func testDateOrder(c *C, head CommitNode) {
+ iter := NewCommitNodeIterDateOrder(
+ head,
+ nil,
+ nil,
+ )
+
+ var commits []string
+ iter.ForEach(func(c CommitNode) error {
+ commits = append(commits, c.ID().String())
+ return nil
+ })
+
+ c.Assert(commits, DeepEquals, strings.Split(`ec6f456c0e8c7058a29611429965aa05c190b54b
+3048d280d2d5b258d9e582a226ff4bbed34fd5c9
+d82f291cde9987322c8a0c81a325e1ba6159684c
+27aa8cdd2431068606741a589383c02c149ea625
+fa058d42fa3bc53f39108a56dad67157169b2191
+d0a18ccd8eea3bdabc76d6dc5420af1ea30aae9f
+6c629843a1750a27c9af01ed2985f362f619c47a
+cf2874632223220e0445abf0a7806dc772c0b37a
+d10a0e7c1f340a6cfc14540a5f8c508ce7e2eabf
+758ac33217f092bfcded4ad4774954ac054c9609
+214e1dca024fb6da5ed65564d2de734df5dc2127
+70923099e61fa33f0bc5256d2f938fa44c4df10e
+bcaa1ac5644b16f1febb72f31e204720b7bb8934
+e1d8866ffa78fa16d2f39b0ba5344a7269ee5371
+2275fa7d0c75d20103f90b0e1616937d5a9fc5e6
+bdd9a92789d4a86b20a8d3df462df373f41acf23
+b359f11ea09e642695edcd114b463da4395b10c1
+6f43e8933ba3c04072d5d104acc6118aac3e52ee
+ccafe8bd5f9dbfb8b98b0da03ced29608dcfdeec
+939814f341fdd5d35e81a3845a33c4fedb19d2d2
+5f5ad88bf2babe506f927d64d2b7a1e1493dc2ae
+a2014124ca3b3f9ff28fbab0a83ce3c71bf4622e
+77906b653c3eb8a1cd5bd7254e161c00c6086d83
+465cba710284204f9851854587c2887c247222db
+b9471b13256703d3f5eb88b280b4a16ce325ec1b
+62925030859646daeeaf5a4d386a0c41e00dda8a
+5f56aea0ca8b74215a5b982bca32236e1e28c76b
+23148841baa5dbce48f6adcb7ddf83dcd97debb3
+c336d16298a017486c4164c40f8acb28afe64e84
+31eae7b619d166c366bf5df4991f04ba8cebea0a
+b977a025ca21e3b5ca123d8093bd7917694f6da7
+d2a38b4a5965d529566566640519d03d2bd10f6c
+35b585759cbf29f8ec428ef89da20705d59f99ec
+c2bbf9fe8009b22d0f390f3c8c3f13937067590f
+fc9f0643b21cfe571046e27e0c4565f3a1ee96c8
+c088fd6a7e1a38e9d5a9815265cb575bb08d08ff
+5fddbeb678bd2c36c5e5c891ab8f2b143ced5baf
+5d7303c49ac984a9fec60523f2d5297682e16646`, "\n"))
+}
+
+func testAuthorDateOrder(c *C, head CommitNode) {
+ iter := NewCommitNodeIterAuthorDateOrder(
+ head,
+ nil,
+ nil,
+ )
+
+ var commits []string
+ iter.ForEach(func(c CommitNode) error {
+ commits = append(commits, c.ID().String())
+ return nil
+ })
+
+ c.Assert(commits, DeepEquals, strings.Split(`ec6f456c0e8c7058a29611429965aa05c190b54b
+3048d280d2d5b258d9e582a226ff4bbed34fd5c9
+d82f291cde9987322c8a0c81a325e1ba6159684c
+27aa8cdd2431068606741a589383c02c149ea625
+fa058d42fa3bc53f39108a56dad67157169b2191
+d0a18ccd8eea3bdabc76d6dc5420af1ea30aae9f
+6c629843a1750a27c9af01ed2985f362f619c47a
+cf2874632223220e0445abf0a7806dc772c0b37a
+d10a0e7c1f340a6cfc14540a5f8c508ce7e2eabf
+758ac33217f092bfcded4ad4774954ac054c9609
+214e1dca024fb6da5ed65564d2de734df5dc2127
+70923099e61fa33f0bc5256d2f938fa44c4df10e
+bcaa1ac5644b16f1febb72f31e204720b7bb8934
+e1d8866ffa78fa16d2f39b0ba5344a7269ee5371
+2275fa7d0c75d20103f90b0e1616937d5a9fc5e6
+bdd9a92789d4a86b20a8d3df462df373f41acf23
+b359f11ea09e642695edcd114b463da4395b10c1
+6f43e8933ba3c04072d5d104acc6118aac3e52ee
+ccafe8bd5f9dbfb8b98b0da03ced29608dcfdeec
+939814f341fdd5d35e81a3845a33c4fedb19d2d2
+5f5ad88bf2babe506f927d64d2b7a1e1493dc2ae
+a2014124ca3b3f9ff28fbab0a83ce3c71bf4622e
+77906b653c3eb8a1cd5bd7254e161c00c6086d83
+465cba710284204f9851854587c2887c247222db
+b9471b13256703d3f5eb88b280b4a16ce325ec1b
+5f56aea0ca8b74215a5b982bca32236e1e28c76b
+62925030859646daeeaf5a4d386a0c41e00dda8a
+23148841baa5dbce48f6adcb7ddf83dcd97debb3
+c336d16298a017486c4164c40f8acb28afe64e84
+31eae7b619d166c366bf5df4991f04ba8cebea0a
+b977a025ca21e3b5ca123d8093bd7917694f6da7
+d2a38b4a5965d529566566640519d03d2bd10f6c
+35b585759cbf29f8ec428ef89da20705d59f99ec
+c2bbf9fe8009b22d0f390f3c8c3f13937067590f
+fc9f0643b21cfe571046e27e0c4565f3a1ee96c8
+c088fd6a7e1a38e9d5a9815265cb575bb08d08ff
+5fddbeb678bd2c36c5e5c891ab8f2b143ced5baf
+5d7303c49ac984a9fec60523f2d5297682e16646`, "\n"))
+}
diff --git a/plumbing/object/commitgraph/commitnode_walker_topo_order.go b/plumbing/object/commitgraph/commitnode_walker_topo_order.go
new file mode 100644
index 0000000..29f4bb7
--- /dev/null
+++ b/plumbing/object/commitgraph/commitnode_walker_topo_order.go
@@ -0,0 +1,161 @@
+package commitgraph
+
+import (
+ "io"
+
+ "github.com/go-git/go-git/v5/plumbing"
+ "github.com/go-git/go-git/v5/plumbing/storer"
+
+ "github.com/emirpasic/gods/trees/binaryheap"
+)
+
+type commitNodeIteratorTopological struct {
+ exploreStack commitNodeStackable
+ visitStack commitNodeStackable
+ inCounts map[plumbing.Hash]int
+
+ ignore map[plumbing.Hash]struct{}
+}
+
+// NewCommitNodeIterTopoOrder returns a CommitNodeIter that walks the commit history,
+// starting at the given commit and visiting its parents in a topological order but
+// with the constraint that no parent is emitted before its children are emitted.
+//
+// This matches `git log --topo-order`
+func NewCommitNodeIterTopoOrder(c CommitNode,
+ seenExternal map[plumbing.Hash]bool,
+ ignore []plumbing.Hash,
+) CommitNodeIter {
+ seen := composeIgnores(ignore, seenExternal)
+ inCounts := make(map[plumbing.Hash]int)
+
+ heap := &commitNodeHeap{binaryheap.NewWith(generationAndDateOrderComparator)}
+ heap.Push(c)
+
+ lifo := &commitNodeLifo{make([]CommitNode, 0, 8)}
+ lifo.Push(c)
+
+ return &commitNodeIteratorTopological{
+ exploreStack: heap,
+ visitStack: lifo,
+ inCounts: inCounts,
+ ignore: seen,
+ }
+}
+
+func (iter *commitNodeIteratorTopological) Next() (CommitNode, error) {
+ var next CommitNode
+ for {
+ var ok bool
+ next, ok = iter.visitStack.Pop()
+ if !ok {
+ return nil, io.EOF
+ }
+
+ if iter.inCounts[next.ID()] == 0 {
+ break
+ }
+ }
+
+ minimumLevel, generationV2 := next.GenerationV2(), true
+ if minimumLevel == 0 {
+ minimumLevel, generationV2 = next.Generation(), false
+ }
+
+ parents := make([]CommitNode, 0, len(next.ParentHashes()))
+ for i := range next.ParentHashes() {
+ pc, err := next.ParentNode(i)
+ if err != nil {
+ return nil, err
+ }
+
+ parents = append(parents, pc)
+
+ if generationV2 {
+ if pc.GenerationV2() < minimumLevel {
+ minimumLevel = pc.GenerationV2()
+ }
+ continue
+ }
+
+ if pc.Generation() < minimumLevel {
+ minimumLevel = pc.Generation()
+ }
+ }
+
+ // EXPLORE
+ for {
+ toExplore, ok := iter.exploreStack.Peek()
+ if !ok {
+ break
+ }
+
+ if toExplore.ID() != next.ID() && iter.exploreStack.Size() == 1 {
+ break
+ }
+ if generationV2 {
+ if toExplore.GenerationV2() < minimumLevel {
+ break
+ }
+ } else {
+ if toExplore.Generation() < minimumLevel {
+ break
+ }
+ }
+
+ iter.exploreStack.Pop()
+ for i, h := range toExplore.ParentHashes() {
+ if _, has := iter.ignore[h]; has {
+ continue
+ }
+ iter.inCounts[h]++
+
+ if iter.inCounts[h] == 1 {
+ pc, err := toExplore.ParentNode(i)
+ if err != nil {
+ return nil, err
+ }
+ iter.exploreStack.Push(pc)
+ }
+ }
+ }
+
+ // VISIT
+ for i, h := range next.ParentHashes() {
+ if _, has := iter.ignore[h]; has {
+ continue
+ }
+ iter.inCounts[h]--
+
+ if iter.inCounts[h] == 0 {
+ iter.visitStack.Push(parents[i])
+ }
+ }
+ delete(iter.inCounts, next.ID())
+
+ return next, nil
+}
+
+func (iter *commitNodeIteratorTopological) ForEach(cb func(CommitNode) error) error {
+ for {
+ obj, err := iter.Next()
+ if err != nil {
+ if err == io.EOF {
+ return nil
+ }
+
+ return err
+ }
+
+ if err := cb(obj); err != nil {
+ if err == storer.ErrStop {
+ return nil
+ }
+
+ return err
+ }
+ }
+}
+
+func (iter *commitNodeIteratorTopological) Close() {
+}
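
A sketch of early termination: ForEach treats storer.ErrStop as a clean stop, so a reachability probe can bail out as soon as the target is emitted. reachableInTopoOrder is an illustrative helper name:

package example

import (
    "github.com/go-git/go-git/v5/plumbing"
    "github.com/go-git/go-git/v5/plumbing/object/commitgraph"
    "github.com/go-git/go-git/v5/plumbing/storer"
)

// reachableInTopoOrder walks from head in topological order and stops as soon
// as target is emitted.
func reachableInTopoOrder(head commitgraph.CommitNode, target plumbing.Hash) (bool, error) {
    iter := commitgraph.NewCommitNodeIterTopoOrder(head, nil, nil)
    defer iter.Close()

    found := false
    err := iter.ForEach(func(n commitgraph.CommitNode) error {
        if n.ID() == target {
            found = true
            return storer.ErrStop
        }
        return nil
    })
    return found, err
}
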
diff --git a/plumbing/object/patch.go b/plumbing/object/patch.go
index 06bc35b..dd8fef4 100644
--- a/plumbing/object/patch.go
+++ b/plumbing/object/patch.go
@@ -317,8 +317,8 @@ func getFileStatsFromFilePatches(filePatches []fdiff.FilePatch) FileStats {
// File is deleted.
cs.Name = from.Path()
} else if from.Path() != to.Path() {
- // File is renamed. Not supported.
- // cs.Name = fmt.Sprintf("%s => %s", from.Path(), to.Path())
+ // File is renamed.
+ cs.Name = fmt.Sprintf("%s => %s", from.Path(), to.Path())
} else {
cs.Name = from.Path()
}
diff --git a/plumbing/object/patch_stats_test.go b/plumbing/object/patch_stats_test.go
new file mode 100644
index 0000000..f393c30
--- /dev/null
+++ b/plumbing/object/patch_stats_test.go
@@ -0,0 +1,54 @@
+package object_test
+
+import (
+ "time"
+
+ "github.com/go-git/go-billy/v5/memfs"
+ "github.com/go-git/go-billy/v5/util"
+ "github.com/go-git/go-git/v5"
+ "github.com/go-git/go-git/v5/plumbing/object"
+ "github.com/go-git/go-git/v5/storage/memory"
+
+ fixtures "github.com/go-git/go-git-fixtures/v4"
+ . "gopkg.in/check.v1"
+)
+
+type PatchStatsSuite struct {
+ fixtures.Suite
+}
+
+var _ = Suite(&PatchStatsSuite{})
+
+func (s *PatchStatsSuite) TestStatsWithRename(c *C) {
+ cm := &git.CommitOptions{
+ Author: &object.Signature{Name: "Foo", Email: "foo@example.local", When: time.Now()},
+ }
+
+ fs := memfs.New()
+ r, err := git.Init(memory.NewStorage(), fs)
+ c.Assert(err, IsNil)
+
+ w, err := r.Worktree()
+ c.Assert(err, IsNil)
+
+ util.WriteFile(fs, "foo", []byte("foo\nbar\n"), 0644)
+
+ _, err = w.Add("foo")
+ c.Assert(err, IsNil)
+
+ _, err = w.Commit("foo\n", cm)
+ c.Assert(err, IsNil)
+
+ _, err = w.Move("foo", "bar")
+ c.Assert(err, IsNil)
+
+ hash, err := w.Commit("rename foo to bar", cm)
+ c.Assert(err, IsNil)
+
+ commit, err := r.CommitObject(hash)
+ c.Assert(err, IsNil)
+
+ fileStats, err := commit.Stats()
+ c.Assert(err, IsNil)
+ c.Assert(fileStats[0].Name, Equals, "foo => bar")
+}
diff --git a/plumbing/object/signature_test.go b/plumbing/object/signature_test.go
index 1bdb1d1..3b20cde 100644
--- a/plumbing/object/signature_test.go
+++ b/plumbing/object/signature_test.go
@@ -178,3 +178,10 @@ signed tag`),
})
}
}
+
+func FuzzParseSignedBytes(f *testing.F) {
+
+ f.Fuzz(func(t *testing.T, input []byte) {
+ parseSignedBytes(input)
+ })
+}
diff --git a/plumbing/object/tree_test.go b/plumbing/object/tree_test.go
index d9dad47..bb5fc7a 100644
--- a/plumbing/object/tree_test.go
+++ b/plumbing/object/tree_test.go
@@ -4,6 +4,7 @@ import (
"context"
"errors"
"io"
+ "testing"
fixtures "github.com/go-git/go-git-fixtures/v4"
"github.com/go-git/go-git/v5/plumbing"
@@ -1623,3 +1624,19 @@ func (s *TreeSuite) TestTreeDecodeReadBug(c *C) {
c.Assert(err, IsNil)
c.Assert(entriesEquals(obtained.Entries, expected.Entries), Equals, true)
}
+
+func FuzzDecode(f *testing.F) {
+
+ f.Fuzz(func(t *testing.T, input []byte) {
+
+ obj := &SortReadObject{
+ t: plumbing.TreeObject,
+ h: plumbing.ZeroHash,
+ cont: input,
+ sz: int64(len(input)),
+ }
+
+ newTree := &Tree{}
+ newTree.Decode(obj)
+ })
+}
diff --git a/plumbing/protocol/packp/common.go b/plumbing/protocol/packp/common.go
index fef50a4..a858323 100644
--- a/plumbing/protocol/packp/common.go
+++ b/plumbing/protocol/packp/common.go
@@ -48,6 +48,11 @@ func isFlush(payload []byte) bool {
return len(payload) == 0
}
+var (
+ // ErrNilWriter is returned when a nil writer is passed to the encoder.
+ ErrNilWriter = fmt.Errorf("nil writer")
+)
+
// ErrUnexpectedData represents an unexpected data decoding a message
type ErrUnexpectedData struct {
Msg string
diff --git a/plumbing/protocol/packp/gitproto.go b/plumbing/protocol/packp/gitproto.go
new file mode 100644
index 0000000..0b7ff8f
--- /dev/null
+++ b/plumbing/protocol/packp/gitproto.go
@@ -0,0 +1,120 @@
+package packp
+
+import (
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/go-git/go-git/v5/plumbing/format/pktline"
+)
+
+var (
+	// ErrInvalidGitProtoRequest is returned by Decode and Encode when the
+	// request is not a valid git protocol request.
+ ErrInvalidGitProtoRequest = fmt.Errorf("invalid git protocol request")
+)
+
+// GitProtoRequest is a command request for the git protocol.
+// It is used to send the command, endpoint, and extra parameters to the
+// remote.
+// See https://git-scm.com/docs/pack-protocol#_git_transport
+type GitProtoRequest struct {
+ RequestCommand string
+ Pathname string
+
+ // Optional
+ Host string
+
+ // Optional
+ ExtraParams []string
+}
+
+// validate validates the request.
+func (g *GitProtoRequest) validate() error {
+ if g.RequestCommand == "" {
+ return fmt.Errorf("%w: empty request command", ErrInvalidGitProtoRequest)
+ }
+
+ if g.Pathname == "" {
+ return fmt.Errorf("%w: empty pathname", ErrInvalidGitProtoRequest)
+ }
+
+ return nil
+}
+
+// Encode encodes the request into the writer.
+func (g *GitProtoRequest) Encode(w io.Writer) error {
+ if w == nil {
+ return ErrNilWriter
+ }
+
+ if err := g.validate(); err != nil {
+ return err
+ }
+
+ p := pktline.NewEncoder(w)
+ req := fmt.Sprintf("%s %s\x00", g.RequestCommand, g.Pathname)
+ if host := g.Host; host != "" {
+ req += fmt.Sprintf("host=%s\x00", host)
+ }
+
+ if len(g.ExtraParams) > 0 {
+ req += "\x00"
+ for _, param := range g.ExtraParams {
+ req += param + "\x00"
+ }
+ }
+
+ if err := p.Encode([]byte(req)); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Decode decodes the request from the reader.
+func (g *GitProtoRequest) Decode(r io.Reader) error {
+ s := pktline.NewScanner(r)
+ if !s.Scan() {
+ err := s.Err()
+ if err == nil {
+ return ErrInvalidGitProtoRequest
+ }
+ return err
+ }
+
+ line := string(s.Bytes())
+ if len(line) == 0 {
+ return io.EOF
+ }
+
+ if line[len(line)-1] != 0 {
+ return fmt.Errorf("%w: missing null terminator", ErrInvalidGitProtoRequest)
+ }
+
+ parts := strings.SplitN(line, " ", 2)
+ if len(parts) != 2 {
+ return fmt.Errorf("%w: short request", ErrInvalidGitProtoRequest)
+ }
+
+ g.RequestCommand = parts[0]
+ params := strings.Split(parts[1], string(null))
+ if len(params) < 1 {
+ return fmt.Errorf("%w: missing pathname", ErrInvalidGitProtoRequest)
+ }
+
+ g.Pathname = params[0]
+ if len(params) > 1 {
+ g.Host = strings.TrimPrefix(params[1], "host=")
+ }
+
+ if len(params) > 2 {
+ for _, param := range params[2:] {
+ if param != "" {
+ g.ExtraParams = append(g.ExtraParams, param)
+ }
+ }
+ }
+
+ return nil
+}
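
A round-trip sketch of the request type; the command, path, and host below are illustrative values, and Encode writes a single pkt-line that Decode parses back:

package example

import (
    "bytes"
    "fmt"

    "github.com/go-git/go-git/v5/plumbing/protocol/packp"
)

// roundTrip encodes a git transport request as a pkt-line and decodes it back.
func roundTrip() error {
    req := packp.GitProtoRequest{
        RequestCommand: "git-upload-pack",
        Pathname:       "/project.git",
        Host:           "example.com",
    }

    var buf bytes.Buffer
    if err := req.Encode(&buf); err != nil {
        return err
    }

    var decoded packp.GitProtoRequest
    if err := decoded.Decode(&buf); err != nil {
        return err
    }

    fmt.Println(decoded.RequestCommand, decoded.Pathname, decoded.Host)
    return nil
}
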
diff --git a/plumbing/protocol/packp/gitproto_test.go b/plumbing/protocol/packp/gitproto_test.go
new file mode 100644
index 0000000..9cf1049
--- /dev/null
+++ b/plumbing/protocol/packp/gitproto_test.go
@@ -0,0 +1,99 @@
+package packp
+
+import (
+ "bytes"
+ "testing"
+)
+
+func TestEncodeEmptyGitProtoRequest(t *testing.T) {
+ var buf bytes.Buffer
+ var p GitProtoRequest
+ err := p.Encode(&buf)
+ if err == nil {
+ t.Fatal("expected error")
+ }
+}
+
+func TestEncodeGitProtoRequest(t *testing.T) {
+ var buf bytes.Buffer
+ p := GitProtoRequest{
+ RequestCommand: "command",
+ Pathname: "pathname",
+ Host: "host",
+ ExtraParams: []string{"param1", "param2"},
+ }
+ err := p.Encode(&buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expected := "002ecommand pathname\x00host=host\x00\x00param1\x00param2\x00"
+ if buf.String() != expected {
+ t.Fatalf("expected %q, got %q", expected, buf.String())
+ }
+}
+
+func TestEncodeInvalidGitProtoRequest(t *testing.T) {
+ var buf bytes.Buffer
+ p := GitProtoRequest{
+ RequestCommand: "command",
+ }
+ err := p.Encode(&buf)
+ if err == nil {
+ t.Fatal("expected error")
+ }
+}
+
+func TestDecodeEmptyGitProtoRequest(t *testing.T) {
+ var buf bytes.Buffer
+ var p GitProtoRequest
+ err := p.Decode(&buf)
+ if err == nil {
+ t.Fatal("expected error")
+ }
+}
+
+func TestDecodeGitProtoRequest(t *testing.T) {
+ var buf bytes.Buffer
+ buf.WriteString("002ecommand pathname\x00host=host\x00\x00param1\x00param2\x00")
+ var p GitProtoRequest
+ err := p.Decode(&buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expected := GitProtoRequest{
+ RequestCommand: "command",
+ Pathname: "pathname",
+ Host: "host",
+ ExtraParams: []string{"param1", "param2"},
+ }
+ if p.RequestCommand != expected.RequestCommand {
+ t.Fatalf("expected %q, got %q", expected.RequestCommand, p.RequestCommand)
+ }
+ if p.Pathname != expected.Pathname {
+ t.Fatalf("expected %q, got %q", expected.Pathname, p.Pathname)
+ }
+ if p.Host != expected.Host {
+ t.Fatalf("expected %q, got %q", expected.Host, p.Host)
+ }
+ if len(p.ExtraParams) != len(expected.ExtraParams) {
+ t.Fatalf("expected %d, got %d", len(expected.ExtraParams), len(p.ExtraParams))
+ }
+}
+
+func TestDecodeInvalidGitProtoRequest(t *testing.T) {
+ var buf bytes.Buffer
+ buf.WriteString("0026command \x00host=host\x00\x00param1\x00param2")
+ var p GitProtoRequest
+ err := p.Decode(&buf)
+ if err == nil {
+ t.Fatal("expected error")
+ }
+}
+
+func TestValidateEmptyGitProtoRequest(t *testing.T) {
+ var p GitProtoRequest
+ err := p.validate()
+ if err == nil {
+ t.Fatal("expected error")
+ }
+}
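For context on the wire format exercised by these tests, here is a brief standalone sketch (not part of the patch) that uses only the GitProtoRequest API introduced above; the command, path, and host values are placeholder examples.

package main

import (
	"bytes"
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/protocol/packp"
)

func main() {
	// Encode produces the first pkt-line of a git:// connection: the
	// command, then the path and host separated by NUL bytes.
	req := packp.GitProtoRequest{
		RequestCommand: "git-upload-pack",
		Pathname:       "/repo.git",
		Host:           "example.com",
	}

	var buf bytes.Buffer
	if err := req.Encode(&buf); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", buf.String())

	// Decode reads the same pkt-line back into a request struct.
	var decoded packp.GitProtoRequest
	if err := decoded.Decode(&buf); err != nil {
		panic(err)
	}
	fmt.Println(decoded.RequestCommand, decoded.Pathname, decoded.Host)
}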
diff --git a/plumbing/protocol/packp/srvresp.go b/plumbing/protocol/packp/srvresp.go
index 8cd0a72..a9ddb53 100644
--- a/plumbing/protocol/packp/srvresp.go
+++ b/plumbing/protocol/packp/srvresp.go
@@ -101,12 +101,14 @@ func (r *ServerResponse) decodeLine(line []byte) error {
return fmt.Errorf("unexpected flush")
}
- if bytes.Equal(line[0:3], ack) {
- return r.decodeACKLine(line)
- }
+ if len(line) >= 3 {
+ if bytes.Equal(line[0:3], ack) {
+ return r.decodeACKLine(line)
+ }
- if bytes.Equal(line[0:3], nak) {
- return nil
+ if bytes.Equal(line[0:3], nak) {
+ return nil
+ }
}
return fmt.Errorf("unexpected content %q", string(line))
diff --git a/plumbing/protocol/packp/srvresp_test.go b/plumbing/protocol/packp/srvresp_test.go
index aa0af52..b7270e7 100644
--- a/plumbing/protocol/packp/srvresp_test.go
+++ b/plumbing/protocol/packp/srvresp_test.go
@@ -3,6 +3,7 @@ package packp
import (
"bufio"
"bytes"
+ "fmt"
"github.com/go-git/go-git/v5/plumbing"
@@ -23,6 +24,32 @@ func (s *ServerResponseSuite) TestDecodeNAK(c *C) {
c.Assert(sr.ACKs, HasLen, 0)
}
+func (s *ServerResponseSuite) TestDecodeNewLine(c *C) {
+ raw := "\n"
+
+ sr := &ServerResponse{}
+ err := sr.Decode(bufio.NewReader(bytes.NewBufferString(raw)), false)
+ c.Assert(err, NotNil)
+ c.Assert(err.Error(), Equals, "invalid pkt-len found")
+}
+
+func (s *ServerResponseSuite) TestDecodeEmpty(c *C) {
+ raw := ""
+
+ sr := &ServerResponse{}
+ err := sr.Decode(bufio.NewReader(bytes.NewBufferString(raw)), false)
+ c.Assert(err, IsNil)
+}
+
+func (s *ServerResponseSuite) TestDecodePartial(c *C) {
+ raw := "000600\n"
+
+ sr := &ServerResponse{}
+ err := sr.Decode(bufio.NewReader(bytes.NewBufferString(raw)), false)
+ c.Assert(err, NotNil)
+ c.Assert(err.Error(), Equals, fmt.Sprintf("unexpected content %q", "00"))
+}
+
func (s *ServerResponseSuite) TestDecodeACK(c *C) {
raw := "0031ACK 6ecf0ef2c2dffb796033e5a02219af86ec6584e5\n"
diff --git a/plumbing/protocol/packp/ulreq_decode.go b/plumbing/protocol/packp/ulreq_decode.go
index 895a3bf..3da2998 100644
--- a/plumbing/protocol/packp/ulreq_decode.go
+++ b/plumbing/protocol/packp/ulreq_decode.go
@@ -43,7 +43,7 @@ func (d *ulReqDecoder) Decode(v *UploadRequest) error {
return d.err
}
-// fills out the parser stiky error
+// fills out the parser sticky error
func (d *ulReqDecoder) error(format string, a ...interface{}) {
msg := fmt.Sprintf(
"pkt-line %d: %s", d.nLine,
diff --git a/plumbing/protocol/packp/ulreq_decode_test.go b/plumbing/protocol/packp/ulreq_decode_test.go
index efcc7b4..7658922 100644
--- a/plumbing/protocol/packp/ulreq_decode_test.go
+++ b/plumbing/protocol/packp/ulreq_decode_test.go
@@ -398,7 +398,7 @@ func (s *UlReqDecodeSuite) TestDeepenCommits(c *C) {
c.Assert(int(commits), Equals, 1234)
}
-func (s *UlReqDecodeSuite) TestDeepenCommitsInfiniteInplicit(c *C) {
+func (s *UlReqDecodeSuite) TestDeepenCommitsInfiniteImplicit(c *C) {
payloads := []string{
"want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
"deepen 0",
diff --git a/plumbing/protocol/packp/uppackresp_test.go b/plumbing/protocol/packp/uppackresp_test.go
index 8fbf924..ec56507 100644
--- a/plumbing/protocol/packp/uppackresp_test.go
+++ b/plumbing/protocol/packp/uppackresp_test.go
@@ -3,6 +3,7 @@ package packp
import (
"bytes"
"io"
+ "testing"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
@@ -128,3 +129,14 @@ func (s *UploadPackResponseSuite) TestEncodeMultiACK(c *C) {
b := bytes.NewBuffer(nil)
c.Assert(res.Encode(b), NotNil)
}
+
+func FuzzDecoder(f *testing.F) {
+
+ f.Fuzz(func(t *testing.T, input []byte) {
+ req := NewUploadPackRequest()
+ res := NewUploadPackResponse(req)
+ defer res.Close()
+
+ res.Decode(io.NopCloser(bytes.NewReader(input)))
+ })
+}
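As a usage note, this fuzz target can be run locally with Go's native fuzzing support (Go 1.18 or later), for example: go test -run='^$' -fuzz=FuzzDecoder ./plumbing/protocol/packp. The package path is simply the directory this test lives in.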
diff --git a/plumbing/reference.go b/plumbing/reference.go
index 5a67f69..ddba930 100644
--- a/plumbing/reference.go
+++ b/plumbing/reference.go
@@ -3,6 +3,7 @@ package plumbing
import (
"errors"
"fmt"
+ "regexp"
"strings"
)
@@ -29,6 +30,9 @@ var RefRevParseRules = []string{
var (
ErrReferenceNotFound = errors.New("reference not found")
+
+ // ErrInvalidReferenceName is returned when a reference name is invalid.
+ ErrInvalidReferenceName = errors.New("invalid reference name")
)
// ReferenceType reference type's
@@ -124,6 +128,91 @@ func (r ReferenceName) Short() string {
return res
}
+var (
+ ctrlSeqs = regexp.MustCompile(`[\000-\037\177]`)
+)
+
+// Validate validates a reference name.
+// This follows the git-check-ref-format rules.
+// See https://git-scm.com/docs/git-check-ref-format
+//
+// It is important to note that this function does not check if the reference
+// exists in the repository.
+// It only checks if the reference name is valid.
+// This function does not support the --refspec-pattern, --normalize, and
+// --allow-onelevel options.
+//
+// Git imposes the following rules on how references are named:
+//
+// 1. They can include slash / for hierarchical (directory) grouping, but no
+// slash-separated component can begin with a dot . or end with the
+// sequence .lock.
+// 2. They must contain at least one /. This enforces the presence of a
+// category like heads/, tags/ etc. but the actual names are not
+// restricted. If the --allow-onelevel option is used, this rule is
+// waived.
+// 3. They cannot have two consecutive dots .. anywhere.
+// 4. They cannot have ASCII control characters (i.e. bytes whose values are
+// lower than \040, or \177 DEL), space, tilde ~, caret ^, or colon :
+// anywhere.
+// 5. They cannot have question-mark ?, asterisk *, or open bracket [
+// anywhere. See the --refspec-pattern option below for an exception to this
+// rule.
+// 6. They cannot begin or end with a slash / or contain multiple consecutive
+// slashes (see the --normalize option below for an exception to this rule).
+// 7. They cannot end with a dot ..
+// 8. They cannot contain a sequence @{.
+// 9. They cannot be the single character @.
+// 10. They cannot contain a \.
+func (r ReferenceName) Validate() error {
+ s := string(r)
+ if len(s) == 0 {
+ return ErrInvalidReferenceName
+ }
+
+ // HEAD is a special case
+ if r == HEAD {
+ return nil
+ }
+
+ // rule 7
+ if strings.HasSuffix(s, ".") {
+ return ErrInvalidReferenceName
+ }
+
+ // rule 2
+ parts := strings.Split(s, "/")
+ if len(parts) < 2 {
+ return ErrInvalidReferenceName
+ }
+
+ isBranch := r.IsBranch()
+ isTag := r.IsTag()
+ for _, part := range parts {
+ // rule 6
+ if len(part) == 0 {
+ return ErrInvalidReferenceName
+ }
+
+ if strings.HasPrefix(part, ".") || // rule 1
+ strings.Contains(part, "..") || // rule 3
+ ctrlSeqs.MatchString(part) || // rule 4
+ strings.ContainsAny(part, "~^:?*[ \t\n") || // rule 4 & 5
+ strings.Contains(part, "@{") || // rule 8
+ part == "@" || // rule 9
+ strings.Contains(part, "\\") || // rule 10
+ strings.HasSuffix(part, ".lock") { // rule 1
+ return ErrInvalidReferenceName
+ }
+
+ if (isBranch || isTag) && strings.HasPrefix(part, "-") { // branches & tags can't start with -
+ return ErrInvalidReferenceName
+ }
+ }
+
+ return nil
+}
+
const (
HEAD ReferenceName = "HEAD"
Master ReferenceName = "refs/heads/master"
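As a rough illustration of the rules listed above, here is a small standalone sketch (not part of the patch) that calls the new ReferenceName.Validate on a few hand-picked names; the names are arbitrary examples.

package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing"
)

func main() {
	names := []plumbing.ReferenceName{
		"refs/heads/feature/login", // valid: hierarchical name under refs/heads
		"HEAD",                     // valid: special-cased single-level name
		"refs/heads/foo..bar",      // invalid: rule 3, consecutive dots
		"refs/heads/foo.lock",      // invalid: rule 1, component ends with .lock
		"refs/heads/foo[bar",       // invalid: rule 5, open bracket
	}

	for _, name := range names {
		if err := name.Validate(); err != nil {
			fmt.Printf("%-28s -> %v\n", name, err)
			continue
		}
		fmt.Printf("%-28s -> ok\n", name)
	}
}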
diff --git a/plumbing/reference_test.go b/plumbing/reference_test.go
index 04dfef9..ce57075 100644
--- a/plumbing/reference_test.go
+++ b/plumbing/reference_test.go
@@ -103,6 +103,65 @@ func (s *ReferenceSuite) TestIsTag(c *C) {
c.Assert(r.IsTag(), Equals, true)
}
+func (s *ReferenceSuite) TestValidReferenceNames(c *C) {
+ valid := []ReferenceName{
+ "refs/heads/master",
+ "refs/notes/commits",
+ "refs/remotes/origin/master",
+ "HEAD",
+ "refs/tags/v3.1.1",
+ "refs/pulls/1/head",
+ "refs/pulls/1/merge",
+ "refs/pulls/1/abc.123",
+ "refs/pulls",
+ "refs/-", // should this be allowed?
+ }
+ for _, v := range valid {
+ c.Assert(v.Validate(), IsNil)
+ }
+
+ invalid := []ReferenceName{
+ "refs",
+ "refs/",
+ "refs//",
+ "refs/heads/\\",
+ "refs/heads/\\foo",
+ "refs/heads/\\foo/bar",
+ "abc",
+ "",
+ "refs/heads/ ",
+ "refs/heads/ /",
+ "refs/heads/ /foo",
+ "refs/heads/.",
+ "refs/heads/..",
+ "refs/heads/foo..",
+ "refs/heads/foo.lock",
+ "refs/heads/foo@{bar}",
+ "refs/heads/foo[",
+ "refs/heads/foo~",
+ "refs/heads/foo^",
+ "refs/heads/foo:",
+ "refs/heads/foo?",
+ "refs/heads/foo*",
+ "refs/heads/foo[bar",
+ "refs/heads/foo\t",
+ "refs/heads/@",
+ "refs/heads/@{bar}",
+ "refs/heads/\n",
+ "refs/heads/-foo",
+ "refs/heads/foo..bar",
+ "refs/heads/-",
+ "refs/tags/-",
+ "refs/tags/-foo",
+ }
+
+ for i, v := range invalid {
+ comment := Commentf("invalid reference name case %d: %s", i, v)
+ c.Assert(v.Validate(), NotNil, comment)
+ c.Assert(v.Validate(), ErrorMatches, "invalid reference name", comment)
+ }
+}
+
func benchMarkReferenceString(r *Reference, b *testing.B) {
for n := 0; n < b.N; n++ {
_ = r.String()
diff --git a/plumbing/serverinfo/serverinfo.go b/plumbing/serverinfo/serverinfo.go
new file mode 100644
index 0000000..d7ea7ef
--- /dev/null
+++ b/plumbing/serverinfo/serverinfo.go
@@ -0,0 +1,94 @@
+package serverinfo
+
+import (
+ "fmt"
+
+ "github.com/go-git/go-billy/v5"
+ "github.com/go-git/go-git/v5"
+ "github.com/go-git/go-git/v5/internal/reference"
+ "github.com/go-git/go-git/v5/plumbing"
+ "github.com/go-git/go-git/v5/plumbing/object"
+ "github.com/go-git/go-git/v5/plumbing/storer"
+ "github.com/go-git/go-git/v5/storage"
+)
+
+// UpdateServerInfo updates the server info files in the repository.
+//
+// It generates a list of available refs for the repository.
+// Used by the git http transport (dumb). For more information, refer to:
+// https://git-scm.com/book/id/v2/Git-Internals-Transfer-Protocols#_the_dumb_protocol
+func UpdateServerInfo(s storage.Storer, fs billy.Filesystem) error {
+ pos, ok := s.(storer.PackedObjectStorer)
+ if !ok {
+ return git.ErrPackedObjectsNotSupported
+ }
+
+ infoRefs, err := fs.Create("info/refs")
+ if err != nil {
+ return err
+ }
+
+ defer infoRefs.Close()
+
+ refsIter, err := s.IterReferences()
+ if err != nil {
+ return err
+ }
+
+ defer refsIter.Close()
+
+ var refs []*plumbing.Reference
+ if err := refsIter.ForEach(func(ref *plumbing.Reference) error {
+ refs = append(refs, ref)
+ return nil
+ }); err != nil {
+ return err
+ }
+
+ reference.Sort(refs)
+ for _, ref := range refs {
+ name := ref.Name()
+ hash := ref.Hash()
+ switch ref.Type() {
+ case plumbing.SymbolicReference:
+ if name == plumbing.HEAD {
+ continue
+ }
+ ref, err := s.Reference(ref.Target())
+ if err != nil {
+ return err
+ }
+
+ hash = ref.Hash()
+ fallthrough
+ case plumbing.HashReference:
+ fmt.Fprintf(infoRefs, "%s\t%s\n", hash, name)
+ if name.IsTag() {
+ tag, err := object.GetTag(s, hash)
+ if err == nil {
+ fmt.Fprintf(infoRefs, "%s\t%s^{}\n", tag.Target, name)
+ }
+ }
+ }
+ }
+
+ infoPacks, err := fs.Create("objects/info/packs")
+ if err != nil {
+ return err
+ }
+
+ defer infoPacks.Close()
+
+ packs, err := pos.ObjectPacks()
+ if err != nil {
+ return err
+ }
+
+ for _, p := range packs {
+ fmt.Fprintf(infoPacks, "P pack-%s.pack\n", p)
+ }
+
+ fmt.Fprintln(infoPacks)
+
+ return nil
+}
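To show how this might be wired up in practice, here is a minimal sketch (not part of the patch) that regenerates the dumb-protocol files for a bare repository on disk; the repository path is a placeholder.

package main

import (
	"github.com/go-git/go-billy/v5/osfs"
	"github.com/go-git/go-git/v5/plumbing/cache"
	"github.com/go-git/go-git/v5/plumbing/serverinfo"
	"github.com/go-git/go-git/v5/storage/filesystem"
)

func main() {
	// For a bare repository the repository directory itself holds the
	// object database, so the same filesystem serves both purposes.
	fs := osfs.New("/srv/git/project.git")
	storer := filesystem.NewStorage(fs, cache.NewObjectLRUDefault())

	// Regenerates info/refs and objects/info/packs, similar to what
	// `git update-server-info` does after a push.
	if err := serverinfo.UpdateServerInfo(storer, fs); err != nil {
		panic(err)
	}
}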
diff --git a/plumbing/serverinfo/serverinfo_test.go b/plumbing/serverinfo/serverinfo_test.go
new file mode 100644
index 0000000..0a52ea2
--- /dev/null
+++ b/plumbing/serverinfo/serverinfo_test.go
@@ -0,0 +1,185 @@
+package serverinfo
+
+import (
+ "io"
+ "strings"
+ "testing"
+
+ "github.com/go-git/go-billy/v5"
+ "github.com/go-git/go-billy/v5/memfs"
+ fixtures "github.com/go-git/go-git-fixtures/v4"
+ "github.com/go-git/go-git/v5"
+ "github.com/go-git/go-git/v5/plumbing"
+ "github.com/go-git/go-git/v5/plumbing/object"
+ "github.com/go-git/go-git/v5/plumbing/storer"
+ "github.com/go-git/go-git/v5/storage"
+ "github.com/go-git/go-git/v5/storage/memory"
+ . "gopkg.in/check.v1"
+)
+
+type ServerInfoSuite struct{}
+
+var _ = Suite(&ServerInfoSuite{})
+
+func Test(t *testing.T) { TestingT(t) }
+
+func (s *ServerInfoSuite) TestUpdateServerInfoInit(c *C) {
+ fs := memfs.New()
+ st := memory.NewStorage()
+ r, err := git.Init(st, fs)
+ c.Assert(err, IsNil)
+ c.Assert(r, NotNil)
+
+ err = UpdateServerInfo(st, fs)
+ c.Assert(err, IsNil)
+}
+
+func assertInfoRefs(c *C, st storage.Storer, fs billy.Filesystem) {
+ refsFile, err := fs.Open("info/refs")
+ c.Assert(err, IsNil)
+
+ defer refsFile.Close()
+ bts, err := io.ReadAll(refsFile)
+ c.Assert(err, IsNil)
+
+ localRefs := make(map[plumbing.ReferenceName]plumbing.Hash)
+ for _, line := range strings.Split(string(bts), "\n") {
+ if line == "" {
+ continue
+ }
+ parts := strings.Split(line, "\t")
+ c.Assert(parts, HasLen, 2)
+ hash := plumbing.NewHash(parts[0])
+ name := plumbing.ReferenceName(parts[1])
+ localRefs[name] = hash
+ }
+
+ refs, err := st.IterReferences()
+ c.Assert(err, IsNil)
+
+ err = refs.ForEach(func(ref *plumbing.Reference) error {
+ name := ref.Name()
+ hash := ref.Hash()
+ switch ref.Type() {
+ case plumbing.SymbolicReference:
+ if name == plumbing.HEAD {
+ return nil
+ }
+ ref, err := st.Reference(ref.Target())
+ c.Assert(err, IsNil)
+ hash = ref.Hash()
+ fallthrough
+ case plumbing.HashReference:
+ h, ok := localRefs[name]
+ c.Assert(ok, Equals, true)
+ c.Assert(h, Equals, hash)
+ if name.IsTag() {
+ tag, err := object.GetTag(st, hash)
+ if err == nil {
+ t, ok := localRefs[name+"^{}"]
+ c.Assert(ok, Equals, true)
+ c.Assert(t, Equals, tag.Target)
+ }
+ }
+ }
+ return nil
+ })
+
+ c.Assert(err, IsNil)
+}
+
+func assertObjectPacks(c *C, st storage.Storer, fs billy.Filesystem) {
+ infoPacks, err := fs.Open("objects/info/packs")
+ c.Assert(err, IsNil)
+
+ defer infoPacks.Close()
+ bts, err := io.ReadAll(infoPacks)
+ c.Assert(err, IsNil)
+
+ pos, ok := st.(storer.PackedObjectStorer)
+ c.Assert(ok, Equals, true)
+ localPacks := make(map[string]struct{})
+ packs, err := pos.ObjectPacks()
+ c.Assert(err, IsNil)
+
+ for _, line := range strings.Split(string(bts), "\n") {
+ if line == "" {
+ continue
+ }
+ parts := strings.Split(line, " ")
+ c.Assert(parts, HasLen, 2)
+ pack := strings.TrimPrefix(parts[1], "pack-")
+ pack = strings.TrimSuffix(pack, ".pack")
+ localPacks[pack] = struct{}{}
+ }
+
+ for _, p := range packs {
+ _, ok := localPacks[p.String()]
+ c.Assert(ok, Equals, true)
+ }
+}
+
+func (s *ServerInfoSuite) TestUpdateServerInfoTags(c *C) {
+ fs := memfs.New()
+ st := memory.NewStorage()
+ r, err := git.Clone(st, fs, &git.CloneOptions{
+ URL: fixtures.ByURL("https://github.com/git-fixtures/tags.git").One().URL,
+ })
+ c.Assert(err, IsNil)
+ c.Assert(r, NotNil)
+
+ err = UpdateServerInfo(st, fs)
+ c.Assert(err, IsNil)
+
+ assertInfoRefs(c, st, fs)
+ assertObjectPacks(c, st, fs)
+}
+
+func (s *ServerInfoSuite) TestUpdateServerInfoBasic(c *C) {
+ fs := memfs.New()
+ st := memory.NewStorage()
+ r, err := git.Clone(st, fs, &git.CloneOptions{
+ URL: fixtures.Basic().One().URL,
+ })
+ c.Assert(err, IsNil)
+ c.Assert(r, NotNil)
+
+ err = UpdateServerInfo(st, fs)
+ c.Assert(err, IsNil)
+
+ assertInfoRefs(c, st, fs)
+ assertObjectPacks(c, st, fs)
+}
+
+func (s *ServerInfoSuite) TestUpdateServerInfoBasicChange(c *C) {
+ fs := memfs.New()
+ st := memory.NewStorage()
+ r, err := git.Clone(st, fs, &git.CloneOptions{
+ URL: fixtures.Basic().One().URL,
+ })
+ c.Assert(err, IsNil)
+ c.Assert(r, NotNil)
+
+ err = UpdateServerInfo(st, fs)
+ c.Assert(err, IsNil)
+
+ assertInfoRefs(c, st, fs)
+ assertObjectPacks(c, st, fs)
+
+ head, err := r.Head()
+ c.Assert(err, IsNil)
+
+ ref := plumbing.NewHashReference("refs/heads/my-branch", head.Hash())
+ err = r.Storer.SetReference(ref)
+ c.Assert(err, IsNil)
+
+ _, err = r.CreateTag("test-tag", head.Hash(), &git.CreateTagOptions{
+ Message: "test-tag",
+ })
+ c.Assert(err, IsNil)
+
+ err = UpdateServerInfo(st, fs)
+
+ assertInfoRefs(c, st, fs)
+ assertObjectPacks(c, st, fs)
+}
diff --git a/plumbing/storer/object.go b/plumbing/storer/object.go
index d8a9c27..126b374 100644
--- a/plumbing/storer/object.go
+++ b/plumbing/storer/object.go
@@ -42,6 +42,7 @@ type EncodedObjectStorer interface {
HasEncodedObject(plumbing.Hash) error
// EncodedObjectSize returns the plaintext size of the encoded object.
EncodedObjectSize(plumbing.Hash) (int64, error)
+ AddAlternate(remote string) error
}
// DeltaObjectStorer is an EncodedObjectStorer that can return delta
diff --git a/plumbing/storer/object_test.go b/plumbing/storer/object_test.go
index 30424ff..f2e6a5e 100644
--- a/plumbing/storer/object_test.go
+++ b/plumbing/storer/object_test.go
@@ -168,3 +168,7 @@ func (o *MockObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (EncodedOb
func (o *MockObjectStorage) Begin() Transaction {
return nil
}
+
+func (o *MockObjectStorage) AddAlternate(remote string) error {
+ return nil
+}
diff --git a/plumbing/transport/common.go b/plumbing/transport/common.go
index c6a054a..b05437f 100644
--- a/plumbing/transport/common.go
+++ b/plumbing/transport/common.go
@@ -108,7 +108,7 @@ type Endpoint struct {
// Host is the host.
Host string
// Port is the port to connect, if 0 the default port for the given protocol
- // wil be used.
+ // will be used.
Port int
// Path is the repository path.
Path string
diff --git a/plumbing/transport/common_test.go b/plumbing/transport/common_test.go
index d9f12ab..3efc555 100644
--- a/plumbing/transport/common_test.go
+++ b/plumbing/transport/common_test.go
@@ -210,3 +210,10 @@ func (s *SuiteCommon) TestNewEndpointIPv6(c *C) {
c.Assert(e.Host, Equals, "[::1]")
c.Assert(e.String(), Equals, "http://[::1]:8080/foo.git")
}
+
+func FuzzNewEndpoint(f *testing.F) {
+
+ f.Fuzz(func(t *testing.T, input string) {
+ NewEndpoint(input)
+ })
+}
diff --git a/plumbing/transport/file/client.go b/plumbing/transport/file/client.go
index 6f0a380..38714e2 100644
--- a/plumbing/transport/file/client.go
+++ b/plumbing/transport/file/client.go
@@ -11,7 +11,6 @@ import (
"github.com/go-git/go-git/v5/plumbing/transport"
"github.com/go-git/go-git/v5/plumbing/transport/internal/common"
- "github.com/go-git/go-git/v5/utils/ioutil"
"golang.org/x/sys/execabs"
)
@@ -112,7 +111,7 @@ func (c *command) Start() error {
func (c *command) StderrPipe() (io.Reader, error) {
// Pipe returned by Command.StderrPipe has a race with Read + Command.Wait.
// We use an io.Pipe and close it after the command finishes.
- r, w := ioutil.Pipe()
+ r, w := io.Pipe()
c.cmd.Stderr = w
c.stderrCloser = r
return r, nil
diff --git a/plumbing/transport/file/common_test.go b/plumbing/transport/file/common_test.go
index 7e033a8..a217e97 100644
--- a/plumbing/transport/file/common_test.go
+++ b/plumbing/transport/file/common_test.go
@@ -29,8 +29,8 @@ func (s *CommonSuite) SetUpSuite(c *C) {
s.ReceivePackBin = filepath.Join(s.tmpDir, "git-receive-pack")
s.UploadPackBin = filepath.Join(s.tmpDir, "git-upload-pack")
bin := filepath.Join(s.tmpDir, "go-git")
- cmd := exec.Command("go", "build", "-o", bin,
- "../../../cli/go-git/...")
+ cmd := exec.Command("go", "build", "-o", bin)
+ cmd.Dir = "../../../cli/go-git"
c.Assert(cmd.Run(), IsNil)
c.Assert(os.Symlink(bin, s.ReceivePackBin), IsNil)
c.Assert(os.Symlink(bin, s.UploadPackBin), IsNil)
diff --git a/plumbing/transport/git/common.go b/plumbing/transport/git/common.go
index 92fc0be..2b878b0 100644
--- a/plumbing/transport/git/common.go
+++ b/plumbing/transport/git/common.go
@@ -2,12 +2,11 @@
package git
import (
- "fmt"
"io"
"net"
"strconv"
- "github.com/go-git/go-git/v5/plumbing/format/pktline"
+ "github.com/go-git/go-git/v5/plumbing/protocol/packp"
"github.com/go-git/go-git/v5/plumbing/transport"
"github.com/go-git/go-git/v5/plumbing/transport/internal/common"
"github.com/go-git/go-git/v5/utils/ioutil"
@@ -42,10 +41,18 @@ type command struct {
// Start executes the command sending the required message to the TCP connection
func (c *command) Start() error {
- cmd := endpointToCommand(c.command, c.endpoint)
+ req := packp.GitProtoRequest{
+ RequestCommand: c.command,
+ Pathname: c.endpoint.Path,
+ }
+ host := c.endpoint.Host
+ if c.endpoint.Port != DefaultPort {
+ host = net.JoinHostPort(c.endpoint.Host, strconv.Itoa(c.endpoint.Port))
+ }
+
+ req.Host = host
- e := pktline.NewEncoder(c.conn)
- return e.Encode([]byte(cmd))
+ return req.Encode(c.conn)
}
func (c *command) connect() error {
@@ -90,15 +97,6 @@ func (c *command) StdoutPipe() (io.Reader, error) {
return c.conn, nil
}
-func endpointToCommand(cmd string, ep *transport.Endpoint) string {
- host := ep.Host
- if ep.Port != DefaultPort {
- host = net.JoinHostPort(ep.Host, strconv.Itoa(ep.Port))
- }
-
- return fmt.Sprintf("%s %s%chost=%s%c", cmd, ep.Path, 0, host, 0)
-}
-
// Close closes the TCP connection and connection.
func (c *command) Close() error {
if !c.connected {
diff --git a/plumbing/transport/git/common_test.go b/plumbing/transport/git/common_test.go
index 7389919..3cab933 100644
--- a/plumbing/transport/git/common_test.go
+++ b/plumbing/transport/git/common_test.go
@@ -1,6 +1,7 @@
package git
import (
+ "bytes"
"fmt"
"net"
"os"
@@ -32,7 +33,12 @@ func (s *BaseSuite) SetUpTest(c *C) {
See https://github.com/git-for-windows/git/issues/907`)
}
- var err error
+ cmd := exec.Command("git", "daemon", "--help")
+ output, err := cmd.CombinedOutput()
+ if err != nil && bytes.Contains(output, []byte("'daemon' is not a git command")) {
+ c.Fatal("git daemon cannot be found")
+ }
+
s.port, err = freePort()
c.Assert(err, IsNil)
@@ -85,11 +91,15 @@ func (s *BaseSuite) prepareRepository(c *C, f *fixtures.Fixture, name string) *t
}
func (s *BaseSuite) TearDownTest(c *C) {
- _ = s.daemon.Process.Signal(os.Kill)
- _ = s.daemon.Wait()
+ if s.daemon != nil {
+ _ = s.daemon.Process.Signal(os.Kill)
+ _ = s.daemon.Wait()
+ }
- err := os.RemoveAll(s.base)
- c.Assert(err, IsNil)
+ if s.base != "" {
+ err := os.RemoveAll(s.base)
+ c.Assert(err, IsNil)
+ }
}
func freePort() (int, error) {
diff --git a/plumbing/transport/http/common.go b/plumbing/transport/http/common.go
index a7cdc1e..54126fe 100644
--- a/plumbing/transport/http/common.go
+++ b/plumbing/transport/http/common.go
@@ -406,14 +406,28 @@ func (a *TokenAuth) String() string {
// Err is a dedicated error to return errors based on status code
type Err struct {
Response *http.Response
+ Reason string
}
-// NewErr returns a new Err based on a http response
+// NewErr returns a new Err based on an HTTP response, closing the response
+// body if needed.
func NewErr(r *http.Response) error {
if r.StatusCode >= http.StatusOK && r.StatusCode < http.StatusMultipleChoices {
return nil
}
+ var reason string
+
+ // If a response message is present, add it to the error
+ var messageBuffer bytes.Buffer
+ if r.Body != nil {
+ messageLength, _ := messageBuffer.ReadFrom(r.Body)
+ if messageLength > 0 {
+ reason = messageBuffer.String()
+ }
+ _ = r.Body.Close()
+ }
+
switch r.StatusCode {
case http.StatusUnauthorized:
return transport.ErrAuthenticationRequired
@@ -423,7 +437,7 @@ func NewErr(r *http.Response) error {
return transport.ErrRepositoryNotFound
}
- return plumbing.NewUnexpectedError(&Err{r})
+ return plumbing.NewUnexpectedError(&Err{r, reason})
}
// StatusCode returns the status code of the response
diff --git a/plumbing/transport/http/common_test.go b/plumbing/transport/http/common_test.go
index 1517228..6bd018b 100644
--- a/plumbing/transport/http/common_test.go
+++ b/plumbing/transport/http/common_test.go
@@ -3,6 +3,7 @@ package http
import (
"crypto/tls"
"fmt"
+ "io"
"log"
"net"
"net/http"
@@ -14,6 +15,7 @@ import (
"strings"
"testing"
+ "github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/transport"
fixtures "github.com/go-git/go-git-fixtures/v4"
@@ -90,6 +92,23 @@ func (s *ClientSuite) TestNewHTTPError40x(c *C) {
"unexpected client error.*")
}
+func (s *ClientSuite) TestNewUnexpectedError(c *C) {
+ res := &http.Response{
+ StatusCode: 500,
+ Body: io.NopCloser(strings.NewReader("Unexpected error")),
+ }
+
+ err := NewErr(res)
+ c.Assert(err, NotNil)
+ c.Assert(err, FitsTypeOf, &plumbing.UnexpectedError{})
+
+ unexpectedError, _ := err.(*plumbing.UnexpectedError)
+ c.Assert(unexpectedError.Err, FitsTypeOf, &Err{})
+
+ httpError, _ := unexpectedError.Err.(*Err)
+ c.Assert(httpError.Reason, Equals, "Unexpected error")
+}
+
func (s *ClientSuite) Test_newSession(c *C) {
cl := NewClientWithOptions(nil, &ClientOptions{
CacheMaxEntries: 2,
diff --git a/plumbing/transport/http/receive_pack.go b/plumbing/transport/http/receive_pack.go
index 4387ecf..3e736cd 100644
--- a/plumbing/transport/http/receive_pack.go
+++ b/plumbing/transport/http/receive_pack.go
@@ -102,7 +102,6 @@ func (s *rpSession) doRequest(
}
if err := NewErr(res); err != nil {
- _ = res.Body.Close()
return nil, err
}
diff --git a/plumbing/transport/http/upload_pack.go b/plumbing/transport/http/upload_pack.go
index 4f85145..3432618 100644
--- a/plumbing/transport/http/upload_pack.go
+++ b/plumbing/transport/http/upload_pack.go
@@ -100,7 +100,6 @@ func (s *upSession) doRequest(
}
if err := NewErr(res); err != nil {
- _ = res.Body.Close()
return nil, err
}
diff --git a/plumbing/transport/internal/common/common.go b/plumbing/transport/internal/common/common.go
index 5fdf425..9e1d023 100644
--- a/plumbing/transport/internal/common/common.go
+++ b/plumbing/transport/internal/common/common.go
@@ -11,6 +11,7 @@ import (
"errors"
"fmt"
"io"
+ "regexp"
"strings"
"time"
@@ -28,6 +29,10 @@ const (
var (
ErrTimeoutExceeded = errors.New("timeout exceeded")
+ // stdErrSkipPattern is used for skipping lines from a command's stderr output.
+ // Any line matching this pattern will be skipped from further
+ // processing and not be returned to calling code.
+ stdErrSkipPattern = regexp.MustCompile("^remote:( =*){0,1}$")
)
// Commander creates Command instances. This is the main entry point for
@@ -149,10 +154,17 @@ func (c *client) listenFirstError(r io.Reader) chan string {
errLine := make(chan string, 1)
go func() {
s := bufio.NewScanner(r)
- if s.Scan() {
- errLine <- s.Text()
- } else {
- close(errLine)
+ for {
+ if s.Scan() {
+ line := s.Text()
+ if !stdErrSkipPattern.MatchString(line) {
+ errLine <- line
+ break
+ }
+ } else {
+ close(errLine)
+ break
+ }
}
_, _ = io.Copy(io.Discard, r)
@@ -191,9 +203,22 @@ func (s *session) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRe
}
func (s *session) handleAdvRefDecodeError(err error) error {
+ var errLine *pktline.ErrorLine
+ if errors.As(err, &errLine) {
+ if isRepoNotFoundError(errLine.Text) {
+ return transport.ErrRepositoryNotFound
+ }
+
+ return errLine
+ }
+
// If repository is not found, we get empty stdout and server writes an
// error to stderr.
- if err == packp.ErrEmptyInput {
+ if errors.Is(err, packp.ErrEmptyInput) {
+ // TODO:(v6): handle this error in a better way.
+ // Instead of checking the stderr output for a specific error message,
+ // define an ExitError and embed the stderr output and exit code (if one
+ // exists) in the error struct, just like exec.ExitError.
s.finished = true
if err := s.checkNotFoundError(); err != nil {
return err
@@ -233,6 +258,12 @@ func (s *session) handleAdvRefDecodeError(err error) error {
// returned with the packfile content. The reader must be closed after reading.
func (s *session) UploadPack(ctx context.Context, req *packp.UploadPackRequest) (*packp.UploadPackResponse, error) {
if req.IsEmpty() {
+ // XXX: IsEmpty means haves are a subset of wants; in that case we have
+ // everything we asked for. Close the connection and return nil.
+ if err := s.finish(); err != nil {
+ return nil, err
+ }
+ // TODO:(v6) return nil here
return nil, transport.ErrEmptyUploadPackRequest
}
@@ -381,54 +412,43 @@ func (s *session) checkNotFoundError() error {
return transport.ErrRepositoryNotFound
}
+ // TODO:(v6): return server error just as it is without a prefix
return fmt.Errorf("unknown error: %s", line)
}
}
-var (
- githubRepoNotFoundErr = "ERROR: Repository not found."
- bitbucketRepoNotFoundErr = "conq: repository does not exist."
+const (
+ githubRepoNotFoundErr = "Repository not found."
+ bitbucketRepoNotFoundErr = "repository does not exist."
localRepoNotFoundErr = "does not appear to be a git repository"
- gitProtocolNotFoundErr = "ERR \n Repository not found."
- gitProtocolNoSuchErr = "ERR no such repository"
- gitProtocolAccessDeniedErr = "ERR access denied"
- gogsAccessDeniedErr = "Gogs: Repository does not exist or you do not have access"
+ gitProtocolNotFoundErr = "Repository not found."
+ gitProtocolNoSuchErr = "no such repository"
+ gitProtocolAccessDeniedErr = "access denied"
+ gogsAccessDeniedErr = "Repository does not exist or you do not have access"
+ gitlabRepoNotFoundErr = "The project you were looking for could not be found"
)
func isRepoNotFoundError(s string) bool {
- if strings.HasPrefix(s, githubRepoNotFoundErr) {
- return true
- }
-
- if strings.HasPrefix(s, bitbucketRepoNotFoundErr) {
- return true
- }
-
- if strings.HasSuffix(s, localRepoNotFoundErr) {
- return true
- }
-
- if strings.HasPrefix(s, gitProtocolNotFoundErr) {
- return true
- }
-
- if strings.HasPrefix(s, gitProtocolNoSuchErr) {
- return true
- }
-
- if strings.HasPrefix(s, gitProtocolAccessDeniedErr) {
- return true
- }
-
- if strings.HasPrefix(s, gogsAccessDeniedErr) {
- return true
+ for _, err := range []string{
+ githubRepoNotFoundErr,
+ bitbucketRepoNotFoundErr,
+ localRepoNotFoundErr,
+ gitProtocolNotFoundErr,
+ gitProtocolNoSuchErr,
+ gitProtocolAccessDeniedErr,
+ gogsAccessDeniedErr,
+ gitlabRepoNotFoundErr,
+ } {
+ if strings.Contains(s, err) {
+ return true
+ }
}
return false
}
// uploadPack implements the git-upload-pack protocol.
-func uploadPack(w io.WriteCloser, r io.Reader, req *packp.UploadPackRequest) error {
+func uploadPack(w io.WriteCloser, _ io.Reader, req *packp.UploadPackRequest) error {
// TODO support multi_ack mode
// TODO support multi_ack_detailed mode
// TODO support acks for common objects
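To make the new stderr filtering concrete, the following standalone sketch (not part of the patch) applies the same regular expression to a few sample lines of the kind a remote such as GitLab prints around its real error message; the sample lines are illustrative only.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as stdErrSkipPattern above: it matches the empty
	// "remote:" filler and "remote: ====..." banner lines, but not the
	// line carrying the actual error text.
	skip := regexp.MustCompile("^remote:( =*){0,1}$")

	lines := []string{
		"remote:",
		"remote: ========================================================================",
		"remote: ERROR: The project you were looking for could not be found.",
	}

	for _, line := range lines {
		fmt.Printf("skip=%-5v %s\n", skip.MatchString(line), line)
	}
}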
diff --git a/plumbing/transport/internal/common/common_test.go b/plumbing/transport/internal/common/common_test.go
index affa787..9344bb6 100644
--- a/plumbing/transport/internal/common/common_test.go
+++ b/plumbing/transport/internal/common/common_test.go
@@ -4,6 +4,7 @@ import (
"fmt"
"testing"
+ "github.com/go-git/go-git/v5/plumbing/transport"
. "gopkg.in/check.v1"
)
@@ -21,56 +22,8 @@ func (s *CommonSuite) TestIsRepoNotFoundErrorForUnknownSource(c *C) {
c.Assert(isRepoNotFound, Equals, false)
}
-func (s *CommonSuite) TestIsRepoNotFoundErrorForGithub(c *C) {
- msg := fmt.Sprintf("%s : some error stuf", githubRepoNotFoundErr)
-
- isRepoNotFound := isRepoNotFoundError(msg)
-
- c.Assert(isRepoNotFound, Equals, true)
-}
-
-func (s *CommonSuite) TestIsRepoNotFoundErrorForBitBucket(c *C) {
- msg := fmt.Sprintf("%s : some error stuf", bitbucketRepoNotFoundErr)
-
- isRepoNotFound := isRepoNotFoundError(msg)
-
- c.Assert(isRepoNotFound, Equals, true)
-}
-
-func (s *CommonSuite) TestIsRepoNotFoundErrorForLocal(c *C) {
- msg := fmt.Sprintf("some error stuf : %s", localRepoNotFoundErr)
-
- isRepoNotFound := isRepoNotFoundError(msg)
-
- c.Assert(isRepoNotFound, Equals, true)
-}
-
-func (s *CommonSuite) TestIsRepoNotFoundErrorForGitProtocolNotFound(c *C) {
- msg := fmt.Sprintf("%s : some error stuf", gitProtocolNotFoundErr)
-
- isRepoNotFound := isRepoNotFoundError(msg)
-
- c.Assert(isRepoNotFound, Equals, true)
-}
-
-func (s *CommonSuite) TestIsRepoNotFoundErrorForGitProtocolNoSuch(c *C) {
- msg := fmt.Sprintf("%s : some error stuf", gitProtocolNoSuchErr)
-
- isRepoNotFound := isRepoNotFoundError(msg)
-
- c.Assert(isRepoNotFound, Equals, true)
-}
-
-func (s *CommonSuite) TestIsRepoNotFoundErrorForGitProtocolAccessDenied(c *C) {
- msg := fmt.Sprintf("%s : some error stuf", gitProtocolAccessDeniedErr)
-
- isRepoNotFound := isRepoNotFoundError(msg)
-
- c.Assert(isRepoNotFound, Equals, true)
-}
-
-func (s *CommonSuite) TestIsRepoNotFoundErrorForGogsAccessDenied(c *C) {
- msg := fmt.Sprintf("%s : some error stuf", gogsAccessDeniedErr)
+func (s *CommonSuite) TestIsRepoNotFoundError(c *C) {
+ msg := "no such repository : some error stuff"
isRepoNotFound := isRepoNotFoundError(msg)
@@ -90,3 +43,51 @@ func (s *CommonSuite) TestCheckNotFoundError(c *C) {
c.Assert(err, IsNil)
}
+
+func TestAdvertisedReferencesWithRemoteError(t *testing.T) {
+ tests := []struct {
+ name string
+ stderr string
+ wantErr error
+ }{
+ {
+ name: "unknown error",
+ stderr: "something",
+ wantErr: fmt.Errorf("unknown error: something"),
+ },
+ {
+ name: "GitLab: repository not found",
+ stderr: `remote:
+remote: ========================================================================
+remote:
+remote: ERROR: The project you were looking for could not be found or you don't have permission to view it.
+
+remote:
+remote: ========================================================================
+remote:`,
+ wantErr: transport.ErrRepositoryNotFound,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ client := NewClient(MockCommander{stderr: tt.stderr})
+ sess, err := client.NewUploadPackSession(nil, nil)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ _, err = sess.AdvertisedReferences()
+
+ if tt.wantErr != nil {
+ if tt.wantErr != err {
+ if tt.wantErr.Error() != err.Error() {
+ t.Fatalf("expected a different error: got '%s', expected '%s'", err, tt.wantErr)
+ }
+ }
+ } else if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ })
+ }
+}
diff --git a/plumbing/transport/internal/common/mocks.go b/plumbing/transport/internal/common/mocks.go
new file mode 100644
index 0000000..bc18b27
--- /dev/null
+++ b/plumbing/transport/internal/common/mocks.go
@@ -0,0 +1,46 @@
+package common
+
+import (
+ "bytes"
+ "io"
+
+ gogitioutil "github.com/go-git/go-git/v5/utils/ioutil"
+
+ "github.com/go-git/go-git/v5/plumbing/transport"
+)
+
+type MockCommand struct {
+ stdin bytes.Buffer
+ stdout bytes.Buffer
+ stderr bytes.Buffer
+}
+
+func (c MockCommand) StderrPipe() (io.Reader, error) {
+ return &c.stderr, nil
+}
+
+func (c MockCommand) StdinPipe() (io.WriteCloser, error) {
+ return gogitioutil.WriteNopCloser(&c.stdin), nil
+}
+
+func (c MockCommand) StdoutPipe() (io.Reader, error) {
+ return &c.stdout, nil
+}
+
+func (c MockCommand) Start() error {
+ return nil
+}
+
+func (c MockCommand) Close() error {
+ panic("not implemented")
+}
+
+type MockCommander struct {
+ stderr string
+}
+
+func (c MockCommander) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (Command, error) {
+ return &MockCommand{
+ stderr: *bytes.NewBufferString(c.stderr),
+ }, nil
+}
diff --git a/plumbing/transport/server/server.go b/plumbing/transport/server/server.go
index 11fa0c8..cf5d6f4 100644
--- a/plumbing/transport/server/server.go
+++ b/plumbing/transport/server/server.go
@@ -166,7 +166,7 @@ func (s *upSession) UploadPack(ctx context.Context, req *packp.UploadPackRequest
return nil, err
}
- pr, pw := ioutil.Pipe()
+ pr, pw := io.Pipe()
e := packfile.NewEncoder(pw, s.storer, false)
go func() {
// TODO: plumb through a pack window.
diff --git a/plumbing/transport/ssh/common.go b/plumbing/transport/ssh/common.go
index 1531603..05dea44 100644
--- a/plumbing/transport/ssh/common.go
+++ b/plumbing/transport/ssh/common.go
@@ -49,7 +49,9 @@ type runner struct {
func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (common.Command, error) {
c := &command{command: cmd, endpoint: ep, config: r.config}
if auth != nil {
- c.setAuth(auth)
+ if err := c.setAuth(auth); err != nil {
+ return nil, err
+ }
}
if err := c.connect(); err != nil {
@@ -168,7 +170,7 @@ func dial(network, addr string, proxyOpts transport.ProxyOptions, config *ssh.Cl
defer cancel()
var conn net.Conn
- var err error
+ var dialErr error
if proxyOpts.URL != "" {
proxyUrl, err := proxyOpts.FullURL()
@@ -186,12 +188,12 @@ func dial(network, addr string, proxyOpts transport.ProxyOptions, config *ssh.Cl
return nil, fmt.Errorf("expected ssh proxy dialer to be of type %s; got %s",
reflect.TypeOf(ctxDialer), reflect.TypeOf(dialer))
}
- conn, err = ctxDialer.DialContext(ctx, "tcp", addr)
+ conn, dialErr = ctxDialer.DialContext(ctx, "tcp", addr)
} else {
- conn, err = proxy.Dial(ctx, network, addr)
+ conn, dialErr = proxy.Dial(ctx, network, addr)
}
- if err != nil {
- return nil, err
+ if dialErr != nil {
+ return nil, dialErr
}
c, chans, reqs, err := ssh.NewClientConn(conn, addr, config)
diff --git a/plumbing/transport/ssh/common_test.go b/plumbing/transport/ssh/common_test.go
index 496e82d..a724936 100644
--- a/plumbing/transport/ssh/common_test.go
+++ b/plumbing/transport/ssh/common_test.go
@@ -172,6 +172,28 @@ func (s *SuiteCommon) TestIssue70(c *C) {
c.Assert(err, IsNil)
}
+/*
+Given an endpoint to a git server with a socks5 proxy URL,
+when the socks5 proxy server is not reachable,
+then there should be no panic, and an error with an appropriate message should be returned.
+Related issue: https://github.com/go-git/go-git/pull/900
+*/
+func (s *SuiteCommon) TestInvalidSocks5Proxy(c *C) {
+ ep, err := transport.NewEndpoint("git@github.com:foo/bar.git")
+ c.Assert(err, IsNil)
+ ep.Proxy.URL = "socks5://127.0.0.1:1080"
+
+ auth, err := NewPublicKeys("foo", testdata.PEMBytes["rsa"], "")
+ c.Assert(err, IsNil)
+ c.Assert(auth, NotNil)
+
+ ps, err := DefaultClient.NewUploadPackSession(ep, auth)
+ // Since the proxy server is not running, we expect an error.
+ c.Assert(ps, IsNil)
+ c.Assert(err, NotNil)
+ c.Assert(err, ErrorMatches, "socks connect .* dial tcp 127.0.0.1:1080: .*")
+}
+
type mockSSHConfig struct {
Values map[string]map[string]string
}
@@ -184,3 +206,26 @@ func (c *mockSSHConfig) Get(alias, key string) string {
return a[key]
}
+
+type invalidAuthMethod struct {
+}
+
+func (a *invalidAuthMethod) Name() string {
+ return "invalid"
+}
+
+func (a *invalidAuthMethod) String() string {
+ return "invalid"
+}
+
+func (s *SuiteCommon) TestCommandWithInvalidAuthMethod(c *C) {
+ uploadPack := &UploadPackSuite{}
+ uploadPack.SetUpSuite(c)
+ r := &runner{}
+ auth := &invalidAuthMethod{}
+
+ _, err := r.Command("command", uploadPack.newEndpoint(c, "endpoint"), auth)
+
+ c.Assert(err, NotNil)
+ c.Assert(err, ErrorMatches, "invalid auth method")
+}
diff --git a/remote.go b/remote.go
index 679e0af..0cb70bc 100644
--- a/remote.go
+++ b/remote.go
@@ -552,6 +552,10 @@ func (r *Remote) fetchPack(ctx context.Context, o *FetchOptions, s transport.Upl
reader, err := s.UploadPack(ctx, req)
if err != nil {
+ if errors.Is(err, transport.ErrEmptyUploadPackRequest) {
+ // XXX: no packfile provided, everything is up-to-date.
+ return nil
+ }
return err
}
@@ -614,7 +618,7 @@ func (r *Remote) addOrUpdateReferences(
req *packp.ReferenceUpdateRequest,
forceWithLease *ForceWithLease,
) error {
- // If it is not a wilcard refspec we can directly search for the reference
+ // If it is not a wildcard refspec we can directly search for the reference
// in the references dictionary.
if !rs.IsWildcard() {
ref, ok := refsDict[rs.Src()]
@@ -693,7 +697,7 @@ func (r *Remote) addCommit(rs config.RefSpec,
remoteRef, err := remoteRefs.Reference(cmd.Name)
if err == nil {
if remoteRef.Type() != plumbing.HashReference {
- //TODO: check actual git behavior here
+ // TODO: check actual git behavior here
return nil
}
@@ -735,7 +739,7 @@ func (r *Remote) addReferenceIfRefSpecMatches(rs config.RefSpec,
remoteRef, err := remoteRefs.Reference(cmd.Name)
if err == nil {
if remoteRef.Type() != plumbing.HashReference {
- //TODO: check actual git behavior here
+ // TODO: check actual git behavior here
return nil
}
@@ -1066,7 +1070,7 @@ func checkFastForwardUpdate(s storer.EncodedObjectStorer, remoteRefs storer.Refe
return fmt.Errorf("non-fast-forward update: %s", cmd.Name.String())
}
- ff, err := isFastForward(s, cmd.Old, cmd.New)
+ ff, err := isFastForward(s, cmd.Old, cmd.New, nil)
if err != nil {
return err
}
@@ -1078,14 +1082,28 @@ func checkFastForwardUpdate(s storer.EncodedObjectStorer, remoteRefs storer.Refe
return nil
}
-func isFastForward(s storer.EncodedObjectStorer, old, new plumbing.Hash) (bool, error) {
+func isFastForward(s storer.EncodedObjectStorer, old, new plumbing.Hash, earliestShallow *plumbing.Hash) (bool, error) {
c, err := object.GetCommit(s, new)
if err != nil {
return false, err
}
+ parentsToIgnore := []plumbing.Hash{}
+ if earliestShallow != nil {
+ earliestCommit, err := object.GetCommit(s, *earliestShallow)
+ if err != nil {
+ return false, err
+ }
+
+ parentsToIgnore = earliestCommit.ParentHashes
+ }
+
found := false
- iter := object.NewCommitPreorderIter(c, nil, nil)
+ // Stop iterating at the earliest shallow commit, ignoring its parents.
+ // Note: when the pull depth is smaller than the number of new changes on the remote, this fails due to missing parents.
+ // As far as I can tell, without the commits between the shallow pull and the earliest shallow, there's no
+ // real way of telling whether it will be a fast-forward merge.
+ iter := object.NewCommitPreorderIter(c, nil, parentsToIgnore)
err = iter.ForEach(func(c *object.Commit) error {
if c.Hash != old {
return nil
@@ -1198,10 +1216,10 @@ func (r *Remote) updateLocalReferenceStorage(
old, _ := storer.ResolveReference(r.s, localName)
new := plumbing.NewHashReference(localName, ref.Hash())
- // If the ref exists locally as a branch and force is not specified,
- // only update if the new ref is an ancestor of the old
- if old != nil && old.Name().IsBranch() && !force && !spec.IsForceUpdate() {
- ff, err := isFastForward(r.s, old.Hash(), new.Hash())
+ // If the ref exists locally as a non-tag and force is not
+ // specified, only update if the new ref is an ancestor of the old
+ if old != nil && !old.Name().IsTag() && !force && !spec.IsForceUpdate() {
+ ff, err := isFastForward(r.s, old.Hash(), new.Hash(), nil)
if err != nil {
return updated, err
}
@@ -1386,8 +1404,7 @@ func pushHashes(
useRefDeltas bool,
allDelete bool,
) (*packp.ReportStatus, error) {
-
- rd, wr := ioutil.Pipe()
+ rd, wr := io.Pipe()
config, err := s.Config()
if err != nil {
diff --git a/remote_test.go b/remote_test.go
index ca5f261..81c60bc 100644
--- a/remote_test.go
+++ b/remote_test.go
@@ -4,16 +4,20 @@ import (
"bytes"
"context"
"errors"
+ "fmt"
"io"
"os"
"path/filepath"
"runtime"
"strings"
+ "testing"
"time"
+ "github.com/go-git/go-billy/v5/memfs"
"github.com/go-git/go-git/v5/config"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/cache"
+ "github.com/go-git/go-git/v5/plumbing/object"
"github.com/go-git/go-git/v5/plumbing/protocol/packp"
"github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
"github.com/go-git/go-git/v5/plumbing/storer"
@@ -196,7 +200,7 @@ func (s *RemoteSuite) TestFetchToNewBranchWithAllTags(c *C) {
})
}
-func (s *RemoteSuite) TestFetchNonExistantReference(c *C) {
+func (s *RemoteSuite) TestFetchNonExistentReference(c *C) {
r := NewRemote(memory.NewStorage(), &config.RemoteConfig{
URLs: []string{s.GetLocalRepositoryURL(fixtures.ByTag("tags").One())},
})
@@ -1555,3 +1559,140 @@ func (s *RemoteSuite) TestFetchAfterShallowClone(c *C) {
plumbing.NewSymbolicReference("HEAD", "refs/heads/master"),
})
}
+
+func TestFetchFastForwardForCustomRef(t *testing.T) {
+ customRef := "refs/custom/branch"
+ // 1. Set up a remote with a URL
+ remoteURL := t.TempDir()
+ remoteRepo, err := PlainInit(remoteURL, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // 2. Add a commit with an empty tree to master and custom ref, also set HEAD
+ emptyTreeID := writeEmptyTree(t, remoteRepo)
+ writeCommitToRef(t, remoteRepo, "refs/heads/master", emptyTreeID, time.Now())
+ writeCommitToRef(t, remoteRepo, customRef, emptyTreeID, time.Now())
+ if err := remoteRepo.Storer.SetReference(plumbing.NewSymbolicReference(plumbing.HEAD, "refs/heads/master")); err != nil {
+ t.Fatal(err)
+ }
+
+ // 3. Clone repo, then fetch the custom ref
+ // Note that using custom ref in ReferenceName has an IsBranch issue
+ localRepo, err := Clone(memory.NewStorage(), memfs.New(), &CloneOptions{
+ URL: remoteURL,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := localRepo.Fetch(&FetchOptions{
+ RefSpecs: []config.RefSpec{
+ config.RefSpec(fmt.Sprintf("%s:%s", customRef, customRef)),
+ },
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ // 4. Make divergent changes
+ remoteCommitID := writeCommitToRef(t, remoteRepo, customRef, emptyTreeID, time.Now())
+ // Consecutive calls to writeCommitToRef with time.Now() might have the same
+ // time value, explicitly set distinct ones to ensure the commit hashes
+ // differ
+ writeCommitToRef(t, localRepo, customRef, emptyTreeID, time.Now().Add(time.Second))
+
+ // 5. Try to fetch with fast-forward only mode
+ remote, err := localRepo.Remote(DefaultRemoteName)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = remote.Fetch(&FetchOptions{RefSpecs: []config.RefSpec{
+ config.RefSpec(fmt.Sprintf("%s:%s", customRef, customRef)),
+ }})
+ if !errors.Is(err, ErrForceNeeded) {
+ t.Errorf("expected %v, got %v", ErrForceNeeded, err)
+ }
+
+ // 6. Fetch with force
+ err = remote.Fetch(&FetchOptions{RefSpecs: []config.RefSpec{
+ config.RefSpec(fmt.Sprintf("+%s:%s", customRef, customRef)),
+ }})
+ if err != nil {
+ t.Errorf("unexpected error %v", err)
+ }
+
+ // 7. Assert commit ID matches
+ ref, err := localRepo.Reference(plumbing.ReferenceName(customRef), true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if remoteCommitID != ref.Hash() {
+ t.Errorf("expected %s, got %s", remoteCommitID.String(), ref.Hash().String())
+ }
+}
+
+func writeEmptyTree(t *testing.T, repo *Repository) plumbing.Hash {
+ t.Helper()
+
+ obj := repo.Storer.NewEncodedObject()
+ obj.SetType(plumbing.TreeObject)
+
+ tree := object.Tree{Entries: nil}
+ if err := tree.Encode(obj); err != nil {
+ t.Fatal(err)
+ }
+
+ treeID, err := repo.Storer.SetEncodedObject(obj)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ return treeID
+}
+
+func writeCommitToRef(t *testing.T, repo *Repository, refName string, treeID plumbing.Hash, when time.Time) plumbing.Hash {
+ t.Helper()
+
+ ref, err := repo.Reference(plumbing.ReferenceName(refName), true)
+ if err != nil {
+ if errors.Is(err, plumbing.ErrReferenceNotFound) {
+ if err := repo.Storer.SetReference(plumbing.NewHashReference(plumbing.ReferenceName(refName), plumbing.ZeroHash)); err != nil {
+ t.Fatal(err)
+ }
+
+ ref, err = repo.Reference(plumbing.ReferenceName(refName), true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ } else {
+ t.Fatal(err)
+ }
+ }
+
+ commit := &object.Commit{
+ TreeHash: treeID,
+ Author: object.Signature{
+ When: when,
+ },
+ }
+ if !ref.Hash().IsZero() {
+ commit.ParentHashes = []plumbing.Hash{ref.Hash()}
+ }
+
+ obj := repo.Storer.NewEncodedObject()
+ if err := commit.Encode(obj); err != nil {
+ t.Fatal(err)
+ }
+
+ commitID, err := repo.Storer.SetEncodedObject(obj)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ newRef := plumbing.NewHashReference(plumbing.ReferenceName(refName), commitID)
+ if err := repo.Storer.CheckAndSetReference(newRef, ref); err != nil {
+ t.Fatal(err)
+ }
+
+ return commitID
+}
diff --git a/repository.go b/repository.go
index 3154ac0..1524a69 100644
--- a/repository.go
+++ b/repository.go
@@ -22,6 +22,7 @@ import (
"github.com/go-git/go-git/v5/config"
"github.com/go-git/go-git/v5/internal/path_util"
"github.com/go-git/go-git/v5/internal/revision"
+ "github.com/go-git/go-git/v5/internal/url"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/cache"
formatcfg "github.com/go-git/go-git/v5/plumbing/format/config"
@@ -62,6 +63,7 @@ var (
ErrUnableToResolveCommit = errors.New("unable to resolve commit")
ErrPackedObjectsNotSupported = errors.New("packed objects not supported")
ErrSHA256NotSupported = errors.New("go-git was not compiled with SHA256 support")
+ ErrAlternatePathNotSupported = errors.New("alternate path must use the file scheme")
)
// Repository represents a git repository
@@ -96,6 +98,10 @@ func InitWithOptions(s storage.Storer, worktree billy.Filesystem, options InitOp
options.DefaultBranch = plumbing.Master
}
+ if err := options.DefaultBranch.Validate(); err != nil {
+ return nil, err
+ }
+
r := newRepository(s, worktree)
_, err := r.Reference(plumbing.HEAD, false)
switch err {
@@ -235,9 +241,19 @@ func CloneContext(
// if the repository will have worktree (non-bare) or not (bare), if the path
// is not empty ErrRepositoryAlreadyExists is returned.
func PlainInit(path string, isBare bool) (*Repository, error) {
+ return PlainInitWithOptions(path, &PlainInitOptions{
+ Bare: isBare,
+ })
+}
+
+func PlainInitWithOptions(path string, opts *PlainInitOptions) (*Repository, error) {
+ if opts == nil {
+ opts = &PlainInitOptions{}
+ }
+
var wt, dot billy.Filesystem
- if isBare {
+ if opts.Bare {
dot = osfs.New(path)
} else {
wt = osfs.New(path)
@@ -246,16 +262,7 @@ func PlainInit(path string, isBare bool) (*Repository, error) {
s := filesystem.NewStorage(dot, cache.NewObjectLRUDefault())
- return Init(s, wt)
-}
-
-func PlainInitWithOptions(path string, opts *PlainInitOptions) (*Repository, error) {
- wt := osfs.New(path)
- dot, _ := wt.Chroot(GitDirName)
-
- s := filesystem.NewStorage(dot, cache.NewObjectLRUDefault())
-
- r, err := Init(s, wt)
+ r, err := InitWithOptions(s, wt, opts.InitOptions)
if err != nil {
return nil, err
}
@@ -265,7 +272,7 @@ func PlainInitWithOptions(path string, opts *PlainInitOptions) (*Repository, err
return nil, err
}
- if opts != nil {
+ if opts.ObjectFormat != "" {
if opts.ObjectFormat == formatcfg.SHA256 && hash.CryptoType != crypto.SHA256 {
return nil, ErrSHA256NotSupported
}
@@ -721,7 +728,10 @@ func (r *Repository) DeleteBranch(name string) error {
// CreateTag creates a tag. If opts is included, the tag is an annotated tag,
// otherwise a lightweight tag is created.
func (r *Repository) CreateTag(name string, hash plumbing.Hash, opts *CreateTagOptions) (*plumbing.Reference, error) {
- rname := plumbing.ReferenceName(path.Join("refs", "tags", name))
+ rname := plumbing.NewTagReferenceName(name)
+ if err := rname.Validate(); err != nil {
+ return nil, err
+ }
_, err := r.Storer.Reference(rname)
switch err {
@@ -886,6 +896,30 @@ func (r *Repository) clone(ctx context.Context, o *CloneOptions) error {
return err
}
+ // When the repository to clone is on the local machine, instead of
+ // using hard links, automatically set up .git/objects/info/alternates
+ // to share the objects with the source repository.
+ if o.Shared {
+ if !url.IsLocalEndpoint(o.URL) {
+ return ErrAlternatePathNotSupported
+ }
+ altpath := o.URL
+ remoteRepo, err := PlainOpen(o.URL)
+ if err != nil {
+ return fmt.Errorf("failed to open remote repository: %w", err)
+ }
+ conf, err := remoteRepo.Config()
+ if err != nil {
+ return fmt.Errorf("failed to read remote repository configuration: %w", err)
+ }
+ if !conf.Core.IsBare {
+ altpath = path.Join(altpath, GitDirName)
+ }
+ if err := r.Storer.AddAlternate(altpath); err != nil {
+ return fmt.Errorf("failed to add alternate file to git objects dir: %w", err)
+ }
+ }
+
ref, err := r.fetchAndUpdateReferences(ctx, &FetchOptions{
RefSpecs: c.Fetch,
Depth: o.Depth,
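A brief sketch (not part of the patch) of how the Shared option is expected to be used from the caller's side follows; the source and destination paths are placeholders. Cloning over http, https, or ssh with Shared set returns ErrAlternatePathNotSupported, as the tests below show.

package main

import (
	"github.com/go-git/go-git/v5"
)

func main() {
	// Clone from a local repository without copying its objects: the new
	// clone's .git/objects/info/alternates points back at the source.
	_, err := git.PlainClone("/tmp/project-shared", false, &git.CloneOptions{
		URL:    "/srv/git/project",
		Shared: true,
	})
	if err != nil {
		panic(err)
	}
}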
diff --git a/repository_test.go b/repository_test.go
index 9e000a3..51df845 100644
--- a/repository_test.go
+++ b/repository_test.go
@@ -9,6 +9,7 @@ import (
"os"
"os/exec"
"os/user"
+ "path"
"path/filepath"
"regexp"
"strings"
@@ -74,6 +75,13 @@ func (s *RepositorySuite) TestInitWithOptions(c *C) {
}
+func (s *RepositorySuite) TestInitWithInvalidDefaultBranch(c *C) {
+ _, err := InitWithOptions(memory.NewStorage(), memfs.New(), InitOptions{
+ DefaultBranch: "foo",
+ })
+ c.Assert(err, NotNil)
+}
+
func createCommit(c *C, r *Repository) {
// Create a commit so there is a HEAD to check
wt, err := r.Worktree()
@@ -390,6 +398,22 @@ func (s *RepositorySuite) TestDeleteRemote(c *C) {
c.Assert(alt, IsNil)
}
+func (s *RepositorySuite) TestEmptyCreateBranch(c *C) {
+ r, _ := Init(memory.NewStorage(), nil)
+ err := r.CreateBranch(&config.Branch{})
+
+ c.Assert(err, NotNil)
+}
+
+func (s *RepositorySuite) TestInvalidCreateBranch(c *C) {
+ r, _ := Init(memory.NewStorage(), nil)
+ err := r.CreateBranch(&config.Branch{
+ Name: "-foo",
+ })
+
+ c.Assert(err, NotNil)
+}
+
func (s *RepositorySuite) TestCreateBranchAndBranch(c *C) {
r, _ := Init(memory.NewStorage(), nil)
testBranch := &config.Branch{
@@ -518,6 +542,30 @@ func (s *RepositorySuite) TestPlainInit(c *C) {
c.Assert(cfg.Core.IsBare, Equals, true)
}
+func (s *RepositorySuite) TestPlainInitWithOptions(c *C) {
+ dir, clean := s.TemporalDir()
+ defer clean()
+
+ r, err := PlainInitWithOptions(dir, &PlainInitOptions{
+ InitOptions: InitOptions{
+ DefaultBranch: "refs/heads/foo",
+ },
+ Bare: false,
+ })
+ c.Assert(err, IsNil)
+ c.Assert(r, NotNil)
+
+ cfg, err := r.Config()
+ c.Assert(err, IsNil)
+ c.Assert(cfg.Core.IsBare, Equals, false)
+
+ createCommit(c, r)
+
+ ref, err := r.Head()
+ c.Assert(err, IsNil)
+ c.Assert(ref.Name().String(), Equals, "refs/heads/foo")
+}
+
func (s *RepositorySuite) TestPlainInitAlreadyExists(c *C) {
dir, clean := s.TemporalDir()
defer clean()
@@ -767,6 +815,101 @@ func (s *RepositorySuite) TestPlainClone(c *C) {
c.Assert(cfg.Branches["master"].Name, Equals, "master")
}
+func (s *RepositorySuite) TestPlainCloneBareAndShared(c *C) {
+ dir, clean := s.TemporalDir()
+ defer clean()
+
+ remote := s.GetBasicLocalRepositoryURL()
+
+ r, err := PlainClone(dir, true, &CloneOptions{
+ URL: remote,
+ Shared: true,
+ })
+ c.Assert(err, IsNil)
+
+ altpath := path.Join(dir, "objects", "info", "alternates")
+ _, err = os.Stat(altpath)
+ c.Assert(err, IsNil)
+
+ data, err := os.ReadFile(altpath)
+ c.Assert(err, IsNil)
+
+ line := path.Join(remote, GitDirName, "objects") + "\n"
+ c.Assert(string(data), Equals, line)
+
+ cfg, err := r.Config()
+ c.Assert(err, IsNil)
+ c.Assert(cfg.Branches, HasLen, 1)
+ c.Assert(cfg.Branches["master"].Name, Equals, "master")
+}
+
+func (s *RepositorySuite) TestPlainCloneShared(c *C) {
+ dir, clean := s.TemporalDir()
+ defer clean()
+
+ remote := s.GetBasicLocalRepositoryURL()
+
+ r, err := PlainClone(dir, false, &CloneOptions{
+ URL: remote,
+ Shared: true,
+ })
+ c.Assert(err, IsNil)
+
+ altpath := path.Join(dir, GitDirName, "objects", "info", "alternates")
+ _, err = os.Stat(altpath)
+ c.Assert(err, IsNil)
+
+ data, err := os.ReadFile(altpath)
+ c.Assert(err, IsNil)
+
+ line := path.Join(remote, GitDirName, "objects") + "\n"
+ c.Assert(string(data), Equals, line)
+
+ cfg, err := r.Config()
+ c.Assert(err, IsNil)
+ c.Assert(cfg.Branches, HasLen, 1)
+ c.Assert(cfg.Branches["master"].Name, Equals, "master")
+}
+
+func (s *RepositorySuite) TestPlainCloneSharedHttpShouldReturnError(c *C) {
+ dir, clean := s.TemporalDir()
+ defer clean()
+
+ remote := "http://somerepo"
+
+ _, err := PlainClone(dir, false, &CloneOptions{
+ URL: remote,
+ Shared: true,
+ })
+ c.Assert(err, Equals, ErrAlternatePathNotSupported)
+}
+
+func (s *RepositorySuite) TestPlainCloneSharedHttpsShouldReturnError(c *C) {
+ dir, clean := s.TemporalDir()
+ defer clean()
+
+ remote := "https://somerepo"
+
+ _, err := PlainClone(dir, false, &CloneOptions{
+ URL: remote,
+ Shared: true,
+ })
+ c.Assert(err, Equals, ErrAlternatePathNotSupported)
+}
+
+func (s *RepositorySuite) TestPlainCloneSharedSSHShouldReturnError(c *C) {
+ dir, clean := s.TemporalDir()
+ defer clean()
+
+ remote := "ssh://somerepo"
+
+ _, err := PlainClone(dir, false, &CloneOptions{
+ URL: remote,
+ Shared: true,
+ })
+ c.Assert(err, Equals, ErrAlternatePathNotSupported)
+}
+
func (s *RepositorySuite) TestPlainCloneWithRemoteName(c *C) {
dir, clean := s.TemporalDir()
defer clean()
@@ -2677,6 +2820,20 @@ func (s *RepositorySuite) TestDeleteTagAnnotatedUnpacked(c *C) {
c.Assert(err, Equals, plumbing.ErrObjectNotFound)
}
+func (s *RepositorySuite) TestInvalidTagName(c *C) {
+ r, err := Init(memory.NewStorage(), nil)
+ c.Assert(err, IsNil)
+ for i, name := range []string{
+ "",
+ "foo bar",
+ "foo\tbar",
+ "foo\nbar",
+ } {
+ _, err = r.CreateTag(name, plumbing.ZeroHash, nil)
+ c.Assert(err, NotNil, Commentf("case %d %q", i, name))
+ }
+}
+
func (s *RepositorySuite) TestBranches(c *C) {
f := fixtures.ByURL("https://github.com/git-fixtures/root-references.git").One()
sto := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault())
@@ -3191,20 +3348,25 @@ func BenchmarkObjects(b *testing.B) {
}
func BenchmarkPlainClone(b *testing.B) {
- for i := 0; i < b.N; i++ {
- t, err := os.MkdirTemp("", "")
- if err != nil {
- b.Fatal(err)
- }
- _, err = PlainClone(t, false, &CloneOptions{
- URL: "https://github.com/knqyf263/vuln-list",
- Depth: 1,
+ b.StopTimer()
+ clone := func(b *testing.B) {
+ _, err := PlainClone(b.TempDir(), true, &CloneOptions{
+ URL: "https://github.com/go-git/go-git.git",
+ Depth: 1,
+ Tags: NoTags,
+ SingleBranch: true,
})
if err != nil {
b.Error(err)
}
- b.StopTimer()
- os.RemoveAll(t)
- b.StartTimer()
+ }
+
+ // Warm up first, as the initial clone could have a higher cost which
+ // may skew results.
+ clone(b)
+
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ clone(b)
}
}
diff --git a/storage/filesystem/dotgit/dotgit.go b/storage/filesystem/dotgit/dotgit.go
index e02e6dd..31c4694 100644
--- a/storage/filesystem/dotgit/dotgit.go
+++ b/storage/filesystem/dotgit/dotgit.go
@@ -8,18 +8,21 @@ import (
"fmt"
"io"
"os"
+ "path"
"path/filepath"
+ "reflect"
+ "runtime"
"sort"
"strings"
"time"
- "github.com/go-git/go-billy/v5/osfs"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/hash"
"github.com/go-git/go-git/v5/storage"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-billy/v5"
+ "github.com/go-git/go-billy/v5/helper/chroot"
)
const (
@@ -38,6 +41,7 @@ const (
remotesPath = "remotes"
logsPath = "logs"
worktreesPath = "worktrees"
+ alternatesPath = "alternates"
tmpPackedRefsPrefix = "._packed-refs"
@@ -78,6 +82,10 @@ type Options struct {
// KeepDescriptors makes the file descriptors to be reused but they will
// need to be manually closed calling Close().
KeepDescriptors bool
+ // AlternatesFS provides the billy filesystem to be used for Git Alternates.
+ // If none is provided, it falls back to using the underlying instance used for
+ // DotGit.
+ AlternatesFS billy.Filesystem
}
// The DotGit type represents a local git repository on disk. This
@@ -1105,38 +1113,93 @@ func (d *DotGit) Module(name string) (billy.Filesystem, error) {
return d.fs.Chroot(d.fs.Join(modulePath, name))
}
+func (d *DotGit) AddAlternate(remote string) error {
+ altpath := d.fs.Join(objectsPath, infoPath, alternatesPath)
+
+ f, err := d.fs.OpenFile(altpath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0640)
+ if err != nil {
+ return fmt.Errorf("cannot open file: %w", err)
+ }
+ defer f.Close()
+
+ // Locking on Windows throws an error; based on the comments in
+ // https://github.com/go-git/go-git/pull/860#issuecomment-1751823044,
+ // do not lock on the Windows platform.
+ if runtime.GOOS != "windows" {
+ if err = f.Lock(); err != nil {
+ return fmt.Errorf("cannot lock file: %w", err)
+ }
+ defer f.Unlock()
+ }
+
+ line := path.Join(remote, objectsPath) + "\n"
+ _, err = io.WriteString(f, line)
+ if err != nil {
+ return fmt.Errorf("error writing 'alternates' file: %w", err)
+ }
+
+ return nil
+}
+
// Alternates returns DotGit(s) based off paths in objects/info/alternates if
// available. This can be used to checks if it's a shared repository.
func (d *DotGit) Alternates() ([]*DotGit, error) {
- altpath := d.fs.Join("objects", "info", "alternates")
+ altpath := d.fs.Join(objectsPath, infoPath, alternatesPath)
f, err := d.fs.Open(altpath)
if err != nil {
return nil, err
}
defer f.Close()
+ fs := d.options.AlternatesFS
+ if fs == nil {
+ fs = d.fs
+ }
+
var alternates []*DotGit
+ seen := make(map[string]struct{})
// Read alternate paths line-by-line and create DotGit objects.
scanner := bufio.NewScanner(f)
for scanner.Scan() {
path := scanner.Text()
- if !filepath.IsAbs(path) {
- // For relative paths, we can perform an internal conversion to
- // slash so that they work cross-platform.
- slashPath := filepath.ToSlash(path)
- // If the path is not absolute, it must be relative to object
- // database (.git/objects/info).
- // https://www.kernel.org/pub/software/scm/git/docs/gitrepository-layout.html
- // Hence, derive a path relative to DotGit's root.
- // "../../../reponame/.git/" -> "../../reponame/.git"
- // Remove the first ../
- relpath := filepath.Join(strings.Split(slashPath, "/")[1:]...)
- normalPath := filepath.FromSlash(relpath)
- path = filepath.Join(d.fs.Root(), normalPath)
+
+ // Avoid creating multiple dotgits for the same alternate path.
+ if _, ok := seen[path]; ok {
+ continue
+ }
+
+ seen[path] = struct{}{}
+
+ if filepath.IsAbs(path) {
+ // Handling absolute paths should be straightforward. However, the default osfs (Chroot)
+ // tries to concatenate an abs path with the root path in some operations (e.g. Stat),
+ // which leads to unexpected errors. Therefore, make the path relative to the current FS instead.
+ if reflect.TypeOf(fs) == reflect.TypeOf(&chroot.ChrootHelper{}) {
+ path, err = filepath.Rel(fs.Root(), path)
+ if err != nil {
+ return nil, fmt.Errorf("cannot make path %q relative: %w", path, err)
+ }
+ }
+ } else {
+ // By Git convention, relative paths should be based on the object database (.git/objects/info)
+ // location, as per: https://www.kernel.org/pub/software/scm/git/docs/gitrepository-layout.html
+ // However, due to the nature of go-git and its filesystem handling via Billy, paths cannot
+ // cross its "chroot boundaries". Therefore, ignore any leading "../" and treat the path as
+ // relative to the fs root. If that is not correct for the dotgit fs in use, set a different one via AlternatesFS.
+ abs := filepath.Join(string(filepath.Separator), filepath.ToSlash(path))
+ path = filepath.FromSlash(abs)
+ }
+
+ // Aligns with upstream behavior: return an error if the target path is not a valid directory.
+ if fi, err := fs.Stat(path); err != nil || !fi.IsDir() {
+ return nil, fmt.Errorf("invalid object directory %q: %w", path, err)
+ }
+ afs, err := fs.Chroot(filepath.Dir(path))
+ if err != nil {
+ return nil, fmt.Errorf("cannot chroot %q: %w", path, err)
}
- fs := osfs.New(filepath.Dir(path))
- alternates = append(alternates, New(fs))
+ alternates = append(alternates, New(afs))
}
if err = scanner.Err(); err != nil {
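
Taken together, AddAlternate appends one object-database path per line to objects/info/alternates, while Alternates resolves each unique line into its own DotGit, using AlternatesFS (when set) so entries can be resolved outside the repository's own chroot. A rough sketch of using these at the dotgit level, assuming two sibling repositories repo1 and repo2 under a common root:

package example

import (
	"github.com/go-git/go-billy/v5/osfs"
	"github.com/go-git/go-git/v5/storage/filesystem/dotgit"
)

// shareObjects makes root/repo2/.git borrow objects from root/repo1/.git.
func shareObjects(root string) ([]*dotgit.DotGit, error) {
	rootFS := osfs.New(root)

	dotFS, err := rootFS.Chroot("repo2/.git")
	if err != nil {
		return nil, err
	}

	// Resolve alternates against rootFS so the relative entry below stays reachable.
	dir := dotgit.NewWithOptions(dotFS, dotgit.Options{AlternatesFS: rootFS})

	// Appends "../repo1/.git/objects" to objects/info/alternates.
	if err := dir.AddAlternate("../repo1/.git"); err != nil {
		return nil, err
	}

	// Each valid, deduplicated entry becomes its own DotGit.
	return dir.Alternates()
}
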
diff --git a/storage/filesystem/dotgit/dotgit_test.go b/storage/filesystem/dotgit/dotgit_test.go
index 1b6c113..2cbdb0c 100644
--- a/storage/filesystem/dotgit/dotgit_test.go
+++ b/storage/filesystem/dotgit/dotgit_test.go
@@ -6,6 +6,7 @@ import (
"io"
"os"
"path/filepath"
+ "regexp"
"runtime"
"strings"
"testing"
@@ -15,6 +16,7 @@ import (
"github.com/go-git/go-billy/v5/util"
fixtures "github.com/go-git/go-git-fixtures/v4"
"github.com/go-git/go-git/v5/plumbing"
+ "github.com/stretchr/testify/assert"
. "gopkg.in/check.v1"
)
@@ -810,53 +812,139 @@ func (s *SuiteDotGit) TestPackRefs(c *C) {
c.Assert(ref.Hash().String(), Equals, "b8d3ffab552895c19b9fcf7aa264d277cde33881")
}
-func (s *SuiteDotGit) TestAlternates(c *C) {
- fs, clean := s.TemporalFilesystem()
- defer clean()
+func TestAlternatesDefault(t *testing.T) {
+ // Create a new dotgit object.
+ dotFS := osfs.New(t.TempDir())
- // Create a new dotgit object and initialize.
- dir := New(fs)
- err := dir.Initialize()
- c.Assert(err, IsNil)
+ testAlternates(t, dotFS, dotFS)
+}
- // Create alternates file.
- altpath := fs.Join("objects", "info", "alternates")
- f, err := fs.Create(altpath)
- c.Assert(err, IsNil)
+func TestAlternatesWithFS(t *testing.T) {
+ // Create a new dotgit object with a specific FS for alternates.
+ altFS := osfs.New(t.TempDir())
+ dotFS, _ := altFS.Chroot("repo2")
- // Multiple alternates.
- var strContent string
- if runtime.GOOS == "windows" {
- strContent = "C:\\Users\\username\\repo1\\.git\\objects\r\n..\\..\\..\\rep2\\.git\\objects"
- } else {
- strContent = "/Users/username/rep1//.git/objects\n../../../rep2//.git/objects"
+ testAlternates(t, dotFS, altFS)
+}
+
+func TestAlternatesWithBoundOS(t *testing.T) {
+ // Create a new dotgit object with a specific FS for alternates.
+ altFS := osfs.New(t.TempDir(), osfs.WithBoundOS())
+ dotFS, _ := altFS.Chroot("repo2")
+
+ testAlternates(t, dotFS, altFS)
+}
+
+func testAlternates(t *testing.T, dotFS, altFS billy.Filesystem) {
+ tests := []struct {
+ name string
+ in []string
+ inWindows []string
+ setup func()
+ wantErr bool
+ wantRoots []string
+ }{
+ {
+ name: "no alternates",
+ },
+ {
+ name: "abs path",
+ in: []string{filepath.Join(altFS.Root(), "./repo1/.git/objects")},
+ inWindows: []string{filepath.Join(altFS.Root(), ".\\repo1\\.git\\objects")},
+ setup: func() {
+ err := altFS.MkdirAll(filepath.Join("repo1", ".git", "objects"), 0o700)
+ assert.NoError(t, err)
+ },
+ wantRoots: []string{filepath.Join("repo1", ".git")},
+ },
+ {
+ name: "rel path",
+ in: []string{"../../../repo3//.git/objects"},
+ inWindows: []string{"..\\..\\..\\repo3\\.git\\objects"},
+ setup: func() {
+ err := altFS.MkdirAll(filepath.Join("repo3", ".git", "objects"), 0o700)
+ assert.NoError(t, err)
+ },
+ wantRoots: []string{filepath.Join("repo3", ".git")},
+ },
+ {
+ name: "invalid abs path",
+ in: []string{"/alt/target2"},
+ inWindows: []string{"\\alt\\target2"},
+ wantErr: true,
+ },
+ {
+ name: "invalid rel path",
+ in: []string{"../../../alt/target3"},
+ inWindows: []string{"..\\..\\..\\alt\\target3"},
+ wantErr: true,
+ },
}
- content := []byte(strContent)
- f.Write(content)
- f.Close()
- dotgits, err := dir.Alternates()
- c.Assert(err, IsNil)
- if runtime.GOOS == "windows" {
- c.Assert(dotgits[0].fs.Root(), Equals, "C:\\Users\\username\\repo1\\.git")
- } else {
- c.Assert(dotgits[0].fs.Root(), Equals, "/Users/username/rep1/.git")
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ dir := NewWithOptions(dotFS, Options{AlternatesFS: altFS})
+ err := dir.Initialize()
+ assert.NoError(t, err)
+
+ content := strings.Join(tc.in, "\n")
+ if runtime.GOOS == "windows" {
+ content = strings.Join(tc.inWindows, "\r\n")
+ }
+
+ // Create alternates file.
+ altpath := dotFS.Join("objects", "info", "alternates")
+ f, err := dotFS.Create(altpath)
+ assert.NoError(t, err)
+ f.Write([]byte(content))
+ f.Close()
+
+ if tc.setup != nil {
+ tc.setup()
+ }
+
+ dotgits, err := dir.Alternates()
+ if tc.wantErr {
+ assert.Error(t, err)
+ } else {
+ assert.NoError(t, err)
+ }
+
+ for i, d := range dotgits {
+ assert.Regexp(t, "^"+regexp.QuoteMeta(altFS.Root()), d.fs.Root())
+ assert.Regexp(t, regexp.QuoteMeta(tc.wantRoots[i])+"$", d.fs.Root())
+ }
+ })
}
+}
- // For relative path:
- // /some/absolute/path/to/dot-git -> /some/absolute/path
- pathx := strings.Split(fs.Root(), string(filepath.Separator))
- pathx = pathx[:len(pathx)-2]
- // Use string.Join() to avoid malformed absolutepath on windows
- // C:Users\\User\\... instead of C:\\Users\\appveyor\\... .
- resolvedPath := strings.Join(pathx, string(filepath.Separator))
- // Append the alternate path to the resolvedPath
- expectedPath := fs.Join(string(filepath.Separator), resolvedPath, "rep2", ".git")
+func TestAlternatesDupes(t *testing.T) {
+ dotFS := osfs.New(t.TempDir())
+ dir := New(dotFS)
+ err := dir.Initialize()
+ assert.NoError(t, err)
+
+ path := filepath.Join(dotFS.Root(), "target3")
+ dupes := []string{path, path, path, path, path}
+
+ content := strings.Join(dupes, "\n")
if runtime.GOOS == "windows" {
- expectedPath = fs.Join(resolvedPath, "rep2", ".git")
+ content = strings.Join(dupes, "\r\n")
}
- c.Assert(dotgits[1].fs.Root(), Equals, expectedPath)
+ err = dotFS.MkdirAll("target3", 0o700)
+ assert.NoError(t, err)
+
+ // Create alternates file.
+ altpath := dotFS.Join("objects", "info", "alternates")
+ f, err := dotFS.Create(altpath)
+ assert.NoError(t, err)
+ f.Write([]byte(content))
+ f.Close()
+
+ dotgits, err := dir.Alternates()
+ assert.NoError(t, err)
+ assert.Len(t, dotgits, 1)
}
type norwfs struct {
diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go
index 846a7b8..e812fe9 100644
--- a/storage/filesystem/object.go
+++ b/storage/filesystem/object.go
@@ -146,6 +146,19 @@ func (s *ObjectStorage) SetEncodedObject(o plumbing.EncodedObject) (h plumbing.H
return o.Hash(), err
}
+// LazyWriter returns a lazy ObjectWriter that is bound to a DotGit file.
+// It first writes the header, passing on the object type and size, so
+// that the object contents can be written later, without the need to
+// create a MemoryObject and buffer its entire contents in memory.
+func (s *ObjectStorage) LazyWriter() (w io.WriteCloser, wh func(typ plumbing.ObjectType, sz int64) error, err error) {
+ ow, err := s.dir.NewObject()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return ow, ow.WriteHeader, nil
+}
+
// HasEncodedObject returns nil if the object exists, without actually
// reading the object data from storage.
func (s *ObjectStorage) HasEncodedObject(h plumbing.Hash) (err error) {
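
As a usage note, LazyWriter hands back both the writer and the header function, so a large object of known size can be streamed straight to disk. A minimal sketch, assuming the caller already has a *filesystem.ObjectStorage and knows the blob size up front:

package example

import (
	"io"

	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/storage/filesystem"
)

// writeBlobStreaming writes a blob of known size without buffering the
// whole payload in a MemoryObject first.
func writeBlobStreaming(s *filesystem.ObjectStorage, r io.Reader, size int64) error {
	w, writeHeader, err := s.LazyWriter()
	if err != nil {
		return err
	}
	defer w.Close()

	// The header (object type and size) must be written before the contents.
	if err := writeHeader(plumbing.BlobObject, size); err != nil {
		return err
	}

	_, err = io.Copy(w, r)
	return err
}
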
diff --git a/storage/filesystem/storage.go b/storage/filesystem/storage.go
index 7e7a2c5..951ea00 100644
--- a/storage/filesystem/storage.go
+++ b/storage/filesystem/storage.go
@@ -37,6 +37,10 @@ type Options struct {
// LargeObjectThreshold maximum object size (in bytes) that will be read in to memory.
// If left unset or set to 0 there is no limit
LargeObjectThreshold int64
+ // AlternatesFS provides the billy filesystem to be used for Git Alternates.
+ // If none is provided, it falls back to using the underlying instance used for
+ // DotGit.
+ AlternatesFS billy.Filesystem
}
// NewStorage returns a new Storage backed by a given `fs.Filesystem` and cache.
@@ -49,6 +53,7 @@ func NewStorage(fs billy.Filesystem, cache cache.Object) *Storage {
func NewStorageWithOptions(fs billy.Filesystem, cache cache.Object, ops Options) *Storage {
dirOps := dotgit.Options{
ExclusiveAccess: ops.ExclusiveAccess,
+ AlternatesFS: ops.AlternatesFS,
}
dir := dotgit.NewWithOptions(fs, dirOps)
@@ -74,3 +79,7 @@ func (s *Storage) Filesystem() billy.Filesystem {
func (s *Storage) Init() error {
return s.dir.Initialize()
}
+
+func (s *Storage) AddAlternate(remote string) error {
+ return s.dir.AddAlternate(remote)
+}
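
At the storage level, the same AlternatesFS option is threaded through NewStorageWithOptions down to dotgit, which is how a repository whose alternates point at a sibling repository can be opened. A minimal sketch, assuming both repositories live under a common root directory:

package example

import (
	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-billy/v5/osfs"
	"github.com/go-git/go-git/v5/plumbing/cache"
	"github.com/go-git/go-git/v5/storage/filesystem"
)

// openWithAlternates opens root/repo/.git while resolving alternates
// against root, so entries such as "../other/.git/objects" stay reachable.
func openWithAlternates(root string) (*git.Repository, error) {
	rootFS := osfs.New(root)

	wt, err := rootFS.Chroot("repo")
	if err != nil {
		return nil, err
	}
	dot, err := wt.Chroot(git.GitDirName)
	if err != nil {
		return nil, err
	}

	storer := filesystem.NewStorageWithOptions(dot, cache.NewObjectLRUDefault(),
		filesystem.Options{AlternatesFS: rootFS})

	return git.Open(storer, wt)
}
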
diff --git a/storage/memory/storage.go b/storage/memory/storage.go
index ef6a445..79211c7 100644
--- a/storage/memory/storage.go
+++ b/storage/memory/storage.go
@@ -202,6 +202,10 @@ func (o *ObjectStorage) DeleteLooseObject(plumbing.Hash) error {
return errNotSupported
}
+func (o *ObjectStorage) AddAlternate(remote string) error {
+ return errNotSupported
+}
+
type TxObjectStorage struct {
Storage *ObjectStorage
Objects map[plumbing.Hash]plumbing.EncodedObject
diff --git a/storage/transactional/object.go b/storage/transactional/object.go
index 5d102b0..b43c96d 100644
--- a/storage/transactional/object.go
+++ b/storage/transactional/object.go
@@ -82,3 +82,7 @@ func (o *ObjectStorage) Commit() error {
return err
})
}
+
+func (o *ObjectStorage) AddAlternate(remote string) error {
+ return o.temporal.AddAlternate(remote)
+}
diff --git a/utils/binary/read.go b/utils/binary/read.go
index a14d48d..b8f9df1 100644
--- a/utils/binary/read.go
+++ b/utils/binary/read.go
@@ -1,4 +1,4 @@
-// Package binary implements sintax-sugar functions on top of the standard
+// Package binary implements syntax-sugar functions on top of the standard
// library binary package
package binary
diff --git a/utils/ioutil/common.go b/utils/ioutil/common.go
index b0ace4e..235af71 100644
--- a/utils/ioutil/common.go
+++ b/utils/ioutil/common.go
@@ -195,7 +195,7 @@ func NewWriterOnError(w io.Writer, notify func(error)) io.Writer {
}
// NewWriteCloserOnError returns a io.WriteCloser that call the notify function
-//when an unexpected (!io.EOF) error happens, after call Write function.
+// when an unexpected (!io.EOF) error happens, after calling the Write function.
func NewWriteCloserOnError(w io.WriteCloser, notify func(error)) io.WriteCloser {
return NewWriteCloser(NewWriterOnError(w, notify), w)
}
@@ -208,13 +208,3 @@ func (r *writerOnError) Write(p []byte) (n int, err error) {
return
}
-
-type PipeReader interface {
- io.ReadCloser
- CloseWithError(err error) error
-}
-
-type PipeWriter interface {
- io.WriteCloser
- CloseWithError(err error) error
-}
diff --git a/utils/ioutil/pipe.go b/utils/ioutil/pipe.go
deleted file mode 100644
index f30c452..0000000
--- a/utils/ioutil/pipe.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build !js
-
-package ioutil
-
-import "io"
-
-func Pipe() (PipeReader, PipeWriter) {
- return io.Pipe()
-}
diff --git a/utils/ioutil/pipe_js.go b/utils/ioutil/pipe_js.go
deleted file mode 100644
index cf102e6..0000000
--- a/utils/ioutil/pipe_js.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build js
-
-package ioutil
-
-import "github.com/acomagu/bufpipe"
-
-func Pipe() (PipeReader, PipeWriter) {
- return bufpipe.New(nil)
-}
diff --git a/utils/merkletrie/difftree.go b/utils/merkletrie/difftree.go
index 9f5145a..8090942 100644
--- a/utils/merkletrie/difftree.go
+++ b/utils/merkletrie/difftree.go
@@ -55,7 +55,7 @@ package merkletrie
// Here is a full list of all the cases that are similar and how to
// merge them together into more general cases. Each general case
// is labeled with an uppercase letter for further reference, and it
-// is followed by the pseudocode of the checks you have to perfrom
+// is followed by the pseudocode of the checks you have to perform
// on both noders to see if you are in such a case, the actions to
// perform (i.e. what changes to output) and how to advance the
// iterators of each tree to continue the comparison process.
diff --git a/utils/merkletrie/filesystem/node.go b/utils/merkletrie/filesystem/node.go
index f9a54d7..6c91f44 100644
--- a/utils/merkletrie/filesystem/node.go
+++ b/utils/merkletrie/filesystem/node.go
@@ -108,6 +108,10 @@ func (n *node) calculateChildren() error {
continue
}
+ if file.Mode()&os.ModeSocket != 0 {
+ continue
+ }
+
c, err := n.newChildNode(file)
if err != nil {
return err
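
The added check skips unix domain sockets while walking the worktree, since sockets cannot be stored as git blobs. A tiny standalone sketch of the same mode test (isSocket is a hypothetical helper):

package example

import "os"

// isSocket reports whether the file at path is a unix domain socket.
func isSocket(path string) (bool, error) {
	fi, err := os.Stat(path)
	if err != nil {
		return false, err
	}
	return fi.Mode()&os.ModeSocket != 0, nil
}
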
diff --git a/utils/merkletrie/filesystem/node_test.go b/utils/merkletrie/filesystem/node_test.go
index 159e63d..b76abc4 100644
--- a/utils/merkletrie/filesystem/node_test.go
+++ b/utils/merkletrie/filesystem/node_test.go
@@ -2,9 +2,12 @@ package filesystem
import (
"bytes"
+ "fmt"
"io"
+ "net"
"os"
"path"
+ "runtime"
"testing"
"github.com/go-git/go-git/v5/plumbing"
@@ -13,6 +16,7 @@ import (
"github.com/go-git/go-billy/v5"
"github.com/go-git/go-billy/v5/memfs"
+ "github.com/go-git/go-billy/v5/osfs"
. "gopkg.in/check.v1"
)
@@ -196,6 +200,28 @@ func (s *NoderSuite) TestDiffDirectory(c *C) {
c.Assert(a, Equals, merkletrie.Modify)
}
+func (s *NoderSuite) TestSocket(c *C) {
+ if runtime.GOOS == "windows" {
+ c.Skip("socket files do not exist on windows")
+ }
+
+ td, err := os.MkdirTemp("", "socket-test")
+ defer os.RemoveAll(td)
+ c.Assert(err, IsNil)
+
+ sock, err := net.ListenUnix("unix", &net.UnixAddr{Name: fmt.Sprintf("%s/socket", td), Net: "unix"})
+ c.Assert(err, IsNil)
+ defer sock.Close()
+
+ fsA := osfs.New(td)
+ WriteFile(fsA, "foo", []byte("foo"), 0644)
+
+ noder := NewRootNode(fsA, nil)
+ childs, err := noder.Children()
+ c.Assert(err, IsNil)
+ c.Assert(childs, HasLen, 1)
+}
+
func WriteFile(fs billy.Filesystem, filename string, data []byte, perm os.FileMode) error {
f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
if err != nil {
diff --git a/utils/merkletrie/internal/fsnoder/file.go b/utils/merkletrie/internal/fsnoder/file.go
index 0bb908b..453efee 100644
--- a/utils/merkletrie/internal/fsnoder/file.go
+++ b/utils/merkletrie/internal/fsnoder/file.go
@@ -32,7 +32,7 @@ func newFile(name, contents string) (*file, error) {
func (f *file) Hash() []byte {
if f.hash == nil {
h := fnv.New64a()
- h.Write([]byte(f.contents)) // it nevers returns an error.
+ h.Write([]byte(f.contents)) // it never returns an error.
f.hash = h.Sum(nil)
}
diff --git a/utils/trace/trace.go b/utils/trace/trace.go
new file mode 100644
index 0000000..3e15c5b
--- /dev/null
+++ b/utils/trace/trace.go
@@ -0,0 +1,55 @@
+package trace
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "sync/atomic"
+)
+
+var (
+ // logger is the logger to use for tracing.
+ logger = newLogger()
+
+ // current holds the targets that are enabled for tracing.
+ current atomic.Int32
+)
+
+func newLogger() *log.Logger {
+ return log.New(os.Stderr, "", log.Ltime|log.Lmicroseconds|log.Lshortfile)
+}
+
+// Target is a tracing target.
+type Target int32
+
+const (
+ // General traces general operations.
+ General Target = 1 << iota
+
+ // Packet traces git packets.
+ Packet
+)
+
+// SetTarget sets the tracing targets.
+func SetTarget(target Target) {
+ current.Store(int32(target))
+}
+
+// SetLogger sets the logger to use for tracing.
+func SetLogger(l *log.Logger) {
+ logger = l
+}
+
+// Print prints the given message only if the target is enabled.
+func (t Target) Print(args ...interface{}) {
+ if int32(t)&current.Load() != 0 {
+ logger.Output(2, fmt.Sprint(args...)) // nolint: errcheck
+ }
+}
+
+// Printf prints the given message only if the target is enabled.
+func (t Target) Printf(format string, args ...interface{}) {
+ if int32(t)&current.Load() != 0 {
+ logger.Output(2, fmt.Sprintf(format, args...)) // nolint: errcheck
+ }
+}
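
Targets are bit flags, so several can be enabled at once, and Print/Printf become no-ops for any target whose bit is not set. A short usage sketch:

package example

import (
	"log"
	"os"

	"github.com/go-git/go-git/v5/utils/trace"
)

func enableTracing() {
	// Optionally route trace output somewhere other than stderr.
	trace.SetLogger(log.New(os.Stdout, "go-git: ", log.Ltime))

	// Enable both targets; SetTarget replaces any previous value.
	trace.SetTarget(trace.General | trace.Packet)

	trace.General.Printf("fetching %d refs", 3) // printed
	trace.Packet.Print("0032want ...")          // printed

	trace.SetTarget(trace.Packet)
	trace.General.Print("now suppressed") // no output
}
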
diff --git a/utils/trace/trace_test.go b/utils/trace/trace_test.go
new file mode 100644
index 0000000..6f8f140
--- /dev/null
+++ b/utils/trace/trace_test.go
@@ -0,0 +1,95 @@
+package trace
+
+import (
+ "bytes"
+ "io"
+ "log"
+ "testing"
+)
+
+func TestMain(m *testing.M) {
+ defer SetLogger(newLogger())
+ if code := m.Run(); code != 0 {
+ panic(code)
+ }
+}
+
+func setUpTest(t testing.TB, buf *bytes.Buffer) {
+ t.Cleanup(func() {
+ if buf != nil {
+ buf.Reset()
+ }
+ SetTarget(0)
+ })
+ w := io.Discard
+ if buf != nil {
+ w = buf
+ }
+ SetLogger(log.New(w, "", 0))
+}
+
+func TestEmpty(t *testing.T) {
+ var buf bytes.Buffer
+ setUpTest(t, &buf)
+ General.Print("test")
+ if buf.String() != "" {
+ t.Error("expected empty string")
+ }
+}
+
+func TestOneTarget(t *testing.T) {
+ var buf bytes.Buffer
+ setUpTest(t, &buf)
+ SetTarget(General)
+ General.Print("test")
+ if buf.String() != "test\n" {
+ t.Error("expected 'test'")
+ }
+}
+
+func TestMultipleTargets(t *testing.T) {
+ var buf bytes.Buffer
+ setUpTest(t, &buf)
+ SetTarget(General | Packet)
+ General.Print("a")
+ Packet.Print("b")
+ if buf.String() != "a\nb\n" {
+ t.Error("expected 'a\nb\n'")
+ }
+}
+
+func TestPrintf(t *testing.T) {
+ var buf bytes.Buffer
+ setUpTest(t, &buf)
+ SetTarget(General)
+ General.Printf("a %d", 1)
+ if buf.String() != "a 1\n" {
+ t.Error("expected 'a 1\n'")
+ }
+}
+
+func TestDisabledMultipleTargets(t *testing.T) {
+ var buf bytes.Buffer
+ setUpTest(t, &buf)
+ SetTarget(General)
+ General.Print("a")
+ Packet.Print("b")
+ if buf.String() != "a\n" {
+ t.Error("expected 'a\n'")
+ }
+}
+
+func BenchmarkDisabledTarget(b *testing.B) {
+ setUpTest(b, nil)
+ for i := 0; i < b.N; i++ {
+ General.Print("test")
+ }
+}
+
+func BenchmarkEnabledTarget(b *testing.B) {
+ setUpTest(b, nil)
+ SetTarget(General)
+ for i := 0; i < b.N; i++ {
+ General.Print("test")
+ }
+}
diff --git a/worktree.go b/worktree.go
index 595dcea..4dfe036 100644
--- a/worktree.go
+++ b/worktree.go
@@ -7,6 +7,7 @@ import (
"io"
"os"
"path/filepath"
+ "runtime"
"strings"
"github.com/go-git/go-billy/v5"
@@ -78,6 +79,7 @@ func (w *Worktree) PullContext(ctx context.Context, o *PullOptions) error {
Force: o.Force,
InsecureSkipTLS: o.InsecureSkipTLS,
CABundle: o.CABundle,
+ ProxyOptions: o.ProxyOptions,
})
updated := true
@@ -94,7 +96,15 @@ func (w *Worktree) PullContext(ctx context.Context, o *PullOptions) error {
head, err := w.r.Head()
if err == nil {
- headAheadOfRef, err := isFastForward(w.r.Storer, ref.Hash(), head.Hash())
+ // if we don't have a shallow list, just ignore it
+ shallowList, _ := w.r.Storer.Shallow()
+
+ var earliestShallow *plumbing.Hash
+ if len(shallowList) > 0 {
+ earliestShallow = &shallowList[0]
+ }
+
+ headAheadOfRef, err := isFastForward(w.r.Storer, ref.Hash(), head.Hash(), earliestShallow)
if err != nil {
return err
}
@@ -103,7 +113,7 @@ func (w *Worktree) PullContext(ctx context.Context, o *PullOptions) error {
return NoErrAlreadyUpToDate
}
- ff, err := isFastForward(w.r.Storer, head.Hash(), ref.Hash())
+ ff, err := isFastForward(w.r.Storer, head.Hash(), ref.Hash(), earliestShallow)
if err != nil {
return err
}
@@ -187,7 +197,12 @@ func (w *Worktree) Checkout(opts *CheckoutOptions) error {
return w.Reset(ro)
}
+
func (w *Worktree) createBranch(opts *CheckoutOptions) error {
+ if err := opts.Branch.Validate(); err != nil {
+ return err
+ }
+
_, err := w.r.Storer.Reference(opts.Branch)
if err == nil {
return fmt.Errorf("a branch named %q already exists", opts.Branch)
@@ -212,20 +227,17 @@ func (w *Worktree) createBranch(opts *CheckoutOptions) error {
}
func (w *Worktree) getCommitFromCheckoutOptions(opts *CheckoutOptions) (plumbing.Hash, error) {
- if !opts.Hash.IsZero() {
- return opts.Hash, nil
- }
-
- b, err := w.r.Reference(opts.Branch, true)
- if err != nil {
- return plumbing.ZeroHash, err
- }
+ hash := opts.Hash
+ if hash.IsZero() {
+ b, err := w.r.Reference(opts.Branch, true)
+ if err != nil {
+ return plumbing.ZeroHash, err
+ }
- if !b.Name().IsTag() {
- return b.Hash(), nil
+ hash = b.Hash()
}
- o, err := w.r.Object(plumbing.AnyObject, b.Hash())
+ o, err := w.r.Object(plumbing.AnyObject, hash)
if err != nil {
return plumbing.ZeroHash, err
}
@@ -233,7 +245,7 @@ func (w *Worktree) getCommitFromCheckoutOptions(opts *CheckoutOptions) (plumbing
switch o := o.(type) {
case *object.Tag:
if o.TargetType != plumbing.CommitObject {
- return plumbing.ZeroHash, fmt.Errorf("unsupported tag object target %q", o.TargetType)
+ return plumbing.ZeroHash, fmt.Errorf("%w: tag target %q", object.ErrUnsupportedObject, o.TargetType)
}
return o.Target, nil
@@ -241,7 +253,7 @@ func (w *Worktree) getCommitFromCheckoutOptions(opts *CheckoutOptions) (plumbing
return o.Hash, nil
}
- return plumbing.ZeroHash, fmt.Errorf("unsupported tag target %q", o.Type())
+ return plumbing.ZeroHash, fmt.Errorf("%w: %q", object.ErrUnsupportedObject, o.Type())
}
func (w *Worktree) setHEADToCommit(commit plumbing.Hash) error {
@@ -368,7 +380,7 @@ func (w *Worktree) resetIndex(t *object.Tree, dirs []string) error {
}
func (w *Worktree) resetWorktree(t *object.Tree) error {
- changes, err := w.diffStagingWithWorktree(true)
+ changes, err := w.diffStagingWithWorktree(true, false)
if err != nil {
return err
}
@@ -380,6 +392,9 @@ func (w *Worktree) resetWorktree(t *object.Tree) error {
b := newIndexBuilder(idx)
for _, ch := range changes {
+ if err := w.validChange(ch); err != nil {
+ return err
+ }
if err := w.checkoutChange(ch, t, b); err != nil {
return err
}
@@ -389,6 +404,104 @@ func (w *Worktree) resetWorktree(t *object.Tree) error {
return w.r.Storer.SetIndex(idx)
}
+// worktreeDeny is a list of paths that are not allowed
+// to be used when resetting the worktree.
+var worktreeDeny = map[string]struct{}{
+ // .git
+ GitDirName: {},
+
+ // For other historical reasons, file names that do not conform to the 8.3
+ // format (up to eight characters for the basename, three for the file
+ // extension, certain characters not allowed such as `+`, etc) are associated
+ // with a so-called "short name", at least on the `C:` drive by default.
+ // Which means that `git~1/` is a valid way to refer to `.git/`.
+ "git~1": {},
+}
+
+// validPath checks whether paths are valid.
+// The rules around invalid paths could differ from upstream based on how
+// filesystems are managed within go-git, but they are largely the same.
+//
+// For upstream rules:
+// https://github.com/git/git/blob/564d0252ca632e0264ed670534a51d18a689ef5d/read-cache.c#L946
+// https://github.com/git/git/blob/564d0252ca632e0264ed670534a51d18a689ef5d/path.c#L1383
+func validPath(paths ...string) error {
+ for _, p := range paths {
+ parts := strings.FieldsFunc(p, func(r rune) bool { return (r == '\\' || r == '/') })
+ if _, denied := worktreeDeny[strings.ToLower(parts[0])]; denied {
+ return fmt.Errorf("invalid path prefix: %q", p)
+ }
+
+ if runtime.GOOS == "windows" {
+ // Volume names are not supported, in either format: \\ and <DRIVE_LETTER>:.
+ if vol := filepath.VolumeName(p); vol != "" {
+ return fmt.Errorf("invalid path: %q", p)
+ }
+
+ if !windowsValidPath(parts[0]) {
+ return fmt.Errorf("invalid path: %q", p)
+ }
+ }
+
+ for _, part := range parts {
+ if part == ".." {
+ return fmt.Errorf("invalid path %q: cannot use '..'", p)
+ }
+ }
+ }
+ return nil
+}
+
+// windowsPathReplacer defines the chars that need to be replaced
+// as part of windowsValidPath.
+var windowsPathReplacer *strings.Replacer
+
+func init() {
+ windowsPathReplacer = strings.NewReplacer(" ", "", ".", "")
+}
+
+func windowsValidPath(part string) bool {
+ if len(part) > 3 && strings.EqualFold(part[:4], GitDirName) {
+ // For historical reasons, file names that end in spaces or periods are
+ // automatically trimmed. Therefore, `.git . . ./` is a valid way to refer
+ // to `.git/`.
+ if windowsPathReplacer.Replace(part[4:]) == "" {
+ return false
+ }
+
+ // For yet other historical reasons, NTFS supports so-called "Alternate Data
+ // Streams", i.e. metadata associated with a given file, referred to via
+ // `<filename>:<stream-name>:<stream-type>`. There exists a default stream
+ // type for directories, allowing `.git/` to be accessed via
+ // `.git::$INDEX_ALLOCATION/`.
+ //
+ // For performance reasons, _all_ Alternate Data Streams of `.git/` are
+ // forbidden, not just `::$INDEX_ALLOCATION`.
+ if len(part) > 4 && part[4:5] == ":" {
+ return false
+ }
+ }
+ return true
+}
+
+func (w *Worktree) validChange(ch merkletrie.Change) error {
+ action, err := ch.Action()
+ if err != nil {
+ return nil
+ }
+
+ switch action {
+ case merkletrie.Delete:
+ return validPath(ch.From.String())
+ case merkletrie.Insert:
+ return validPath(ch.To.String())
+ case merkletrie.Modify:
+ return validPath(ch.From.String(), ch.To.String())
+ }
+
+ return nil
+}
+
func (w *Worktree) checkoutChange(ch merkletrie.Change, t *object.Tree, idx *indexBuilder) error {
a, err := ch.Action()
if err != nil {
@@ -420,7 +533,7 @@ func (w *Worktree) checkoutChange(ch merkletrie.Change, t *object.Tree, idx *ind
}
func (w *Worktree) containsUnstagedChanges() (bool, error) {
- ch, err := w.diffStagingWithWorktree(false)
+ ch, err := w.diffStagingWithWorktree(false, true)
if err != nil {
return false, err
}
@@ -561,6 +674,11 @@ func (w *Worktree) checkoutFile(f *object.File) (err error) {
}
func (w *Worktree) checkoutFileSymlink(f *object.File) (err error) {
+ // https://github.com/git/git/commit/10ecfa76491e4923988337b2e2243b05376b40de
+ if strings.EqualFold(f.Name, gitmodulesFile) {
+ return ErrGitModulesSymlink
+ }
+
from, err := f.Reader()
if err != nil {
return
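
Among the worktree changes above, getCommitFromCheckoutOptions now peels annotated tags when checking out by hash, so a tag object's hash resolves to its commit target, while non-commit targets fail with an object.ErrUnsupportedObject-wrapped error. A minimal sketch of checking out by hash, assuming r is an already opened *git.Repository:

package example

import (
	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
)

// checkoutHash checks out the given object hash; annotated tags are
// peeled to their commit target by the worktree.
func checkoutHash(r *git.Repository, hash string) error {
	w, err := r.Worktree()
	if err != nil {
		return err
	}

	return w.Checkout(&git.CheckoutOptions{
		Hash: plumbing.NewHash(hash),
	})
}
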
diff --git a/worktree_commit.go b/worktree_commit.go
index eaa21c3..4d811f3 100644
--- a/worktree_commit.go
+++ b/worktree_commit.go
@@ -263,4 +263,4 @@ func (h *buildTreeHelper) copyTreeToStorageRecursive(parent string, t *object.Tr
return hash, nil
}
return h.s.SetEncodedObject(o)
-} \ No newline at end of file
+}
diff --git a/worktree_status.go b/worktree_status.go
index 61bb6f7..7301087 100644
--- a/worktree_status.go
+++ b/worktree_status.go
@@ -74,7 +74,7 @@ func (w *Worktree) status(commit plumbing.Hash) (Status, error) {
}
}
- right, err := w.diffStagingWithWorktree(false)
+ right, err := w.diffStagingWithWorktree(false, true)
if err != nil {
return nil, err
}
@@ -113,7 +113,7 @@ func nameFromAction(ch *merkletrie.Change) string {
return name
}
-func (w *Worktree) diffStagingWithWorktree(reverse bool) (merkletrie.Changes, error) {
+func (w *Worktree) diffStagingWithWorktree(reverse, excludeIgnoredChanges bool) (merkletrie.Changes, error) {
idx, err := w.r.Storer.Index()
if err != nil {
return nil, err
@@ -138,7 +138,10 @@ func (w *Worktree) diffStagingWithWorktree(reverse bool) (merkletrie.Changes, er
return nil, err
}
- return w.excludeIgnoredChanges(c), nil
+ if excludeIgnoredChanges {
+ return w.excludeIgnoredChanges(c), nil
+ }
+ return c, nil
}
func (w *Worktree) excludeIgnoredChanges(changes merkletrie.Changes) merkletrie.Changes {
diff --git a/worktree_test.go b/worktree_test.go
index 24d5bd5..5759ec4 100644
--- a/worktree_test.go
+++ b/worktree_test.go
@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"errors"
+ "fmt"
"io"
"os"
"path/filepath"
@@ -16,11 +17,14 @@ import (
fixtures "github.com/go-git/go-git-fixtures/v4"
"github.com/go-git/go-git/v5/config"
"github.com/go-git/go-git/v5/plumbing"
+ "github.com/go-git/go-git/v5/plumbing/cache"
"github.com/go-git/go-git/v5/plumbing/filemode"
"github.com/go-git/go-git/v5/plumbing/format/gitignore"
"github.com/go-git/go-git/v5/plumbing/format/index"
"github.com/go-git/go-git/v5/plumbing/object"
+ "github.com/go-git/go-git/v5/storage/filesystem"
"github.com/go-git/go-git/v5/storage/memory"
+ "github.com/stretchr/testify/assert"
"github.com/go-git/go-billy/v5/memfs"
"github.com/go-git/go-billy/v5/osfs"
@@ -29,6 +33,10 @@ import (
. "gopkg.in/check.v1"
)
+var (
+ defaultTestCommitOptions = &CommitOptions{Author: &object.Signature{Name: "testuser", Email: "testemail"}}
+)
+
type WorktreeSuite struct {
BaseSuite
}
@@ -295,6 +303,56 @@ func (s *WorktreeSuite) TestPullAlreadyUptodate(c *C) {
c.Assert(err, Equals, NoErrAlreadyUpToDate)
}
+func (s *WorktreeSuite) TestPullDepth(c *C) {
+ r, err := Clone(memory.NewStorage(), memfs.New(), &CloneOptions{
+ URL: fixtures.Basic().One().URL,
+ Depth: 1,
+ })
+
+ c.Assert(err, IsNil)
+
+ w, err := r.Worktree()
+ c.Assert(err, IsNil)
+ err = w.Pull(&PullOptions{})
+ c.Assert(err, Equals, nil)
+}
+
+func (s *WorktreeSuite) TestPullAfterShallowClone(c *C) {
+ tempDir, clean := s.TemporalDir()
+ defer clean()
+ remoteURL := filepath.Join(tempDir, "remote")
+ repoDir := filepath.Join(tempDir, "repo")
+
+ remote, err := PlainInit(remoteURL, false)
+ c.Assert(err, IsNil)
+ c.Assert(remote, NotNil)
+
+ _ = CommitNewFile(c, remote, "File1")
+ _ = CommitNewFile(c, remote, "File2")
+
+ repo, err := PlainClone(repoDir, false, &CloneOptions{
+ URL: remoteURL,
+ Depth: 1,
+ Tags: NoTags,
+ SingleBranch: true,
+ ReferenceName: "master",
+ })
+ c.Assert(err, IsNil)
+
+ _ = CommitNewFile(c, remote, "File3")
+ _ = CommitNewFile(c, remote, "File4")
+
+ w, err := repo.Worktree()
+ c.Assert(err, IsNil)
+
+ err = w.Pull(&PullOptions{
+ RemoteName: DefaultRemoteName,
+ SingleBranch: true,
+ ReferenceName: plumbing.NewBranchReferenceName("master"),
+ })
+ c.Assert(err, IsNil)
+}
+
func (s *WorktreeSuite) TestCheckout(c *C) {
fs := memfs.New()
w := &Worktree{
@@ -767,6 +825,30 @@ func (s *WorktreeSuite) TestCheckoutCreateMissingBranch(c *C) {
c.Assert(err, Equals, ErrCreateRequiresBranch)
}
+func (s *WorktreeSuite) TestCheckoutCreateInvalidBranch(c *C) {
+ w := &Worktree{
+ r: s.Repository,
+ Filesystem: memfs.New(),
+ }
+
+ for _, name := range []plumbing.ReferenceName{
+ "foo",
+ "-",
+ "-foo",
+ "refs/heads//",
+ "refs/heads/..",
+ "refs/heads/a..b",
+ "refs/heads/.",
+ } {
+ err := w.Checkout(&CheckoutOptions{
+ Create: true,
+ Branch: name,
+ })
+
+ c.Assert(err, Equals, plumbing.ErrInvalidReferenceName)
+ }
+}
+
func (s *WorktreeSuite) TestCheckoutTag(c *C) {
f := fixtures.ByTag("tags").One()
r := s.NewRepositoryWithEmptyWorktree(f)
@@ -804,6 +886,41 @@ func (s *WorktreeSuite) TestCheckoutTag(c *C) {
c.Assert(head.Name().String(), Equals, "HEAD")
}
+func (s *WorktreeSuite) TestCheckoutTagHash(c *C) {
+ f := fixtures.ByTag("tags").One()
+ r := s.NewRepositoryWithEmptyWorktree(f)
+ w, err := r.Worktree()
+ c.Assert(err, IsNil)
+
+ for _, hash := range []string{
+ "b742a2a9fa0afcfa9a6fad080980fbc26b007c69", // annotated tag
+ "ad7897c0fb8e7d9a9ba41fa66072cf06095a6cfc", // commit tag
+ "f7b877701fbf855b44c0a9e86f3fdce2c298b07f", // lightweight tag
+ } {
+ err = w.Checkout(&CheckoutOptions{
+ Hash: plumbing.NewHash(hash),
+ })
+ c.Assert(err, IsNil)
+ head, err := w.r.Head()
+ c.Assert(err, IsNil)
+ c.Assert(head.Name().String(), Equals, "HEAD")
+
+ status, err := w.Status()
+ c.Assert(err, IsNil)
+ c.Assert(status.IsClean(), Equals, true)
+ }
+
+ for _, hash := range []string{
+ "fe6cb94756faa81e5ed9240f9191b833db5f40ae", // blob tag
+ "152175bf7e5580299fa1f0ba41ef6474cc043b70", // tree tag
+ } {
+ err = w.Checkout(&CheckoutOptions{
+ Hash: plumbing.NewHash(hash),
+ })
+ c.Assert(err, NotNil)
+ }
+}
+
func (s *WorktreeSuite) TestCheckoutBisect(c *C) {
if testing.Short() {
c.Skip("skipping test in short mode.")
@@ -884,21 +1001,22 @@ func (s *WorktreeSuite) TestStatusCheckedInBeforeIgnored(c *C) {
c.Assert(err, IsNil)
_, err = w.Add("fileToIgnore")
c.Assert(err, IsNil)
- _, err = w.Commit("Added file that will be ignored later", &CommitOptions{})
+
+ _, err = w.Commit("Added file that will be ignored later", defaultTestCommitOptions)
c.Assert(err, IsNil)
err = util.WriteFile(fs, ".gitignore", []byte("fileToIgnore\nsecondIgnoredFile"), 0755)
c.Assert(err, IsNil)
_, err = w.Add(".gitignore")
c.Assert(err, IsNil)
- _, err = w.Commit("Added .gitignore", &CommitOptions{})
+ _, err = w.Commit("Added .gitignore", defaultTestCommitOptions)
c.Assert(err, IsNil)
status, err := w.Status()
c.Assert(err, IsNil)
c.Assert(status.IsClean(), Equals, true)
c.Assert(status, NotNil)
- err = util.WriteFile(fs, "secondIgnoredFile", []byte("Should be completly ignored"), 0755)
+ err = util.WriteFile(fs, "secondIgnoredFile", []byte("Should be completely ignored"), 0755)
c.Assert(err, IsNil)
status = nil
status, err = w.Status()
@@ -1097,6 +1215,49 @@ func (s *WorktreeSuite) TestResetHard(c *C) {
c.Assert(branch.Hash(), Equals, commit)
}
+func (s *WorktreeSuite) TestResetHardWithGitIgnore(c *C) {
+ fs := memfs.New()
+ w := &Worktree{
+ r: s.Repository,
+ Filesystem: fs,
+ }
+
+ err := w.Checkout(&CheckoutOptions{})
+ c.Assert(err, IsNil)
+
+ tf, err := fs.Create("newTestFile.txt")
+ c.Assert(err, IsNil)
+ _, err = tf.Write([]byte("testfile content"))
+ c.Assert(err, IsNil)
+ err = tf.Close()
+ c.Assert(err, IsNil)
+ _, err = w.Add("newTestFile.txt")
+ c.Assert(err, IsNil)
+ _, err = w.Commit("testcommit", &CommitOptions{Author: &object.Signature{Name: "name", Email: "email"}})
+ c.Assert(err, IsNil)
+
+ err = fs.Remove("newTestFile.txt")
+ c.Assert(err, IsNil)
+ f, err := fs.Create(".gitignore")
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte("foo\n"))
+ _, err = f.Write([]byte("newTestFile.txt\n"))
+ c.Assert(err, IsNil)
+ err = f.Close()
+ c.Assert(err, IsNil)
+
+ status, err := w.Status()
+ c.Assert(err, IsNil)
+ c.Assert(status.IsClean(), Equals, false)
+
+ err = w.Reset(&ResetOptions{Mode: HardReset})
+ c.Assert(err, IsNil)
+
+ status, err = w.Status()
+ c.Assert(err, IsNil)
+ c.Assert(status.IsClean(), Equals, true)
+}
+
func (s *WorktreeSuite) TestStatusAfterCheckout(c *C) {
fs := memfs.New()
w := &Worktree{
@@ -2076,34 +2237,40 @@ func (s *WorktreeSuite) TestCleanBare(c *C) {
c.Assert(err, IsNil)
}
-func (s *WorktreeSuite) TestAlternatesRepo(c *C) {
+func TestAlternatesRepo(t *testing.T) {
fs := fixtures.ByTag("alternates").One().Worktree()
// Open 1st repo.
rep1fs, err := fs.Chroot("rep1")
- c.Assert(err, IsNil)
+ assert.NoError(t, err)
rep1, err := PlainOpen(rep1fs.Root())
- c.Assert(err, IsNil)
+ assert.NoError(t, err)
// Open 2nd repo.
rep2fs, err := fs.Chroot("rep2")
- c.Assert(err, IsNil)
- rep2, err := PlainOpen(rep2fs.Root())
- c.Assert(err, IsNil)
+ assert.NoError(t, err)
+ d, _ := rep2fs.Chroot(GitDirName)
+ storer := filesystem.NewStorageWithOptions(d,
+ cache.NewObjectLRUDefault(), filesystem.Options{
+ AlternatesFS: fs,
+ })
+ rep2, err := Open(storer, rep2fs)
+
+ assert.NoError(t, err)
// Get the HEAD commit from the main repo.
h, err := rep1.Head()
- c.Assert(err, IsNil)
+ assert.NoError(t, err)
commit1, err := rep1.CommitObject(h.Hash())
- c.Assert(err, IsNil)
+ assert.NoError(t, err)
// Get the HEAD commit from the shared repo.
h, err = rep2.Head()
- c.Assert(err, IsNil)
+ assert.NoError(t, err)
commit2, err := rep2.CommitObject(h.Hash())
- c.Assert(err, IsNil)
+ assert.NoError(t, err)
- c.Assert(commit1.String(), Equals, commit2.String())
+ assert.Equal(t, commit1.String(), commit2.String())
}
func (s *WorktreeSuite) TestGrep(c *C) {
@@ -2616,3 +2783,77 @@ func (s *WorktreeSuite) TestLinkedWorktree(c *C) {
c.Assert(err, Equals, ErrRepositoryIncomplete)
}
}
+
+func TestValidPath(t *testing.T) {
+ type testcase struct {
+ path string
+ wantErr bool
+ }
+
+ tests := []testcase{
+ {".git", true},
+ {".git/b", true},
+ {".git\\b", true},
+ {"git~1", true},
+ {"a/../b", true},
+ {"a\\..\\b", true},
+ {".gitmodules", false},
+ {".gitignore", false},
+ {"a..b", false},
+ {".", false},
+ {"a/.git", false},
+ {"a\\.git", false},
+ {"a/.git/b", false},
+ {"a\\.git\\b", false},
+ }
+
+ if runtime.GOOS == "windows" {
+ tests = append(tests, []testcase{
+ {"\\\\a\\b", true},
+ {"C:\\a\\b", true},
+ {".git . . .", true},
+ {".git . . ", true},
+ {".git ", true},
+ {".git.", true},
+ {".git::$INDEX_ALLOCATION", true},
+ }...)
+ }
+
+ for _, tc := range tests {
+ t.Run(fmt.Sprintf("%s", tc.path), func(t *testing.T) {
+ err := validPath(tc.path)
+ if tc.wantErr {
+ assert.Error(t, err)
+ } else {
+ assert.NoError(t, err)
+ }
+ })
+ }
+}
+
+func TestWindowsValidPath(t *testing.T) {
+ tests := []struct {
+ path string
+ want bool
+ }{
+ {".git", false},
+ {".git . . .", false},
+ {".git ", false},
+ {".git ", false},
+ {".git . .", false},
+ {".git . .", false},
+ {".git::$INDEX_ALLOCATION", false},
+ {".git:", false},
+ {"a", true},
+ {"a\\b", true},
+ {"a/b", true},
+ {".gitm", true},
+ }
+
+ for _, tc := range tests {
+ t.Run(fmt.Sprintf("%s", tc.path), func(t *testing.T) {
+ got := windowsValidPath(tc.path)
+ assert.Equal(t, tc.want, got)
+ })
+ }
+}