-rw-r--r--  .github/workflows/test.yml | 18
-rw-r--r--  COMPATIBILITY.md | 4
-rw-r--r--  Makefile | 12
-rw-r--r--  _examples/common_test.go | 13
-rw-r--r--  _examples/log/main.go | 5
-rw-r--r--  _examples/ls-remote/main.go | 42
-rw-r--r--  _examples/merge_base/help-long.msg.go | 63
-rw-r--r--  _examples/merge_base/helpers.go | 61
-rw-r--r--  _examples/merge_base/main.go | 124
-rw-r--r--  _examples/open/main.go | 2
-rw-r--r--  _examples/progress/main.go | 2
-rw-r--r--  _examples/pull/main.go | 2
-rw-r--r--  _examples/tag/main.go | 2
-rw-r--r--  blame.go | 6
-rw-r--r--  config/branch.go | 23
-rw-r--r--  config/branch_test.go | 8
-rw-r--r--  config/config.go | 3
-rw-r--r--  config/config_test.go | 6
-rw-r--r--  config/modules_test.go | 6
-rw-r--r--  config/refspec.go | 15
-rw-r--r--  config/refspec_test.go | 70
-rw-r--r--  go.mod | 21
-rw-r--r--  go.sum | 23
-rw-r--r--  internal/revision/parser.go | 2
-rw-r--r--  internal/revision/parser_test.go | 2
-rw-r--r--  internal/revision/scanner.go | 2
-rw-r--r--  internal/url/url.go | 2
-rwxr-xr-x  internal/url/url_test.go | 60
-rw-r--r--  object_walker.go | 2
-rw-r--r--  options.go | 24
-rw-r--r--  plumbing/filemode/filemode.go | 6
-rw-r--r--  plumbing/filemode/filemode_test.go | 2
-rw-r--r--  plumbing/format/commitgraph/encoder.go | 14
-rw-r--r--  plumbing/format/commitgraph/file.go | 2
-rw-r--r--  plumbing/format/commitgraph/memory.go | 2
-rw-r--r--  plumbing/format/diff/unified_encoder.go | 25
-rw-r--r--  plumbing/format/diff/unified_encoder_test.go | 90
-rw-r--r--  plumbing/format/gitattributes/pattern.go | 2
-rw-r--r--  plumbing/format/idxfile/decoder.go | 6
-rw-r--r--  plumbing/format/idxfile/writer.go | 2
-rw-r--r--  plumbing/format/index/decoder_test.go | 2
-rw-r--r--  plumbing/format/index/doc.go | 4
-rw-r--r--  plumbing/format/index/encoder_test.go | 6
-rw-r--r--  plumbing/format/index/index.go | 2
-rw-r--r--  plumbing/format/packfile/diff_delta.go | 13
-rw-r--r--  plumbing/format/packfile/packfile.go | 8
-rw-r--r--  plumbing/format/packfile/parser.go | 99
-rw-r--r--  plumbing/format/packfile/patch_delta.go | 53
-rw-r--r--  plumbing/format/packfile/scanner_test.go | 1
-rw-r--r--  plumbing/hash.go | 2
-rw-r--r--  plumbing/object/commit_stats_test.go | 8
-rw-r--r--  plumbing/object/commit_walker_bfs_filtered.go | 176
-rw-r--r--  plumbing/object/commit_walker_bfs_filtered_test.go | 256
-rw-r--r--  plumbing/object/commit_walker_limit.go | 65
-rw-r--r--  plumbing/object/commit_walker_path.go (renamed from plumbing/object/commit_walker_file.go) | 40
-rw-r--r--  plumbing/object/merge_base.go | 210
-rw-r--r--  plumbing/object/merge_base_test.go | 323
-rw-r--r--  plumbing/object/object.go | 18
-rw-r--r--  plumbing/object/patch.go | 2
-rw-r--r--  plumbing/object/patch_test.go | 1
-rw-r--r--  plumbing/object/tree.go | 4
-rw-r--r--  plumbing/protocol/packp/advrefs.go | 2
-rw-r--r--  plumbing/protocol/packp/advrefs_decode.go | 4
-rw-r--r--  plumbing/protocol/packp/capability/list.go | 6
-rw-r--r--  plumbing/protocol/packp/capability/list_test.go | 2
-rw-r--r--  plumbing/protocol/packp/ulreq.go | 4
-rw-r--r--  plumbing/protocol/packp/ulreq_encode.go | 4
-rw-r--r--  plumbing/protocol/packp/updreq_decode.go | 12
-rw-r--r--  plumbing/protocol/packp/uppackreq.go | 4
-rw-r--r--  plumbing/storer/object.go | 4
-rw-r--r--  plumbing/transport/http/common.go | 10
-rw-r--r--  plumbing/transport/http/common_test.go | 2
-rw-r--r--  plumbing/transport/internal/common/common.go | 2
-rw-r--r--  plumbing/transport/internal/common/common_test.go | 2
-rw-r--r--  plumbing/transport/server/server.go | 5
-rw-r--r--  plumbing/transport/ssh/auth_method.go | 2
-rw-r--r--  prune_test.go | 2
-rw-r--r--  remote.go | 74
-rw-r--r--  remote_test.go | 123
-rw-r--r--  repository.go | 36
-rw-r--r--  repository_plan9_test.go | 47
-rw-r--r--  repository_test.go | 259
-rw-r--r--  storage/filesystem/dotgit/dotgit_test.go | 1
-rw-r--r--  storage/filesystem/index.go | 8
-rw-r--r--  storage/memory/storage.go | 2
-rw-r--r--  utils/diff/diff_ext_test.go | 31
-rw-r--r--  utils/merkletrie/difftree.go | 10
-rw-r--r--  utils/merkletrie/difftree_test.go | 2
-rw-r--r--  utils/merkletrie/internal/frame/frame.go | 4
-rw-r--r--  utils/merkletrie/internal/fsnoder/doc.go | 2
-rw-r--r--  utils/merkletrie/internal/fsnoder/file.go | 2
-rw-r--r--  utils/merkletrie/internal/fsnoder/new_test.go | 4
-rw-r--r--  utils/merkletrie/iter_test.go | 2
-rw-r--r--  utils/merkletrie/noder/path_test.go | 4
-rw-r--r--  worktree.go | 77
-rw-r--r--  worktree_commit_test.go | 2
-rw-r--r--  worktree_plan9.go | 31
-rw-r--r--  worktree_test.go | 43
98 files changed, 2542 insertions, 358 deletions
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
index 0000000..bf1651e
--- /dev/null
+++ b/.github/workflows/test.yml
@@ -0,0 +1,18 @@
+on: [push, pull_request]
+name: Test
+jobs:
+ test:
+ strategy:
+ matrix:
+ go-version: [1.12.x, 1.13.x, 1.14.x]
+ platform: [ubuntu-latest, macos-latest, windows-latest]
+ runs-on: ${{ matrix.platform }}
+ steps:
+ - name: Install Go
+ uses: actions/setup-go@v1
+ with:
+ go-version: ${{ matrix.go-version }}
+ - name: Checkout code
+ uses: actions/checkout@v2
+ - name: Test
+ run: go test -v ./...
\ No newline at end of file
diff --git a/COMPATIBILITY.md b/COMPATIBILITY.md
index e07e799..395088b 100644
--- a/COMPATIBILITY.md
+++ b/COMPATIBILITY.md
@@ -12,7 +12,7 @@ is supported by go-git.
| init | ✔ | Plain init and `--bare` are supported. Flags `--template`, `--separate-git-dir` and `--shared` are not. |
| clone | ✔ | Plain clone and equivalents to `--progress`, `--single-branch`, `--depth`, `--origin`, `--recurse-submodules` are supported. Others are not. |
| **basic snapshotting** |
-| add | ✔ | Plain add is supported. Any other flag aren't supported |
+| add | ✔ | Plain add is supported. Any other flags aren't supported |
| status | ✔ |
| commit | ✔ |
| reset | ✔ |
@@ -86,7 +86,7 @@ is supported by go-git.
| for-each-ref | ✔ |
| hash-object | ✔ |
| ls-files | ✔ |
-| merge-base | |
+| merge-base | ✔ | Calculates the merge-base only between two commits, and supports `--independent` and `--is-ancestor` modifiers; Does not support `--fork-point` nor `--octopus` modifiers. |
| read-tree | |
| rev-list | ✔ |
| rev-parse | |
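The merge-base support noted above is exposed through the object package (Commit.MergeBase, Commit.IsAncestor and object.Independents, added later in this diff). A minimal sketch of how that API can be used, assuming a repository in the current directory and placeholder revisions:

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing"
)

func main() {
	// Assumption: run from inside a git repository; revisions are placeholders.
	repo, err := git.PlainOpen(".")
	if err != nil {
		log.Fatal(err)
	}

	resolve := func(rev string) *plumbing.Hash {
		h, err := repo.ResolveRevision(plumbing.Revision(rev))
		if err != nil {
			log.Fatal(err)
		}
		return h
	}

	base, err := repo.CommitObject(*resolve("HEAD~3"))
	if err != nil {
		log.Fatal(err)
	}
	head, err := repo.CommitObject(*resolve("HEAD"))
	if err != nil {
		log.Fatal(err)
	}

	// Best common ancestor(s), as in `git merge-base --all HEAD~3 HEAD`.
	bases, err := base.MergeBase(head)
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range bases {
		fmt.Println(c.Hash)
	}

	// Reachability check, as in `git merge-base --is-ancestor HEAD~3 HEAD`.
	ok, err := base.IsAncestor(head)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("is ancestor:", ok)
}
```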
diff --git a/Makefile b/Makefile
index d576778..3866fb7 100644
--- a/Makefile
+++ b/Makefile
@@ -12,7 +12,6 @@ GIT_REPOSITORY = http://github.com/git/git.git
# Coverage
COVERAGE_REPORT = coverage.txt
-COVERAGE_PROFILE = profile.out
COVERAGE_MODE = atomic
ifneq ($(origin CI), undefined)
@@ -37,16 +36,7 @@ test:
test-coverage:
@cd $(WORKDIR); \
echo "" > $(COVERAGE_REPORT); \
- for dir in `find . -name "*.go" | grep -o '.*/' | sort | uniq`; do \
- $(GOTEST) $$dir -coverprofile=$(COVERAGE_PROFILE) -covermode=$(COVERAGE_MODE); \
- if [ $$? != 0 ]; then \
- exit 2; \
- fi; \
- if [ -f $(COVERAGE_PROFILE) ]; then \
- cat $(COVERAGE_PROFILE) >> $(COVERAGE_REPORT); \
- rm $(COVERAGE_PROFILE); \
- fi; \
- done; \
+ $(GOTEST) -coverprofile=$(COVERAGE_REPORT) -coverpkg=./... -covermode=$(COVERAGE_MODE) ./...
clean:
rm -rf $(GIT_DIST_PATH)
\ No newline at end of file
diff --git a/_examples/common_test.go b/_examples/common_test.go
index 47463a1..89d49a3 100644
--- a/_examples/common_test.go
+++ b/_examples/common_test.go
@@ -29,6 +29,7 @@ var args = map[string][]string{
"tag": {cloneRepository(defaultURL, tempFolder())},
"pull": {createRepositoryWithRemote(tempFolder(), defaultURL)},
"ls": {cloneRepository(defaultURL, tempFolder()), "HEAD", "vendor"},
+ "merge_base": {cloneRepository(defaultURL, tempFolder()), "--is-ancestor", "HEAD~3", "HEAD^"},
}
var ignored = map[string]bool{}
@@ -50,14 +51,15 @@ func TestExamples(t *testing.T) {
}
for _, example := range examples {
- _, name := filepath.Split(filepath.Dir(example))
+ dir := filepath.Dir(example)
+ _, name := filepath.Split(dir)
if ignored[name] {
continue
}
t.Run(name, func(t *testing.T) {
- testExample(t, name, example)
+ testExample(t, name, dir)
})
}
}
@@ -135,10 +137,9 @@ func addRemote(local, remote string) {
CheckIfError(err)
}
-func testExample(t *testing.T, name, example string) {
- cmd := exec.Command("go", append([]string{
- "run", filepath.Join(example),
- }, args[name]...)...)
+func testExample(t *testing.T, name, dir string) {
+ arguments := append([]string{"run", dir}, args[name]...)
+ cmd := exec.Command("go", arguments...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
diff --git a/_examples/log/main.go b/_examples/log/main.go
index ba0597a..5807515 100644
--- a/_examples/log/main.go
+++ b/_examples/log/main.go
@@ -2,6 +2,7 @@ package main
import (
"fmt"
+ "time"
"gopkg.in/src-d/go-git.v4"
. "gopkg.in/src-d/go-git.v4/_examples"
@@ -31,7 +32,9 @@ func main() {
CheckIfError(err)
// ... retrieves the commit history
- cIter, err := r.Log(&git.LogOptions{From: ref.Hash()})
+ since := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)
+ until := time.Date(2019, 7, 30, 0, 0, 0, 0, time.UTC)
+ cIter, err := r.Log(&git.LogOptions{From: ref.Hash(), Since: &since, Until: &until})
CheckIfError(err)
// ... just iterates over the commits, printing it
diff --git a/_examples/ls-remote/main.go b/_examples/ls-remote/main.go
new file mode 100644
index 0000000..42d9b4e
--- /dev/null
+++ b/_examples/ls-remote/main.go
@@ -0,0 +1,42 @@
+package main
+
+import (
+ "log"
+
+ "gopkg.in/src-d/go-git.v4"
+ "gopkg.in/src-d/go-git.v4/config"
+ "gopkg.in/src-d/go-git.v4/storage/memory"
+)
+
+// Retrieve remote tags without cloning the repository
+func main() {
+
+ // Create the remote with repository URL
+ rem := git.NewRemote(memory.NewStorage(), &config.RemoteConfig{
+ Name: "origin",
+ URLs: []string{"https://github.com/Zenika/MARCEL"},
+ })
+
+ log.Print("Fetching tags...")
+
+ // We can then use any Remote function to retrieve the wanted information
+ refs, err := rem.List(&git.ListOptions{})
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Filters the references list and only keeps tags
+ var tags []string
+ for _, ref := range refs {
+ if ref.Name().IsTag() {
+ tags = append(tags, ref.Name().Short())
+ }
+ }
+
+ if len(tags) == 0 {
+ log.Println("No tags!")
+ return
+ }
+
+ log.Printf("Tags found: %v", tags)
+}
diff --git a/_examples/merge_base/help-long.msg.go b/_examples/merge_base/help-long.msg.go
new file mode 100644
index 0000000..7759cbd
--- /dev/null
+++ b/_examples/merge_base/help-long.msg.go
@@ -0,0 +1,63 @@
+package main
+
+const helpLongMsg = `
+NAME:
+ %_COMMAND_NAME_% - Lists the best common ancestors of the two passed commit revisions
+
+SYNOPSIS:
+ usage: %_COMMAND_NAME_% <path> <commitRev> <commitRev>
+ or: %_COMMAND_NAME_% <path> --independent <commitRev>...
+ or: %_COMMAND_NAME_% <path> --is-ancestor <commitRev> <commitRev>
+
+ params:
+ <path> Path to the git repository
+ <commitRev> Git revision as supported by go-git
+
+DESCRIPTION:
+ %_COMMAND_NAME_% finds the best common ancestor(s) between two commits. One common ancestor is better than another common ancestor if the latter is an ancestor of the former.
+ A common ancestor that does not have any better common ancestor is a best common ancestor, i.e. a merge base. Note that there can be more than one merge base for a pair of commits.
+ Commits that do not share a common history have no common ancestors.
+
+OPTIONS:
+ As the most common special case, specifying only two commits on the command line means computing the merge base between the given two commits.
+ If there is no shared history between the passed commits, there won't be a merge-base, and the command will exit with status 1.
+
+--independent
+ List the subset of the passed commits that cannot be reached from any of the others. In other words, it prints a minimal subset of the supplied commits with the same ancestors.
+
+--is-ancestor
+ Check if the first commit is an ancestor of the second one, and exit with status 0 if true, or with status 1 if not. Errors are signaled by a non-zero status that is not 1.
+
+DISCUSSION:
+ Given two commits A and B, %_COMMAND_NAME_% A B will output a commit which is the best common ancestor of both, meaning it is reachable from both A and B through the parent relationship.
+
+ For example, with this topology:
+
+ o---o---o---o---B
+ / /
+ ---3---2---o---1---o---A
+
+ the merge base between A and B is 1.
+
+ With the given topology, 2 and 3 are also common ancestors of A and B, but they are not the best ones because they can also be reached from 1.
+
+ When the history involves criss-cross merges, there can be more than one best common ancestor for two commits. For example, with this topology:
+
+ ---1---o---A
+ \ /
+ X
+ / \
+ ---2---o---o---B
+
+ When the history involves feature branches depending on other feature branches, there can also be more than one common ancestor. For example:
+
+
+ o---o---o
+ / \
+ 1---o---A \
+ / / \
+ ---o---o---2---o---o---B
+
+ In both examples, 1 and 2 are merge-bases of A and B in each situation.
+ Neither one is better than the other (both are best merge bases) because 1 cannot be reached from 2, nor vice versa.
+`
diff --git a/_examples/merge_base/helpers.go b/_examples/merge_base/helpers.go
new file mode 100644
index 0000000..179a817
--- /dev/null
+++ b/_examples/merge_base/helpers.go
@@ -0,0 +1,61 @@
+package main
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "gopkg.in/src-d/go-git.v4/plumbing/object"
+)
+
+func checkIfError(err error, code exitCode, mainReason string, v ...interface{}) {
+ if err == nil {
+ return
+ }
+
+ printErr(wrapErr(err, mainReason, v...))
+ os.Exit(int(code))
+}
+
+func helpAndExit(s string, helpMsg string, code exitCode) {
+ if code == exitCodeSuccess {
+ printMsg("%s", s)
+ } else {
+ printErr(fmt.Errorf(s))
+ }
+
+ fmt.Println(strings.Replace(helpMsg, "%_COMMAND_NAME_%", os.Args[0], -1))
+
+ os.Exit(int(code))
+}
+
+func printErr(err error) {
+ fmt.Printf("\x1b[31;1m%s\x1b[0m\n", fmt.Sprintf("error: %s", err))
+}
+
+func printMsg(format string, args ...interface{}) {
+ fmt.Printf("%s\n", fmt.Sprintf(format, args...))
+}
+
+func printCommits(commits []*object.Commit) {
+ for _, commit := range commits {
+ if os.Getenv("LOG_LEVEL") == "verbose" {
+ fmt.Printf(
+ "\x1b[36;1m%s \x1b[90;21m%s\x1b[0m %s\n",
+ commit.Hash.String()[:7],
+ commit.Hash.String(),
+ strings.Split(commit.Message, "\n")[0],
+ )
+ } else {
+ fmt.Println(commit.Hash.String())
+ }
+ }
+}
+
+func wrapErr(err error, s string, v ...interface{}) error {
+ if err != nil {
+ return fmt.Errorf("%s\n %s", fmt.Sprintf(s, v...), err)
+ }
+
+ return nil
+}
diff --git a/_examples/merge_base/main.go b/_examples/merge_base/main.go
new file mode 100644
index 0000000..fe6abc6
--- /dev/null
+++ b/_examples/merge_base/main.go
@@ -0,0 +1,124 @@
+package main
+
+import (
+ "os"
+
+ "gopkg.in/src-d/go-git.v4"
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/object"
+)
+
+type exitCode int
+
+const (
+ exitCodeSuccess exitCode = iota
+ exitCodeNotFound
+ exitCodeWrongSyntax
+ exitCodeCouldNotOpenRepository
+ exitCodeCouldNotParseRevision
+ exitCodeUnexpected
+
+ cmdDesc = "Returns the merge-base between two commits:"
+
+ helpShortMsg = `
+ usage: %_COMMAND_NAME_% <path> <commitRev> <commitRev>
+ or: %_COMMAND_NAME_% <path> --independent <commitRev>...
+ or: %_COMMAND_NAME_% <path> --is-ancestor <commitRev> <commitRev>
+ or: %_COMMAND_NAME_% --help
+
+ params:
+ <path> path to the git repository
+ <commitRev> git revision as supported by go-git
+
+options:
+ (no options) lists the best common ancestors of the two passed commits
+ --independent list commits not reachable from the others
+ --is-ancestor is the first one an ancestor of the other?
+ --help show the full help message of %_COMMAND_NAME_%
+`
+)
+
+// Command that mimics `git merge-base --all <baseRev> <headRev>`
+// Command that mimics `git merge-base --is-ancestor <baseRev> <headRev>`
+// Command that mimics `git merge-base --independent <commitRev>...`
+func main() {
+ if len(os.Args) == 1 {
+ helpAndExit(cmdDesc, helpShortMsg, exitCodeSuccess)
+ }
+
+ if os.Args[1] == "--help" || os.Args[1] == "-h" {
+ helpAndExit(cmdDesc, helpLongMsg, exitCodeSuccess)
+ }
+
+ if len(os.Args) < 4 {
+ helpAndExit("Wrong syntax", helpShortMsg, exitCodeWrongSyntax)
+ }
+
+ path := os.Args[1]
+
+ var modeIndependent, modeAncestor bool
+ var commitRevs []string
+ var res []*object.Commit
+
+ switch os.Args[2] {
+ case "--independent":
+ modeIndependent = true
+ commitRevs = os.Args[3:]
+ case "--is-ancestor":
+ modeAncestor = true
+ commitRevs = os.Args[3:]
+ if len(commitRevs) != 2 {
+ helpAndExit("Wrong syntax", helpShortMsg, exitCodeWrongSyntax)
+ }
+ default:
+ commitRevs = os.Args[2:]
+ if len(commitRevs) != 2 {
+ helpAndExit("Wrong syntax", helpShortMsg, exitCodeWrongSyntax)
+ }
+ }
+
+ // Open a git repository from current directory
+ repo, err := git.PlainOpen(path)
+ checkIfError(err, exitCodeCouldNotOpenRepository, "not in a git repository")
+
+ // Get the hashes of the passed revisions
+ var hashes []*plumbing.Hash
+ for _, rev := range commitRevs {
+ hash, err := repo.ResolveRevision(plumbing.Revision(rev))
+ checkIfError(err, exitCodeCouldNotParseRevision, "could not parse revision '%s'", rev)
+ hashes = append(hashes, hash)
+ }
+
+ // Get the commits identified by the passed hashes
+ var commits []*object.Commit
+ for _, hash := range hashes {
+ commit, err := repo.CommitObject(*hash)
+ checkIfError(err, exitCodeUnexpected, "could not find commit '%s'", hash.String())
+ commits = append(commits, commit)
+ }
+
+ if modeAncestor {
+ isAncestor, err := commits[0].IsAncestor(commits[1])
+ checkIfError(err, exitCodeUnexpected, "could not traverse the repository history")
+
+ if !isAncestor {
+ os.Exit(int(exitCodeNotFound))
+ }
+
+ os.Exit(int(exitCodeSuccess))
+ }
+
+ if modeIndependent {
+ res, err = object.Independents(commits)
+ checkIfError(err, exitCodeUnexpected, "could not traverse the repository history")
+ } else {
+ res, err = commits[0].MergeBase(commits[1])
+ checkIfError(err, exitCodeUnexpected, "could not traverse the repository history")
+
+ if len(res) == 0 {
+ os.Exit(int(exitCodeNotFound))
+ }
+ }
+
+ printCommits(res)
+}
diff --git a/_examples/open/main.go b/_examples/open/main.go
index dec183e..71c6b84 100644
--- a/_examples/open/main.go
+++ b/_examples/open/main.go
@@ -14,7 +14,7 @@ func main() {
CheckArgs("<path>")
path := os.Args[1]
- // We instanciate a new repository targeting the given path (the .git folder)
+ // We instantiate a new repository targeting the given path (the .git folder)
r, err := git.PlainOpen(path)
CheckIfError(err)
diff --git a/_examples/progress/main.go b/_examples/progress/main.go
index 357703c..074430f 100644
--- a/_examples/progress/main.go
+++ b/_examples/progress/main.go
@@ -22,7 +22,7 @@ func main() {
// as git does, when you make a clone, pull or some other operations the
// server sends information via the sideband, this information can being
- // collected provinding a io.Writer to the CloneOptions options
+ // collected providing a io.Writer to the CloneOptions options
Progress: os.Stdout,
})
diff --git a/_examples/pull/main.go b/_examples/pull/main.go
index 06369fa..6f258c8 100644
--- a/_examples/pull/main.go
+++ b/_examples/pull/main.go
@@ -13,7 +13,7 @@ func main() {
CheckArgs("<path>")
path := os.Args[1]
- // We instance\iate a new repository targeting the given path (the .git folder)
+ // We instantiate a new repository targeting the given path (the .git folder)
r, err := git.PlainOpen(path)
CheckIfError(err)
diff --git a/_examples/tag/main.go b/_examples/tag/main.go
index 1e6212b..93192b0 100644
--- a/_examples/tag/main.go
+++ b/_examples/tag/main.go
@@ -15,7 +15,7 @@ func main() {
CheckArgs("<path>")
path := os.Args[1]
- // We instanciate a new repository targeting the given path (the .git folder)
+ // We instantiate a new repository targeting the given path (the .git folder)
r, err := git.PlainOpen(path)
CheckIfError(err)
diff --git a/blame.go b/blame.go
index adb72d5..f610851 100644
--- a/blame.go
+++ b/blame.go
@@ -193,7 +193,7 @@ func (b *blame) fillGraphAndData() error {
// this first commit.
if i == 0 {
for j := 0; j < nLines; j++ {
- b.graph[i][j] = (*object.Commit)(b.revs[i])
+ b.graph[i][j] = b.revs[i]
}
} else {
// if this is not the first commit, then assign to the old
@@ -211,7 +211,7 @@ func (b *blame) sliceGraph(i int) []*object.Commit {
fVs := b.graph[i]
result := make([]*object.Commit, 0, len(fVs))
for _, v := range fVs {
- c := object.Commit(*v)
+ c := *v
result = append(result, &c)
}
return result
@@ -234,7 +234,7 @@ func (b *blame) assignOrigin(c, p int) {
b.graph[c][dl] = b.graph[p][sl]
case hunks[h].Type == 1:
dl++
- b.graph[c][dl] = (*object.Commit)(b.revs[c])
+ b.graph[c][dl] = b.revs[c]
case hunks[h].Type == -1:
sl++
default:
diff --git a/config/branch.go b/config/branch.go
index e18073c..20dde6e 100644
--- a/config/branch.go
+++ b/config/branch.go
@@ -8,8 +8,9 @@ import (
)
var (
- errBranchEmptyName = errors.New("branch config: empty name")
- errBranchInvalidMerge = errors.New("branch config: invalid merge")
+ errBranchEmptyName = errors.New("branch config: empty name")
+ errBranchInvalidMerge = errors.New("branch config: invalid merge")
+ errBranchInvalidRebase = errors.New("branch config: rebase must be one of 'true' or 'interactive'")
)
// Branch contains information on the
@@ -21,6 +22,10 @@ type Branch struct {
Remote string
// Merge is the local refspec for the branch
Merge plumbing.ReferenceName
+ // Rebase instead of merge when pulling. Valid values are
+ // "true" and "interactive". "false" is undocumented and
+ // typically represented by the non-existence of this field
+ Rebase string
raw *format.Subsection
}
@@ -35,6 +40,13 @@ func (b *Branch) Validate() error {
return errBranchInvalidMerge
}
+ if b.Rebase != "" &&
+ b.Rebase != "true" &&
+ b.Rebase != "interactive" &&
+ b.Rebase != "false" {
+ return errBranchInvalidRebase
+ }
+
return nil
}
@@ -57,6 +69,12 @@ func (b *Branch) marshal() *format.Subsection {
b.raw.SetOption(mergeKey, string(b.Merge))
}
+ if b.Rebase == "" {
+ b.raw.RemoveOption(rebaseKey)
+ } else {
+ b.raw.SetOption(rebaseKey, b.Rebase)
+ }
+
return b.raw
}
@@ -66,6 +84,7 @@ func (b *Branch) unmarshal(s *format.Subsection) error {
b.Name = b.raw.Name
b.Remote = b.raw.Options.Get(remoteSection)
b.Merge = plumbing.ReferenceName(b.raw.Options.Get(mergeKey))
+ b.Rebase = b.raw.Options.Get(rebaseKey)
return b.Validate()
}
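The new Rebase field added above is set per branch and round-trips through the config marshalling like the other branch options. A minimal sketch, with placeholder branch and remote names:

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/src-d/go-git.v4/config"
	"gopkg.in/src-d/go-git.v4/plumbing"
)

func main() {
	// Placeholder branch/remote names.
	b := &config.Branch{
		Name:   "master",
		Remote: "origin",
		Merge:  plumbing.ReferenceName("refs/heads/master"),
		// New field: "true" or "interactive" ("false" and "" also pass Validate).
		Rebase: "interactive",
	}
	if err := b.Validate(); err != nil {
		log.Fatal(err)
	}

	cfg := config.NewConfig()
	cfg.Branches[b.Name] = b

	out, err := cfg.Marshal()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out)) // includes `rebase = interactive` under [branch "master"]
}
```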
diff --git a/config/branch_test.go b/config/branch_test.go
index d74122e..6d9ca86 100644
--- a/config/branch_test.go
+++ b/config/branch_test.go
@@ -38,12 +38,13 @@ func (b *BranchSuite) TestValidateMerge(c *C) {
c.Assert(badBranch.Validate(), NotNil)
}
-func (b *BranchSuite) TestMarshall(c *C) {
+func (b *BranchSuite) TestMarshal(c *C) {
expected := []byte(`[core]
bare = false
[branch "branch-tracking-on-clone"]
remote = fork
merge = refs/heads/branch-tracking-on-clone
+ rebase = interactive
`)
cfg := NewConfig()
@@ -51,6 +52,7 @@ func (b *BranchSuite) TestMarshall(c *C) {
Name: "branch-tracking-on-clone",
Remote: "fork",
Merge: plumbing.ReferenceName("refs/heads/branch-tracking-on-clone"),
+ Rebase: "interactive",
}
actual, err := cfg.Marshal()
@@ -58,12 +60,13 @@ func (b *BranchSuite) TestMarshall(c *C) {
c.Assert(string(actual), Equals, string(expected))
}
-func (b *BranchSuite) TestUnmarshall(c *C) {
+func (b *BranchSuite) TestUnmarshal(c *C) {
input := []byte(`[core]
bare = false
[branch "branch-tracking-on-clone"]
remote = fork
merge = refs/heads/branch-tracking-on-clone
+ rebase = interactive
`)
cfg := NewConfig()
@@ -73,4 +76,5 @@ func (b *BranchSuite) TestUnmarshall(c *C) {
c.Assert(branch.Name, Equals, "branch-tracking-on-clone")
c.Assert(branch.Remote, Equals, "fork")
c.Assert(branch.Merge, Equals, plumbing.ReferenceName("refs/heads/branch-tracking-on-clone"))
+ c.Assert(branch.Rebase, Equals, "interactive")
}
diff --git a/config/config.go b/config/config.go
index 2c3b8b9..321ca04 100644
--- a/config/config.go
+++ b/config/config.go
@@ -33,7 +33,7 @@ var (
)
// Config contains the repository configuration
-// ftp://www.kernel.org/pub/software/scm/git/docs/git-config.html#FILES
+// https://www.kernel.org/pub/software/scm/git/docs/git-config.html#FILES
type Config struct {
Core struct {
// IsBare if true this repository is assumed to be bare and has no
@@ -120,6 +120,7 @@ const (
commentCharKey = "commentChar"
windowKey = "window"
mergeKey = "merge"
+ rebaseKey = "rebase"
// DefaultPackWindow holds the number of previous objects used to
// generate deltas. The value 10 is the same used by git command.
diff --git a/config/config_test.go b/config/config_test.go
index db0932c..54eb5e1 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -9,7 +9,7 @@ type ConfigSuite struct{}
var _ = Suite(&ConfigSuite{})
-func (s *ConfigSuite) TestUnmarshall(c *C) {
+func (s *ConfigSuite) TestUnmarshal(c *C) {
input := []byte(`[core]
bare = true
worktree = foo
@@ -60,7 +60,7 @@ func (s *ConfigSuite) TestUnmarshall(c *C) {
c.Assert(cfg.Branches["master"].Merge, Equals, plumbing.ReferenceName("refs/heads/master"))
}
-func (s *ConfigSuite) TestMarshall(c *C) {
+func (s *ConfigSuite) TestMarshal(c *C) {
output := []byte(`[core]
bare = true
worktree = bar
@@ -119,7 +119,7 @@ func (s *ConfigSuite) TestMarshall(c *C) {
c.Assert(string(b), Equals, string(output))
}
-func (s *ConfigSuite) TestUnmarshallMarshall(c *C) {
+func (s *ConfigSuite) TestUnmarshalMarshal(c *C) {
input := []byte(`[core]
bare = true
worktree = foo
diff --git a/config/modules_test.go b/config/modules_test.go
index 8e10d70..8ea68e7 100644
--- a/config/modules_test.go
+++ b/config/modules_test.go
@@ -39,7 +39,7 @@ func (s *ModulesSuite) TestValidateMissingName(c *C) {
c.Assert(m.Validate(), Equals, ErrModuleEmptyPath)
}
-func (s *ModulesSuite) TestMarshall(c *C) {
+func (s *ModulesSuite) TestMarshal(c *C) {
input := []byte(`[submodule "qux"]
path = qux
url = baz
@@ -54,7 +54,7 @@ func (s *ModulesSuite) TestMarshall(c *C) {
c.Assert(output, DeepEquals, input)
}
-func (s *ModulesSuite) TestUnmarshall(c *C) {
+func (s *ModulesSuite) TestUnmarshal(c *C) {
input := []byte(`[submodule "qux"]
path = qux
url = https://github.com/foo/qux.git
@@ -79,7 +79,7 @@ func (s *ModulesSuite) TestUnmarshall(c *C) {
c.Assert(cfg.Submodules["foo/bar"].Branch, Equals, "dev")
}
-func (s *ModulesSuite) TestUnmarshallMarshall(c *C) {
+func (s *ModulesSuite) TestUnmarshalMarshal(c *C) {
input := []byte(`[submodule "foo/bar"]
path = foo/bar
url = https://github.com/foo/bar.git
diff --git a/config/refspec.go b/config/refspec.go
index 391705c..14bb400 100644
--- a/config/refspec.go
+++ b/config/refspec.go
@@ -18,7 +18,7 @@ var (
ErrRefSpecMalformedWildcard = errors.New("malformed refspec, mismatched number of wildcards")
)
-// RefSpec is a mapping from local branches to remote references
+// RefSpec is a mapping from local branches to remote references.
// The format of the refspec is an optional +, followed by <src>:<dst>, where
// <src> is the pattern for references on the remote side and <dst> is where
// those references will be written locally. The + tells Git to update the
@@ -99,11 +99,11 @@ func (s RefSpec) matchGlob(n plumbing.ReferenceName) bool {
var prefix, suffix string
prefix = src[0:wildcard]
- if len(src) < wildcard {
- suffix = src[wildcard+1 : len(suffix)]
+ if len(src) > wildcard+1 {
+ suffix = src[wildcard+1:]
}
- return len(name) > len(prefix)+len(suffix) &&
+ return len(name) >= len(prefix)+len(suffix) &&
strings.HasPrefix(name, prefix) &&
strings.HasSuffix(name, suffix)
}
@@ -127,6 +127,13 @@ func (s RefSpec) Dst(n plumbing.ReferenceName) plumbing.ReferenceName {
return plumbing.ReferenceName(dst[0:wd] + match + dst[wd+1:])
}
+func (s RefSpec) Reverse() RefSpec {
+ spec := string(s)
+ separator := strings.Index(spec, refSpecSeparator)
+
+ return RefSpec(spec[separator+1:] + refSpecSeparator + spec[:separator])
+}
+
func (s RefSpec) String() string {
return string(s)
}
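A minimal sketch of the new RefSpec.Reverse method added above, using the usual fetch refspec for origin:

```go
package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/config"
	"gopkg.in/src-d/go-git.v4/plumbing"
)

func main() {
	spec := config.RefSpec("refs/heads/*:refs/remotes/origin/*")

	// Reverse swaps the <src>:<dst> sides of the refspec.
	rev := spec.Reverse()
	fmt.Println(rev) // refs/remotes/origin/*:refs/heads/*

	// The reversed spec maps a remote-tracking ref back to its local branch.
	fmt.Println(rev.Dst(plumbing.ReferenceName("refs/remotes/origin/master")))
	// Output: refs/heads/master
}
```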
diff --git a/config/refspec_test.go b/config/refspec_test.go
index 675e075..aaeac73 100644
--- a/config/refspec_test.go
+++ b/config/refspec_test.go
@@ -96,9 +96,38 @@ func (s *RefSpecSuite) TestRefSpecMatch(c *C) {
}
func (s *RefSpecSuite) TestRefSpecMatchGlob(c *C) {
- spec := RefSpec("refs/heads/*:refs/remotes/origin/*")
- c.Assert(spec.Match(plumbing.ReferenceName("refs/tag/foo")), Equals, false)
- c.Assert(spec.Match(plumbing.ReferenceName("refs/heads/foo")), Equals, true)
+ tests := map[string]map[string]bool{
+ "refs/heads/*:refs/remotes/origin/*": {
+ "refs/tag/foo": false,
+ "refs/heads/foo": true,
+ },
+ "refs/heads/*bc:refs/remotes/origin/*bc": {
+ "refs/heads/abc": true,
+ "refs/heads/bc": true,
+ "refs/heads/abx": false,
+ },
+ "refs/heads/a*c:refs/remotes/origin/a*c": {
+ "refs/heads/abc": true,
+ "refs/heads/ac": true,
+ "refs/heads/abx": false,
+ },
+ "refs/heads/ab*:refs/remotes/origin/ab*": {
+ "refs/heads/abc": true,
+ "refs/heads/ab": true,
+ "refs/heads/xbc": false,
+ },
+ }
+
+ for specStr, data := range tests {
+ spec := RefSpec(specStr)
+ for ref, matches := range data {
+ c.Assert(spec.Match(plumbing.ReferenceName(ref)),
+ Equals,
+ matches,
+ Commentf("while matching spec %q against ref %q", specStr, ref),
+ )
+ }
+ }
}
func (s *RefSpecSuite) TestRefSpecDst(c *C) {
@@ -110,12 +139,43 @@ func (s *RefSpecSuite) TestRefSpecDst(c *C) {
}
func (s *RefSpecSuite) TestRefSpecDstBlob(c *C) {
+ ref := "refs/heads/abc"
+ tests := map[string]string{
+ "refs/heads/*:refs/remotes/origin/*": "refs/remotes/origin/abc",
+ "refs/heads/*bc:refs/remotes/origin/*": "refs/remotes/origin/a",
+ "refs/heads/*bc:refs/remotes/origin/*bc": "refs/remotes/origin/abc",
+ "refs/heads/a*c:refs/remotes/origin/*": "refs/remotes/origin/b",
+ "refs/heads/a*c:refs/remotes/origin/a*c": "refs/remotes/origin/abc",
+ "refs/heads/ab*:refs/remotes/origin/*": "refs/remotes/origin/c",
+ "refs/heads/ab*:refs/remotes/origin/ab*": "refs/remotes/origin/abc",
+ "refs/heads/*abc:refs/remotes/origin/*abc": "refs/remotes/origin/abc",
+ "refs/heads/abc*:refs/remotes/origin/abc*": "refs/remotes/origin/abc",
+ // for these two cases, git specifically logs:
+ // error: * Ignoring funny ref 'refs/remotes/origin/' locally
+ // and ignores the ref; go-git does not currently do this validation,
+ // but probably should.
+ // "refs/heads/*abc:refs/remotes/origin/*": "",
+ // "refs/heads/abc*:refs/remotes/origin/*": "",
+ }
+
+ for specStr, dst := range tests {
+ spec := RefSpec(specStr)
+ c.Assert(spec.Dst(plumbing.ReferenceName(ref)).String(),
+ Equals,
+ dst,
+ Commentf("while getting dst from spec %q with ref %q", specStr, ref),
+ )
+ }
+}
+
+func (s *RefSpecSuite) TestRefSpecReverse(c *C) {
spec := RefSpec("refs/heads/*:refs/remotes/origin/*")
c.Assert(
- spec.Dst(plumbing.ReferenceName("refs/heads/foo")).String(), Equals,
- "refs/remotes/origin/foo",
+ spec.Reverse(), Equals,
+ RefSpec("refs/remotes/origin/*:refs/heads/*"),
)
}
+
func (s *RefSpecSuite) TestMatchAny(c *C) {
specs := []RefSpec{
"refs/heads/bar:refs/remotes/origin/foo",
diff --git a/go.mod b/go.mod
index 60d4702..31907a6 100644
--- a/go.mod
+++ b/go.mod
@@ -4,27 +4,28 @@ require (
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 // indirect
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 // indirect
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5
- github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emirpasic/gods v1.12.0
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
- github.com/gliderlabs/ssh v0.1.3
- github.com/google/go-cmp v0.2.0
+ github.com/gliderlabs/ssh v0.2.2
+ github.com/google/go-cmp v0.3.0
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99
github.com/jessevdk/go-flags v1.4.0
- github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e
+ github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd
github.com/mitchellh/go-homedir v1.1.0
github.com/pelletier/go-buffruneio v0.2.0 // indirect
github.com/pkg/errors v0.8.1 // indirect
github.com/sergi/go-diff v1.0.0
github.com/src-d/gcfg v1.4.0
- github.com/stretchr/testify v1.3.0 // indirect
+ github.com/stretchr/objx v0.2.0 // indirect
github.com/xanzy/ssh-agent v0.2.1
- golang.org/x/crypto v0.0.0-20190422183909-d864b10871cd
- golang.org/x/net v0.0.0-20190502183928-7f726cade0ab
- golang.org/x/sys v0.0.0-20190422165155-953cdadca894 // indirect
- golang.org/x/text v0.3.0
+ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4
+ golang.org/x/net v0.0.0-20190724013045-ca1201d0de80
+ golang.org/x/text v0.3.2
+ golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a // indirect
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127
- gopkg.in/src-d/go-billy.v4 v4.3.0
+ gopkg.in/src-d/go-billy.v4 v4.3.2
gopkg.in/src-d/go-git-fixtures.v3 v3.5.0
gopkg.in/warnings.v0 v0.1.2 // indirect
)
+
+go 1.13
diff --git a/go.sum b/go.sum
index 94a6142..21a89fb 100644
--- a/go.sum
+++ b/go.sum
@@ -4,6 +4,7 @@ github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
+github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -13,17 +14,24 @@ github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjr
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/gliderlabs/ssh v0.1.3 h1:cBU46h1lYQk5f2Z+jZbewFKy+1zzE2aUX/ilcPDAm9M=
github.com/gliderlabs/ssh v0.1.3/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
+github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=
+github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e h1:RgQk53JHp/Cjunrr1WlsXSZpqXn+uREuHvUVcK82CV8=
github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
+github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd h1:Coekwdh0v2wtGp9Gmz1Ze3eVRAWJMLokvN3QjdzCHLY=
+github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
@@ -39,6 +47,7 @@ github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAm
github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4=
github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70=
@@ -47,11 +56,17 @@ golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190422183909-d864b10871cd h1:sMHc2rZHuzQmrbVoSpt9HgerkXPyIeCSO6k0zUMGfFk=
golang.org/x/crypto v0.0.0-20190422183909-d864b10871cd/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190420063019-afa5a82059c6 h1:HdqqaWmYAUI7/dmByKKEw+yxDksGSo+9GjkUc9Zp34E=
golang.org/x/net v0.0.0-20190420063019-afa5a82059c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190502183928-7f726cade0ab h1:9RfW3ktsOZxgo9YNbBAjq1FWzc/igwEcUzZz8IXgSbk=
golang.org/x/net v0.0.0-20190502183928-7f726cade0ab/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9 h1:lkiLiLBHGoH3XnqSLUIaBsilGMUjI+Uy2Xu2JLUtTas=
golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -59,12 +74,20 @@ golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e h1:D5TXcfTk7xF7hvieo4QErS3qqCB4teTffacDWr7CI+0=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/src-d/go-billy.v4 v4.3.0 h1:KtlZ4c1OWbIs4jCv5ZXrTqG8EQocr0g/d4DjNg70aek=
gopkg.in/src-d/go-billy.v4 v4.3.0/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk=
+gopkg.in/src-d/go-billy.v4 v4.3.2 h1:0SQA1pRztfTFx2miS8sA97XvooFeNOmvUenF4o0EcVg=
+gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98=
gopkg.in/src-d/go-git-fixtures.v3 v3.5.0 h1:ivZFOIltbce2Mo8IjzUHAFoq/IylO9WHhNOAJK+LsJg=
gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
diff --git a/internal/revision/parser.go b/internal/revision/parser.go
index d2c509e..61de386 100644
--- a/internal/revision/parser.go
+++ b/internal/revision/parser.go
@@ -1,5 +1,5 @@
// Package revision extracts git revision from string
-// More informations about revision : https://www.kernel.org/pub/software/scm/git/docs/gitrevisions.html
+// More information about revisions: https://www.kernel.org/pub/software/scm/git/docs/gitrevisions.html
package revision
import (
diff --git a/internal/revision/parser_test.go b/internal/revision/parser_test.go
index a58f0ad..fe45228 100644
--- a/internal/revision/parser_test.go
+++ b/internal/revision/parser_test.go
@@ -366,7 +366,7 @@ func (s *ParserSuite) TestParseRefWithValidName(c *C) {
}
}
-func (s *ParserSuite) TestParseRefWithUnvalidName(c *C) {
+func (s *ParserSuite) TestParseRefWithInvalidName(c *C) {
datas := map[string]error{
".master": &ErrInvalidRevision{`must not start with "."`},
"/master": &ErrInvalidRevision{`must not start with "/"`},
diff --git a/internal/revision/scanner.go b/internal/revision/scanner.go
index fb5f333..c46c21b 100644
--- a/internal/revision/scanner.go
+++ b/internal/revision/scanner.go
@@ -10,7 +10,7 @@ import (
// validates it belongs to a rune category
type runeCategoryValidator func(r rune) bool
-// tokenizeExpression aggegates a series of runes matching check predicate into a single
+// tokenizeExpression aggregates a series of runes matching check predicate into a single
// string and provides given tokenType as token type
func tokenizeExpression(ch rune, tokenType token, check runeCategoryValidator, r *bufio.Reader) (token, string, error) {
var data []rune
diff --git a/internal/url/url.go b/internal/url/url.go
index 0f0d709..14cf133 100644
--- a/internal/url/url.go
+++ b/internal/url/url.go
@@ -6,7 +6,7 @@ import (
var (
isSchemeRegExp = regexp.MustCompile(`^[^:]+://`)
- scpLikeUrlRegExp = regexp.MustCompile(`^(?:(?P<user>[^@]+)@)?(?P<host>[^:\s]+):(?:(?P<port>[0-9]{1,5})/)?(?P<path>[^\\].*)$`)
+ scpLikeUrlRegExp = regexp.MustCompile(`^(?:(?P<user>[^@]+)@)?(?P<host>[^:\s]+):(?:(?P<port>[0-9]{1,5})(?:\/|:))?(?P<path>[^\\].*\/[^\\].*)$`)
)
// MatchesScheme returns true if the given string matches a URL-like
diff --git a/internal/url/url_test.go b/internal/url/url_test.go
new file mode 100755
index 0000000..d168db6
--- /dev/null
+++ b/internal/url/url_test.go
@@ -0,0 +1,60 @@
+package url
+
+import (
+ "testing"
+
+ . "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type URLSuite struct{}
+
+var _ = Suite(&URLSuite{})
+
+func (s *URLSuite) TestMatchesScpLike(c *C) {
+ examples := []string{
+ "git@github.com:james/bond",
+ "git@github.com:007/bond",
+ "git@github.com:22:james/bond",
+ "git@github.com:22:007/bond",
+ }
+
+ for _, url := range examples {
+ c.Check(MatchesScpLike(url), Equals, true)
+ }
+}
+
+func (s *URLSuite) TestFindScpLikeComponents(c *C) {
+ url := "git@github.com:james/bond"
+ user, host, port, path := FindScpLikeComponents(url)
+
+ c.Check(user, Equals, "git")
+ c.Check(host, Equals, "github.com")
+ c.Check(port, Equals, "")
+ c.Check(path, Equals, "james/bond")
+
+ url = "git@github.com:007/bond"
+ user, host, port, path = FindScpLikeComponents(url)
+
+ c.Check(user, Equals, "git")
+ c.Check(host, Equals, "github.com")
+ c.Check(port, Equals, "")
+ c.Check(path, Equals, "007/bond")
+
+ url = "git@github.com:22:james/bond"
+ user, host, port, path = FindScpLikeComponents(url)
+
+ c.Check(user, Equals, "git")
+ c.Check(host, Equals, "github.com")
+ c.Check(port, Equals, "22")
+ c.Check(path, Equals, "james/bond")
+
+ url = "git@github.com:22:007/bond"
+ user, host, port, path = FindScpLikeComponents(url)
+
+ c.Check(user, Equals, "git")
+ c.Check(host, Equals, "github.com")
+ c.Check(port, Equals, "22")
+ c.Check(path, Equals, "007/bond")
+}
diff --git a/object_walker.go b/object_walker.go
index f8b19cd..1fdcb36 100644
--- a/object_walker.go
+++ b/object_walker.go
@@ -21,7 +21,7 @@ func newObjectWalker(s storage.Storer) *objectWalker {
return &objectWalker{s, map[plumbing.Hash]struct{}{}}
}
-// walkAllRefs walks all (hash) refererences from the repo.
+// walkAllRefs walks all (hash) references from the repo.
func (p *objectWalker) walkAllRefs() error {
// Walk over all the references in the repo.
it, err := p.Storer.IterReferences()
diff --git a/options.go b/options.go
index 7c9e687..9a03560 100644
--- a/options.go
+++ b/options.go
@@ -4,6 +4,7 @@ import (
"errors"
"regexp"
"strings"
+ "time"
"golang.org/x/crypto/openpgp"
"gopkg.in/src-d/go-git.v4/config"
@@ -186,6 +187,9 @@ type PushOptions struct {
// Progress is where the human readable information sent by the server is
// stored, if nil nothing is stored.
Progress sideband.Progress
+ // Prune specifies that remote refs that match the given RefSpecs and that
+ // do not exist locally will be removed.
+ Prune bool
}
// Validate validates the fields and sets the default values.
@@ -242,6 +246,11 @@ type CheckoutOptions struct {
// Force, if true when switching branches, proceed even if the index or the
// working tree differs from HEAD. This is used to throw away local changes
Force bool
+ // Keep, if true when switching branches, local changes (the index or the
+ // working tree changes) will be kept so that they can be committed to the
+ // target branch. Force and Keep are mutually exclusive and should not both
+ // be set to true.
+ Keep bool
}
// Validate validates the fields and sets the default values.
@@ -334,12 +343,27 @@ type LogOptions struct {
// Show only those commits in which the specified file was inserted/updated.
// It is equivalent to running `git log -- <file-name>`.
+ // This field is kept for compatibility; it can be replaced with PathFilter.
FileName *string
+ // Filter commits based on the path of files that are updated.
+ // Takes a file path as argument and should return true if the file is desired.
+ // It can be used to implement `git log -- <path>`,
+ // where <path> is a file path, a directory path, or a regexp of file/directory paths.
+ PathFilter func(string) bool
+
// Pretend as if all the refs in refs/, along with HEAD, are listed on the command line as <commit>.
// It is equivalent to running `git log --all`.
// If set on true, the From option will be ignored.
All bool
+
+ // Show commits more recent than a specific date.
+ // It is equivalent to running `git log --since <date>` or `git log --after <date>`.
+ Since *time.Time
+
+ // Show commits older than a specific date.
+ // It is equivalent to running `git log --until <date>` or `git log --before <date>`.
+ Until *time.Time
}
var (
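The Since and Until fields (and the new PathFilter) added to LogOptions above can be combined in a single Log call, as the updated _examples/log already does for Since/Until. A minimal sketch, assuming a repository in the current directory; the dates and path prefix are placeholders:

```go
package main

import (
	"fmt"
	"log"
	"strings"
	"time"

	"gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

func main() {
	// Assumption: run from inside a git repository.
	r, err := git.PlainOpen(".")
	if err != nil {
		log.Fatal(err)
	}

	ref, err := r.Head()
	if err != nil {
		log.Fatal(err)
	}

	since := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)
	until := time.Date(2019, 7, 30, 0, 0, 0, 0, time.UTC)

	// Roughly `git log --since ... --until ... -- config/` starting at HEAD.
	iter, err := r.Log(&git.LogOptions{
		From:  ref.Hash(),
		Since: &since,
		Until: &until,
		PathFilter: func(path string) bool {
			return strings.HasPrefix(path, "config/")
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	err = iter.ForEach(func(c *object.Commit) error {
		fmt.Println(c.Hash, strings.Split(c.Message, "\n")[0])
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```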
diff --git a/plumbing/filemode/filemode.go b/plumbing/filemode/filemode.go
index 0994bc4..594984f 100644
--- a/plumbing/filemode/filemode.go
+++ b/plumbing/filemode/filemode.go
@@ -32,10 +32,10 @@ const (
Regular FileMode = 0100644
// Deprecated represent non-executable files with the group writable
// bit set. This mode was supported by the first versions of git,
- // but it has been deprecatred nowadays. This library uses them
+ // but it has been deprecated nowadays. This library uses them
// internally, so you can read old packfiles, but will treat them as
// Regulars when interfacing with the outside world. This is the
- // standard git behaviuor.
+ // standard git behaviour.
Deprecated FileMode = 0100664
// Executable represents executable files.
Executable FileMode = 0100755
@@ -152,7 +152,7 @@ func (m FileMode) IsRegular() bool {
}
// IsFile returns if the FileMode represents that of a file, this is,
-// Regular, Deprecated, Excutable or Link.
+// Regular, Deprecated, Executable or Link.
func (m FileMode) IsFile() bool {
return m == Regular ||
m == Deprecated ||
diff --git a/plumbing/filemode/filemode_test.go b/plumbing/filemode/filemode_test.go
index 299c96a..8d713f6 100644
--- a/plumbing/filemode/filemode_test.go
+++ b/plumbing/filemode/filemode_test.go
@@ -126,7 +126,7 @@ func (s *ModeSuite) TestNewFromOsFileModeExclusive(c *C) {
}
func (s *ModeSuite) TestNewFromOsFileModeTemporary(c *C) {
- // temporaty files are ignored
+ // temporary files are ignored
fixture{
input: os.FileMode(0644) | os.ModeTemporary, // Trw-r--r--
expected: Empty, err: "no equivalent.*",
diff --git a/plumbing/format/commitgraph/encoder.go b/plumbing/format/commitgraph/encoder.go
index a06871c..615e833 100644
--- a/plumbing/format/commitgraph/encoder.go
+++ b/plumbing/format/commitgraph/encoder.go
@@ -24,8 +24,6 @@ func NewEncoder(w io.Writer) *Encoder {
// Encode writes an index into the commit-graph file
func (e *Encoder) Encode(idx Index) error {
- var err error
-
// Get all the hashes in the input index
hashes := idx.Hashes()
@@ -39,26 +37,26 @@ func (e *Encoder) Encode(idx Index) error {
chunkSizes = append(chunkSizes, uint64(extraEdgesCount)*4)
}
- if err = e.encodeFileHeader(len(chunkSignatures)); err != nil {
+ if err := e.encodeFileHeader(len(chunkSignatures)); err != nil {
return err
}
- if err = e.encodeChunkHeaders(chunkSignatures, chunkSizes); err != nil {
+ if err := e.encodeChunkHeaders(chunkSignatures, chunkSizes); err != nil {
return err
}
- if err = e.encodeFanout(fanout); err != nil {
+ if err := e.encodeFanout(fanout); err != nil {
return err
}
- if err = e.encodeOidLookup(hashes); err != nil {
+ if err := e.encodeOidLookup(hashes); err != nil {
return err
}
if extraEdges, err := e.encodeCommitData(hashes, hashToIndex, idx); err == nil {
if err = e.encodeExtraEdges(extraEdges); err != nil {
return err
}
- }
- if err != nil {
+ } else {
return err
}
+
return e.encodeChecksum()
}
diff --git a/plumbing/format/commitgraph/file.go b/plumbing/format/commitgraph/file.go
index 175d279..1f82abd 100644
--- a/plumbing/format/commitgraph/file.go
+++ b/plumbing/format/commitgraph/file.go
@@ -249,7 +249,7 @@ func (fi *fileIndex) getHashesFromIndexes(indexes []int) ([]plumbing.Hash, error
// Hashes returns all the hashes that are available in the index
func (fi *fileIndex) Hashes() []plumbing.Hash {
hashes := make([]plumbing.Hash, fi.fanout[0xff])
- for i := 0; i < int(fi.fanout[0xff]); i++ {
+ for i := 0; i < fi.fanout[0xff]; i++ {
offset := fi.oidLookupOffset + int64(i)*20
if n, err := fi.reader.ReadAt(hashes[i][:], offset); err != nil || n < 20 {
return nil
diff --git a/plumbing/format/commitgraph/memory.go b/plumbing/format/commitgraph/memory.go
index a4a96e9..f5afd4c 100644
--- a/plumbing/format/commitgraph/memory.go
+++ b/plumbing/format/commitgraph/memory.go
@@ -31,7 +31,7 @@ func (mi *MemoryIndex) GetIndexByHash(h plumbing.Hash) (int, error) {
// GetCommitDataByIndex gets the commit node from the commit graph using index
// obtained from child node, if available
func (mi *MemoryIndex) GetCommitDataByIndex(i int) (*CommitData, error) {
- if int(i) >= len(mi.commitData) {
+ if i >= len(mi.commitData) {
return nil, plumbing.ErrObjectNotFound
}
diff --git a/plumbing/format/diff/unified_encoder.go b/plumbing/format/diff/unified_encoder.go
index 8bd6d8a..ce3bc7c 100644
--- a/plumbing/format/diff/unified_encoder.go
+++ b/plumbing/format/diff/unified_encoder.go
@@ -4,6 +4,7 @@ import (
"bytes"
"fmt"
"io"
+ "regexp"
"strings"
"gopkg.in/src-d/go-git.v4/plumbing"
@@ -25,9 +26,10 @@ const (
tPath = "+++ %s\n"
binary = "Binary files %s and %s differ\n"
- addLine = "+%s\n"
- deleteLine = "-%s\n"
- equalLine = " %s\n"
+ addLine = "+%s%s"
+ deleteLine = "-%s%s"
+ equalLine = " %s%s"
+ noNewLine = "\n\\ No newline at end of file\n"
oldMode = "old mode %o\n"
newMode = "new mode %o\n"
@@ -94,7 +96,7 @@ func (e *UnifiedEncoder) printMessage(message string) {
isEmpty := message == ""
hasSuffix := strings.HasSuffix(message, "\n")
if !isEmpty && !hasSuffix {
- message = message + "\n"
+ message += "\n"
}
e.buf.WriteString(message)
@@ -216,7 +218,7 @@ func (c *hunksGenerator) processHunk(i int, op Operation) {
linesBefore = c.ctxLines
}
- c.current = &hunk{ctxPrefix: ctxPrefix}
+ c.current = &hunk{ctxPrefix: strings.TrimSuffix(ctxPrefix, "\n")}
c.current.AddOp(Equal, c.beforeContext...)
switch op {
@@ -279,12 +281,13 @@ func (c *hunksGenerator) processEqualsLines(ls []string, i int) {
}
}
+var splitLinesRE = regexp.MustCompile(`[^\n]*(\n|$)`)
+
func splitLines(s string) []string {
- out := strings.Split(s, "\n")
+ out := splitLinesRE.FindAllString(s, -1)
if out[len(out)-1] == "" {
out = out[:len(out)-1]
}
-
return out
}
@@ -346,7 +349,7 @@ type op struct {
}
func (o *op) String() string {
- var prefix string
+ var prefix, suffix string
switch o.t {
case Add:
prefix = addLine
@@ -355,6 +358,10 @@ func (o *op) String() string {
case Equal:
prefix = equalLine
}
+ n := len(o.text)
+ if n > 0 && o.text[n-1] != '\n' {
+ suffix = noNewLine
+ }
- return fmt.Sprintf(prefix, o.text)
+ return fmt.Sprintf(prefix, o.text, suffix)
}
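The switch from strings.Split to the regexp-based splitLines above is what lets the encoder keep each line's trailing newline and emit the "\ No newline at end of file" marker. A standalone sketch of that behavior (the splitter is copied here only for illustration):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Standalone copy of the splitter used by the encoder: unlike strings.Split,
// it keeps the trailing "\n" on every line, so the encoder can tell whether
// the last line of a chunk ended with a newline and print the
// "\ No newline at end of file" marker when it did not.
var splitLinesRE = regexp.MustCompile(`[^\n]*(\n|$)`)

func splitLines(s string) []string {
	out := splitLinesRE.FindAllString(s, -1)
	if out[len(out)-1] == "" {
		out = out[:len(out)-1]
	}
	return out
}

func main() {
	fmt.Printf("%q\n", strings.Split("X\nY\nZ", "\n")) // ["X" "Y" "Z"]: newline info lost
	fmt.Printf("%q\n", splitLines("X\nY\nZ"))          // ["X\n" "Y\n" "Z"]: last line has no "\n"
	fmt.Printf("%q\n", splitLines("X\nY\nZ\n"))        // ["X\n" "Y\n" "Z\n"]
}
```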
diff --git a/plumbing/format/diff/unified_encoder_test.go b/plumbing/format/diff/unified_encoder_test.go
index 7736af1..091a96a 100644
--- a/plumbing/format/diff/unified_encoder_test.go
+++ b/plumbing/format/diff/unified_encoder_test.go
@@ -83,7 +83,7 @@ var oneChunkPatch Patch = testPatch{
content: "A\n",
op: Delete,
}, {
- content: "B\nC\nD\nE\nF\nG",
+ content: "B\nC\nD\nE\nF\nG\n",
op: Equal,
}, {
content: "H\n",
@@ -125,7 +125,7 @@ var oneChunkPatchInverted Patch = testPatch{
content: "A\n",
op: Add,
}, {
- content: "B\nC\nD\nE\nF\nG",
+ content: "B\nC\nD\nE\nF\nG\n",
op: Equal,
}, {
content: "H\n",
@@ -164,13 +164,13 @@ var fixtures []*fixture = []*fixture{{
seed: "hello\nbug\n",
},
chunks: []testChunk{{
- content: "hello",
+ content: "hello\n",
op: Equal,
}, {
- content: "world",
+ content: "world\n",
op: Delete,
}, {
- content: "bug",
+ content: "bug\n",
op: Add,
}},
}},
@@ -239,18 +239,18 @@ rename to test1.txt
from: &testFile{
mode: filemode.Regular,
path: "test.txt",
- seed: "test",
+ seed: "test\n",
},
to: &testFile{
mode: filemode.Regular,
path: "test1.txt",
- seed: "test1",
+ seed: "test1\n",
},
chunks: []testChunk{{
- content: "test",
+ content: "test\n",
op: Delete,
}, {
- content: "test1",
+ content: "test1\n",
op: Add,
}},
}},
@@ -260,7 +260,7 @@ rename to test1.txt
diff: `diff --git a/test.txt b/test1.txt
rename from test.txt
rename to test1.txt
-index 30d74d258442c7c65512eafab474568dd706c430..f079749c42ffdcc5f52ed2d3a6f15b09307e975e 100644
+index 9daeafb9864cf43055ae93beb0afd6c7d144bfa4..a5bce3fd2565d8f458555a0c6f42d0504a848bd5 100644
--- a/test.txt
+++ b/test1.txt
@@ -1 +1 @@
@@ -299,19 +299,19 @@ rename to test1.txt
from: &testFile{
mode: filemode.Regular,
path: "test.txt",
- seed: "test",
+ seed: "test\n",
},
to: &testFile{
mode: filemode.Regular,
path: "test.txt",
- seed: "test2",
+ seed: "test2\n",
},
chunks: []testChunk{{
- content: "test",
+ content: "test\n",
op: Delete,
}, {
- content: "test2",
+ content: "test2\n",
op: Add,
}},
}},
@@ -320,7 +320,7 @@ rename to test1.txt
desc: "one line change",
context: 1,
diff: `diff --git a/test.txt b/test.txt
-index 30d74d258442c7c65512eafab474568dd706c430..d606037cb232bfda7788a8322492312d55b2ae9d 100644
+index 9daeafb9864cf43055ae93beb0afd6c7d144bfa4..180cf8328022becee9aaa2577a8f84ea2b9f3827 100644
--- a/test.txt
+++ b/test.txt
@@ -1 +1 @@
@@ -334,19 +334,19 @@ index 30d74d258442c7c65512eafab474568dd706c430..d606037cb232bfda7788a8322492312d
from: &testFile{
mode: filemode.Regular,
path: "test.txt",
- seed: "test",
+ seed: "test\n",
},
to: &testFile{
mode: filemode.Regular,
path: "test.txt",
- seed: "test2",
+ seed: "test2\n",
},
chunks: []testChunk{{
- content: "test",
+ content: "test\n",
op: Delete,
}, {
- content: "test2",
+ content: "test2\n",
op: Add,
}},
}},
@@ -356,7 +356,7 @@ index 30d74d258442c7c65512eafab474568dd706c430..d606037cb232bfda7788a8322492312d
context: 1,
diff: `this is the message
diff --git a/test.txt b/test.txt
-index 30d74d258442c7c65512eafab474568dd706c430..d606037cb232bfda7788a8322492312d55b2ae9d 100644
+index 9daeafb9864cf43055ae93beb0afd6c7d144bfa4..180cf8328022becee9aaa2577a8f84ea2b9f3827 100644
--- a/test.txt
+++ b/test.txt
@@ -1 +1 @@
@@ -397,7 +397,9 @@ index 30d74d258442c7c65512eafab474568dd706c430..d606037cb232bfda7788a8322492312d
+++ b/test.txt
@@ -1 +1 @@
-test
+\ No newline at end of file
+test2
+\ No newline at end of file
`,
}, {
patch: testPatch{
@@ -407,7 +409,7 @@ index 30d74d258442c7c65512eafab474568dd706c430..d606037cb232bfda7788a8322492312d
to: &testFile{
mode: filemode.Regular,
path: "new.txt",
- seed: "test\ntest2\test3",
+ seed: "test\ntest2\ntest3",
},
chunks: []testChunk{{
@@ -421,13 +423,14 @@ index 30d74d258442c7c65512eafab474568dd706c430..d606037cb232bfda7788a8322492312d
context: 1,
diff: `diff --git a/new.txt b/new.txt
new file mode 100644
-index 0000000000000000000000000000000000000000..65c8dd02a42273038658a22b1cb29c8d9457ca12
+index 0000000000000000000000000000000000000000..3ceaab5442b64a0c2b33dd25fae67ccdb4fd1ea8
--- /dev/null
+++ b/new.txt
@@ -0,0 +1,3 @@
+test
+test2
+test3
+\ No newline at end of file
`,
}, {
patch: testPatch{
@@ -456,6 +459,7 @@ index 30d74d258442c7c65512eafab474568dd706c430..00000000000000000000000000000000
+++ /dev/null
@@ -1 +0,0 @@
-test
+\ No newline at end of file
`,
}, {
patch: oneChunkPatch,
@@ -548,6 +552,7 @@ index ab5eed5d4a2c33aeef67e0188ee79bed666bde6f..0adddcde4fd38042c354518351820eb0
X
Y
Z
+\ No newline at end of file
`,
}, {
patch: oneChunkPatch,
@@ -813,6 +818,47 @@ index 0adddcde4fd38042c354518351820eb06c417c82..553ae669c7a9303cf848fcc749a25692
+++ b/onechunk.txt
@@ -23 +22,0 @@ Y
-Z
+\ No newline at end of file
+`,
+}, {
+ patch: testPatch{
+ message: "",
+ filePatches: []testFilePatch{{
+ from: &testFile{
+ mode: filemode.Regular,
+ path: "onechunk.txt",
+ seed: "B\nC\nD\nE\nF\nG\nI\nJ\nK\nL\nM\nN\nO\nP\nQ\nR\nS\nT\nV\nW\nX\nY\nZ",
+ },
+ to: &testFile{
+ mode: filemode.Regular,
+ path: "onechunk.txt",
+ seed: "B\nC\nD\nE\nF\nG\nI\nJ\nK\nL\nM\nN\nO\nP\nQ\nR\nS\nT\nV\nW\nX\nY",
+ },
+
+ chunks: []testChunk{{
+ content: "B\nC\nD\nE\nF\nG\nI\nJ\nK\nL\nM\nN\nO\nP\nQ\nR\nS\nT\nV\nW\nX\n",
+ op: Equal,
+ }, {
+ content: "Y\nZ",
+ op: Delete,
+ }, {
+ content: "Y",
+ op: Add,
+ }},
+ }},
+ },
+ desc: "remove last letter and no newline at end of file",
+ context: 0,
+ diff: `diff --git a/onechunk.txt b/onechunk.txt
+index 0adddcde4fd38042c354518351820eb06c417c82..d39ae38aad7ba9447b5e7998b2e4714f26c9218d 100644
+--- a/onechunk.txt
++++ b/onechunk.txt
+@@ -22,2 +21 @@ X
+-Y
+-Z
+\ No newline at end of file
++Y
+\ No newline at end of file
`,
}}
diff --git a/plumbing/format/gitattributes/pattern.go b/plumbing/format/gitattributes/pattern.go
index c5ca0c7..d961aba 100644
--- a/plumbing/format/gitattributes/pattern.go
+++ b/plumbing/format/gitattributes/pattern.go
@@ -66,7 +66,7 @@ func (p *pattern) Match(path []string) bool {
doublestar = true
}
- switch true {
+ switch {
case strings.Contains(pattern[0], "**"):
return false
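For reference, the bare `switch {` form introduced above is equivalent to `switch true {`; it is the idiomatic Go spelling for condition-based switches. A minimal standalone sketch (not part of the patch):

package main

import "fmt"

// classify shows the idiomatic bare switch, equivalent to `switch true`.
func classify(n int) string {
	switch {
	case n < 0:
		return "negative"
	case n == 0:
		return "zero"
	default:
		return "positive"
	}
}

func main() {
	fmt.Println(classify(-3), classify(0), classify(7))
}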
diff --git a/plumbing/format/idxfile/decoder.go b/plumbing/format/idxfile/decoder.go
index 5b92782..d1a8a2c 100644
--- a/plumbing/format/idxfile/decoder.go
+++ b/plumbing/format/idxfile/decoder.go
@@ -12,7 +12,7 @@ import (
var (
// ErrUnsupportedVersion is returned by Decode when the idx file version
// is not supported.
- ErrUnsupportedVersion = errors.New("Unsuported version")
+ ErrUnsupportedVersion = errors.New("Unsupported version")
// ErrMalformedIdxFile is returned by Decode when the idx file is corrupted.
ErrMalformedIdxFile = errors.New("Malformed IDX file")
)
@@ -110,10 +110,6 @@ func readObjectNames(idx *MemoryIndex, r io.Reader) error {
continue
}
- if buckets < 0 {
- return ErrMalformedIdxFile
- }
-
idx.FanoutMapping[k] = len(idx.Names)
nameLen := int(buckets * objectIDLength)
diff --git a/plumbing/format/idxfile/writer.go b/plumbing/format/idxfile/writer.go
index aa919e7..fcc78c5 100644
--- a/plumbing/format/idxfile/writer.go
+++ b/plumbing/format/idxfile/writer.go
@@ -147,7 +147,7 @@ func (w *Writer) createIndex() (*MemoryIndex, error) {
idx.Offset32[bucket] = append(idx.Offset32[bucket], buf.Bytes()...)
buf.Truncate(0)
- binary.WriteUint32(buf, uint32(o.CRC32))
+ binary.WriteUint32(buf, o.CRC32)
idx.CRC32[bucket] = append(idx.CRC32[bucket], buf.Bytes()...)
}
diff --git a/plumbing/format/index/decoder_test.go b/plumbing/format/index/decoder_test.go
index 7468ad0..92d312d 100644
--- a/plumbing/format/index/decoder_test.go
+++ b/plumbing/format/index/decoder_test.go
@@ -115,7 +115,7 @@ func (s *IndexSuite) TestDecodeMergeConflict(c *C) {
{TheirMode, "14f8e368114f561c38e134f6e68ea6fea12d77ed"},
}
- // stagged files
+ // staged files
for i, e := range idx.Entries[4:7] {
c.Assert(e.Stage, Equals, expected[i].Stage)
c.Assert(e.CreatedAt.IsZero(), Equals, true)
diff --git a/plumbing/format/index/doc.go b/plumbing/format/index/doc.go
index f2b3d76..39ae6ad 100644
--- a/plumbing/format/index/doc.go
+++ b/plumbing/format/index/doc.go
@@ -320,7 +320,7 @@
// == End of Index Entry
//
// The End of Index Entry (EOIE) is used to locate the end of the variable
-// length index entries and the begining of the extensions. Code can take
+// length index entries and the beginning of the extensions. Code can take
// advantage of this to quickly locate the index extensions without having
// to parse through all of the index entries.
//
@@ -353,7 +353,7 @@
//
// - A number of index offset entries each consisting of:
//
-// - 32-bit offset from the begining of the file to the first cache entry
+// - 32-bit offset from the beginning of the file to the first cache entry
// in this block of entries.
//
// - 32-bit count of cache entries in this blockpackage index
diff --git a/plumbing/format/index/encoder_test.go b/plumbing/format/index/encoder_test.go
index 78cbbba..ea121fc 100644
--- a/plumbing/format/index/encoder_test.go
+++ b/plumbing/format/index/encoder_test.go
@@ -55,7 +55,7 @@ func (s *IndexSuite) TestEncode(c *C) {
}
-func (s *IndexSuite) TestEncodeUnsuportedVersion(c *C) {
+func (s *IndexSuite) TestEncodeUnsupportedVersion(c *C) {
idx := &Index{Version: 3}
buf := bytes.NewBuffer(nil)
@@ -64,7 +64,7 @@ func (s *IndexSuite) TestEncodeUnsuportedVersion(c *C) {
c.Assert(err, Equals, ErrUnsupportedVersion)
}
-func (s *IndexSuite) TestEncodeWithIntentToAddUnsuportedVersion(c *C) {
+func (s *IndexSuite) TestEncodeWithIntentToAddUnsupportedVersion(c *C) {
idx := &Index{
Version: 2,
Entries: []*Entry{{IntentToAdd: true}},
@@ -76,7 +76,7 @@ func (s *IndexSuite) TestEncodeWithIntentToAddUnsuportedVersion(c *C) {
c.Assert(err, Equals, ErrUnsupportedVersion)
}
-func (s *IndexSuite) TestEncodeWithSkipWorktreeUnsuportedVersion(c *C) {
+func (s *IndexSuite) TestEncodeWithSkipWorktreeUnsupportedVersion(c *C) {
idx := &Index{
Version: 2,
Entries: []*Entry{{SkipWorktree: true}},
diff --git a/plumbing/format/index/index.go b/plumbing/format/index/index.go
index 6c4b7ca..6653c91 100644
--- a/plumbing/format/index/index.go
+++ b/plumbing/format/index/index.go
@@ -198,7 +198,7 @@ type ResolveUndoEntry struct {
}
// EndOfIndexEntry is the End of Index Entry (EOIE) is used to locate the end of
-// the variable length index entries and the begining of the extensions. Code
+// the variable length index entries and the beginning of the extensions. Code
// can take advantage of this to quickly locate the index extensions without
// having to parse through all of the index entries.
//
diff --git a/plumbing/format/packfile/diff_delta.go b/plumbing/format/packfile/diff_delta.go
index d35e78a..43f87a0 100644
--- a/plumbing/format/packfile/diff_delta.go
+++ b/plumbing/format/packfile/diff_delta.go
@@ -40,8 +40,8 @@ func getDelta(index *deltaIndex, base, target plumbing.EncodedObject) (plumbing.
defer tr.Close()
bb := bufPool.Get().(*bytes.Buffer)
- bb.Reset()
defer bufPool.Put(bb)
+ bb.Reset()
_, err = bb.ReadFrom(br)
if err != nil {
@@ -49,8 +49,8 @@ func getDelta(index *deltaIndex, base, target plumbing.EncodedObject) (plumbing.
}
tb := bufPool.Get().(*bytes.Buffer)
- tb.Reset()
defer bufPool.Put(tb)
+ tb.Reset()
_, err = tb.ReadFrom(tr)
if err != nil {
@@ -77,6 +77,7 @@ func DiffDelta(src, tgt []byte) []byte {
func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte {
buf := bufPool.Get().(*bytes.Buffer)
+ defer bufPool.Put(buf)
buf.Reset()
buf.Write(deltaEncodeSize(len(src)))
buf.Write(deltaEncodeSize(len(tgt)))
@@ -86,6 +87,7 @@ func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte {
}
ibuf := bufPool.Get().(*bytes.Buffer)
+ defer bufPool.Put(ibuf)
ibuf.Reset()
for i := 0; i < len(tgt); i++ {
offset, l := index.findMatch(src, tgt, i)
@@ -127,12 +129,9 @@ func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte {
}
encodeInsertOperation(ibuf, buf)
- bytes := buf.Bytes()
-
- bufPool.Put(buf)
- bufPool.Put(ibuf)
- return bytes
+ // buf.Bytes() is only valid until the next modifying operation on the buffer. Copy it.
+ return append([]byte{}, buf.Bytes()...)
}
func encodeInsertOperation(ibuf, buf *bytes.Buffer) {
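The reordering above follows a common pooled-buffer pattern: defer the Put immediately after Get, Reset before use, and copy buf.Bytes() before returning, because the returned slice aliases the pooled buffer. A minimal sketch of that pattern, using a hypothetical buildGreeting helper (not part of go-git):

package main

import (
	"bytes"
	"fmt"
	"sync"
)

var bufPool = sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}

// buildGreeting mirrors the pattern used in diffDelta: defer the Put right
// after Get, Reset before use, and copy the bytes before returning, since
// buf.Bytes() is only valid until the pooled buffer is reused.
func buildGreeting(name string) []byte {
	buf := bufPool.Get().(*bytes.Buffer)
	defer bufPool.Put(buf)
	buf.Reset()

	buf.WriteString("hello, ")
	buf.WriteString(name)

	return append([]byte{}, buf.Bytes()...)
}

func main() {
	fmt.Println(string(buildGreeting("go-git")))
}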
diff --git a/plumbing/format/packfile/packfile.go b/plumbing/format/packfile/packfile.go
index f528073..21a15de 100644
--- a/plumbing/format/packfile/packfile.go
+++ b/plumbing/format/packfile/packfile.go
@@ -133,8 +133,8 @@ func (p *Packfile) getObjectSize(h *ObjectHeader) (int64, error) {
return h.Length, nil
case plumbing.REFDeltaObject, plumbing.OFSDeltaObject:
buf := bufPool.Get().(*bytes.Buffer)
- buf.Reset()
defer bufPool.Put(buf)
+ buf.Reset()
if _, _, err := p.s.NextObject(buf); err != nil {
return 0, err
@@ -222,11 +222,11 @@ func (p *Packfile) getNextObject(h *ObjectHeader, hash plumbing.Hash) (plumbing.
// optimization only if the expanded version of the object still meets
// the small object threshold condition.
buf := bufPool.Get().(*bytes.Buffer)
+ defer bufPool.Put(buf)
buf.Reset()
if _, _, err := p.s.NextObject(buf); err != nil {
return nil, err
}
- defer bufPool.Put(buf)
size = p.getDeltaObjectSize(buf)
if size <= smallObjectThreshold {
@@ -321,12 +321,12 @@ func (p *Packfile) fillRegularObjectContent(obj plumbing.EncodedObject) error {
func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plumbing.Hash) error {
buf := bufPool.Get().(*bytes.Buffer)
+ defer bufPool.Put(buf)
buf.Reset()
_, _, err := p.s.NextObject(buf)
if err != nil {
return err
}
- defer bufPool.Put(buf)
return p.fillREFDeltaObjectContentWithBuffer(obj, ref, buf)
}
@@ -351,12 +351,12 @@ func (p *Packfile) fillREFDeltaObjectContentWithBuffer(obj plumbing.EncodedObjec
func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset int64) error {
buf := bufPool.Get().(*bytes.Buffer)
+ defer bufPool.Put(buf)
buf.Reset()
_, _, err := p.s.NextObject(buf)
if err != nil {
return err
}
- defer bufPool.Put(buf)
return p.fillOFSDeltaObjectContentWithBuffer(obj, offset, buf)
}
diff --git a/plumbing/format/packfile/parser.go b/plumbing/format/packfile/parser.go
index 71cbba9..d8c0f75 100644
--- a/plumbing/format/packfile/parser.go
+++ b/plumbing/format/packfile/parser.go
@@ -4,6 +4,7 @@ import (
"bytes"
"errors"
"io"
+ "io/ioutil"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/cache"
@@ -263,11 +264,14 @@ func (p *Parser) indexObjects() error {
}
func (p *Parser) resolveDeltas() error {
+ buf := &bytes.Buffer{}
for _, obj := range p.oi {
- content, err := p.get(obj)
+ buf.Reset()
+ err := p.get(obj, buf)
if err != nil {
return err
}
+ content := buf.Bytes()
if err := p.onInflatedObjectHeader(obj.Type, obj.Length, obj.Offset); err != nil {
return err
@@ -279,7 +283,7 @@ func (p *Parser) resolveDeltas() error {
if !obj.IsDelta() && len(obj.Children) > 0 {
for _, child := range obj.Children {
- if _, err := p.resolveObject(child, content); err != nil {
+ if err := p.resolveObject(ioutil.Discard, child, content); err != nil {
return err
}
}
@@ -294,82 +298,87 @@ func (p *Parser) resolveDeltas() error {
return nil
}
-func (p *Parser) get(o *objectInfo) (b []byte, err error) {
- var ok bool
+func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) error {
if !o.ExternalRef { // skip cache check for placeholder parents
- b, ok = p.cache.Get(o.Offset)
+ b, ok := p.cache.Get(o.Offset)
+ if ok {
+ _, err := buf.Write(b)
+ return err
+ }
}
// If it's not on the cache and is not a delta we can try to find it in the
// storage, if there's one. External refs must enter here.
- if !ok && p.storage != nil && !o.Type.IsDelta() {
+ if p.storage != nil && !o.Type.IsDelta() {
e, err := p.storage.EncodedObject(plumbing.AnyObject, o.SHA1)
if err != nil {
- return nil, err
+ return err
}
o.Type = e.Type()
r, err := e.Reader()
if err != nil {
- return nil, err
- }
-
- b = make([]byte, e.Size())
- if _, err = r.Read(b); err != nil {
- return nil, err
+ return err
}
- }
- if b != nil {
- return b, nil
+ _, err = buf.ReadFrom(io.LimitReader(r, e.Size()))
+ return err
}
if o.ExternalRef {
// we were not able to resolve a ref in a thin pack
- return nil, ErrReferenceDeltaNotFound
+ return ErrReferenceDeltaNotFound
}
- var data []byte
if o.DiskType.IsDelta() {
- base, err := p.get(o.Parent)
+ b := bufPool.Get().(*bytes.Buffer)
+ defer bufPool.Put(b)
+ b.Reset()
+ err := p.get(o.Parent, b)
if err != nil {
- return nil, err
+ return err
}
+ base := b.Bytes()
- data, err = p.resolveObject(o, base)
+ err = p.resolveObject(buf, o, base)
if err != nil {
- return nil, err
+ return err
}
} else {
- data, err = p.readData(o)
+ err := p.readData(buf, o)
if err != nil {
- return nil, err
+ return err
}
}
if len(o.Children) > 0 {
+ data := make([]byte, buf.Len())
+ copy(data, buf.Bytes())
p.cache.Put(o.Offset, data)
}
-
- return data, nil
+ return nil
}
func (p *Parser) resolveObject(
+ w io.Writer,
o *objectInfo,
base []byte,
-) ([]byte, error) {
+) error {
if !o.DiskType.IsDelta() {
- return nil, nil
+ return nil
}
-
- data, err := p.readData(o)
+ buf := bufPool.Get().(*bytes.Buffer)
+ defer bufPool.Put(buf)
+ buf.Reset()
+ err := p.readData(buf, o)
if err != nil {
- return nil, err
+ return err
}
+ data := buf.Bytes()
data, err = applyPatchBase(o, data, base)
if err != nil {
- return nil, err
+ return err
}
if p.storage != nil {
@@ -377,37 +386,35 @@ func (p *Parser) resolveObject(
obj.SetSize(o.Size())
obj.SetType(o.Type)
if _, err := obj.Write(data); err != nil {
- return nil, err
+ return err
}
if _, err := p.storage.SetEncodedObject(obj); err != nil {
- return nil, err
+ return err
}
}
-
- return data, nil
+ _, err = w.Write(data)
+ return err
}
-func (p *Parser) readData(o *objectInfo) ([]byte, error) {
+func (p *Parser) readData(w io.Writer, o *objectInfo) error {
if !p.scanner.IsSeekable && o.DiskType.IsDelta() {
data, ok := p.deltas[o.Offset]
if !ok {
- return nil, ErrDeltaNotCached
+ return ErrDeltaNotCached
}
-
- return data, nil
+ _, err := w.Write(data)
+ return err
}
if _, err := p.scanner.SeekObjectHeader(o.Offset); err != nil {
- return nil, err
+ return err
}
- buf := new(bytes.Buffer)
- if _, _, err := p.scanner.NextObject(buf); err != nil {
- return nil, err
+ if _, _, err := p.scanner.NextObject(w); err != nil {
+ return err
}
-
- return buf.Bytes(), nil
+ return nil
}
func applyPatchBase(ota *objectInfo, data, base []byte) ([]byte, error) {
diff --git a/plumbing/format/packfile/patch_delta.go b/plumbing/format/packfile/patch_delta.go
index a972f1c..e1a5141 100644
--- a/plumbing/format/packfile/patch_delta.go
+++ b/plumbing/format/packfile/patch_delta.go
@@ -1,8 +1,9 @@
package packfile
import (
+ "bytes"
"errors"
- "io/ioutil"
+ "io"
"gopkg.in/src-d/go-git.v4/plumbing"
)
@@ -26,19 +27,29 @@ func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) error {
return err
}
- src, err := ioutil.ReadAll(r)
+ buf := bufPool.Get().(*bytes.Buffer)
+ defer bufPool.Put(buf)
+ buf.Reset()
+ _, err = buf.ReadFrom(r)
if err != nil {
return err
}
+ src := buf.Bytes()
- dst, err := PatchDelta(src, delta)
+ dst := bufPool.Get().(*bytes.Buffer)
+ defer bufPool.Put(dst)
+ dst.Reset()
+ err = patchDelta(dst, src, delta)
if err != nil {
return err
}
- target.SetSize(int64(len(dst)))
- _, err = w.Write(dst)
+ target.SetSize(int64(dst.Len()))
+
+ b := byteSlicePool.Get().([]byte)
+ _, err = io.CopyBuffer(w, dst, b)
+ byteSlicePool.Put(b)
return err
}
@@ -51,23 +62,31 @@ var (
// An error will be returned if delta is corrupted (ErrDeltaLen) or an action command
// is not copy from source or copy from delta (ErrDeltaCmd).
func PatchDelta(src, delta []byte) ([]byte, error) {
+ b := &bytes.Buffer{}
+ if err := patchDelta(b, src, delta); err != nil {
+ return nil, err
+ }
+ return b.Bytes(), nil
+}
+
+func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
if len(delta) < deltaSizeMin {
- return nil, ErrInvalidDelta
+ return ErrInvalidDelta
}
srcSz, delta := decodeLEB128(delta)
if srcSz != uint(len(src)) {
- return nil, ErrInvalidDelta
+ return ErrInvalidDelta
}
targetSz, delta := decodeLEB128(delta)
remainingTargetSz := targetSz
var cmd byte
- dest := make([]byte, 0, targetSz)
+ dst.Grow(int(targetSz))
for {
if len(delta) == 0 {
- return nil, ErrInvalidDelta
+ return ErrInvalidDelta
}
cmd = delta[0]
@@ -77,35 +96,35 @@ func PatchDelta(src, delta []byte) ([]byte, error) {
var err error
offset, delta, err = decodeOffset(cmd, delta)
if err != nil {
- return nil, err
+ return err
}
sz, delta, err = decodeSize(cmd, delta)
if err != nil {
- return nil, err
+ return err
}
if invalidSize(sz, targetSz) ||
invalidOffsetSize(offset, sz, srcSz) {
break
}
- dest = append(dest, src[offset:offset+sz]...)
+ dst.Write(src[offset:offset+sz])
remainingTargetSz -= sz
} else if isCopyFromDelta(cmd) {
sz := uint(cmd) // cmd is the size itself
if invalidSize(sz, targetSz) {
- return nil, ErrInvalidDelta
+ return ErrInvalidDelta
}
if uint(len(delta)) < sz {
- return nil, ErrInvalidDelta
+ return ErrInvalidDelta
}
- dest = append(dest, delta[0:sz]...)
+ dst.Write(delta[0:sz])
remainingTargetSz -= sz
delta = delta[sz:]
} else {
- return nil, ErrDeltaCmd
+ return ErrDeltaCmd
}
if remainingTargetSz <= 0 {
@@ -113,7 +132,7 @@ func PatchDelta(src, delta []byte) ([]byte, error) {
}
}
- return dest, nil
+ return nil
}
// Decodes a number encoded as an unsigned LEB128 at the start of some
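PatchDelta keeps its exported ([]byte, error) signature and now delegates to the buffer-based patchDelta. A hedged round-trip sketch, assuming the exported DiffDelta and PatchDelta functions of this package behave as documented:

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
)

func main() {
	src := []byte("the quick brown fox jumps over the lazy dog\n")
	tgt := []byte("the quick brown fox jumps over the lazy cat\n")

	// DiffDelta encodes tgt as a delta against src...
	delta := packfile.DiffDelta(src, tgt)

	// ...and PatchDelta re-applies that delta to src, reproducing tgt.
	out, err := packfile.PatchDelta(src, delta)
	if err != nil {
		panic(err)
	}

	fmt.Println(string(out) == string(tgt)) // true
}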
diff --git a/plumbing/format/packfile/scanner_test.go b/plumbing/format/packfile/scanner_test.go
index a401d6d..3078477 100644
--- a/plumbing/format/packfile/scanner_test.go
+++ b/plumbing/format/packfile/scanner_test.go
@@ -140,6 +140,7 @@ func (s *ScannerSuite) TestReaderReset(c *C) {
p := NewScanner(r)
version, objects, err := p.Header()
+ c.Assert(err, IsNil)
c.Assert(version, Equals, VersionSupported)
c.Assert(objects, Equals, uint32(31))
diff --git a/plumbing/hash.go b/plumbing/hash.go
index 8e60877..637a425 100644
--- a/plumbing/hash.go
+++ b/plumbing/hash.go
@@ -9,7 +9,7 @@ import (
"strconv"
)
-// Hash SHA1 hased content
+// Hash SHA1 hashed content
type Hash [20]byte
// ZeroHash is Hash with value zero
diff --git a/plumbing/object/commit_stats_test.go b/plumbing/object/commit_stats_test.go
index 2fb3f08..dc9e4ad 100644
--- a/plumbing/object/commit_stats_test.go
+++ b/plumbing/object/commit_stats_test.go
@@ -22,7 +22,7 @@ type CommitStatsSuite struct {
var _ = Suite(&CommitStatsSuite{})
func (s *CommitStatsSuite) TestStats(c *C) {
- r, hash := s.writeHisotry(c, []byte("foo\n"), []byte("foo\nbar\n"))
+ r, hash := s.writeHistory(c, []byte("foo\n"), []byte("foo\nbar\n"))
aCommit, err := r.CommitObject(hash)
c.Assert(err, IsNil)
@@ -37,7 +37,7 @@ func (s *CommitStatsSuite) TestStats(c *C) {
}
func (s *CommitStatsSuite) TestStats_RootCommit(c *C) {
- r, hash := s.writeHisotry(c, []byte("foo\n"))
+ r, hash := s.writeHistory(c, []byte("foo\n"))
aCommit, err := r.CommitObject(hash)
c.Assert(err, IsNil)
@@ -53,7 +53,7 @@ func (s *CommitStatsSuite) TestStats_RootCommit(c *C) {
}
func (s *CommitStatsSuite) TestStats_WithoutNewLine(c *C) {
- r, hash := s.writeHisotry(c, []byte("foo\nbar"), []byte("foo\nbar\n"))
+ r, hash := s.writeHistory(c, []byte("foo\nbar"), []byte("foo\nbar\n"))
aCommit, err := r.CommitObject(hash)
c.Assert(err, IsNil)
@@ -67,7 +67,7 @@ func (s *CommitStatsSuite) TestStats_WithoutNewLine(c *C) {
c.Assert(fileStats[0].String(), Equals, " foo | 2 +-\n")
}
-func (s *CommitStatsSuite) writeHisotry(c *C, files ...[]byte) (*git.Repository, plumbing.Hash) {
+func (s *CommitStatsSuite) writeHistory(c *C, files ...[]byte) (*git.Repository, plumbing.Hash) {
cm := &git.CommitOptions{
Author: &object.Signature{Name: "Foo", Email: "foo@example.local", When: time.Now()},
}
diff --git a/plumbing/object/commit_walker_bfs_filtered.go b/plumbing/object/commit_walker_bfs_filtered.go
new file mode 100644
index 0000000..7b17f15
--- /dev/null
+++ b/plumbing/object/commit_walker_bfs_filtered.go
@@ -0,0 +1,176 @@
+package object
+
+import (
+ "io"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/storer"
+)
+
+// NewFilterCommitIter returns a CommitIter that walks the commit history,
+// starting at the passed commit and visiting its parents in breadth-first order.
+// The commits returned by the CommitIter will validate the passed CommitFilter.
+// The history won't be traversed beyond a commit if isLimit is true for it.
+// Each commit will be visited only once.
+// If the commit history can not be traversed, or the Close() method is called,
+// the CommitIter won't return more commits.
+// If no isValid is passed, all ancestors of the from commit will be valid.
+// If no isLimit is passed, all ancestors of all commits will be visited.
+func NewFilterCommitIter(
+ from *Commit,
+ isValid *CommitFilter,
+ isLimit *CommitFilter,
+) CommitIter {
+ var validFilter CommitFilter
+ if isValid == nil {
+ validFilter = func(_ *Commit) bool {
+ return true
+ }
+ } else {
+ validFilter = *isValid
+ }
+
+ var limitFilter CommitFilter
+ if isLimit == nil {
+ limitFilter = func(_ *Commit) bool {
+ return false
+ }
+ } else {
+ limitFilter = *isLimit
+ }
+
+ return &filterCommitIter{
+ isValid: validFilter,
+ isLimit: limitFilter,
+ visited: map[plumbing.Hash]struct{}{},
+ queue: []*Commit{from},
+ }
+}
+
+// CommitFilter returns a boolean for the passed Commit
+type CommitFilter func(*Commit) bool
+
+// filterCommitIter implements CommitIter
+type filterCommitIter struct {
+ isValid CommitFilter
+ isLimit CommitFilter
+ visited map[plumbing.Hash]struct{}
+ queue []*Commit
+ lastErr error
+}
+
+// Next returns the next commit of the CommitIter.
+// It will return io.EOF if there are no more commits to visit,
+// or an error if the history could not be traversed.
+func (w *filterCommitIter) Next() (*Commit, error) {
+ var commit *Commit
+ var err error
+ for {
+ commit, err = w.popNewFromQueue()
+ if err != nil {
+ return nil, w.close(err)
+ }
+
+ w.visited[commit.Hash] = struct{}{}
+
+ if !w.isLimit(commit) {
+ err = w.addToQueue(commit.s, commit.ParentHashes...)
+ if err != nil {
+ return nil, w.close(err)
+ }
+ }
+
+ if w.isValid(commit) {
+ return commit, nil
+ }
+ }
+}
+
+// ForEach runs the passed callback over each Commit returned by the CommitIter
+// until the callback returns an error or there are no more commits to traverse.
+func (w *filterCommitIter) ForEach(cb func(*Commit) error) error {
+ for {
+ commit, err := w.Next()
+ if err == io.EOF {
+ break
+ }
+
+ if err != nil {
+ return err
+ }
+
+ if err := cb(commit); err == storer.ErrStop {
+ break
+ } else if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Error returns the error that caused the CommitIter to stop returning commits
+func (w *filterCommitIter) Error() error {
+ return w.lastErr
+}
+
+// Close closes the CommitIter
+func (w *filterCommitIter) Close() {
+ w.visited = map[plumbing.Hash]struct{}{}
+ w.queue = []*Commit{}
+ w.isLimit = nil
+ w.isValid = nil
+}
+
+// close closes the CommitIter with an error
+func (w *filterCommitIter) close(err error) error {
+ w.Close()
+ w.lastErr = err
+ return err
+}
+
+// popNewFromQueue returns the first new commit from the internal fifo queue,
+// or an io.EOF error if the queue is empty
+func (w *filterCommitIter) popNewFromQueue() (*Commit, error) {
+ var first *Commit
+ for {
+ if len(w.queue) == 0 {
+ if w.lastErr != nil {
+ return nil, w.lastErr
+ }
+
+ return nil, io.EOF
+ }
+
+ first = w.queue[0]
+ w.queue = w.queue[1:]
+ if _, ok := w.visited[first.Hash]; ok {
+ continue
+ }
+
+ return first, nil
+ }
+}
+
+// addToQueue adds the passed commits to the internal fifo queue if they weren't seen
+// or returns an error if the passed hashes could not be used to get valid commits
+func (w *filterCommitIter) addToQueue(
+ store storer.EncodedObjectStorer,
+ hashes ...plumbing.Hash,
+) error {
+ for _, hash := range hashes {
+ if _, ok := w.visited[hash]; ok {
+ continue
+ }
+
+ commit, err := GetCommit(store, hash)
+ if err != nil {
+ return err
+ }
+
+ w.queue = append(w.queue, commit)
+ }
+
+ return nil
+}
+
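A possible usage sketch for NewFilterCommitIter, assuming an already cloned repository in the current directory; the onlyMerges filter is hypothetical and only illustrates the isValid parameter:

package main

import (
	"fmt"

	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

func main() {
	repo, err := git.PlainOpen(".") // hypothetical repository path
	if err != nil {
		panic(err)
	}

	ref, err := repo.Head()
	if err != nil {
		panic(err)
	}

	head, err := repo.CommitObject(ref.Hash())
	if err != nil {
		panic(err)
	}

	// Only merge commits (more than one parent) are considered valid.
	var onlyMerges object.CommitFilter = func(c *object.Commit) bool {
		return c.NumParents() > 1
	}

	// nil isLimit: the whole reachable history is visited.
	iter := object.NewFilterCommitIter(head, &onlyMerges, nil)
	err = iter.ForEach(func(c *object.Commit) error {
		fmt.Println(c.Hash, c.Committer.When.Format("2006-01-02"))
		return nil
	})
	if err != nil {
		panic(err)
	}
}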
diff --git a/plumbing/object/commit_walker_bfs_filtered_test.go b/plumbing/object/commit_walker_bfs_filtered_test.go
new file mode 100644
index 0000000..6984b60
--- /dev/null
+++ b/plumbing/object/commit_walker_bfs_filtered_test.go
@@ -0,0 +1,256 @@
+package object
+
+import (
+ "fmt"
+ "strings"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/storer"
+
+ . "gopkg.in/check.v1"
+)
+
+var _ = Suite(&filterCommitIterSuite{})
+
+type filterCommitIterSuite struct {
+ BaseObjectsSuite
+}
+
+func commitsFromIter(iter CommitIter) ([]*Commit, error) {
+ var commits []*Commit
+ err := iter.ForEach(func(c *Commit) error {
+ commits = append(commits, c)
+ return nil
+ })
+
+ return commits, err
+}
+
+func assertHashes(c *C, commits []*Commit, hashes []string) {
+ if len(commits) != len(hashes) {
+ var expected []string
+ expected = append(expected, hashes...)
+ fmt.Println("expected:", strings.Join(expected, ", "))
+ var got []string
+ for _, c := range commits {
+ got = append(got, c.Hash.String())
+ }
+ fmt.Println(" got:", strings.Join(got, ", "))
+ }
+
+ c.Assert(commits, HasLen, len(hashes))
+ for i, commit := range commits {
+ c.Assert(hashes[i], Equals, commit.Hash.String())
+ }
+}
+
+func validIfCommit(ignored plumbing.Hash) CommitFilter {
+ return func(c *Commit) bool {
+ return c.Hash == ignored
+ }
+}
+
+func not(filter CommitFilter) CommitFilter {
+ return func(c *Commit) bool {
+ return !filter(c)
+ }
+}
+
+/*
+// TestCase history
+
+* 6ecf0ef2c2dffb796033e5a02219af86ec6584e5 <- HEAD
+|
+| * e8d3ffab552895c19b9fcf7aa264d277cde33881
+|/
+* 918c48b83bd081e863dbe1b80f8998f058cd8294
+|
+* af2d6a6954d532f8ffb47615169c8fdf9d383a1a
+|
+* 1669dce138d9b841a518c64b10914d88f5e488ea
+|\
+| * a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69 // isLimit
+| |\
+| | * b8e471f58bcbca63b07bda20e428190409c2db47 // ignored if isLimit is passed
+| |/
+* | 35e85108805c84807bc66a02d91535e1e24b38b9 // isValid; ignored if passed as !isValid
+|/
+* b029517f6300c2da0f4b651b8642506cd6aaf45d
+*/
+
+// TestFilterCommitIter asserts that FilterCommitIter returns all commits from
+// history except e8d3ffab552895c19b9fcf7aa264d277cde33881, which is not
+// reachable from HEAD
+func (s *filterCommitIterSuite) TestFilterCommitIter(c *C) {
+ from := s.commit(c, s.Fixture.Head)
+
+ commits, err := commitsFromIter(NewFilterCommitIter(from, nil, nil))
+ c.Assert(err, IsNil)
+
+ expected := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
+ "918c48b83bd081e863dbe1b80f8998f058cd8294",
+ "af2d6a6954d532f8ffb47615169c8fdf9d383a1a",
+ "1669dce138d9b841a518c64b10914d88f5e488ea",
+ "35e85108805c84807bc66a02d91535e1e24b38b9",
+ "a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69",
+ "b029517f6300c2da0f4b651b8642506cd6aaf45d",
+ "b8e471f58bcbca63b07bda20e428190409c2db47",
+ }
+
+ assertHashes(c, commits, expected)
+}
+
+// TestFilterCommitIterWithValid asserts that FilterCommitIter returns only
+// commits that match the passed isValid filter; in this test case, all commits
+// but one are filtered out of the history
+func (s *filterCommitIterSuite) TestFilterCommitIterWithValid(c *C) {
+ from := s.commit(c, s.Fixture.Head)
+
+ validIf := validIfCommit(plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9"))
+ commits, err := commitsFromIter(NewFilterCommitIter(from, &validIf, nil))
+ c.Assert(err, IsNil)
+
+ expected := []string{
+ "35e85108805c84807bc66a02d91535e1e24b38b9",
+ }
+
+ assertHashes(c, commits, expected)
+}
+
+
+// TestFilterCommitIterWithInvalid asserts that FilterCommitIter returns only
+// commits that match the passed isValid filter; in this test case, only one
+// commit is filtered out of the history
+func (s *filterCommitIterSuite) TestFilterCommitIterWithInvalid(c *C) {
+ from := s.commit(c, s.Fixture.Head)
+
+ validIf := validIfCommit(plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9"))
+ validIfNot := not(validIf)
+ commits, err := commitsFromIter(NewFilterCommitIter(from, &validIfNot, nil))
+ c.Assert(err, IsNil)
+
+ expected := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
+ "918c48b83bd081e863dbe1b80f8998f058cd8294",
+ "af2d6a6954d532f8ffb47615169c8fdf9d383a1a",
+ "1669dce138d9b841a518c64b10914d88f5e488ea",
+ "a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69",
+ "b029517f6300c2da0f4b651b8642506cd6aaf45d",
+ "b8e471f58bcbca63b07bda20e428190409c2db47",
+ }
+
+ assertHashes(c, commits, expected)
+}
+
+// TestFilterCommitIterWithNoValidCommits asserts that FilterCommitIter returns
+// no commits if the passed isValid filter does not allow any commit
+func (s *filterCommitIterSuite) TestFilterCommitIterWithNoValidCommits(c *C) {
+ from := s.commit(c, s.Fixture.Head)
+
+ validIf := validIfCommit(plumbing.NewHash("THIS_COMMIT_DOES_NOT_EXIST"))
+ commits, err := commitsFromIter(NewFilterCommitIter(from, &validIf, nil))
+ c.Assert(err, IsNil)
+ c.Assert(commits, HasLen, 0)
+}
+
+// TestFilterCommitIterWithStopAt asserts that FilterCommitIter returns only the
+// commits that are not beyond the isLimit filter
+func (s *filterCommitIterSuite) TestFilterCommitIterWithStopAt(c *C) {
+ from := s.commit(c, s.Fixture.Head)
+
+ stopAtRule := validIfCommit(plumbing.NewHash("a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69"))
+ commits, err := commitsFromIter(NewFilterCommitIter(from, nil, &stopAtRule))
+ c.Assert(err, IsNil)
+
+ expected := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
+ "918c48b83bd081e863dbe1b80f8998f058cd8294",
+ "af2d6a6954d532f8ffb47615169c8fdf9d383a1a",
+ "1669dce138d9b841a518c64b10914d88f5e488ea",
+ "35e85108805c84807bc66a02d91535e1e24b38b9",
+ "a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69",
+ "b029517f6300c2da0f4b651b8642506cd6aaf45d",
+ }
+
+ assertHashes(c, commits, expected)
+}
+
+// TestFilterCommitIterWithInvalidAndStopAt asserts that FilterCommitIter works
+// properly with isValid and isLimit filters combined
+func (s *filterCommitIterSuite) TestFilterCommitIterWithInvalidAndStopAt(c *C) {
+ from := s.commit(c, s.Fixture.Head)
+
+ stopAtRule := validIfCommit(plumbing.NewHash("a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69"))
+ validIf := validIfCommit(plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9"))
+ validIfNot := not(validIf)
+ commits, err := commitsFromIter(NewFilterCommitIter(from, &validIfNot, &stopAtRule))
+ c.Assert(err, IsNil)
+
+ expected := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
+ "918c48b83bd081e863dbe1b80f8998f058cd8294",
+ "af2d6a6954d532f8ffb47615169c8fdf9d383a1a",
+ "1669dce138d9b841a518c64b10914d88f5e488ea",
+ "a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69",
+ "b029517f6300c2da0f4b651b8642506cd6aaf45d",
+ }
+
+ assertHashes(c, commits, expected)
+}
+
+// TestIteratorForEachCallbackReturn asserts that a ForEach callback returning
+// storer.ErrStop does not cause ForEach to return an error
+//
+// - 6ecf0ef2c2dffb796033e5a02219af86ec6584e5
+// - 918c48b83bd081e863dbe1b80f8998f058cd8294 //<- stop
+// - af2d6a6954d532f8ffb47615169c8fdf9d383a1a
+// - 1669dce138d9b841a518c64b10914d88f5e488ea //<- err
+// - 35e85108805c84807bc66a02d91535e1e24b38b9
+// - a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69
+// - b029517f6300c2da0f4b651b8642506cd6aaf45d
+// - b8e471f58bcbca63b07bda20e428190409c2db47
+func (s *filterCommitIterSuite) TestIteratorForEachCallbackReturn(c *C) {
+
+ var visited []*Commit
+ errUnexpected := fmt.Errorf("Could not continue")
+ cb := func(c *Commit) error {
+ switch c.Hash {
+ case plumbing.NewHash("918c48b83bd081e863dbe1b80f8998f058cd8294"):
+ return storer.ErrStop
+ case plumbing.NewHash("1669dce138d9b841a518c64b10914d88f5e488ea"):
+ return errUnexpected
+ }
+
+ visited = append(visited, c)
+ return nil
+ }
+
+ from := s.commit(c, s.Fixture.Head)
+
+ iter := NewFilterCommitIter(from, nil, nil)
+ err := iter.ForEach(cb)
+ c.Assert(err, IsNil)
+ expected := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
+ }
+ assertHashes(c, visited, expected)
+
+ err = iter.ForEach(cb)
+ c.Assert(err, Equals, errUnexpected)
+ expected = []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
+ "af2d6a6954d532f8ffb47615169c8fdf9d383a1a",
+ }
+ assertHashes(c, visited, expected)
+
+ err = iter.ForEach(cb)
+ c.Assert(err, IsNil)
+ expected = []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
+ "af2d6a6954d532f8ffb47615169c8fdf9d383a1a",
+ "35e85108805c84807bc66a02d91535e1e24b38b9",
+ "a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69",
+ "b029517f6300c2da0f4b651b8642506cd6aaf45d",
+ "b8e471f58bcbca63b07bda20e428190409c2db47",
+ }
+ assertHashes(c, visited, expected)
+}
diff --git a/plumbing/object/commit_walker_limit.go b/plumbing/object/commit_walker_limit.go
new file mode 100644
index 0000000..ee56e50
--- /dev/null
+++ b/plumbing/object/commit_walker_limit.go
@@ -0,0 +1,65 @@
+package object
+
+import (
+ "io"
+ "time"
+
+ "gopkg.in/src-d/go-git.v4/plumbing/storer"
+)
+
+type commitLimitIter struct {
+ sourceIter CommitIter
+ limitOptions LogLimitOptions
+}
+
+type LogLimitOptions struct {
+ Since *time.Time
+ Until *time.Time
+}
+
+func NewCommitLimitIterFromIter(commitIter CommitIter, limitOptions LogLimitOptions) CommitIter {
+ iterator := new(commitLimitIter)
+ iterator.sourceIter = commitIter
+ iterator.limitOptions = limitOptions
+ return iterator
+}
+
+func (c *commitLimitIter) Next() (*Commit, error) {
+ for {
+ commit, err := c.sourceIter.Next()
+ if err != nil {
+ return nil, err
+ }
+
+ if c.limitOptions.Since != nil && commit.Committer.When.Before(*c.limitOptions.Since) {
+ continue
+ }
+ if c.limitOptions.Until != nil && commit.Committer.When.After(*c.limitOptions.Until) {
+ continue
+ }
+ return commit, nil
+ }
+}
+
+func (c *commitLimitIter) ForEach(cb func(*Commit) error) error {
+ for {
+ commit, nextErr := c.Next()
+ if nextErr == io.EOF {
+ break
+ }
+ if nextErr != nil {
+ return nextErr
+ }
+ err := cb(commit)
+ if err == storer.ErrStop {
+ return nil
+ } else if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (c *commitLimitIter) Close() {
+ c.sourceIter.Close()
+}
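A possible usage sketch for NewCommitLimitIterFromIter, wrapping a plain pre-order walk with Since/Until limits; the repository path and the one-month window are assumptions for illustration:

package main

import (
	"fmt"
	"time"

	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

func main() {
	repo, err := git.PlainOpen(".") // hypothetical repository path
	if err != nil {
		panic(err)
	}
	ref, err := repo.Head()
	if err != nil {
		panic(err)
	}
	head, err := repo.CommitObject(ref.Hash())
	if err != nil {
		panic(err)
	}

	since := time.Now().AddDate(0, -1, 0) // only commits from the last month
	until := time.Now()

	// Wrap a plain pre-order walk with the Since/Until limits.
	base := object.NewCommitPreorderIter(head, nil, nil)
	iter := object.NewCommitLimitIterFromIter(base, object.LogLimitOptions{
		Since: &since,
		Until: &until,
	})

	_ = iter.ForEach(func(c *object.Commit) error {
		fmt.Println(c.Hash, c.Committer.When)
		return nil
	})
}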
diff --git a/plumbing/object/commit_walker_file.go b/plumbing/object/commit_walker_path.go
index 6f16e61..6a49fd1 100644
--- a/plumbing/object/commit_walker_file.go
+++ b/plumbing/object/commit_walker_path.go
@@ -8,27 +8,39 @@ import (
"gopkg.in/src-d/go-git.v4/plumbing/storer"
)
-type commitFileIter struct {
- fileName string
+type commitPathIter struct {
+ pathFilter func(string) bool
sourceIter CommitIter
currentCommit *Commit
checkParent bool
}
-// NewCommitFileIterFromIter returns a commit iterator which performs diffTree between
+// NewCommitPathIterFromIter returns a commit iterator which performs diffTree between
// successive trees returned from the commit iterator from the argument. The purpose of this is
// to find the commits that explain how the files that match the path came to be.
// If checkParent is true then the function double checks if potential parent (next commit in a path)
// is one of the parents in the tree (it's used by `git log --all`).
-func NewCommitFileIterFromIter(fileName string, commitIter CommitIter, checkParent bool) CommitIter {
- iterator := new(commitFileIter)
+// pathFilter is a function that takes a file path as argument and returns true if the file should be included
+func NewCommitPathIterFromIter(pathFilter func(string) bool, commitIter CommitIter, checkParent bool) CommitIter {
+ iterator := new(commitPathIter)
iterator.sourceIter = commitIter
- iterator.fileName = fileName
+ iterator.pathFilter = pathFilter
iterator.checkParent = checkParent
return iterator
}
-func (c *commitFileIter) Next() (*Commit, error) {
+// NewCommitFileIterFromIter is kept for compatibility; it can be replaced with NewCommitPathIterFromIter
+func NewCommitFileIterFromIter(fileName string, commitIter CommitIter, checkParent bool) CommitIter {
+ return NewCommitPathIterFromIter(
+ func(path string) bool {
+ return path == fileName
+ },
+ commitIter,
+ checkParent,
+ )
+}
+
+func (c *commitPathIter) Next() (*Commit, error) {
if c.currentCommit == nil {
var err error
c.currentCommit, err = c.sourceIter.Next()
@@ -45,7 +57,7 @@ func (c *commitFileIter) Next() (*Commit, error) {
return commit, commitErr
}
-func (c *commitFileIter) getNextFileCommit() (*Commit, error) {
+func (c *commitPathIter) getNextFileCommit() (*Commit, error) {
for {
// Parent-commit can be nil if the current-commit is the initial commit
parentCommit, parentCommitErr := c.sourceIter.Next()
@@ -96,9 +108,9 @@ func (c *commitFileIter) getNextFileCommit() (*Commit, error) {
}
}
-func (c *commitFileIter) hasFileChange(changes Changes, parent *Commit) bool {
+func (c *commitPathIter) hasFileChange(changes Changes, parent *Commit) bool {
for _, change := range changes {
- if change.name() != c.fileName {
+ if !c.pathFilter(change.name()) {
continue
}
@@ -125,9 +137,12 @@ func isParentHash(hash plumbing.Hash, commit *Commit) bool {
return false
}
-func (c *commitFileIter) ForEach(cb func(*Commit) error) error {
+func (c *commitPathIter) ForEach(cb func(*Commit) error) error {
for {
commit, nextErr := c.Next()
+ if nextErr == io.EOF {
+ break
+ }
if nextErr != nil {
return nextErr
}
@@ -138,8 +153,9 @@ func (c *commitFileIter) ForEach(cb func(*Commit) error) error {
return err
}
}
+ return nil
}
-func (c *commitFileIter) Close() {
+func (c *commitPathIter) Close() {
c.sourceIter.Close()
}
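A possible usage sketch for NewCommitPathIterFromIter, assuming a repository with a docs/ directory; the inDocs filter is hypothetical and shows how pathFilter generalizes the old fileName match:

package main

import (
	"fmt"
	"strings"

	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

func main() {
	repo, err := git.PlainOpen(".") // hypothetical repository path
	if err != nil {
		panic(err)
	}
	ref, err := repo.Head()
	if err != nil {
		panic(err)
	}
	head, err := repo.CommitObject(ref.Hash())
	if err != nil {
		panic(err)
	}

	// Keep only commits touching files under docs/; the old
	// NewCommitFileIterFromIter behaviour is the special case path == fileName.
	inDocs := func(path string) bool { return strings.HasPrefix(path, "docs/") }

	iter := object.NewCommitPathIterFromIter(inDocs, object.NewCommitPreorderIter(head, nil, nil), false)
	_ = iter.ForEach(func(c *object.Commit) error {
		fmt.Println(c.Hash)
		return nil
	})
}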
diff --git a/plumbing/object/merge_base.go b/plumbing/object/merge_base.go
new file mode 100644
index 0000000..6f2568d
--- /dev/null
+++ b/plumbing/object/merge_base.go
@@ -0,0 +1,210 @@
+package object
+
+import (
+ "fmt"
+ "sort"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/storer"
+)
+
+// errIsReachable is returned when the first commit is an ancestor of the second
+var errIsReachable = fmt.Errorf("first is reachable from second")
+
+// MergeBase mimics the behavior of `git merge-base actual other`, returning the
+// best common ancestors between the actual commit and the passed one.
+// The best common ancestors can not be reached from other common ancestors.
+func (c *Commit) MergeBase(other *Commit) ([]*Commit, error) {
+ // use sortedByCommitDateDesc strategy
+ sorted := sortByCommitDateDesc(c, other)
+ newer := sorted[0]
+ older := sorted[1]
+
+ newerHistory, err := ancestorsIndex(older, newer)
+ if err == errIsReachable {
+ return []*Commit{older}, nil
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ var res []*Commit
+ inNewerHistory := isInIndexCommitFilter(newerHistory)
+ resIter := NewFilterCommitIter(older, &inNewerHistory, &inNewerHistory)
+ _ = resIter.ForEach(func(commit *Commit) error {
+ res = append(res, commit)
+ return nil
+ })
+
+ return Independents(res)
+}
+
+// IsAncestor returns true if the actual commit is an ancestor of the passed one.
+// It returns an error if the history is not traversable.
+// It mimics the behavior of `git merge-base --is-ancestor actual other`
+func (c *Commit) IsAncestor(other *Commit) (bool, error) {
+ found := false
+ iter := NewCommitPreorderIter(other, nil, nil)
+ err := iter.ForEach(func(comm *Commit) error {
+ if comm.Hash != c.Hash {
+ return nil
+ }
+
+ found = true
+ return storer.ErrStop
+ })
+
+ return found, err
+}
+
+// ancestorsIndex returns a map with the ancestors of the starting commit if the
+// excluded one is not one of them. It returns errIsReachable if the excluded commit
+// is an ancestor of the starting one, or another error if the history is not traversable.
+func ancestorsIndex(excluded, starting *Commit) (map[plumbing.Hash]struct{}, error) {
+ if excluded.Hash.String() == starting.Hash.String() {
+ return nil, errIsReachable
+ }
+
+ startingHistory := map[plumbing.Hash]struct{}{}
+ startingIter := NewCommitIterBSF(starting, nil, nil)
+ err := startingIter.ForEach(func(commit *Commit) error {
+ if commit.Hash == excluded.Hash {
+ return errIsReachable
+ }
+
+ startingHistory[commit.Hash] = struct{}{}
+ return nil
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ return startingHistory, nil
+}
+
+// Independents returns the subset of the passed commits that are not reachable from the others.
+// It mimics the behavior of `git merge-base --independent commit...`.
+func Independents(commits []*Commit) ([]*Commit, error) {
+ // use sortedByCommitDateDesc strategy
+ candidates := sortByCommitDateDesc(commits...)
+ candidates = removeDuplicated(candidates)
+
+ seen := map[plumbing.Hash]struct{}{}
+ var isLimit CommitFilter = func(commit *Commit) bool {
+ _, ok := seen[commit.Hash]
+ return ok
+ }
+
+ if len(candidates) < 2 {
+ return candidates, nil
+ }
+
+ pos := 0
+ for {
+ from := candidates[pos]
+ others := remove(candidates, from)
+ fromHistoryIter := NewFilterCommitIter(from, nil, &isLimit)
+ err := fromHistoryIter.ForEach(func(fromAncestor *Commit) error {
+ for _, other := range others {
+ if fromAncestor.Hash == other.Hash {
+ candidates = remove(candidates, other)
+ others = remove(others, other)
+ }
+ }
+
+ if len(candidates) == 1 {
+ return storer.ErrStop
+ }
+
+ seen[fromAncestor.Hash] = struct{}{}
+ return nil
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ nextPos := indexOf(candidates, from) + 1
+ if nextPos >= len(candidates) {
+ break
+ }
+
+ pos = nextPos
+ }
+
+ return candidates, nil
+}
+
+// sortByCommitDateDesc returns the passed commits, sorted by `committer.When desc`
+//
+// This strategy tries to reduce the time needed to walk the history from one
+// commit until the others are reached, assuming that ancestors are usually
+// committed before their descendants.
+// That way, `Independents(A^, A)` is processed as if it were `Independents(A, A^)`;
+// starting from `A`, `A^` is reached much sooner than it would be by walking from
+// `A^` down to the initial commit and then from `A` to `A^`.
+func sortByCommitDateDesc(commits ...*Commit) []*Commit {
+ sorted := make([]*Commit, len(commits))
+ copy(sorted, commits)
+ sort.Slice(sorted, func(i, j int) bool {
+ return sorted[i].Committer.When.After(sorted[j].Committer.When)
+ })
+
+ return sorted
+}
+
+// indexOf returns the first position where target was found in the passed commits
+func indexOf(commits []*Commit, target *Commit) int {
+ for i, commit := range commits {
+ if target.Hash == commit.Hash {
+ return i
+ }
+ }
+
+ return -1
+}
+
+// remove returns the passed commits excluding the commit toDelete
+func remove(commits []*Commit, toDelete *Commit) []*Commit {
+ res := make([]*Commit, len(commits))
+ j := 0
+ for _, commit := range commits {
+ if commit.Hash == toDelete.Hash {
+ continue
+ }
+
+ res[j] = commit
+ j++
+ }
+
+ return res[:j]
+}
+
+// removeDuplicated removes duplicated commits from the passed slice of commits
+func removeDuplicated(commits []*Commit) []*Commit {
+ seen := make(map[plumbing.Hash]struct{}, len(commits))
+ res := make([]*Commit, len(commits))
+ j := 0
+ for _, commit := range commits {
+ if _, ok := seen[commit.Hash]; ok {
+ continue
+ }
+
+ seen[commit.Hash] = struct{}{}
+ res[j] = commit
+ j++
+ }
+
+ return res[:j]
+}
+
+// isInIndexCommitFilter returns a commitFilter that returns true
+// if the commit is in the passed index.
+func isInIndexCommitFilter(index map[plumbing.Hash]struct{}) CommitFilter {
+ return func(c *Commit) bool {
+ _, ok := index[c.Hash]
+ return ok
+ }
+}
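A possible usage sketch for MergeBase and IsAncestor, assuming a repository with master and dev branches (hypothetical names); it mirrors `git merge-base master dev` and `git merge-base --is-ancestor master dev`:

package main

import (
	"fmt"

	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

// commitFromRev is a small helper resolving a revision (branch name, hash, ...)
// to its *object.Commit; the branch names used below are hypothetical.
func commitFromRev(repo *git.Repository, rev string) *object.Commit {
	h, err := repo.ResolveRevision(plumbing.Revision(rev))
	if err != nil {
		panic(err)
	}
	c, err := repo.CommitObject(*h)
	if err != nil {
		panic(err)
	}
	return c
}

func main() {
	repo, err := git.PlainOpen(".") // hypothetical repository path
	if err != nil {
		panic(err)
	}

	master := commitFromRev(repo, "master")
	dev := commitFromRev(repo, "dev")

	// Equivalent of `git merge-base master dev`; it may return more than one
	// commit when there are cross merges.
	bases, err := master.MergeBase(dev)
	if err != nil {
		panic(err)
	}
	for _, b := range bases {
		fmt.Println("merge base:", b.Hash)
	}

	// Equivalent of `git merge-base --is-ancestor master dev`.
	ok, err := master.IsAncestor(dev)
	if err != nil {
		panic(err)
	}
	fmt.Println("master is ancestor of dev:", ok)
}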
diff --git a/plumbing/object/merge_base_test.go b/plumbing/object/merge_base_test.go
new file mode 100644
index 0000000..72c9cd9
--- /dev/null
+++ b/plumbing/object/merge_base_test.go
@@ -0,0 +1,323 @@
+package object
+
+import (
+ "fmt"
+ "sort"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/cache"
+ "gopkg.in/src-d/go-git.v4/storage/filesystem"
+
+ . "gopkg.in/check.v1"
+ fixtures "gopkg.in/src-d/go-git-fixtures.v3"
+)
+
+func alphabeticSortCommits(commits []*Commit) {
+ sort.Slice(commits, func(i, j int) bool {
+ return commits[i].Hash.String() > commits[j].Hash.String()
+ })
+}
+
+/*
+
+The following tests consider this history having two root commits: V and W
+
+V---o---M----AB----A---CD1--P---C--------S-------------------Q < master
+ \ \ / / /
+ \ X GQ1---G < feature /
+ \ / \ / / /
+W---o---N----o----B---CD2---o---D----o----GQ2------------o < dev
+
+MergeBase
+----------------------------
+passed merge-base
+ M, N Commits with unrelated history, have no merge-base
+ A, B AB Regular merge-base between two commits
+ A, A            A          The merge-base between equal commits is the same commit
+ Q, N            N          The merge-base between a commit and its ancestor is the ancestor
+ C, D CD1, CD2 Cross merges causes more than one merge-base
+ G, Q GQ1, GQ2 Feature branches including merges, causes more than one merge-base
+
+Independents
+----------------------------
+candidates result
+ A A Only one commit returns it
+ A, A, A A Repeated commits are ignored
+ A, A, M, M, N A, N M is reachable from A, so it is not independent
+ S, G, P S, G P is reachable from S, so it is not independent
+ CD1, CD2, M, N CD1, CD2 M and N are reachable from CD2, so they're not
+ C, G, dev, M, N C, G, dev M and N are reachable from G, so they're not
+ C, D, M, N C, D M and N are reachable from C, so they're not
+ A, A^, A, N, N^ A, N A^ and N^ are reachable from A and N
+ A^^^, A^, A^^, A, N A, N A^^^, A^^ and A^ are reachable from A, so they're not
+
+IsAncestor
+----------------------------
+passed result
+ A^^, A true Will be true if first is ancestor of the second
+ M, G true True because it will also reach G from M crossing merge commits
+ A, A true True if first and second are the same
+ M, N false Commits with unrelated history, will return false
+*/
+
+var _ = Suite(&mergeBaseSuite{})
+
+type mergeBaseSuite struct {
+ BaseObjectsSuite
+}
+
+func (s *mergeBaseSuite) SetUpSuite(c *C) {
+ s.Suite.SetUpSuite(c)
+ s.Fixture = fixtures.ByTag("merge-base").One()
+ s.Storer = filesystem.NewStorage(s.Fixture.DotGit(), cache.NewObjectLRUDefault())
+}
+
+var revisionIndex = map[string]plumbing.Hash{
+ "master": plumbing.NewHash("dce0e0c20d701c3d260146e443d6b3b079505191"),
+ "feature": plumbing.NewHash("d1b0093698e398d596ef94d646c4db37e8d1e970"),
+ "dev": plumbing.NewHash("25ca6c810c08482d61113fbcaaada38bb59093a8"),
+ "M": plumbing.NewHash("bb355b64e18386dbc3af63dfd09c015c44cbd9b6"),
+ "N": plumbing.NewHash("d64b894762ab5f09e2b155221b90c18bd0637236"),
+ "A": plumbing.NewHash("29740cfaf0c2ee4bb532dba9e80040ca738f367c"),
+ "B": plumbing.NewHash("2c84807970299ba98951c65fe81ebbaac01030f0"),
+ "AB": plumbing.NewHash("31a7e081a28f149ee98ffd13ba1a6d841a5f46fd"),
+ "P": plumbing.NewHash("ff84393134864cf9d3a9853a81bde81778bd5805"),
+ "C": plumbing.NewHash("8b72fabdc4222c3ff965bc310ded788c601c50ed"),
+ "D": plumbing.NewHash("14777cf3e209334592fbfd0b878f6868394db836"),
+ "CD1": plumbing.NewHash("4709e13a3cbb300c2b8a917effda776e1b8955c7"),
+ "CD2": plumbing.NewHash("38468e274e91e50ffb637b88a1954ab6193fe974"),
+ "S": plumbing.NewHash("628f1a42b70380ed05734bf01b468b46206ef1ea"),
+ "G": plumbing.NewHash("d1b0093698e398d596ef94d646c4db37e8d1e970"),
+ "Q": plumbing.NewHash("dce0e0c20d701c3d260146e443d6b3b079505191"),
+ "GQ1": plumbing.NewHash("ccaaa99c21dad7e9f392c36ae8cb72dc63bed458"),
+ "GQ2": plumbing.NewHash("806824d4778e94fe7c3244e92a9cd07090c9ab54"),
+ "A^": plumbing.NewHash("31a7e081a28f149ee98ffd13ba1a6d841a5f46fd"),
+ "A^^": plumbing.NewHash("bb355b64e18386dbc3af63dfd09c015c44cbd9b6"),
+ "A^^^": plumbing.NewHash("8d08dd1388b82dd354cb43918d83da86c76b0978"),
+ "N^": plumbing.NewHash("b6e1fc8dad4f1068fb42774ec5fc65c065b2c312"),
+}
+
+func (s *mergeBaseSuite) commitsFromRevs(c *C, revs []string) ([]*Commit, error) {
+ var commits []*Commit
+ for _, rev := range revs {
+ hash, ok := revisionIndex[rev]
+ if !ok {
+ return nil, fmt.Errorf("Revision not found '%s'", rev)
+ }
+
+ commits = append(commits, s.commit(c, hash))
+ }
+
+ return commits, nil
+}
+
+// AssertMergeBase validates that the merge-base of the passed revs
+// matches the expected result
+func (s *mergeBaseSuite) AssertMergeBase(c *C, revs, expectedRevs []string) {
+ c.Assert(revs, HasLen, 2)
+
+ commits, err := s.commitsFromRevs(c, revs)
+ c.Assert(err, IsNil)
+
+ results, err := commits[0].MergeBase(commits[1])
+ c.Assert(err, IsNil)
+
+ expected, err := s.commitsFromRevs(c, expectedRevs)
+ c.Assert(err, IsNil)
+
+ c.Assert(results, HasLen, len(expected))
+
+ alphabeticSortCommits(results)
+ alphabeticSortCommits(expected)
+ for i, commit := range results {
+ c.Assert(commit.Hash.String(), Equals, expected[i].Hash.String())
+ }
+}
+
+// AssertIndependents validates the independent commits of the passed list
+func (s *mergeBaseSuite) AssertIndependents(c *C, revs, expectedRevs []string) {
+ commits, err := s.commitsFromRevs(c, revs)
+ c.Assert(err, IsNil)
+
+ results, err := Independents(commits)
+ c.Assert(err, IsNil)
+
+ expected, err := s.commitsFromRevs(c, expectedRevs)
+ c.Assert(err, IsNil)
+
+ c.Assert(results, HasLen, len(expected))
+
+ alphabeticSortCommits(results)
+ alphabeticSortCommits(expected)
+ for i, commit := range results {
+ c.Assert(commit.Hash.String(), Equals, expected[i].Hash.String())
+ }
+}
+
+// AssertAncestor validates if the first rev is ancestor of the second one
+func (s *mergeBaseSuite) AssertAncestor(c *C, revs []string, shouldBeAncestor bool) {
+ c.Assert(revs, HasLen, 2)
+
+ commits, err := s.commitsFromRevs(c, revs)
+ c.Assert(err, IsNil)
+
+ isAncestor, err := commits[0].IsAncestor(commits[1])
+ c.Assert(err, IsNil)
+ c.Assert(isAncestor, Equals, shouldBeAncestor)
+}
+
+// TestNoAncestorsWhenNoCommonHistory validates that merge-base returns no commits
+// when there is no common history (M, N -> none)
+func (s *mergeBaseSuite) TestNoAncestorsWhenNoCommonHistory(c *C) {
+ revs := []string{"M", "N"}
+ nothing := []string{}
+ s.AssertMergeBase(c, revs, nothing)
+}
+
+// TestCommonAncestorInMergedOrphans validates that merge-base returns a common
+// ancestor in orphan branches when they were merged (A, B -> AB)
+func (s *mergeBaseSuite) TestCommonAncestorInMergedOrphans(c *C) {
+ revs := []string{"A", "B"}
+ expectedRevs := []string{"AB"}
+ s.AssertMergeBase(c, revs, expectedRevs)
+}
+
+// TestMergeBaseWithSelf validates that merge-base between equal commits returns
+// the same commit (A, A -> A)
+func (s *mergeBaseSuite) TestMergeBaseWithSelf(c *C) {
+ revs := []string{"A", "A"}
+ expectedRevs := []string{"A"}
+ s.AssertMergeBase(c, revs, expectedRevs)
+}
+
+// TestMergeBaseWithAncestor validates that merge-base between a commit and its
+// ancestor returns the ancestor (Q, N -> N)
+func (s *mergeBaseSuite) TestMergeBaseWithAncestor(c *C) {
+ revs := []string{"Q", "N"}
+ expectedRevs := []string{"N"}
+ s.AssertMergeBase(c, revs, expectedRevs)
+}
+
+// TestDoubleCommonAncestorInCrossMerge validates that merge-base returns two
+// common ancestors when there are cross merges (C, D -> CD1, CD2)
+func (s *mergeBaseSuite) TestDoubleCommonAncestorInCrossMerge(c *C) {
+ revs := []string{"C", "D"}
+ expectedRevs := []string{"CD1", "CD2"}
+ s.AssertMergeBase(c, revs, expectedRevs)
+}
+
+// TestDoubleCommonInSubFeatureBranches validates that merge-base returns two
+// common ancestors when two branches were partially merged (G, Q -> GQ1, GQ2)
+func (s *mergeBaseSuite) TestDoubleCommonInSubFeatureBranches(c *C) {
+ revs := []string{"G", "Q"}
+ expectedRevs := []string{"GQ1", "GQ2"}
+ s.AssertMergeBase(c, revs, expectedRevs)
+}
+
+// TestIndependentOnlyOne validates that Independents for one commit returns
+// that same commit (A -> A)
+func (s *mergeBaseSuite) TestIndependentOnlyOne(c *C) {
+ revs := []string{"A"}
+ expectedRevs := []string{"A"}
+ s.AssertIndependents(c, revs, expectedRevs)
+}
+
+// TestIndependentOnlyRepeated validates that Independents for one repeated commit
+// returns that same commit (A, A, A -> A)
+func (s *mergeBaseSuite) TestIndependentOnlyRepeated(c *C) {
+ revs := []string{"A", "A", "A"}
+ expectedRevs := []string{"A"}
+ s.AssertIndependents(c, revs, expectedRevs)
+}
+
+// TestIndependentWithRepeatedAncestors validates that Independents works well
+// when there are repeated ancestors (A, A, M, M, N -> A, N)
+func (s *mergeBaseSuite) TestIndependentWithRepeatedAncestors(c *C) {
+ revs := []string{"A", "A", "M", "M", "N"}
+ expectedRevs := []string{"A", "N"}
+ s.AssertIndependents(c, revs, expectedRevs)
+}
+
+// TestIndependentBeyondShortcut validates that Independents does not stop walking
+// every path as soon as one of them is already known (S, G, P -> S, G)
+func (s *mergeBaseSuite) TestIndependentBeyondShortcut(c *C) {
+ revs := []string{"S", "G", "P"}
+ expectedRevs := []string{"S", "G"}
+ s.AssertIndependents(c, revs, expectedRevs)
+}
+
+// TestIndependentBeyondShortcutBis validates that Independents does not stop walking
+// every path as soon as one of them is already known (CD1, CD2, M, N -> CD1, CD2)
+func (s *mergeBaseSuite) TestIndependentBeyondShortcutBis(c *C) {
+ revs := []string{"CD1", "CD2", "M", "N"}
+ expectedRevs := []string{"CD1", "CD2"}
+ s.AssertIndependents(c, revs, expectedRevs)
+}
+
+// TestIndependentWithPairOfAncestors validates that Independents excludes all
+// the ancestors (C, D, M, N -> C, D)
+func (s *mergeBaseSuite) TestIndependentWithPairOfAncestors(c *C) {
+ revs := []string{"C", "D", "M", "N"}
+ expectedRevs := []string{"C", "D"}
+ s.AssertIndependents(c, revs, expectedRevs)
+}
+
+// TestIndependentAcrossCrossMerges validates that Independents works well
+// along cross merges (C, G, dev, M, N -> C, G, dev)
+func (s *mergeBaseSuite) TestIndependentAcrossCrossMerges(c *C) {
+ revs := []string{"C", "G", "dev", "M", "N"}
+ expectedRevs := []string{"C", "G", "dev"}
+ s.AssertIndependents(c, revs, expectedRevs)
+}
+
+// TestIndependentChangingOrderRepetition validates that Independents works well
+// when the order and repetition are tricky (A, A^, A, N, N^ -> A, N)
+func (s *mergeBaseSuite) TestIndependentChangingOrderRepetition(c *C) {
+ revs := []string{"A", "A^", "A", "N", "N^"}
+ expectedRevs := []string{"A", "N"}
+ s.AssertIndependents(c, revs, expectedRevs)
+}
+
+// TestIndependentChangingOrder validates that Independents works well
+// when the order is tricky (A^^^, A^, A^^, A, N -> A, N)
+func (s *mergeBaseSuite) TestIndependentChangingOrder(c *C) {
+ revs := []string{"A^^^", "A^", "A^^", "A", "N"}
+ expectedRevs := []string{"A", "N"}
+ s.AssertIndependents(c, revs, expectedRevs)
+}
+
+// TestAncestor validates that IsAncestor returns true when the first commit can
+// be reached by walking from the second one through its parents ( A^^, A -> true )
+func (s *mergeBaseSuite) TestAncestor(c *C) {
+ revs := []string{"A^^", "A"}
+ s.AssertAncestor(c, revs, true)
+
+ revs = []string{"A", "A^^"}
+ s.AssertAncestor(c, revs, false)
+}
+
+// TestAncestorBeyondMerges validates that IsAncestor returns true also when the first
+// commit can only be reached from the second one by crossing merge commits ( M, G -> true )
+func (s *mergeBaseSuite) TestAncestorBeyondMerges(c *C) {
+ revs := []string{"M", "G"}
+ s.AssertAncestor(c, revs, true)
+
+ revs = []string{"G", "M"}
+ s.AssertAncestor(c, revs, false)
+}
+
+// TestAncestorSame validates that IsAncestor returns true when both commits are the same ( A, A -> true )
+func (s *mergeBaseSuite) TestAncestorSame(c *C) {
+ revs := []string{"A", "A"}
+ s.AssertAncestor(c, revs, true)
+}
+
+// TestAncestorUnrelated validates that IsAncestor returns false when the passed commits
+// do not share any history, no matter the order used ( M, N -> false )
+func (s *mergeBaseSuite) TestAncestorUnrelated(c *C) {
+ revs := []string{"M", "N"}
+ s.AssertAncestor(c, revs, false)
+
+ revs = []string{"N", "M"}
+ s.AssertAncestor(c, revs, false)
+}
diff --git a/plumbing/object/object.go b/plumbing/object/object.go
index e960e50..c48a18d 100644
--- a/plumbing/object/object.go
+++ b/plumbing/object/object.go
@@ -138,17 +138,19 @@ func (s *Signature) decodeTimeAndTimeZone(b []byte) {
return
}
- // Include a dummy year in this time.Parse() call to avoid a bug in Go:
- // https://github.com/golang/go/issues/19750
- //
- // Parsing the timezone with no other details causes the tl.Location() call
- // below to return time.Local instead of the parsed zone in some cases
- tl, err := time.Parse("2006 -0700", "1970 "+string(b[tzStart:tzStart+timeZoneLength]))
- if err != nil {
+ timezone := string(b[tzStart : tzStart+timeZoneLength])
+ tzhours, err1 := strconv.ParseInt(timezone[0:3], 10, 64)
+ tzmins, err2 := strconv.ParseInt(timezone[3:], 10, 64)
+ if err1 != nil || err2 != nil {
return
}
+ if tzhours < 0 {
+ tzmins *= -1
+ }
+
+ tz := time.FixedZone("", int(tzhours*60*60+tzmins*60))
- s.When = s.When.In(tl.Location())
+ s.When = s.When.In(tz)
}
func (s *Signature) encodeTimeAndTimeZone(w io.Writer) error {
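The replacement above parses git's `±hhmm` timezone field by hand and builds a time.FixedZone from it. A minimal standalone sketch of the same approach (a hypothetical parseZone helper, not the library code):

package main

import (
	"fmt"
	"strconv"
	"time"
)

// parseZone converts a git-style "±hhmm" timezone string into a *time.Location,
// mirroring the approach used above (sketch only; assumes a 5-character input).
func parseZone(tz string) (*time.Location, error) {
	if len(tz) != 5 {
		return nil, fmt.Errorf("unexpected timezone %q", tz)
	}
	hours, err := strconv.ParseInt(tz[0:3], 10, 64) // e.g. "+05" or "-07"
	if err != nil {
		return nil, err
	}
	mins, err := strconv.ParseInt(tz[3:], 10, 64) // e.g. "30" or "00"
	if err != nil {
		return nil, err
	}
	if hours < 0 {
		mins *= -1 // keep the minutes on the same side of UTC as the hours
	}
	return time.FixedZone("", int(hours*60*60+mins*60)), nil
}

func main() {
	loc, err := parseZone("+0530")
	if err != nil {
		panic(err)
	}
	// Prints noon expressed in the +05:30 fixed zone.
	fmt.Println(time.Date(2019, 1, 1, 12, 0, 0, 0, loc))
}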
diff --git a/plumbing/object/patch.go b/plumbing/object/patch.go
index 1efd0b1..32454ac 100644
--- a/plumbing/object/patch.go
+++ b/plumbing/object/patch.go
@@ -278,7 +278,7 @@ func printStat(fileStats []FileStat) string {
var scaleFactor float64
if longestTotalChange > heightOfHistogram {
// Scale down to heightOfHistogram.
- scaleFactor = float64(longestTotalChange / heightOfHistogram)
+ scaleFactor = longestTotalChange / heightOfHistogram
} else {
scaleFactor = 1.0
}
diff --git a/plumbing/object/patch_test.go b/plumbing/object/patch_test.go
index 47057fb..37944c3 100644
--- a/plumbing/object/patch_test.go
+++ b/plumbing/object/patch_test.go
@@ -19,6 +19,7 @@ func (s *PatchSuite) TestStatsWithSubmodules(c *C) {
fixtures.ByURL("https://github.com/git-fixtures/submodule.git").One().DotGit(), cache.NewObjectLRUDefault())
commit, err := GetCommit(storer, plumbing.NewHash("b685400c1f9316f350965a5993d350bc746b0bf4"))
+ c.Assert(err, IsNil)
tree, err := commit.Tree()
c.Assert(err, IsNil)
diff --git a/plumbing/object/tree.go b/plumbing/object/tree.go
index d30cf6e..d0b4fff 100644
--- a/plumbing/object/tree.go
+++ b/plumbing/object/tree.go
@@ -288,7 +288,7 @@ func (t *Tree) Encode(o plumbing.EncodedObject) (err error) {
return err
}
- if _, err = w.Write([]byte(entry.Hash[:])); err != nil {
+ if _, err = w.Write(entry.Hash[:]); err != nil {
return err
}
}
@@ -517,4 +517,4 @@ func simpleJoin(parent, child string) string {
return parent + "/" + child
}
return child
-} \ No newline at end of file
+}
diff --git a/plumbing/protocol/packp/advrefs.go b/plumbing/protocol/packp/advrefs.go
index 684e76a..487ee19 100644
--- a/plumbing/protocol/packp/advrefs.go
+++ b/plumbing/protocol/packp/advrefs.go
@@ -107,7 +107,7 @@ func (a *AdvRefs) resolveHead(s storer.ReferenceStorer) error {
return nil
}
- ref, err := s.Reference(plumbing.ReferenceName(plumbing.Master))
+ ref, err := s.Reference(plumbing.Master)
// check first if HEAD is pointing to master
if err == nil {
diff --git a/plumbing/protocol/packp/advrefs_decode.go b/plumbing/protocol/packp/advrefs_decode.go
index 1b4c62c..80f5b4e 100644
--- a/plumbing/protocol/packp/advrefs_decode.go
+++ b/plumbing/protocol/packp/advrefs_decode.go
@@ -53,7 +53,7 @@ func (d *advRefsDecoder) Decode(v *AdvRefs) error {
type decoderStateFn func(*advRefsDecoder) decoderStateFn
-// fills out the parser stiky error
+// fills out the parser sticky error
func (d *advRefsDecoder) error(format string, a ...interface{}) {
msg := fmt.Sprintf(
"pkt-line %d: %s", d.nLine,
@@ -281,7 +281,7 @@ func decodeShallow(p *advRefsDecoder) decoderStateFn {
}
if len(p.line) == 0 {
- return nil // succesfull parse of the advertised-refs message
+ return nil // successful parse of the advertised-refs message
}
return decodeShallow
diff --git a/plumbing/protocol/packp/capability/list.go b/plumbing/protocol/packp/capability/list.go
index 26a79b6..9609211 100644
--- a/plumbing/protocol/packp/capability/list.go
+++ b/plumbing/protocol/packp/capability/list.go
@@ -14,8 +14,8 @@ var (
// ErrArguments is returned if arguments are given with a capabilities that
// not supports arguments
ErrArguments = errors.New("arguments not allowed")
- // ErrEmtpyArgument is returned when an empty value is given
- ErrEmtpyArgument = errors.New("empty argument")
+ // ErrEmptyArgument is returned when an empty value is given
+ ErrEmptyArgument = errors.New("empty argument")
// ErrMultipleArguments multiple argument given to a capabilities that not
// support it
ErrMultipleArguments = errors.New("multiple arguments not allowed")
@@ -119,7 +119,7 @@ func (l *List) Add(c Capability, values ...string) error {
func (l *List) validateNoEmptyArgs(values []string) error {
for _, v := range values {
if v == "" {
- return ErrEmtpyArgument
+ return ErrEmptyArgument
}
}
return nil
diff --git a/plumbing/protocol/packp/capability/list_test.go b/plumbing/protocol/packp/capability/list_test.go
index 82dd63f..61b0b13 100644
--- a/plumbing/protocol/packp/capability/list_test.go
+++ b/plumbing/protocol/packp/capability/list_test.go
@@ -176,7 +176,7 @@ func (s *SuiteCapabilities) TestAddErrArgumentsNotAllowed(c *check.C) {
func (s *SuiteCapabilities) TestAddErrArguments(c *check.C) {
cap := NewList()
err := cap.Add(SymRef, "")
- c.Assert(err, check.Equals, ErrEmtpyArgument)
+ c.Assert(err, check.Equals, ErrEmptyArgument)
}
func (s *SuiteCapabilities) TestAddErrMultipleArguments(c *check.C) {
diff --git a/plumbing/protocol/packp/ulreq.go b/plumbing/protocol/packp/ulreq.go
index 74109d8..72895e3 100644
--- a/plumbing/protocol/packp/ulreq.go
+++ b/plumbing/protocol/packp/ulreq.go
@@ -68,8 +68,8 @@ func NewUploadRequest() *UploadRequest {
}
// NewUploadRequestFromCapabilities returns a pointer to a new UploadRequest
-// value, the request capabilities are filled with the most optiomal ones, based
-// on the adv value (advertaised capabilities), the UploadRequest generated it
+// value; the request capabilities are filled with the most optimal ones, based
+// on the adv value (advertised capabilities). The UploadRequest generated
// has no wants or shallows and an infinite depth.
func NewUploadRequestFromCapabilities(adv *capability.List) *UploadRequest {
r := NewUploadRequest()
diff --git a/plumbing/protocol/packp/ulreq_encode.go b/plumbing/protocol/packp/ulreq_encode.go
index 89a5986..dcfeb83 100644
--- a/plumbing/protocol/packp/ulreq_encode.go
+++ b/plumbing/protocol/packp/ulreq_encode.go
@@ -64,10 +64,10 @@ func (e *ulReqEncoder) encodeFirstWant() stateFn {
return nil
}
- return e.encodeAditionalWants
+ return e.encodeAdditionalWants
}
-func (e *ulReqEncoder) encodeAditionalWants() stateFn {
+func (e *ulReqEncoder) encodeAdditionalWants() stateFn {
last := e.data.Wants[0]
for _, w := range e.data.Wants[1:] {
if bytes.Equal(last[:], w[:]) {
diff --git a/plumbing/protocol/packp/updreq_decode.go b/plumbing/protocol/packp/updreq_decode.go
index c15d49c..59f095f 100644
--- a/plumbing/protocol/packp/updreq_decode.go
+++ b/plumbing/protocol/packp/updreq_decode.go
@@ -13,9 +13,9 @@ import (
)
var (
- shallowLineLength = len(shallow) + hashSize
- minCommandLength = hashSize*2 + 2 + 1
- minCommandAndCapsLenth = minCommandLength + 1
+ shallowLineLength = len(shallow) + hashSize
+ minCommandLength = hashSize*2 + 2 + 1
+ minCommandAndCapsLength = minCommandLength + 1
)
var (
@@ -46,7 +46,7 @@ func errInvalidShallowLineLength(got int) error {
func errInvalidCommandCapabilitiesLineLength(got int) error {
return errMalformedRequest(fmt.Sprintf(
"invalid command and capabilities line length: expected at least %d, got %d",
- minCommandAndCapsLenth, got))
+ minCommandAndCapsLength, got))
}
func errInvalidCommandLineLength(got int) error {
@@ -174,7 +174,7 @@ func (d *updReqDecoder) decodeCommandAndCapabilities() error {
return errMissingCapabilitiesDelimiter
}
- if len(b) < minCommandAndCapsLenth {
+ if len(b) < minCommandAndCapsLength {
return errInvalidCommandCapabilitiesLineLength(len(b))
}
@@ -225,7 +225,7 @@ func parseCommand(b []byte) (*Command, error) {
return nil, errInvalidNewObjId(err)
}
- return &Command{Old: oh, New: nh, Name: plumbing.ReferenceName(n)}, nil
+ return &Command{Old: oh, New: nh, Name: n}, nil
}
func parseHash(s string) (plumbing.Hash, error) {
diff --git a/plumbing/protocol/packp/uppackreq.go b/plumbing/protocol/packp/uppackreq.go
index 1144139..831ef8f 100644
--- a/plumbing/protocol/packp/uppackreq.go
+++ b/plumbing/protocol/packp/uppackreq.go
@@ -27,8 +27,8 @@ func NewUploadPackRequest() *UploadPackRequest {
}
// NewUploadPackRequestFromCapabilities creates a new UploadPackRequest and
-// returns a pointer. The request capabilities are filled with the most optiomal
-// ones, based on the adv value (advertaised capabilities), the UploadPackRequest
+// returns a pointer. The request capabilities are filled with the most optimal
+// ones, based on the adv value (advertised capabilities), the UploadPackRequest
// it has no wants, haves or shallows and an infinite depth
func NewUploadPackRequestFromCapabilities(adv *capability.List) *UploadPackRequest {
ur := NewUploadRequestFromCapabilities(adv)
diff --git a/plumbing/storer/object.go b/plumbing/storer/object.go
index 98d1ec3..c84960a 100644
--- a/plumbing/storer/object.go
+++ b/plumbing/storer/object.go
@@ -141,7 +141,7 @@ func NewEncodedObjectLookupIter(
// Next returns the next object from the iterator. If the iterator has reached
// the end it will return io.EOF as an error. If the object can't be found in
// the object storage, it will return plumbing.ErrObjectNotFound as an error.
-// If the object is retreieved successfully error will be nil.
+// If the object is retrieved successfully error will be nil.
func (iter *EncodedObjectLookupIter) Next() (plumbing.EncodedObject, error) {
if iter.pos >= len(iter.series) {
return nil, io.EOF
@@ -187,7 +187,7 @@ func NewEncodedObjectSliceIter(series []plumbing.EncodedObject) *EncodedObjectSl
}
// Next returns the next object from the iterator. If the iterator has reached
-// the end it will return io.EOF as an error. If the object is retreieved
+// the end it will return io.EOF as an error. If the object is retrieved
// successfully error will be nil.
func (iter *EncodedObjectSliceIter) Next() (plumbing.EncodedObject, error) {
if len(iter.series) == 0 {
diff --git a/plumbing/transport/http/common.go b/plumbing/transport/http/common.go
index 5d3535e..16ff930 100644
--- a/plumbing/transport/http/common.go
+++ b/plumbing/transport/http/common.go
@@ -84,7 +84,7 @@ var DefaultClient = NewClient(nil)
// Unless a properly initialized client is given, it will fall back into
// `http.DefaultClient`.
//
-// Note that for HTTP client cannot distinguist between private repositories and
+// Note that the HTTP client cannot distinguish between private repositories and
// unexistent repositories on GitHub. So it returns `ErrAuthorizationRequired`
// for both.
func NewClient(c *http.Client) transport.Transport {
@@ -139,7 +139,7 @@ func (s *session) ApplyAuthToRequest(req *http.Request) {
return
}
- s.auth.setAuth(req)
+ s.auth.SetAuth(req)
}
func (s *session) ModifyEndpointIfRedirect(res *http.Response) {
@@ -175,7 +175,7 @@ func (*session) Close() error {
// AuthMethod is concrete implementation of common.AuthMethod for HTTP services
type AuthMethod interface {
transport.AuthMethod
- setAuth(r *http.Request)
+ SetAuth(r *http.Request)
}
func basicAuthFromEndpoint(ep *transport.Endpoint) *BasicAuth {
@@ -192,7 +192,7 @@ type BasicAuth struct {
Username, Password string
}
-func (a *BasicAuth) setAuth(r *http.Request) {
+func (a *BasicAuth) SetAuth(r *http.Request) {
if a == nil {
return
}
@@ -226,7 +226,7 @@ type TokenAuth struct {
Token string
}
-func (a *TokenAuth) setAuth(r *http.Request) {
+func (a *TokenAuth) SetAuth(r *http.Request) {
if a == nil {
return
}
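Exporting SetAuth lets code outside the package supply its own HTTP authentication. A minimal sketch of a hypothetical header-based method, assuming the embedded transport.AuthMethod still only requires Name and String:

package main

import (
	"net/http"

	githttp "gopkg.in/src-d/go-git.v4/plumbing/transport/http"
)

// headerAuth is a hypothetical AuthMethod that injects a fixed header into
// every request, something only possible now that SetAuth is exported.
type headerAuth struct {
	key, value string
}

func (h *headerAuth) Name() string   { return "http-header-auth" }
func (h *headerAuth) String() string { return h.Name() }

// SetAuth satisfies the exported interface shown in the hunk above.
func (h *headerAuth) SetAuth(r *http.Request) {
	if h == nil {
		return
	}
	r.Header.Set(h.key, h.value)
}

// Compile-time check that headerAuth implements githttp.AuthMethod.
var _ githttp.AuthMethod = (*headerAuth)(nil)

func main() {}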
diff --git a/plumbing/transport/http/common_test.go b/plumbing/transport/http/common_test.go
index 8b300e8..d9e0636 100644
--- a/plumbing/transport/http/common_test.go
+++ b/plumbing/transport/http/common_test.go
@@ -64,7 +64,7 @@ func (s *ClientSuite) TestNewTokenAuth(c *C) {
// Check header is set correctly
req, err := http.NewRequest("GET", "https://github.com/git-fixtures/basic", nil)
c.Assert(err, Equals, nil)
- a.setAuth(req)
+ a.SetAuth(req)
c.Assert(req.Header.Get("Authorization"), Equals, "Bearer OAUTH-TOKEN-TEXT")
}
diff --git a/plumbing/transport/internal/common/common.go b/plumbing/transport/internal/common/common.go
index 00497f3..cb1b6da 100644
--- a/plumbing/transport/internal/common/common.go
+++ b/plumbing/transport/internal/common/common.go
@@ -66,7 +66,7 @@ type Command interface {
Close() error
}
-// CommandKiller expands the Command interface, enableing it for being killed.
+// CommandKiller expands the Command interface, enabling it to be killed.
type CommandKiller interface {
// Kill and close the session whatever the state it is. It will block until
// the command is terminated.
diff --git a/plumbing/transport/internal/common/common_test.go b/plumbing/transport/internal/common/common_test.go
index b2f035d..c60ef3b 100644
--- a/plumbing/transport/internal/common/common_test.go
+++ b/plumbing/transport/internal/common/common_test.go
@@ -13,7 +13,7 @@ type CommonSuite struct{}
var _ = Suite(&CommonSuite{})
-func (s *CommonSuite) TestIsRepoNotFoundErrorForUnknowSource(c *C) {
+func (s *CommonSuite) TestIsRepoNotFoundErrorForUnknownSource(c *C) {
msg := "unknown system is complaining of something very sad :("
isRepoNotFound := isRepoNotFoundError(msg)
diff --git a/plumbing/transport/server/server.go b/plumbing/transport/server/server.go
index 20bd12e..8e0dcc1 100644
--- a/plumbing/transport/server/server.go
+++ b/plumbing/transport/server/server.go
@@ -286,11 +286,6 @@ func (s *rpSession) updateReferences(req *packp.ReferenceUpdateRequest) {
continue
}
- if err != nil {
- s.setStatus(cmd.Name, err)
- continue
- }
-
ref := plumbing.NewHashReference(cmd.Name, cmd.New)
err := s.storer.SetReference(ref)
s.setStatus(cmd.Name, err)
diff --git a/plumbing/transport/ssh/auth_method.go b/plumbing/transport/ssh/auth_method.go
index dbb47c5..1e5c383 100644
--- a/plumbing/transport/ssh/auth_method.go
+++ b/plumbing/transport/ssh/auth_method.go
@@ -61,7 +61,7 @@ func (a *KeyboardInteractive) ClientConfig() (*ssh.ClientConfig, error) {
return a.SetHostKeyCallback(&ssh.ClientConfig{
User: a.User,
Auth: []ssh.AuthMethod{
- ssh.KeyboardInteractiveChallenge(a.Challenge),
+ a.Challenge,
},
})
}
diff --git a/prune_test.go b/prune_test.go
index 670cd07..2279636 100644
--- a/prune_test.go
+++ b/prune_test.go
@@ -56,6 +56,8 @@ func (s *PruneSuite) testPrune(c *C, deleteTime time.Time) {
newCount++
return nil
})
+ c.Assert(err, IsNil)
+
if deleteTime.IsZero() {
c.Assert(newCount < count, Equals, true)
} else {
diff --git a/remote.go b/remote.go
index 8060409..baee7a0 100644
--- a/remote.go
+++ b/remote.go
@@ -45,7 +45,10 @@ type Remote struct {
s storage.Storer
}
-func newRemote(s storage.Storer, c *config.RemoteConfig) *Remote {
+// NewRemote creates a new Remote.
+// The intended purpose is to use the Remote for tasks such as listing remote references (like running git ls-remote).
+// Otherwise, Remotes should be created through a Repository.
+func NewRemote(s storage.Storer, c *config.RemoteConfig) *Remote {
return &Remote{s: s, c: c}
}
@@ -168,7 +171,17 @@ func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) {
}
}
- rs, err := pushHashes(ctx, s, r.s, req, hashesToPush, r.useRefDeltas(ar))
+ if len(hashesToPush) == 0 {
+ allDelete = true
+ for _, command := range req.Commands {
+ if command.Action() != packp.Delete {
+ allDelete = false
+ break
+ }
+ }
+ }
+
+ rs, err := pushHashes(ctx, s, r.s, req, hashesToPush, r.useRefDeltas(ar), allDelete)
if err != nil {
return err
}
@@ -201,7 +214,7 @@ func (r *Remote) newReferenceUpdateRequest(
}
}
- if err := r.addReferencesToUpdate(o.RefSpecs, localRefs, remoteRefs, req); err != nil {
+ if err := r.addReferencesToUpdate(o.RefSpecs, localRefs, remoteRefs, req, o.Prune); err != nil {
return nil, err
}
@@ -389,6 +402,7 @@ func (r *Remote) addReferencesToUpdate(
localRefs []*plumbing.Reference,
remoteRefs storer.ReferenceStorer,
req *packp.ReferenceUpdateRequest,
+ prune bool,
) error {
// This references dictionary will be used to search references by name.
refsDict := make(map[string]*plumbing.Reference)
@@ -398,7 +412,7 @@ func (r *Remote) addReferencesToUpdate(
for _, rs := range refspecs {
if rs.IsDelete() {
- if err := r.deleteReferences(rs, remoteRefs, req); err != nil {
+ if err := r.deleteReferences(rs, remoteRefs, refsDict, req, false); err != nil {
return err
}
} else {
@@ -406,6 +420,12 @@ func (r *Remote) addReferencesToUpdate(
if err != nil {
return err
}
+
+ if prune {
+ if err := r.deleteReferences(rs, remoteRefs, refsDict, req, true); err != nil {
+ return err
+ }
+ }
}
}
@@ -441,7 +461,10 @@ func (r *Remote) addOrUpdateReferences(
}
func (r *Remote) deleteReferences(rs config.RefSpec,
- remoteRefs storer.ReferenceStorer, req *packp.ReferenceUpdateRequest) error {
+ remoteRefs storer.ReferenceStorer,
+ refsDict map[string]*plumbing.Reference,
+ req *packp.ReferenceUpdateRequest,
+ prune bool) error {
iter, err := remoteRefs.IterReferences()
if err != nil {
return err
@@ -452,8 +475,19 @@ func (r *Remote) deleteReferences(rs config.RefSpec,
return nil
}
- if rs.Dst("") != ref.Name() {
- return nil
+ if prune {
+ rs := rs.Reverse()
+ if !rs.Match(ref.Name()) {
+ return nil
+ }
+
+ if _, ok := refsDict[rs.Dst(ref.Name()).String()]; ok {
+ return nil
+ }
+ } else {
+ if rs.Dst("") != ref.Name() {
+ return nil
+ }
}
cmd := &packp.Command{
@@ -903,7 +937,7 @@ func (r *Remote) updateLocalReferenceStorage(
updated = true
}
- if err == nil && forceNeeded {
+ if forceNeeded {
err = ErrForceNeeded
}
@@ -1012,10 +1046,11 @@ func pushHashes(
req *packp.ReferenceUpdateRequest,
hs []plumbing.Hash,
useRefDeltas bool,
+ allDelete bool,
) (*packp.ReportStatus, error) {
rd, wr := io.Pipe()
- req.Packfile = rd
+
config, err := s.Config()
if err != nil {
return nil, err
@@ -1026,15 +1061,20 @@ func pushHashes(
// to the channel.
done := make(chan error, 1)
- go func() {
- e := packfile.NewEncoder(wr, s, useRefDeltas)
- if _, err := e.Encode(hs, config.Pack.Window); err != nil {
- done <- wr.CloseWithError(err)
- return
- }
+ if !allDelete {
+ req.Packfile = rd
+ go func() {
+ e := packfile.NewEncoder(wr, s, useRefDeltas)
+ if _, err := e.Encode(hs, config.Pack.Window); err != nil {
+ done <- wr.CloseWithError(err)
+ return
+ }
- done <- wr.Close()
- }()
+ done <- wr.Close()
+ }()
+ } else {
+ close(done)
+ }
rs, err := sess.ReceivePack(ctx, req)
if err != nil {
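The push path now skips encoding a packfile when every command in the request deletes a reference, since there are no objects to send. A simplified restatement of that check (the real code only computes it when hashesToPush is empty), assuming packp.Command.Action() and packp.Delete behave as used above:

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
)

// allDeleteCommands reports whether every command removes a reference, in
// which case no packfile needs to be generated for the push.
func allDeleteCommands(cmds []*packp.Command) bool {
	if len(cmds) == 0 {
		return false
	}
	for _, cmd := range cmds {
		if cmd.Action() != packp.Delete {
			return false
		}
	}
	return true
}

func main() {
	del := &packp.Command{
		Name: plumbing.ReferenceName("refs/heads/gone"),
		Old:  plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"),
		New:  plumbing.ZeroHash,
	}
	fmt.Println(allDeleteCommands([]*packp.Command{del})) // true
}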
diff --git a/remote_test.go b/remote_test.go
index 58a0598..a45d814 100644
--- a/remote_test.go
+++ b/remote_test.go
@@ -21,7 +21,7 @@ import (
. "gopkg.in/check.v1"
"gopkg.in/src-d/go-billy.v4/osfs"
- "gopkg.in/src-d/go-git-fixtures.v3"
+ fixtures "gopkg.in/src-d/go-git-fixtures.v3"
)
type RemoteSuite struct {
@@ -31,32 +31,32 @@ type RemoteSuite struct {
var _ = Suite(&RemoteSuite{})
func (s *RemoteSuite) TestFetchInvalidEndpoint(c *C) {
- r := newRemote(nil, &config.RemoteConfig{Name: "foo", URLs: []string{"http://\\"}})
+ r := NewRemote(nil, &config.RemoteConfig{Name: "foo", URLs: []string{"http://\\"}})
err := r.Fetch(&FetchOptions{RemoteName: "foo"})
c.Assert(err, ErrorMatches, ".*invalid character.*")
}
func (s *RemoteSuite) TestFetchNonExistentEndpoint(c *C) {
- r := newRemote(nil, &config.RemoteConfig{Name: "foo", URLs: []string{"ssh://non-existent/foo.git"}})
+ r := NewRemote(nil, &config.RemoteConfig{Name: "foo", URLs: []string{"ssh://non-existent/foo.git"}})
err := r.Fetch(&FetchOptions{})
c.Assert(err, NotNil)
}
func (s *RemoteSuite) TestFetchInvalidSchemaEndpoint(c *C) {
- r := newRemote(nil, &config.RemoteConfig{Name: "foo", URLs: []string{"qux://foo"}})
+ r := NewRemote(nil, &config.RemoteConfig{Name: "foo", URLs: []string{"qux://foo"}})
err := r.Fetch(&FetchOptions{})
c.Assert(err, ErrorMatches, ".*unsupported scheme.*")
}
func (s *RemoteSuite) TestFetchInvalidFetchOptions(c *C) {
- r := newRemote(nil, &config.RemoteConfig{Name: "foo", URLs: []string{"qux://foo"}})
+ r := NewRemote(nil, &config.RemoteConfig{Name: "foo", URLs: []string{"qux://foo"}})
invalid := config.RefSpec("^*$ñ")
err := r.Fetch(&FetchOptions{RefSpecs: []config.RefSpec{invalid}})
c.Assert(err, Equals, config.ErrRefSpecMalformedSeparator)
}
func (s *RemoteSuite) TestFetchWildcard(c *C) {
- r := newRemote(memory.NewStorage(), &config.RemoteConfig{
+ r := NewRemote(memory.NewStorage(), &config.RemoteConfig{
URLs: []string{s.GetBasicLocalRepositoryURL()},
})
@@ -72,7 +72,7 @@ func (s *RemoteSuite) TestFetchWildcard(c *C) {
}
func (s *RemoteSuite) TestFetchWildcardTags(c *C) {
- r := newRemote(memory.NewStorage(), &config.RemoteConfig{
+ r := NewRemote(memory.NewStorage(), &config.RemoteConfig{
URLs: []string{s.GetLocalRepositoryURL(fixtures.ByTag("tags").One())},
})
@@ -91,7 +91,7 @@ func (s *RemoteSuite) TestFetchWildcardTags(c *C) {
}
func (s *RemoteSuite) TestFetch(c *C) {
- r := newRemote(memory.NewStorage(), &config.RemoteConfig{
+ r := NewRemote(memory.NewStorage(), &config.RemoteConfig{
URLs: []string{s.GetLocalRepositoryURL(fixtures.ByTag("tags").One())},
})
@@ -105,7 +105,7 @@ func (s *RemoteSuite) TestFetch(c *C) {
}
func (s *RemoteSuite) TestFetchNonExistantReference(c *C) {
- r := newRemote(memory.NewStorage(), &config.RemoteConfig{
+ r := NewRemote(memory.NewStorage(), &config.RemoteConfig{
URLs: []string{s.GetLocalRepositoryURL(fixtures.ByTag("tags").One())},
})
@@ -119,7 +119,7 @@ func (s *RemoteSuite) TestFetchNonExistantReference(c *C) {
}
func (s *RemoteSuite) TestFetchContext(c *C) {
- r := newRemote(memory.NewStorage(), &config.RemoteConfig{
+ r := NewRemote(memory.NewStorage(), &config.RemoteConfig{
URLs: []string{s.GetLocalRepositoryURL(fixtures.ByTag("tags").One())},
})
@@ -135,7 +135,7 @@ func (s *RemoteSuite) TestFetchContext(c *C) {
}
func (s *RemoteSuite) TestFetchWithAllTags(c *C) {
- r := newRemote(memory.NewStorage(), &config.RemoteConfig{
+ r := NewRemote(memory.NewStorage(), &config.RemoteConfig{
URLs: []string{s.GetLocalRepositoryURL(fixtures.ByTag("tags").One())},
})
@@ -155,7 +155,7 @@ func (s *RemoteSuite) TestFetchWithAllTags(c *C) {
}
func (s *RemoteSuite) TestFetchWithNoTags(c *C) {
- r := newRemote(memory.NewStorage(), &config.RemoteConfig{
+ r := NewRemote(memory.NewStorage(), &config.RemoteConfig{
URLs: []string{s.GetLocalRepositoryURL(fixtures.ByTag("tags").One())},
})
@@ -171,7 +171,7 @@ func (s *RemoteSuite) TestFetchWithNoTags(c *C) {
}
func (s *RemoteSuite) TestFetchWithDepth(c *C) {
- r := newRemote(memory.NewStorage(), &config.RemoteConfig{
+ r := NewRemote(memory.NewStorage(), &config.RemoteConfig{
URLs: []string{s.GetBasicLocalRepositoryURL()},
})
@@ -212,7 +212,7 @@ func (s *RemoteSuite) TestFetchWithProgress(c *C) {
sto := memory.NewStorage()
buf := bytes.NewBuffer(nil)
- r := newRemote(sto, &config.RemoteConfig{Name: "foo", URLs: []string{url}})
+ r := NewRemote(sto, &config.RemoteConfig{Name: "foo", URLs: []string{url}})
refspec := config.RefSpec("+refs/heads/*:refs/remotes/origin/*")
err := r.Fetch(&FetchOptions{
@@ -248,7 +248,7 @@ func (s *RemoteSuite) TestFetchWithPackfileWriter(c *C) {
mock := &mockPackfileWriter{Storer: fss}
url := s.GetBasicLocalRepositoryURL()
- r := newRemote(mock, &config.RemoteConfig{Name: "foo", URLs: []string{url}})
+ r := NewRemote(mock, &config.RemoteConfig{Name: "foo", URLs: []string{url}})
refspec := config.RefSpec("+refs/heads/*:refs/remotes/origin/*")
err = r.Fetch(&FetchOptions{
@@ -276,7 +276,7 @@ func (s *RemoteSuite) TestFetchNoErrAlreadyUpToDate(c *C) {
}
func (s *RemoteSuite) TestFetchNoErrAlreadyUpToDateButStillUpdateLocalRemoteRefs(c *C) {
- r := newRemote(memory.NewStorage(), &config.RemoteConfig{
+ r := NewRemote(memory.NewStorage(), &config.RemoteConfig{
URLs: []string{s.GetBasicLocalRepositoryURL()},
})
@@ -313,7 +313,7 @@ func (s *RemoteSuite) TestFetchNoErrAlreadyUpToDateWithNonCommitObjects(c *C) {
}
func (s *RemoteSuite) doTestFetchNoErrAlreadyUpToDate(c *C, url string) {
- r := newRemote(memory.NewStorage(), &config.RemoteConfig{URLs: []string{url}})
+ r := NewRemote(memory.NewStorage(), &config.RemoteConfig{URLs: []string{url}})
o := &FetchOptions{
RefSpecs: []config.RefSpec{
@@ -328,7 +328,7 @@ func (s *RemoteSuite) doTestFetchNoErrAlreadyUpToDate(c *C, url string) {
}
func (s *RemoteSuite) testFetchFastForward(c *C, sto storage.Storer) {
- r := newRemote(sto, &config.RemoteConfig{
+ r := NewRemote(sto, &config.RemoteConfig{
URLs: []string{s.GetBasicLocalRepositoryURL()},
})
@@ -386,7 +386,7 @@ func (s *RemoteSuite) TestFetchFastForwardFS(c *C) {
}
func (s *RemoteSuite) TestString(c *C) {
- r := newRemote(nil, &config.RemoteConfig{
+ r := NewRemote(nil, &config.RemoteConfig{
Name: "foo",
URLs: []string{"https://github.com/git-fixtures/basic.git"},
})
@@ -405,7 +405,7 @@ func (s *RemoteSuite) TestPushToEmptyRepository(c *C) {
srcFs := fixtures.Basic().One().DotGit()
sto := filesystem.NewStorage(srcFs, cache.NewObjectLRUDefault())
- r := newRemote(sto, &config.RemoteConfig{
+ r := NewRemote(sto, &config.RemoteConfig{
Name: DefaultRemoteName,
URLs: []string{url},
})
@@ -442,7 +442,7 @@ func (s *RemoteSuite) TestPushContext(c *C) {
fs := fixtures.ByURL("https://github.com/git-fixtures/tags.git").One().DotGit()
sto := filesystem.NewStorage(fs, cache.NewObjectLRUDefault())
- r := newRemote(sto, &config.RemoteConfig{
+ r := NewRemote(sto, &config.RemoteConfig{
Name: DefaultRemoteName,
URLs: []string{url},
})
@@ -471,7 +471,7 @@ func (s *RemoteSuite) TestPushTags(c *C) {
fs := fixtures.ByURL("https://github.com/git-fixtures/tags.git").One().DotGit()
sto := filesystem.NewStorage(fs, cache.NewObjectLRUDefault())
- r := newRemote(sto, &config.RemoteConfig{
+ r := NewRemote(sto, &config.RemoteConfig{
Name: DefaultRemoteName,
URLs: []string{url},
})
@@ -494,7 +494,7 @@ func (s *RemoteSuite) TestPushNoErrAlreadyUpToDate(c *C) {
fs := fixtures.Basic().One().DotGit()
sto := filesystem.NewStorage(fs, cache.NewObjectLRUDefault())
- r := newRemote(sto, &config.RemoteConfig{
+ r := NewRemote(sto, &config.RemoteConfig{
Name: DefaultRemoteName,
URLs: []string{fs.Root()},
})
@@ -564,7 +564,7 @@ func (s *RemoteSuite) TestPushForce(c *C) {
dstSto := filesystem.NewStorage(dstFs, cache.NewObjectLRUDefault())
url := dstFs.Root()
- r := newRemote(sto, &config.RemoteConfig{
+ r := NewRemote(sto, &config.RemoteConfig{
Name: DefaultRemoteName,
URLs: []string{url},
})
@@ -583,6 +583,63 @@ func (s *RemoteSuite) TestPushForce(c *C) {
c.Assert(newRef, Not(DeepEquals), oldRef)
}
+func (s *RemoteSuite) TestPushPrune(c *C) {
+ fs := fixtures.Basic().One().DotGit()
+ url := c.MkDir()
+ server, err := PlainClone(url, true, &CloneOptions{
+ URL: fs.Root(),
+ })
+ c.Assert(err, IsNil)
+
+ r, err := PlainClone(c.MkDir(), true, &CloneOptions{
+ URL: url,
+ })
+ c.Assert(err, IsNil)
+
+ tag, err := r.Reference(plumbing.ReferenceName("refs/tags/v1.0.0"), true)
+ c.Assert(err, IsNil)
+
+ err = r.DeleteTag("v1.0.0")
+ c.Assert(err, IsNil)
+
+ remote, err := r.Remote(DefaultRemoteName)
+ c.Assert(err, IsNil)
+
+ ref, err := r.Reference(plumbing.ReferenceName("refs/heads/master"), true)
+ c.Assert(err, IsNil)
+
+ err = remote.Push(&PushOptions{
+ RefSpecs: []config.RefSpec{
+ config.RefSpec("refs/heads/*:refs/heads/*"),
+ },
+ Prune: true,
+ })
+ c.Assert(err, Equals, NoErrAlreadyUpToDate)
+
+ AssertReferences(c, server, map[string]string{
+ "refs/tags/v1.0.0": tag.Hash().String(),
+ })
+
+ err = remote.Push(&PushOptions{
+ RefSpecs: []config.RefSpec{
+ config.RefSpec("*:*"),
+ },
+ Prune: true,
+ })
+ c.Assert(err, IsNil)
+
+ AssertReferences(c, server, map[string]string{
+ "refs/remotes/origin/master": ref.Hash().String(),
+ })
+
+ ref, err = server.Reference(plumbing.ReferenceName("refs/tags/v1.0.0"), true)
+ c.Assert(err, Equals, plumbing.ErrReferenceNotFound)
+}
+
func (s *RemoteSuite) TestPushNewReference(c *C) {
fs := fixtures.Basic().One().DotGit()
url := c.MkDir()
@@ -654,32 +711,32 @@ func (s *RemoteSuite) TestPushNewReferenceAndDeleteInBatch(c *C) {
}
func (s *RemoteSuite) TestPushInvalidEndpoint(c *C) {
- r := newRemote(nil, &config.RemoteConfig{Name: "foo", URLs: []string{"http://\\"}})
+ r := NewRemote(nil, &config.RemoteConfig{Name: "foo", URLs: []string{"http://\\"}})
err := r.Push(&PushOptions{RemoteName: "foo"})
c.Assert(err, ErrorMatches, ".*invalid character.*")
}
func (s *RemoteSuite) TestPushNonExistentEndpoint(c *C) {
- r := newRemote(nil, &config.RemoteConfig{Name: "foo", URLs: []string{"ssh://non-existent/foo.git"}})
+ r := NewRemote(nil, &config.RemoteConfig{Name: "foo", URLs: []string{"ssh://non-existent/foo.git"}})
err := r.Push(&PushOptions{})
c.Assert(err, NotNil)
}
func (s *RemoteSuite) TestPushInvalidSchemaEndpoint(c *C) {
- r := newRemote(nil, &config.RemoteConfig{Name: "origin", URLs: []string{"qux://foo"}})
+ r := NewRemote(nil, &config.RemoteConfig{Name: "origin", URLs: []string{"qux://foo"}})
err := r.Push(&PushOptions{})
c.Assert(err, ErrorMatches, ".*unsupported scheme.*")
}
func (s *RemoteSuite) TestPushInvalidFetchOptions(c *C) {
- r := newRemote(nil, &config.RemoteConfig{Name: "foo", URLs: []string{"qux://foo"}})
+ r := NewRemote(nil, &config.RemoteConfig{Name: "foo", URLs: []string{"qux://foo"}})
invalid := config.RefSpec("^*$ñ")
err := r.Push(&PushOptions{RefSpecs: []config.RefSpec{invalid}})
c.Assert(err, Equals, config.ErrRefSpecMalformedSeparator)
}
func (s *RemoteSuite) TestPushInvalidRefSpec(c *C) {
- r := newRemote(nil, &config.RemoteConfig{
+ r := NewRemote(nil, &config.RemoteConfig{
Name: DefaultRemoteName,
URLs: []string{"some-url"},
})
@@ -692,7 +749,7 @@ func (s *RemoteSuite) TestPushInvalidRefSpec(c *C) {
}
func (s *RemoteSuite) TestPushWrongRemoteName(c *C) {
- r := newRemote(nil, &config.RemoteConfig{
+ r := NewRemote(nil, &config.RemoteConfig{
Name: DefaultRemoteName,
URLs: []string{"some-url"},
})
@@ -729,7 +786,7 @@ func (s *RemoteSuite) TestGetHaves(c *C) {
func (s *RemoteSuite) TestList(c *C) {
repo := fixtures.Basic().One()
- remote := newRemote(memory.NewStorage(), &config.RemoteConfig{
+ remote := NewRemote(memory.NewStorage(), &config.RemoteConfig{
Name: DefaultRemoteName,
URLs: []string{repo.URL},
})
@@ -784,7 +841,7 @@ func (s *RemoteSuite) TestUpdateShallows(c *C) {
{nil, hashes[0:6]},
}
- remote := newRemote(memory.NewStorage(), &config.RemoteConfig{
+ remote := NewRemote(memory.NewStorage(), &config.RemoteConfig{
Name: DefaultRemoteName,
})
@@ -817,7 +874,7 @@ func (s *RemoteSuite) TestUseRefDeltas(c *C) {
fs := fixtures.ByURL("https://github.com/git-fixtures/tags.git").One().DotGit()
sto := filesystem.NewStorage(fs, cache.NewObjectLRUDefault())
- r := newRemote(sto, &config.RemoteConfig{
+ r := NewRemote(sto, &config.RemoteConfig{
Name: DefaultRemoteName,
URLs: []string{url},
})
diff --git a/repository.go b/repository.go
index a94dc2f..1e3e339 100644
--- a/repository.go
+++ b/repository.go
@@ -451,7 +451,7 @@ func (r *Repository) Remote(name string) (*Remote, error) {
return nil, ErrRemoteNotFound
}
- return newRemote(r.Storer, c), nil
+ return NewRemote(r.Storer, c), nil
}
// Remotes returns a list with all the remotes
@@ -465,7 +465,7 @@ func (r *Repository) Remotes() ([]*Remote, error) {
var i int
for _, c := range cfg.Remotes {
- remotes[i] = newRemote(r.Storer, c)
+ remotes[i] = NewRemote(r.Storer, c)
i++
}
@@ -478,7 +478,7 @@ func (r *Repository) CreateRemote(c *config.RemoteConfig) (*Remote, error) {
return nil, err
}
- remote := newRemote(r.Storer, c)
+ remote := NewRemote(r.Storer, c)
cfg, err := r.Storer.Config()
if err != nil {
@@ -504,7 +504,7 @@ func (r *Repository) CreateRemoteAnonymous(c *config.RemoteConfig) (*Remote, err
return nil, ErrAnonymousRemoteName
}
- remote := newRemote(r.Storer, c)
+ remote := NewRemote(r.Storer, c)
return remote, nil
}
@@ -1067,6 +1067,14 @@ func (r *Repository) Log(o *LogOptions) (object.CommitIter, error) {
// for `git log --all` also check parent (if the next commit comes from the real parent)
it = r.logWithFile(*o.FileName, it, o.All)
}
+ if o.PathFilter != nil {
+ it = r.logWithPathFilter(o.PathFilter, it, o.All)
+ }
+
+ if o.Since != nil || o.Until != nil {
+ limitOptions := object.LogLimitOptions{Since: o.Since, Until: o.Until}
+ it = r.logWithLimit(it, limitOptions)
+ }
return it, nil
}
@@ -1094,7 +1102,25 @@ func (r *Repository) logAll(commitIterFunc func(*object.Commit) object.CommitIte
}
func (*Repository) logWithFile(fileName string, commitIter object.CommitIter, checkParent bool) object.CommitIter {
- return object.NewCommitFileIterFromIter(fileName, commitIter, checkParent)
+ return object.NewCommitPathIterFromIter(
+ func(path string) bool {
+ return path == fileName
+ },
+ commitIter,
+ checkParent,
+ )
+}
+
+func (*Repository) logWithPathFilter(pathFilter func(string) bool, commitIter object.CommitIter, checkParent bool) object.CommitIter {
+ return object.NewCommitPathIterFromIter(
+ pathFilter,
+ commitIter,
+ checkParent,
+ )
+}
+
+func (*Repository) logWithLimit(commitIter object.CommitIter, limitOptions object.LogLimitOptions) object.CommitIter {
+ return object.NewCommitLimitIterFromIter(commitIter, limitOptions)
}
func commitIterFunc(order LogOrder) func(c *object.Commit) object.CommitIter {
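Repository.Log now threads PathFilter and Since/Until through to the new commit iterators. A usage sketch, assuming the LogOptions fields added by this change (PathFilter, Since, Until) keep the shapes used in the hunk above:

package main

import (
	"fmt"
	"strings"
	"time"

	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

func main() {
	repo, err := git.PlainOpen(".")
	if err != nil {
		panic(err)
	}

	since := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)

	// Only commits after `since` that touch a .go file are yielded.
	iter, err := repo.Log(&git.LogOptions{
		PathFilter: func(path string) bool {
			return strings.HasSuffix(path, ".go")
		},
		Since: &since,
	})
	if err != nil {
		panic(err)
	}
	defer iter.Close()

	err = iter.ForEach(func(c *object.Commit) error {
		fmt.Println(c.Hash, strings.SplitN(c.Message, "\n", 2)[0])
		return nil
	})
	if err != nil {
		panic(err)
	}
}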
diff --git a/repository_plan9_test.go b/repository_plan9_test.go
new file mode 100644
index 0000000..00ebeed
--- /dev/null
+++ b/repository_plan9_test.go
@@ -0,0 +1,47 @@
+package git
+
+import (
+ "fmt"
+ "strings"
+)
+
+// preReceiveHook returns the bytes of a pre-receive hook script
+// that prints m before exiting successfully
+func preReceiveHook(m string) []byte {
+ return []byte(fmt.Sprintf("#!/bin/rc\necho -n %s\n", quote(m)))
+}
+
+const quoteChar = '\''
+
+func needsQuote(s string) bool {
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if c == quoteChar || c <= ' ' { // quote, blanks, or control characters
+ return true
+ }
+ }
+ return false
+}
+
+// quote adds single quotes to s in the style of rc(1) if they are needed.
+// The behaviour should be identical to Plan 9's quote(3).
+func quote(s string) string {
+ if s == "" {
+ return "''"
+ }
+ if !needsQuote(s) {
+ return s
+ }
+ var b strings.Builder
+ b.Grow(10 + len(s)) // Enough room for a few quotes
+ b.WriteByte(quoteChar)
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if c == quoteChar {
+ b.WriteByte(quoteChar)
+ }
+ b.WriteByte(c)
+ }
+ b.WriteByte(quoteChar)
+ return b.String()
+}
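For reference, a few inputs and the outputs the rc(1)-style quoting above should produce; the expected values are inferred from the helper itself, not taken from Plan 9 documentation:

// quote("")            -> "''"            // the empty string must be quoted
// quote("plain")       -> "plain"         // nothing to escape, returned as-is
// quote("hello world") -> "'hello world'" // blanks force quoting
// quote("it's")        -> "'it''s'"       // embedded quotes are doubled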
diff --git a/repository_test.go b/repository_test.go
index 0148c78..06b748a 100644
--- a/repository_test.go
+++ b/repository_test.go
@@ -3,19 +3,24 @@ package git
import (
"bytes"
"context"
+ "errors"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
+ "regexp"
"strings"
"testing"
"time"
+ fixtures "gopkg.in/src-d/go-git-fixtures.v3"
+
"golang.org/x/crypto/openpgp"
"golang.org/x/crypto/openpgp/armor"
openpgperr "golang.org/x/crypto/openpgp/errors"
+
"gopkg.in/src-d/go-git.v4/config"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/cache"
@@ -30,7 +35,6 @@ import (
"gopkg.in/src-d/go-billy.v4/memfs"
"gopkg.in/src-d/go-billy.v4/osfs"
"gopkg.in/src-d/go-billy.v4/util"
- "gopkg.in/src-d/go-git-fixtures.v3"
)
type RepositorySuite struct {
@@ -335,12 +339,14 @@ func (s *RepositorySuite) TestCreateBranchUnmarshal(c *C) {
Merge: "refs/heads/foo",
}
err = r.CreateBranch(testBranch1)
+ c.Assert(err, IsNil)
err = r.CreateBranch(testBranch2)
-
c.Assert(err, IsNil)
+
cfg, err := r.Config()
c.Assert(err, IsNil)
marshaled, err := cfg.Marshal()
+ c.Assert(err, IsNil)
c.Assert(string(expected), Equals, string(marshaled))
}
@@ -1504,12 +1510,13 @@ func (s *RepositorySuite) TestLogFileForEach(c *C) {
}
expectedIndex := 0
- cIter.ForEach(func(commit *object.Commit) error {
+ err = cIter.ForEach(func(commit *object.Commit) error {
expectedCommitHash := commitOrder[expectedIndex]
c.Assert(commit.Hash.String(), Equals, expectedCommitHash.String())
expectedIndex++
return nil
})
+ c.Assert(err, IsNil)
c.Assert(expectedIndex, Equals, 1)
}
@@ -1548,12 +1555,13 @@ func (s *RepositorySuite) TestLogAllFileForEach(c *C) {
}
expectedIndex := 0
- cIter.ForEach(func(commit *object.Commit) error {
+ err = cIter.ForEach(func(commit *object.Commit) error {
expectedCommitHash := commitOrder[expectedIndex]
c.Assert(commit.Hash.String(), Equals, expectedCommitHash.String())
expectedIndex++
return nil
})
+ c.Assert(err, IsNil)
c.Assert(expectedIndex, Equals, 1)
}
@@ -1595,12 +1603,13 @@ func (s *RepositorySuite) TestLogFileInitialCommit(c *C) {
}
expectedIndex := 0
- cIter.ForEach(func(commit *object.Commit) error {
+ err = cIter.ForEach(func(commit *object.Commit) error {
expectedCommitHash := commitOrder[expectedIndex]
c.Assert(commit.Hash.String(), Equals, expectedCommitHash.String())
expectedIndex++
return nil
})
+ c.Assert(err, IsNil)
c.Assert(expectedIndex, Equals, 1)
}
@@ -1646,6 +1655,221 @@ func (s *RepositorySuite) TestLogFileWithOtherParamsPass(c *C) {
c.Assert(iterErr, Equals, io.EOF)
}
+type mockErrCommitIter struct{}
+
+func (m *mockErrCommitIter) Next() (*object.Commit, error) {
+ return nil, errors.New("mock next error")
+}
+func (m *mockErrCommitIter) ForEach(func(*object.Commit) error) error {
+ return errors.New("mock foreach error")
+}
+
+func (m *mockErrCommitIter) Close() {}
+
+func (s *RepositorySuite) TestLogFileWithError(c *C) {
+ fileName := "README"
+ cIter := object.NewCommitFileIterFromIter(fileName, &mockErrCommitIter{}, false)
+ defer cIter.Close()
+
+ err := cIter.ForEach(func(commit *object.Commit) error {
+ return nil
+ })
+ c.Assert(err, NotNil)
+}
+
+func (s *RepositorySuite) TestLogPathWithError(c *C) {
+ fileName := "README"
+ pathIter := func(path string) bool {
+ return path == fileName
+ }
+ cIter := object.NewCommitPathIterFromIter(pathIter, &mockErrCommitIter{}, false)
+ defer cIter.Close()
+
+ err := cIter.ForEach(func(commit *object.Commit) error {
+ return nil
+ })
+ c.Assert(err, NotNil)
+}
+
+func (s *RepositorySuite) TestLogPathRegexpWithError(c *C) {
+ pathRE := regexp.MustCompile("R.*E")
+ pathIter := func(path string) bool {
+ return pathRE.MatchString(path)
+ }
+ cIter := object.NewCommitPathIterFromIter(pathIter, &mockErrCommitIter{}, false)
+ defer cIter.Close()
+
+ err := cIter.ForEach(func(commit *object.Commit) error {
+ return nil
+ })
+ c.Assert(err, NotNil)
+}
+
+func (s *RepositorySuite) TestLogPathFilterRegexp(c *C) {
+ pathRE := regexp.MustCompile(".*\\.go")
+ pathIter := func(path string) bool {
+ return pathRE.MatchString(path)
+ }
+
+ r, _ := Init(memory.NewStorage(), nil)
+ err := r.clone(context.Background(), &CloneOptions{
+ URL: s.GetBasicLocalRepositoryURL(),
+ })
+ c.Assert(err, IsNil)
+
+ expectedCommitIDs := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
+ "918c48b83bd081e863dbe1b80f8998f058cd8294",
+ }
+ commitIDs := []string{}
+
+ cIter, err := r.Log(&LogOptions{
+ PathFilter: pathIter,
+ From: plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"),
+ })
+ c.Assert(err, IsNil)
+ defer cIter.Close()
+
+ err = cIter.ForEach(func(commit *object.Commit) error {
+ commitIDs = append(commitIDs, commit.ID().String())
+ return nil
+ })
+ c.Assert(err, IsNil)
+ c.Assert(
+ strings.Join(commitIDs, ", "),
+ Equals,
+ strings.Join(expectedCommitIDs, ", "),
+ )
+}
+
+func (s *RepositorySuite) TestLogLimitNext(c *C) {
+ r, _ := Init(memory.NewStorage(), nil)
+ err := r.clone(context.Background(), &CloneOptions{
+ URL: s.GetBasicLocalRepositoryURL(),
+ })
+
+ c.Assert(err, IsNil)
+
+ since := time.Date(2015, 4, 1, 0, 0, 0, 0, time.UTC)
+ cIter, err := r.Log(&LogOptions{Since: &since})
+
+ c.Assert(err, IsNil)
+
+ commitOrder := []plumbing.Hash{
+ plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"),
+ }
+
+ for _, o := range commitOrder {
+ commit, err := cIter.Next()
+ c.Assert(err, IsNil)
+ c.Assert(commit.Hash, Equals, o)
+ }
+ _, err = cIter.Next()
+ c.Assert(err, Equals, io.EOF)
+}
+
+func (s *RepositorySuite) TestLogLimitForEach(c *C) {
+ r, _ := Init(memory.NewStorage(), nil)
+ err := r.clone(context.Background(), &CloneOptions{
+ URL: s.GetBasicLocalRepositoryURL(),
+ })
+
+ c.Assert(err, IsNil)
+
+ since := time.Date(2015, 3, 31, 11, 54, 0, 0, time.UTC)
+ until := time.Date(2015, 4, 1, 0, 0, 0, 0, time.UTC)
+ cIter, err := r.Log(&LogOptions{Since: &since, Until: &until})
+ c.Assert(err, IsNil)
+ defer cIter.Close()
+
+ commitOrder := []plumbing.Hash{
+ plumbing.NewHash("918c48b83bd081e863dbe1b80f8998f058cd8294"),
+ }
+
+ expectedIndex := 0
+ err = cIter.ForEach(func(commit *object.Commit) error {
+ expectedCommitHash := commitOrder[expectedIndex]
+ c.Assert(commit.Hash.String(), Equals, expectedCommitHash.String())
+ expectedIndex++
+ return nil
+ })
+ c.Assert(err, IsNil)
+ c.Assert(expectedIndex, Equals, 1)
+}
+
+func (s *RepositorySuite) TestLogAllLimitForEach(c *C) {
+ r, _ := Init(memory.NewStorage(), nil)
+ err := r.clone(context.Background(), &CloneOptions{
+ URL: s.GetBasicLocalRepositoryURL(),
+ })
+
+ c.Assert(err, IsNil)
+
+ since := time.Date(2015, 3, 31, 11, 54, 0, 0, time.UTC)
+ until := time.Date(2015, 4, 1, 0, 0, 0, 0, time.UTC)
+ cIter, err := r.Log(&LogOptions{Since: &since, Until: &until, All: true})
+ c.Assert(err, IsNil)
+ defer cIter.Close()
+
+ commitOrder := []plumbing.Hash{
+ plumbing.NewHash("e8d3ffab552895c19b9fcf7aa264d277cde33881"),
+ plumbing.NewHash("918c48b83bd081e863dbe1b80f8998f058cd8294"),
+ }
+
+ expectedIndex := 0
+ err = cIter.ForEach(func(commit *object.Commit) error {
+ expectedCommitHash := commitOrder[expectedIndex]
+ c.Assert(commit.Hash.String(), Equals, expectedCommitHash.String())
+ expectedIndex++
+ return nil
+ })
+ c.Assert(err, IsNil)
+ c.Assert(expectedIndex, Equals, 2)
+}
+
+func (s *RepositorySuite) TestLogLimitWithOtherParamsFail(c *C) {
+ r, _ := Init(memory.NewStorage(), nil)
+ err := r.clone(context.Background(), &CloneOptions{
+ URL: s.GetBasicLocalRepositoryURL(),
+ })
+ c.Assert(err, IsNil)
+
+ since := time.Date(2015, 3, 31, 11, 54, 0, 0, time.UTC)
+ cIter, err := r.Log(&LogOptions{
+ Order: LogOrderCommitterTime,
+ Since: &since,
+ From: plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9"),
+ })
+ c.Assert(err, IsNil)
+ defer cIter.Close()
+
+ _, iterErr := cIter.Next()
+ c.Assert(iterErr, Equals, io.EOF)
+}
+
+func (s *RepositorySuite) TestLogLimitWithOtherParamsPass(c *C) {
+ r, _ := Init(memory.NewStorage(), nil)
+ err := r.clone(context.Background(), &CloneOptions{
+ URL: s.GetBasicLocalRepositoryURL(),
+ })
+ c.Assert(err, IsNil)
+
+ until := time.Date(2015, 3, 31, 11, 43, 0, 0, time.UTC)
+ cIter, err := r.Log(&LogOptions{
+ Order: LogOrderCommitterTime,
+ Until: &until,
+ From: plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9"),
+ })
+ c.Assert(err, IsNil)
+ defer cIter.Close()
+
+ commitVal, iterErr := cIter.Next()
+ c.Assert(iterErr, Equals, nil)
+ c.Assert(commitVal.Hash.String(), Equals, "b029517f6300c2da0f4b651b8642506cd6aaf45d")
+
+ _, iterErr = cIter.Next()
+ c.Assert(iterErr, Equals, io.EOF)
+}
+
func (s *RepositorySuite) TestCommit(c *C) {
r, _ := Init(memory.NewStorage(), nil)
err := r.clone(context.Background(), &CloneOptions{
@@ -2456,9 +2680,9 @@ func (s *RepositorySuite) TestResolveRevisionWithErrors(c *C) {
c.Assert(err, IsNil)
datas := map[string]string{
- "efs/heads/master~": "reference not found",
- "HEAD^3": `Revision invalid : "3" found must be 0, 1 or 2 after "^"`,
- "HEAD^{/whatever}": `No commit message match regexp : "whatever"`,
+ "efs/heads/master~": "reference not found",
+ "HEAD^3": `Revision invalid : "3" found must be 0, 1 or 2 after "^"`,
+ "HEAD^{/whatever}": `No commit message match regexp : "whatever"`,
"4e1243bd22c66e76c2ba9eddc1f91394e57f9f83": "reference not found",
}
@@ -2671,3 +2895,22 @@ func BenchmarkObjects(b *testing.B) {
})
}
}
+
+func BenchmarkPlainClone(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ t, err := ioutil.TempDir("", "")
+ if err != nil {
+ b.Fatal(err)
+ }
+ _, err = PlainClone(t, false, &CloneOptions{
+ URL: "https://github.com/knqyf263/vuln-list",
+ Depth: 1,
+ })
+ if err != nil {
+ b.Error(err)
+ }
+ b.StopTimer()
+ os.RemoveAll(t)
+ b.StartTimer()
+ }
+}
diff --git a/storage/filesystem/dotgit/dotgit_test.go b/storage/filesystem/dotgit/dotgit_test.go
index 4cda2f9..bd4e9f0 100644
--- a/storage/filesystem/dotgit/dotgit_test.go
+++ b/storage/filesystem/dotgit/dotgit_test.go
@@ -226,6 +226,7 @@ func (s *SuiteDotGit) TestRemoveRefFromReferenceFileAndPackedRefs(c *C) {
"refs/remotes/origin/branch",
"e8d3ffab552895c19b9fcf7aa264d277cde33881",
), nil)
+ c.Assert(err, IsNil)
// Make sure it only appears once in the refs list.
refs, err := dir.Refs()
diff --git a/storage/filesystem/index.go b/storage/filesystem/index.go
index d04195c..be800ef 100644
--- a/storage/filesystem/index.go
+++ b/storage/filesystem/index.go
@@ -20,8 +20,14 @@ func (s *IndexStorage) SetIndex(idx *index.Index) (err error) {
}
defer ioutil.CheckClose(f, &err)
+ bw := bufio.NewWriter(f)
+ defer func() {
+ if e := bw.Flush(); err == nil && e != nil {
+ err = e
+ }
+ }()
- e := index.NewEncoder(f)
+ e := index.NewEncoder(bw)
err = e.Encode(idx)
return err
}
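The SetIndex change above is a reusable pattern: wrap the file in a bufio.Writer and fold the Flush error into the function's named return so buffered bytes are never silently dropped. A standalone sketch of the same pattern:

package main

import (
	"bufio"
	"os"
)

// writeBuffered mirrors the buffered-write pattern used by SetIndex: a
// deferred Flush whose error is kept only when nothing else failed first.
func writeBuffered(path string, payload []byte) (err error) {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer func() {
		if cerr := f.Close(); err == nil && cerr != nil {
			err = cerr
		}
	}()

	bw := bufio.NewWriter(f)
	defer func() { // runs before the Close above, flushing buffered bytes
		if ferr := bw.Flush(); err == nil && ferr != nil {
			err = ferr
		}
	}()

	_, err = bw.Write(payload)
	return err
}

func main() {
	if err := writeBuffered("example.tmp", []byte("example payload")); err != nil {
		panic(err)
	}
}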
diff --git a/storage/memory/storage.go b/storage/memory/storage.go
index f240f2a..fee8266 100644
--- a/storage/memory/storage.go
+++ b/storage/memory/storage.go
@@ -15,7 +15,7 @@ import (
var ErrUnsupportedObjectType = fmt.Errorf("unsupported object type")
// Storage is an implementation of git.Storer that stores data on memory, being
-// ephemeral. The use of this storage should be done in controlled envoriments,
+// ephemeral. The use of this storage should be done in controlled environments,
// since the representation in memory of some repository can fill the machine
// memory. in the other hand this storage has the best performance.
type Storage struct {
diff --git a/utils/diff/diff_ext_test.go b/utils/diff/diff_ext_test.go
index adda276..c6c7e90 100644
--- a/utils/diff/diff_ext_test.go
+++ b/utils/diff/diff_ext_test.go
@@ -99,6 +99,37 @@ var doTests = [...]struct {
{Type: 1, Text: "111\nBCD\n"},
},
},
+ {
+ src: "A\nB\nC\nD\nE\nF\nG\nH\nI\nJ\nK\nL\nM\nN\nÑ\nO\nP\nQ\nR\nS\nT\nU\nV\nW\nX\nY\nZ",
+ dst: "B\nC\nD\nE\nF\nG\nI\nJ\nK\nL\nM\nN\nO\nP\nQ\nR\nS\nT\nV\nW\nX\nY\nZ",
+ exp: []diffmatchpatch.Diff{
+ {Type: -1, Text: "A\n"},
+ {Type: 0, Text: "B\nC\nD\nE\nF\nG\n"},
+ {Type: -1, Text: "H\n"},
+ {Type: 0, Text: "I\nJ\nK\nL\nM\nN\n"},
+ {Type: -1, Text: "Ñ\n"},
+ {Type: 0, Text: "O\nP\nQ\nR\nS\nT\n"},
+ {Type: -1, Text: "U\n"},
+ {Type: 0, Text: "V\nW\nX\nY\nZ"},
+ },
+ },
+ {
+ src: "B\nC\nD\nE\nF\nG\nI\nJ\nK\nL\nM\nN\nO\nP\nQ\nR\nS\nT\nV\nW\nX\nY\nZ",
+ dst: "B\nC\nD\nE\nF\nG\nI\nJ\nK\nL\nM\nN\nO\nP\nQ\nR\nS\nT\nV\nW\nX\nY\n",
+ exp: []diffmatchpatch.Diff{
+ {Type: 0, Text: "B\nC\nD\nE\nF\nG\nI\nJ\nK\nL\nM\nN\nO\nP\nQ\nR\nS\nT\nV\nW\nX\nY\n"},
+ {Type: -1, Text: "Z"},
+ },
+ },
+ {
+ src: "B\nC\nD\nE\nF\nG\nI\nJ\nK\nL\nM\nN\nO\nP\nQ\nR\nS\nT\nV\nW\nX\nY\nZ",
+ dst: "B\nC\nD\nE\nF\nG\nI\nJ\nK\nL\nM\nN\nO\nP\nQ\nR\nS\nT\nV\nW\nX\nY",
+ exp: []diffmatchpatch.Diff{
+ {Type: 0, Text: "B\nC\nD\nE\nF\nG\nI\nJ\nK\nL\nM\nN\nO\nP\nQ\nR\nS\nT\nV\nW\nX\n"},
+ {Type: -1, Text: "Y\nZ"},
+ {Type: 1, Text: "Y"},
+ },
+ },
}
func (s *suiteCommon) TestDo(c *C) {
diff --git a/utils/merkletrie/difftree.go b/utils/merkletrie/difftree.go
index d57ed13..77ba5a8 100644
--- a/utils/merkletrie/difftree.go
+++ b/utils/merkletrie/difftree.go
@@ -8,7 +8,7 @@ package merkletrie
// type defined in this same package; we will iterate over both
// trees at the same time, while comparing the current noders in
// each iterator. Depending on how they differ we will output the
-// corresponding chages and move the iterators further over both
+// corresponding changes and move the iterators further over both
// trees.
//
// The table bellow show all the possible comparison results, along
@@ -69,7 +69,7 @@ package merkletrie
//
// ### C. To was created: 01, 02, 03, 04, 05, 06
// - check: `DifferentName() && ToBeforeFrom()`
-// - action: inserRecursively(to)
+// - action: insertRecursively(to)
// - advance: `ToNext()`
//
// ### D. From was deleted: 10, 20, 30, 40, 50, 60
@@ -131,13 +131,13 @@ package merkletrie
// - action: `DeleteDir(from); InsertFile(to)`
// - advance: `FromNext(); ToNext()`
//
-// ### J. file with contents to dir with contetns: 25, 26, 35, 36
+// ### J. file with contents to dir with contents: 25, 26, 35, 36
// - check: `SameName() && DifferentHash() && FromIsFile() &&
// FromIsNotEmpty() && ToIsDir() && ToIsNotEmpty()`
// - action: `DeleteFile(from); InsertDirRecursively(to)`
// - advance: `FromNext(); ToNext()`
//
-// ### J'. dir with contetns to file with contents: 52, 62, 53, 63
+// ### J'. dir with contents to file with contents: 52, 62, 53, 63
// - check: `SameName() && DifferentHash() && FromIsDir() &&
// FromIsNotEmpty() && ToIsFile() && ToIsNotEmpty()`
// - action: `DeleteDirRecursively(from); InsertFile(to)`
@@ -216,7 +216,7 @@ package merkletrie
// 1 0 1 0 1 0 | a() | a<1> | I' | f | delete(from); insert(to); NN
// 1 0 1 0 1 1 | a() | a<> | F' | f | delete(from); insert(to); NN
// 1 0 1 1 0 0 | a(...) | a(;;;) | L | g | nothing; SS
-// 1 0 1 1 0 1 | a(...) | a() | K' | h | deleteChidren(from); NN
+// 1 0 1 1 0 1 | a(...) | a() | K' | h | deleteChildren(from); NN
// 1 0 1 1 1 0 | a() | a(...) | K | i | insertChildren(to); NN
// 1 0 1 1 1 1 | ---- | ---- | | |
// 1 1 0 0 0 0 | a<1> | a<1> | B | b | nothing; NN
diff --git a/utils/merkletrie/difftree_test.go b/utils/merkletrie/difftree_test.go
index ac86145..f725bcf 100644
--- a/utils/merkletrie/difftree_test.go
+++ b/utils/merkletrie/difftree_test.go
@@ -177,7 +177,7 @@ func newChangesFromString(s string) (changes, error) {
for _, chunk := range strings.Split(s, " ") {
change := change{
- path: string(chunk[1:]),
+ path: chunk[1:],
}
switch chunk[0] {
diff --git a/utils/merkletrie/internal/frame/frame.go b/utils/merkletrie/internal/frame/frame.go
index a0b042e..77a3de4 100644
--- a/utils/merkletrie/internal/frame/frame.go
+++ b/utils/merkletrie/internal/frame/frame.go
@@ -38,7 +38,7 @@ func New(n noder.Noder) (*Frame, error) {
}
// String returns the quoted names of the noders in the frame sorted in
-// alphabeticall order by name, surrounded by square brackets and
+// alphabetical order by name, surrounded by square brackets and
// separated by comas.
//
// Examples:
@@ -61,7 +61,7 @@ func (f *Frame) String() string {
}
// First returns, but dont extract, the noder with the alphabetically
-// smaller name in the frame and true if the frame was not empy.
+// smaller name in the frame and true if the frame was not empty.
// Otherwise it returns nil and false.
func (f *Frame) First() (noder.Noder, bool) {
if f.Len() == 0 {
diff --git a/utils/merkletrie/internal/fsnoder/doc.go b/utils/merkletrie/internal/fsnoder/doc.go
index 3f55b5f..c79ac59 100644
--- a/utils/merkletrie/internal/fsnoder/doc.go
+++ b/utils/merkletrie/internal/fsnoder/doc.go
@@ -30,7 +30,7 @@ Directories are expressed as:
- its elements between parents, separated with spaces, in any order.
-- (optionally) the root directory can be unnamed, by skiping its name.
+- (optionally) the root directory can be unnamed, by skipping its name.
Examples:
diff --git a/utils/merkletrie/internal/fsnoder/file.go b/utils/merkletrie/internal/fsnoder/file.go
index c975a60..686a675 100644
--- a/utils/merkletrie/internal/fsnoder/file.go
+++ b/utils/merkletrie/internal/fsnoder/file.go
@@ -60,7 +60,7 @@ const (
fileEndMark = '>'
)
-// String returns a string formated as: name<contents>.
+// String returns a string formatted as: name<contents>.
func (f *file) String() string {
var buf bytes.Buffer
buf.WriteString(f.name)
diff --git a/utils/merkletrie/internal/fsnoder/new_test.go b/utils/merkletrie/internal/fsnoder/new_test.go
index 805772f..a2c474a 100644
--- a/utils/merkletrie/internal/fsnoder/new_test.go
+++ b/utils/merkletrie/internal/fsnoder/new_test.go
@@ -176,7 +176,7 @@ func (s *FSNoderSuite) TestEmptyDir(c *C) {
check(c, input, expected)
}
-func (s *FSNoderSuite) TestDirWithEmtpyFile(c *C) {
+func (s *FSNoderSuite) TestDirWithEmptyFile(c *C) {
input := "(A(a<>))"
a, err := newFile("a", "")
@@ -189,7 +189,7 @@ func (s *FSNoderSuite) TestDirWithEmtpyFile(c *C) {
check(c, input, expected)
}
-func (s *FSNoderSuite) TestDirWithEmtpyFileSameName(c *C) {
+func (s *FSNoderSuite) TestDirWithEmptyFileSameName(c *C) {
input := "(A(A<>))"
f, err := newFile("A", "")
diff --git a/utils/merkletrie/iter_test.go b/utils/merkletrie/iter_test.go
index b334cf1..3b24043 100644
--- a/utils/merkletrie/iter_test.go
+++ b/utils/merkletrie/iter_test.go
@@ -95,7 +95,7 @@ func (t test) run(c *C, iter *merkletrie.Iter,
}
// A testsCollection value represents a tree and a collection of tests
-// we want to perfrom on iterators of that tree.
+// we want to perform on iterators of that tree.
//
// Example:
//
diff --git a/utils/merkletrie/noder/path_test.go b/utils/merkletrie/noder/path_test.go
index f49f028..f65b1d5 100644
--- a/utils/merkletrie/noder/path_test.go
+++ b/utils/merkletrie/noder/path_test.go
@@ -154,8 +154,8 @@ func (s *PathSuite) TestCompareMixedDepths(c *C) {
}
func (s *PathSuite) TestCompareNormalization(c *C) {
- p1 := Path([]Noder{&noderMock{name: norm.Form(norm.NFKC).String("페")}})
- p2 := Path([]Noder{&noderMock{name: norm.Form(norm.NFKD).String("페")}})
+ p1 := Path([]Noder{&noderMock{name: norm.NFKC.String("페")}})
+ p2 := Path([]Noder{&noderMock{name: norm.NFKD.String("페")}})
c.Assert(p1.Compare(p2), Equals, 1)
c.Assert(p2.Compare(p1), Equals, -1)
p1 = Path([]Noder{&noderMock{name: "TestAppWithUnicodéPath"}})
diff --git a/worktree.go b/worktree.go
index dae40a3..4a609e9 100644
--- a/worktree.go
+++ b/worktree.go
@@ -9,6 +9,7 @@ import (
"os"
"path/filepath"
"strings"
+ "sync"
"gopkg.in/src-d/go-git.v4/config"
"gopkg.in/src-d/go-git.v4/plumbing"
@@ -160,6 +161,8 @@ func (w *Worktree) Checkout(opts *CheckoutOptions) error {
ro := &ResetOptions{Commit: c, Mode: MergeReset}
if opts.Force {
ro.Mode = HardReset
+ } else if opts.Keep {
+ ro.Mode = SoftReset
}
if !opts.Hash.IsZero() && !opts.Create {
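With Keep mapped to a soft reset, staged changes survive a branch switch instead of being overwritten. A usage sketch, assuming the CheckoutOptions.Keep field added in options.go by this change (the TestCheckoutKeep test further down exercises the same flow):

package main

import (
	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing"
)

func main() {
	repo, err := git.PlainOpen(".")
	if err != nil {
		panic(err)
	}
	w, err := repo.Worktree()
	if err != nil {
		panic(err)
	}

	// Switch to master but keep whatever is currently staged, rather than
	// resetting the index to the target commit.
	err = w.Checkout(&git.CheckoutOptions{
		Branch: plumbing.NewBranchReferenceName("master"),
		Keep:   true,
	})
	if err != nil {
		panic(err)
	}
}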
@@ -302,6 +305,7 @@ func (w *Worktree) resetIndex(t *object.Tree) error {
if err != nil {
return err
}
+ b := newIndexBuilder(idx)
changes, err := w.diffTreeWithStaging(t, true)
if err != nil {
@@ -328,12 +332,12 @@ func (w *Worktree) resetIndex(t *object.Tree) error {
name = ch.From.String()
}
- _, _ = idx.Remove(name)
+ b.Remove(name)
if e == nil {
continue
}
- idx.Entries = append(idx.Entries, &index.Entry{
+ b.Add(&index.Entry{
Name: name,
Hash: e.Hash,
Mode: e.Mode,
@@ -341,6 +345,7 @@ func (w *Worktree) resetIndex(t *object.Tree) error {
}
+ b.Write(idx)
return w.r.Storer.SetIndex(idx)
}
@@ -354,17 +359,19 @@ func (w *Worktree) resetWorktree(t *object.Tree) error {
if err != nil {
return err
}
+ b := newIndexBuilder(idx)
for _, ch := range changes {
- if err := w.checkoutChange(ch, t, idx); err != nil {
+ if err := w.checkoutChange(ch, t, b); err != nil {
return err
}
}
+ b.Write(idx)
return w.r.Storer.SetIndex(idx)
}
-func (w *Worktree) checkoutChange(ch merkletrie.Change, t *object.Tree, idx *index.Index) error {
+func (w *Worktree) checkoutChange(ch merkletrie.Change, t *object.Tree, idx *indexBuilder) error {
a, err := ch.Action()
if err != nil {
return err
@@ -443,7 +450,7 @@ func (w *Worktree) setHEADCommit(commit plumbing.Hash) error {
func (w *Worktree) checkoutChangeSubmodule(name string,
a merkletrie.Action,
e *object.TreeEntry,
- idx *index.Index,
+ idx *indexBuilder,
) error {
switch a {
case merkletrie.Modify:
@@ -477,11 +484,11 @@ func (w *Worktree) checkoutChangeRegularFile(name string,
a merkletrie.Action,
t *object.Tree,
e *object.TreeEntry,
- idx *index.Index,
+ idx *indexBuilder,
) error {
switch a {
case merkletrie.Modify:
- _, _ = idx.Remove(name)
+ idx.Remove(name)
// to apply perm changes the file is deleted, billy doesn't implement
// chmod
@@ -506,6 +513,12 @@ func (w *Worktree) checkoutChangeRegularFile(name string,
return nil
}
+var copyBufferPool = sync.Pool{
+ New: func() interface{} {
+ return make([]byte, 32*1024)
+ },
+}
+
func (w *Worktree) checkoutFile(f *object.File) (err error) {
mode, err := f.Mode.ToOSFileMode()
if err != nil {
@@ -529,8 +542,9 @@ func (w *Worktree) checkoutFile(f *object.File) (err error) {
}
defer ioutil.CheckClose(to, &err)
-
- _, err = io.Copy(to, from)
+ buf := copyBufferPool.Get().([]byte)
+ _, err = io.CopyBuffer(to, from, buf)
+ copyBufferPool.Put(buf)
return
}
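Reusing a pooled 32 KiB buffer with io.CopyBuffer avoids one allocation per checked-out file. The same pattern in isolation, using only the standard library:

package main

import (
	"io"
	"os"
	"strings"
	"sync"
)

// copyBufferPool hands out reusable 32 KiB buffers, as in checkoutFile above.
var copyBufferPool = sync.Pool{
	New: func() interface{} {
		return make([]byte, 32*1024)
	},
}

// copyWithPool copies src into dst with a pooled buffer instead of letting
// io.Copy allocate a fresh one on every call.
func copyWithPool(dst io.Writer, src io.Reader) (int64, error) {
	buf := copyBufferPool.Get().([]byte)
	defer copyBufferPool.Put(buf)
	return io.CopyBuffer(dst, src, buf)
}

func main() {
	if _, err := copyWithPool(os.Stdout, strings.NewReader("pooled copy\n")); err != nil {
		panic(err)
	}
}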
@@ -567,19 +581,18 @@ func (w *Worktree) checkoutFileSymlink(f *object.File) (err error) {
return
}
-func (w *Worktree) addIndexFromTreeEntry(name string, f *object.TreeEntry, idx *index.Index) error {
- _, _ = idx.Remove(name)
- idx.Entries = append(idx.Entries, &index.Entry{
+func (w *Worktree) addIndexFromTreeEntry(name string, f *object.TreeEntry, idx *indexBuilder) error {
+ idx.Remove(name)
+ idx.Add(&index.Entry{
Hash: f.Hash,
Name: name,
Mode: filemode.Submodule,
})
-
return nil
}
-func (w *Worktree) addIndexFromFile(name string, h plumbing.Hash, idx *index.Index) error {
- _, _ = idx.Remove(name)
+func (w *Worktree) addIndexFromFile(name string, h plumbing.Hash, idx *indexBuilder) error {
+ idx.Remove(name)
fi, err := w.Filesystem.Lstat(name)
if err != nil {
return err
@@ -603,8 +616,7 @@ func (w *Worktree) addIndexFromFile(name string, h plumbing.Hash, idx *index.Ind
if fillSystemInfo != nil {
fillSystemInfo(e, fi.Sys())
}
-
- idx.Entries = append(idx.Entries, e)
+ idx.Add(e)
return nil
}
@@ -720,7 +732,7 @@ func (w *Worktree) Clean(opts *CleanOptions) error {
func (w *Worktree) doClean(status Status, opts *CleanOptions, dir string, files []os.FileInfo) error {
for _, fi := range files {
- if fi.Name() == ".git" {
+ if fi.Name() == GitDirName {
continue
}
@@ -911,3 +923,32 @@ func doCleanDirectories(fs billy.Filesystem, dir string) error {
}
return nil
}
+
+type indexBuilder struct {
+ entries map[string]*index.Entry
+}
+
+func newIndexBuilder(idx *index.Index) *indexBuilder {
+ entries := make(map[string]*index.Entry, len(idx.Entries))
+ for _, e := range idx.Entries {
+ entries[e.Name] = e
+ }
+ return &indexBuilder{
+ entries: entries,
+ }
+}
+
+func (b *indexBuilder) Write(idx *index.Index) {
+ idx.Entries = idx.Entries[:0]
+ for _, e := range b.entries {
+ idx.Entries = append(idx.Entries, e)
+ }
+}
+
+func (b *indexBuilder) Add(e *index.Entry) {
+ b.entries[e.Name] = e
+}
+
+func (b *indexBuilder) Remove(name string) {
+ delete(b.entries, filepath.ToSlash(name))
+}
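The indexBuilder above replaces repeated linear scans of idx.Entries with a map keyed by entry name: callers Add and Remove against the map and Write rebuilds the slice once. A self-contained restatement of how the reset/checkout paths drive that pattern (entryBuilder is a local stand-in, since the real type is unexported):

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/format/index"
)

// entryBuilder is a simplified restatement of indexBuilder: stage entries in
// a map keyed by name, then rewrite the Entries slice once at the end.
type entryBuilder struct {
	entries map[string]*index.Entry
}

func newEntryBuilder(idx *index.Index) *entryBuilder {
	entries := make(map[string]*index.Entry, len(idx.Entries))
	for _, e := range idx.Entries {
		entries[e.Name] = e
	}
	return &entryBuilder{entries: entries}
}

func (b *entryBuilder) Add(e *index.Entry) { b.entries[e.Name] = e }

func (b *entryBuilder) Remove(name string) { delete(b.entries, name) }

func (b *entryBuilder) Write(idx *index.Index) {
	idx.Entries = idx.Entries[:0]
	for _, e := range b.entries {
		idx.Entries = append(idx.Entries, e)
	}
}

func main() {
	idx := &index.Index{Entries: []*index.Entry{
		{Name: "README"},
		{Name: "old.txt"},
	}}

	b := newEntryBuilder(idx)
	b.Remove("old.txt")                  // drop an entry that no longer exists
	b.Add(&index.Entry{Name: "new.txt"}) // stage a new one
	b.Write(idx)                         // rebuild idx.Entries in a single pass

	for _, e := range idx.Entries {
		fmt.Println(e.Name) // README and new.txt, in map iteration order
	}
}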
diff --git a/worktree_commit_test.go b/worktree_commit_test.go
index da377c6..5bcbac9 100644
--- a/worktree_commit_test.go
+++ b/worktree_commit_test.go
@@ -210,7 +210,7 @@ func (s *WorktreeSuite) TestCommitTreeSort(c *C) {
r, err := Init(st, nil)
c.Assert(err, IsNil)
- r, err = Clone(memory.NewStorage(), memfs.New(), &CloneOptions{
+ r, _ = Clone(memory.NewStorage(), memfs.New(), &CloneOptions{
URL: path,
})
diff --git a/worktree_plan9.go b/worktree_plan9.go
new file mode 100644
index 0000000..16d3915
--- /dev/null
+++ b/worktree_plan9.go
@@ -0,0 +1,31 @@
+package git
+
+import (
+ "syscall"
+ "time"
+
+ "gopkg.in/src-d/go-git.v4/plumbing/format/index"
+)
+
+func init() {
+ fillSystemInfo = func(e *index.Entry, sys interface{}) {
+ if os, ok := sys.(*syscall.Dir); ok {
+ // Plan 9 doesn't have a CreatedAt field.
+ e.CreatedAt = time.Unix(int64(os.Mtime), 0)
+
+ e.Dev = uint32(os.Dev)
+
+ // Plan 9 has no Inode.
+ // ext2srv(4) appears to store Inode in Qid.Path.
+ e.Inode = uint32(os.Qid.Path)
+
+ // Plan 9 has string UID/GID
+ e.GID = 0
+ e.UID = 0
+ }
+ }
+}
+
+func isSymlinkWindowsNonAdmin(err error) bool {
+ return true
+}
diff --git a/worktree_test.go b/worktree_test.go
index afedc91..dab1c23 100644
--- a/worktree_test.go
+++ b/worktree_test.go
@@ -314,6 +314,46 @@ func (s *WorktreeSuite) TestCheckoutForce(c *C) {
c.Assert(entries, HasLen, 8)
}
+func (s *WorktreeSuite) TestCheckoutKeep(c *C) {
+ w := &Worktree{
+ r: s.Repository,
+ Filesystem: memfs.New(),
+ }
+
+ err := w.Checkout(&CheckoutOptions{
+ Force: true,
+ })
+ c.Assert(err, IsNil)
+
+ // Create a new branch and create a new file.
+ err = w.Checkout(&CheckoutOptions{
+ Branch: plumbing.NewBranchReferenceName("new-branch"),
+ Create: true,
+ })
+ c.Assert(err, IsNil)
+
+ w.Filesystem = memfs.New()
+ f, err := w.Filesystem.Create("new-file.txt")
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte("DUMMY"))
+ c.Assert(err, IsNil)
+ c.Assert(f.Close(), IsNil)
+
+ // Add the file to staging.
+ _, err = w.Add("new-file.txt")
+ c.Assert(err, IsNil)
+
+ // Switch branch to master, and verify that the new file was kept in staging.
+ err = w.Checkout(&CheckoutOptions{
+ Keep: true,
+ })
+ c.Assert(err, IsNil)
+
+ fi, err := w.Filesystem.Stat("new-file.txt")
+ c.Assert(err, IsNil)
+ c.Assert(fi.Size(), Equals, int64(5))
+}
+
func (s *WorktreeSuite) TestCheckoutSymlink(c *C) {
if runtime.GOOS == "windows" {
c.Skip("git doesn't support symlinks by default in windows")
@@ -392,7 +432,7 @@ func (s *WorktreeSuite) TestFilenameNormalization(c *C) {
err = w.Filesystem.Remove(filename)
c.Assert(err, IsNil)
- modFilename := norm.Form(norm.NFKD).String(filename)
+ modFilename := norm.NFKD.String(filename)
writeFile(modFilename)
_, err = w.Add(filename)
@@ -1635,6 +1675,7 @@ func (s *WorktreeSuite) TestClean(c *C) {
// Status before cleaning.
status, err := wt.Status()
+ c.Assert(err, IsNil)
c.Assert(len(status), Equals, 2)
err = wt.Clean(&CleanOptions{})