-rw-r--r--  _examples/commit/main.go | 2
-rw-r--r--  _examples/common_test.go | 1
-rw-r--r--  _examples/ls/main.go | 272
-rw-r--r--  config/branch.go | 23
-rw-r--r--  config/branch_test.go | 4
-rw-r--r--  config/config.go | 1
-rw-r--r--  go.mod | 23
-rw-r--r--  go.sum | 56
-rw-r--r--  options.go | 5
-rw-r--r--  plumbing/format/commitgraph/commitgraph.go | 35
-rw-r--r--  plumbing/format/commitgraph/commitgraph_test.go | 132
-rw-r--r--  plumbing/format/commitgraph/doc.go | 103
-rw-r--r--  plumbing/format/commitgraph/encoder.go | 190
-rw-r--r--  plumbing/format/commitgraph/file.go | 259
-rw-r--r--  plumbing/format/commitgraph/memory.go | 72
-rw-r--r--  plumbing/format/gitattributes/attributes.go | 214
-rw-r--r--  plumbing/format/gitattributes/attributes_test.go | 67
-rw-r--r--  plumbing/format/gitattributes/dir.go | 126
-rw-r--r--  plumbing/format/gitattributes/dir_test.go | 199
-rw-r--r--  plumbing/format/gitattributes/matcher.go | 78
-rw-r--r--  plumbing/format/gitattributes/matcher_test.go | 29
-rw-r--r--  plumbing/format/gitattributes/pattern.go | 101
-rw-r--r--  plumbing/format/gitattributes/pattern_test.go | 229
-rw-r--r--  plumbing/format/idxfile/idxfile.go | 105
-rw-r--r--  plumbing/format/index/decoder.go | 37
-rw-r--r--  plumbing/format/packfile/common.go | 10
-rw-r--r--  plumbing/format/packfile/packfile.go | 187
-rw-r--r--  plumbing/format/packfile/scanner.go | 189
-rw-r--r--  plumbing/format/packfile/scanner_test.go | 49
-rw-r--r--  plumbing/object/commit.go | 49
-rw-r--r--  plumbing/object/commit_stats_test.go | 95
-rw-r--r--  plumbing/object/commit_test.go | 23
-rw-r--r--  plumbing/object/commitgraph/commitnode.go | 98
-rw-r--r--  plumbing/object/commitgraph/commitnode_graph.go | 131
-rw-r--r--  plumbing/object/commitgraph/commitnode_object.go | 90
-rw-r--r--  plumbing/object/commitgraph/commitnode_test.go | 147
-rw-r--r--  plumbing/object/commitgraph/commitnode_walker_ctime.go | 105
-rw-r--r--  plumbing/object/commitgraph/doc.go | 7
-rw-r--r--  plumbing/object/common.go | 12
-rw-r--r--  plumbing/object/patch.go | 15
-rw-r--r--  plumbing/object/tag.go | 13
-rw-r--r--  plumbing/object/tag_test.go | 96
-rw-r--r--  plumbing/object/tree.go | 23
-rw-r--r--  plumbing/transport/ssh/common.go | 27
-rw-r--r--  plumbing/transport/ssh/proxy_test.go | 36
-rw-r--r--  repository.go | 81
-rw-r--r--  repository_test.go | 10
-rw-r--r--  storage/filesystem/dotgit/dotgit.go | 20
-rw-r--r--  storage/filesystem/index.go | 3
-rw-r--r--  storage/filesystem/object.go | 145
-rw-r--r--  storage/filesystem/object_test.go | 18
-rw-r--r--  storage/filesystem/storage.go | 4
-rw-r--r--  utils/binary/read.go | 15
-rw-r--r--  utils/binary/read_test.go | 10
-rw-r--r--  worktree.go | 2
-rw-r--r--  worktree_test.go | 40
56 files changed, 3714 insertions, 399 deletions
diff --git a/_examples/commit/main.go b/_examples/commit/main.go
index ec296b9..f184b81 100644
--- a/_examples/commit/main.go
+++ b/_examples/commit/main.go
@@ -27,7 +27,7 @@ func main() {
// ... we need a file to commit so let's create a new file inside of the
// worktree of the project using the go standard library.
- Info("echo \"hellow world!\" > example-git-file")
+ Info("echo \"hello world!\" > example-git-file")
filename := filepath.Join(directory, "example-git-file")
err = ioutil.WriteFile(filename, []byte("hello world!"), 0644)
CheckIfError(err)
diff --git a/_examples/common_test.go b/_examples/common_test.go
index aa7c9b4..47463a1 100644
--- a/_examples/common_test.go
+++ b/_examples/common_test.go
@@ -28,6 +28,7 @@ var args = map[string][]string{
"showcase": {defaultURL, tempFolder()},
"tag": {cloneRepository(defaultURL, tempFolder())},
"pull": {createRepositoryWithRemote(tempFolder(), defaultURL)},
+ "ls": {cloneRepository(defaultURL, tempFolder()), "HEAD", "vendor"},
}
var ignored = map[string]bool{}
diff --git a/_examples/ls/main.go b/_examples/ls/main.go
new file mode 100644
index 0000000..bb686f1
--- /dev/null
+++ b/_examples/ls/main.go
@@ -0,0 +1,272 @@
+package main
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "strings"
+
+ "github.com/emirpasic/gods/trees/binaryheap"
+ "gopkg.in/src-d/go-git.v4"
+ . "gopkg.in/src-d/go-git.v4/_examples"
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/cache"
+ commitgraph_fmt "gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph"
+ "gopkg.in/src-d/go-git.v4/plumbing/object"
+ "gopkg.in/src-d/go-git.v4/plumbing/object/commitgraph"
+ "gopkg.in/src-d/go-git.v4/storage/filesystem"
+
+ "gopkg.in/src-d/go-billy.v4"
+ "gopkg.in/src-d/go-billy.v4/osfs"
+)
+
+// Example of how to list the files of a tree along with the last commit
+// that modified each of them
+func main() {
+ CheckArgs("<path>", "<revision>", "<tree path>")
+
+ path := os.Args[1]
+ revision := os.Args[2]
+ treePath := os.Args[3]
+
+ // We instantiate a new repository targeting the given path (the .git folder)
+ fs := osfs.New(path)
+ if _, err := fs.Stat(git.GitDirName); err == nil {
+ fs, err = fs.Chroot(git.GitDirName)
+ CheckIfError(err)
+ }
+
+ s := filesystem.NewStorageWithOptions(fs, cache.NewObjectLRUDefault(), filesystem.Options{KeepDescriptors: true})
+ r, err := git.Open(s, fs)
+ CheckIfError(err)
+ defer s.Close()
+
+ // Resolve the revision into a SHA-1 commit hash; only some revision
+ // syntaxes are supported, see the ResolveRevision documentation for details
+ Info("git rev-parse %s", revision)
+
+ h, err := r.ResolveRevision(plumbing.Revision(revision))
+ CheckIfError(err)
+
+ commit, err := r.CommitObject(*h)
+ CheckIfError(err)
+
+ tree, err := commit.Tree()
+ CheckIfError(err)
+ if treePath != "" {
+ tree, err = tree.Tree(treePath)
+ CheckIfError(err)
+ }
+
+ var paths []string
+ for _, entry := range tree.Entries {
+ paths = append(paths, entry.Name)
+ }
+
+ commitNodeIndex, file := getCommitNodeIndex(r, fs)
+ if file != nil {
+ defer file.Close()
+ }
+
+ commitNode, err := commitNodeIndex.Get(*h)
+ CheckIfError(err)
+
+ revs, err := getLastCommitForPaths(commitNode, treePath, paths)
+ CheckIfError(err)
+ for path, rev := range revs {
+ // Print one line per file (name hash message)
+ hash := rev.Hash.String()
+ line := strings.Split(rev.Message, "\n")
+ fmt.Println(path, hash[:7], line[0])
+ }
+}
+
+func getCommitNodeIndex(r *git.Repository, fs billy.Filesystem) (commitgraph.CommitNodeIndex, io.ReadCloser) {
+ file, err := fs.Open(path.Join("objects", "info", "commit-graph"))
+ if err == nil {
+ index, err := commitgraph_fmt.OpenFileIndex(file)
+ if err == nil {
+ return commitgraph.NewGraphCommitNodeIndex(index, r.Storer), file
+ }
+ file.Close()
+ }
+
+ return commitgraph.NewObjectCommitNodeIndex(r.Storer), nil
+}
+
+type commitAndPaths struct {
+ commit commitgraph.CommitNode
+ // Paths that are still on the branch represented by commit
+ paths []string
+ // Set of hashes for the paths
+ hashes map[string]plumbing.Hash
+}
+
+func getCommitTree(c commitgraph.CommitNode, treePath string) (*object.Tree, error) {
+ tree, err := c.Tree()
+ if err != nil {
+ return nil, err
+ }
+
+ // Optimize deep traversals by focusing only on the specific tree
+ if treePath != "" {
+ tree, err = tree.Tree(treePath)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return tree, nil
+}
+
+func getFullPath(treePath, path string) string {
+ if treePath != "" {
+ if path != "" {
+ return treePath + "/" + path
+ }
+ return treePath
+ }
+ return path
+}
+
+func getFileHashes(c commitgraph.CommitNode, treePath string, paths []string) (map[string]plumbing.Hash, error) {
+ tree, err := getCommitTree(c, treePath)
+ if err == object.ErrDirectoryNotFound {
+ // The whole tree didn't exist, so return an empty map
+ return make(map[string]plumbing.Hash), nil
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ hashes := make(map[string]plumbing.Hash)
+ for _, path := range paths {
+ if path != "" {
+ entry, err := tree.FindEntry(path)
+ if err == nil {
+ hashes[path] = entry.Hash
+ }
+ } else {
+ hashes[path] = tree.Hash
+ }
+ }
+
+ return hashes, nil
+}
+
+func getLastCommitForPaths(c commitgraph.CommitNode, treePath string, paths []string) (map[string]*object.Commit, error) {
+ // We do a tree traversal with nodes sorted by commit time
+ heap := binaryheap.NewWith(func(a, b interface{}) int {
+ if a.(*commitAndPaths).commit.CommitTime().Before(b.(*commitAndPaths).commit.CommitTime()) {
+ return 1
+ }
+ return -1
+ })
+
+ resultNodes := make(map[string]commitgraph.CommitNode)
+ initialHashes, err := getFileHashes(c, treePath, paths)
+ if err != nil {
+ return nil, err
+ }
+
+ // Start the search from the root commit with the full set of paths
+ heap.Push(&commitAndPaths{c, paths, initialHashes})
+
+ for {
+ cIn, ok := heap.Pop()
+ if !ok {
+ break
+ }
+ current := cIn.(*commitAndPaths)
+
+ // Load the parent commits for the one we are currently examining
+ numParents := current.commit.NumParents()
+ var parents []commitgraph.CommitNode
+ for i := 0; i < numParents; i++ {
+ parent, err := current.commit.ParentNode(i)
+ if err != nil {
+ break
+ }
+ parents = append(parents, parent)
+ }
+
+ // Examine the current commit and set of interesting paths
+ pathUnchanged := make([]bool, len(current.paths))
+ parentHashes := make([]map[string]plumbing.Hash, len(parents))
+ for j, parent := range parents {
+ parentHashes[j], err = getFileHashes(parent, treePath, current.paths)
+ if err != nil {
+ break
+ }
+
+ for i, path := range current.paths {
+ if parentHashes[j][path] == current.hashes[path] {
+ pathUnchanged[i] = true
+ }
+ }
+ }
+
+ var remainingPaths []string
+ for i, path := range current.paths {
+ // The results could already contain some newer change for the same path,
+ // so don't override that and bail out on the file early.
+ if resultNodes[path] == nil {
+ if pathUnchanged[i] {
+ // The path existed with the same hash in at least one parent so it could
+ // not have been changed in this commit directly.
+ remainingPaths = append(remainingPaths, path)
+ } else {
+ // There are a few possible ways we can get here:
+ // - The path didn't exist in any parent, so it must have been created by
+ // this commit.
+ // - The path did exist in the parent commit, but the hash of the file has
+ // changed.
+ // - We are looking at a merge commit and the hash of the file doesn't
+ // match any of the hashes being merged. This is more common for directories,
+ // but it can also happen if a file is changed through conflict resolution.
+ resultNodes[path] = current.commit
+ }
+ }
+ }
+
+ if len(remainingPaths) > 0 {
+ // Add the parent nodes along with remaining paths to the heap for further
+ // processing.
+ for j, parent := range parents {
+ // Combine remainingPaths with the paths available on the parent branch,
+ // and take the union of them
+ remainingPathsForParent := make([]string, 0, len(remainingPaths))
+ newRemainingPaths := make([]string, 0, len(remainingPaths))
+ for _, path := range remainingPaths {
+ if parentHashes[j][path] == current.hashes[path] {
+ remainingPathsForParent = append(remainingPathsForParent, path)
+ } else {
+ newRemainingPaths = append(newRemainingPaths, path)
+ }
+ }
+
+ if remainingPathsForParent != nil {
+ heap.Push(&commitAndPaths{parent, remainingPathsForParent, parentHashes[j]})
+ }
+
+ if len(newRemainingPaths) == 0 {
+ break
+ } else {
+ remainingPaths = newRemainingPaths
+ }
+ }
+ }
+ }
+
+ // Post-processing
+ result := make(map[string]*object.Commit)
+ for path, commitNode := range resultNodes {
+ var err error
+ result[path], err = commitNode.Commit()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return result, nil
+}
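
The CommitNodeIndex API used by this example can also be walked directly. Below is a minimal sketch of following the first-parent chain from a node, using only the methods the example itself exercises (Commit, CommitTime, NumParents, ParentNode); the helper name is hypothetical:

    package example

    import (
        "fmt"

        "gopkg.in/src-d/go-git.v4/plumbing/object/commitgraph"
    )

    // printFirstParentChain prints the hash and commit time of every commit
    // on the first-parent chain starting at node.
    func printFirstParentChain(node commitgraph.CommitNode) error {
        for {
            commit, err := node.Commit()
            if err != nil {
                return err
            }
            fmt.Println(commit.Hash, node.CommitTime())
            if node.NumParents() == 0 {
                return nil
            }
            if node, err = node.ParentNode(0); err != nil {
                return err
            }
        }
    }
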
diff --git a/config/branch.go b/config/branch.go
index e18073c..af61bbb 100644
--- a/config/branch.go
+++ b/config/branch.go
@@ -8,8 +8,9 @@ import (
)
var (
- errBranchEmptyName = errors.New("branch config: empty name")
- errBranchInvalidMerge = errors.New("branch config: invalid merge")
+ errBranchEmptyName = errors.New("branch config: empty name")
+ errBranchInvalidMerge = errors.New("branch config: invalid merge")
+ errBranchInvalidRebase = errors.New("branch config: rebase must be one of 'true' or 'interactive'")
)
// Branch contains information on the
@@ -21,6 +22,10 @@ type Branch struct {
Remote string
// Merge is the local refspec for the branch
Merge plumbing.ReferenceName
+ // Rebase instead of merge when pulling. Valid values are
+ // "true" and "interactive". "false" is undocumented and
+ // typically represented by the absence of this field
+ Rebase string
raw *format.Subsection
}
@@ -35,6 +40,13 @@ func (b *Branch) Validate() error {
return errBranchInvalidMerge
}
+ if b.Rebase != "" &&
+ b.Rebase != "true" &&
+ b.Rebase != "interactive" &&
+ b.Rebase != "false" {
+ return errBranchInvalidRebase
+ }
+
return nil
}
@@ -57,6 +69,12 @@ func (b *Branch) marshal() *format.Subsection {
b.raw.SetOption(mergeKey, string(b.Merge))
}
+ if b.Rebase == "" {
+ b.raw.RemoveOption(rebaseKey)
+ } else {
+ b.raw.SetOption(rebaseKey, string(b.Rebase))
+ }
+
return b.raw
}
@@ -66,6 +84,7 @@ func (b *Branch) unmarshal(s *format.Subsection) error {
b.Name = b.raw.Name
b.Remote = b.raw.Options.Get(remoteSection)
b.Merge = plumbing.ReferenceName(b.raw.Options.Get(mergeKey))
+ b.Rebase = b.raw.Options.Get(rebaseKey)
return b.Validate()
}
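
A minimal sketch of the new Rebase option in use, assuming an already-open *git.Repository whose CreateBranch validates the config the same way unmarshal does, so an invalid Rebase value surfaces as errBranchInvalidRebase (the branch names are hypothetical):

    package example

    import (
        git "gopkg.in/src-d/go-git.v4"
        "gopkg.in/src-d/go-git.v4/config"
        "gopkg.in/src-d/go-git.v4/plumbing"
    )

    // trackWithRebase records a tracking branch that pulls with rebase.
    func trackWithRebase(r *git.Repository) error {
        return r.CreateBranch(&config.Branch{
            Name:   "feature",
            Remote: "origin",
            Merge:  plumbing.ReferenceName("refs/heads/feature"),
            Rebase: "interactive", // or "true"; other non-empty values fail Validate
        })
    }
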
diff --git a/config/branch_test.go b/config/branch_test.go
index d74122e..2dbe888 100644
--- a/config/branch_test.go
+++ b/config/branch_test.go
@@ -44,6 +44,7 @@ func (b *BranchSuite) TestMarshall(c *C) {
[branch "branch-tracking-on-clone"]
remote = fork
merge = refs/heads/branch-tracking-on-clone
+ rebase = interactive
`)
cfg := NewConfig()
@@ -51,6 +52,7 @@ func (b *BranchSuite) TestMarshall(c *C) {
Name: "branch-tracking-on-clone",
Remote: "fork",
Merge: plumbing.ReferenceName("refs/heads/branch-tracking-on-clone"),
+ Rebase: "interactive",
}
actual, err := cfg.Marshal()
@@ -64,6 +66,7 @@ func (b *BranchSuite) TestUnmarshall(c *C) {
[branch "branch-tracking-on-clone"]
remote = fork
merge = refs/heads/branch-tracking-on-clone
+ rebase = interactive
`)
cfg := NewConfig()
@@ -73,4 +76,5 @@ func (b *BranchSuite) TestUnmarshall(c *C) {
c.Assert(branch.Name, Equals, "branch-tracking-on-clone")
c.Assert(branch.Remote, Equals, "fork")
c.Assert(branch.Merge, Equals, plumbing.ReferenceName("refs/heads/branch-tracking-on-clone"))
+ c.Assert(branch.Rebase, Equals, "interactive")
}
diff --git a/config/config.go b/config/config.go
index 2c3b8b9..ea614e9 100644
--- a/config/config.go
+++ b/config/config.go
@@ -120,6 +120,7 @@ const (
commentCharKey = "commentChar"
windowKey = "window"
mergeKey = "merge"
+ rebaseKey = "rebase"
// DefaultPackWindow holds the number of previous objects used to
// generate deltas. The value 10 is the same used by git command.
diff --git a/go.mod b/go.mod
index 36a1bed..60d4702 100644
--- a/go.mod
+++ b/go.mod
@@ -3,27 +3,28 @@ module gopkg.in/src-d/go-git.v4
require (
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 // indirect
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 // indirect
+ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5
github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/emirpasic/gods v1.9.0
+ github.com/emirpasic/gods v1.12.0
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
- github.com/gliderlabs/ssh v0.1.1
+ github.com/gliderlabs/ssh v0.1.3
github.com/google/go-cmp v0.2.0
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99
github.com/jessevdk/go-flags v1.4.0
github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e
- github.com/mitchellh/go-homedir v1.0.0
+ github.com/mitchellh/go-homedir v1.1.0
github.com/pelletier/go-buffruneio v0.2.0 // indirect
- github.com/pkg/errors v0.8.0 // indirect
- github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/pkg/errors v0.8.1 // indirect
github.com/sergi/go-diff v1.0.0
github.com/src-d/gcfg v1.4.0
- github.com/stretchr/testify v1.2.2 // indirect
- github.com/xanzy/ssh-agent v0.2.0
- golang.org/x/crypto v0.0.0-20180904163835-0709b304e793
- golang.org/x/net v0.0.0-20180906233101-161cd47e91fd // indirect
+ github.com/stretchr/testify v1.3.0 // indirect
+ github.com/xanzy/ssh-agent v0.2.1
+ golang.org/x/crypto v0.0.0-20190422183909-d864b10871cd
+ golang.org/x/net v0.0.0-20190502183928-7f726cade0ab
+ golang.org/x/sys v0.0.0-20190422165155-953cdadca894 // indirect
golang.org/x/text v0.3.0
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127
- gopkg.in/src-d/go-billy.v4 v4.2.1
- gopkg.in/src-d/go-git-fixtures.v3 v3.1.1
+ gopkg.in/src-d/go-billy.v4 v4.3.0
+ gopkg.in/src-d/go-git-fixtures.v3 v3.5.0
gopkg.in/warnings.v0 v0.1.2 // indirect
)
diff --git a/go.sum b/go.sum
index 98ba1d4..94a6142 100644
--- a/go.sum
+++ b/go.sum
@@ -2,14 +2,17 @@ github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBb
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/emirpasic/gods v1.9.0 h1:rUF4PuzEjMChMiNsVjdI+SyLu7rEqpQ5reNFnhC7oFo=
-github.com/emirpasic/gods v1.9.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
+github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=
+github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
-github.com/gliderlabs/ssh v0.1.1 h1:j3L6gSLQalDETeEg/Jg0mGY0/y/N6zI2xX1978P0Uqw=
-github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
+github.com/gliderlabs/ssh v0.1.3 h1:cBU46h1lYQk5f2Z+jZbewFKy+1zzE2aUX/ilcPDAm9M=
+github.com/gliderlabs/ssh v0.1.3/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
@@ -23,37 +26,46 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/pelletier/go-buffruneio v0.2.0 h1:U4t4R6YkofJ5xHm3dJzuRpPZ0mr5MMCoAWooScCR7aA=
github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
-github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
-github.com/src-d/gcfg v1.3.0 h1:2BEDr8r0I0b8h/fOqwtxCEiq2HJu8n2JGZJQFGXWLjg=
-github.com/src-d/gcfg v1.3.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4=
github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
-github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/xanzy/ssh-agent v0.2.0 h1:Adglfbi5p9Z0BmK2oKU9nTG+zKfniSfnaMYB+ULd+Ro=
-github.com/xanzy/ssh-agent v0.2.0/go.mod h1:0NyE30eGUDliuLEHJgYte/zncp2zdTStcOnWhgSqHD8=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70=
+github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=
+golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190422183909-d864b10871cd h1:sMHc2rZHuzQmrbVoSpt9HgerkXPyIeCSO6k0zUMGfFk=
+golang.org/x/crypto v0.0.0-20190422183909-d864b10871cd/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190420063019-afa5a82059c6 h1:HdqqaWmYAUI7/dmByKKEw+yxDksGSo+9GjkUc9Zp34E=
+golang.org/x/net v0.0.0-20190420063019-afa5a82059c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190502183928-7f726cade0ab h1:9RfW3ktsOZxgo9YNbBAjq1FWzc/igwEcUzZz8IXgSbk=
+golang.org/x/net v0.0.0-20190502183928-7f726cade0ab/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9 h1:lkiLiLBHGoH3XnqSLUIaBsilGMUjI+Uy2Xu2JLUtTas=
golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/src-d/go-billy.v4 v4.2.1 h1:omN5CrMrMcQ+4I8bJ0wEhOBPanIRWzFC953IiXKdYzo=
-gopkg.in/src-d/go-billy.v4 v4.2.1/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk=
-gopkg.in/src-d/go-git-fixtures.v3 v3.1.1 h1:XWW/s5W18RaJpmo1l0IYGqXKuJITWRFuA45iOf1dKJs=
-gopkg.in/src-d/go-git-fixtures.v3 v3.1.1/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
+gopkg.in/src-d/go-billy.v4 v4.3.0 h1:KtlZ4c1OWbIs4jCv5ZXrTqG8EQocr0g/d4DjNg70aek=
+gopkg.in/src-d/go-billy.v4 v4.3.0/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk=
+gopkg.in/src-d/go-git-fixtures.v3 v3.5.0 h1:ivZFOIltbce2Mo8IjzUHAFoq/IylO9WHhNOAJK+LsJg=
+gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
diff --git a/options.go b/options.go
index 7c9e687..a3b14fe 100644
--- a/options.go
+++ b/options.go
@@ -242,6 +242,11 @@ type CheckoutOptions struct {
// Force, if true when switching branches, proceed even if the index or the
// working tree differs from HEAD. This is used to throw away local changes
Force bool
+ // Keep, if true when switching branches, local changes (the index or the
+ // working tree changes) will be kept so that they can be committed to the
+ // target branch. Force and Keep are mutually exclusive, should not be both
+ // set to true.
+ Keep bool
}
// Validate validates the fields and sets the default values.
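
A minimal sketch of a checkout using the new Keep flag, assuming an already-open *git.Repository (the branch name is hypothetical):

    package example

    import (
        git "gopkg.in/src-d/go-git.v4"
        "gopkg.in/src-d/go-git.v4/plumbing"
    )

    // switchKeepingChanges switches branches while carrying the uncommitted
    // index and worktree changes over to the target branch.
    func switchKeepingChanges(r *git.Repository) error {
        w, err := r.Worktree()
        if err != nil {
            return err
        }
        return w.Checkout(&git.CheckoutOptions{
            Branch: plumbing.NewBranchReferenceName("feature"),
            Keep:   true,
        })
    }
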
diff --git a/plumbing/format/commitgraph/commitgraph.go b/plumbing/format/commitgraph/commitgraph.go
new file mode 100644
index 0000000..e43cd89
--- /dev/null
+++ b/plumbing/format/commitgraph/commitgraph.go
@@ -0,0 +1,35 @@
+package commitgraph
+
+import (
+ "time"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+)
+
+// CommitData is a reduced representation of Commit as presented in the commit graph
+// file. It is merely useful as an optimization for walking the commit graphs.
+type CommitData struct {
+ // TreeHash is the hash of the root tree of the commit.
+ TreeHash plumbing.Hash
+ // ParentIndexes are the indexes of the parent commits of the commit.
+ ParentIndexes []int
+ // ParentHashes are the hashes of the parent commits of the commit.
+ ParentHashes []plumbing.Hash
+ // Generation is the pre-computed generation number of the commit in
+ // the commit graph, or zero if not available
+ Generation int
+ // When is the timestamp of the commit.
+ When time.Time
+}
+
+// Index represents a commit graph that allows indexed access to its
+// nodes by commit object hash
+type Index interface {
+ // GetIndexByHash gets the index in the commit graph from commit hash, if available
+ GetIndexByHash(h plumbing.Hash) (int, error)
+ // GetCommitDataByIndex gets the commit data from the commit graph using
+ // the index obtained from a child node, if available
+ GetCommitDataByIndex(i int) (*CommitData, error)
+ // Hashes returns all the hashes that are available in the index
+ Hashes() []plumbing.Hash
+}
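
A minimal sketch of consuming this Index interface through the file-based implementation introduced below (OpenFileIndex is defined in file.go in this change; the path layout matches the one used by the ls example above):

    package example

    import (
        "fmt"
        "os"
        "path/filepath"

        "gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph"
    )

    // dumpCommitGraph prints every commit recorded in a commit-graph file.
    func dumpCommitGraph(gitDir string) error {
        f, err := os.Open(filepath.Join(gitDir, "objects", "info", "commit-graph"))
        if err != nil {
            return err
        }
        defer f.Close()

        index, err := commitgraph.OpenFileIndex(f)
        if err != nil {
            return err
        }
        for _, h := range index.Hashes() {
            i, err := index.GetIndexByHash(h)
            if err != nil {
                return err
            }
            data, err := index.GetCommitDataByIndex(i)
            if err != nil {
                return err
            }
            fmt.Println(h, data.TreeHash, data.When, data.ParentHashes)
        }
        return nil
    }
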
diff --git a/plumbing/format/commitgraph/commitgraph_test.go b/plumbing/format/commitgraph/commitgraph_test.go
new file mode 100644
index 0000000..0214f49
--- /dev/null
+++ b/plumbing/format/commitgraph/commitgraph_test.go
@@ -0,0 +1,132 @@
+package commitgraph_test
+
+import (
+ "io/ioutil"
+ "os"
+ "path"
+ "testing"
+
+ . "gopkg.in/check.v1"
+ fixtures "gopkg.in/src-d/go-git-fixtures.v3"
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type CommitgraphSuite struct {
+ fixtures.Suite
+}
+
+var _ = Suite(&CommitgraphSuite{})
+
+func testDecodeHelper(c *C, path string) {
+ reader, err := os.Open(path)
+ c.Assert(err, IsNil)
+ defer reader.Close()
+ index, err := commitgraph.OpenFileIndex(reader)
+ c.Assert(err, IsNil)
+
+ // Root commit
+ nodeIndex, err := index.GetIndexByHash(plumbing.NewHash("347c91919944a68e9413581a1bc15519550a3afe"))
+ c.Assert(err, IsNil)
+ commitData, err := index.GetCommitDataByIndex(nodeIndex)
+ c.Assert(err, IsNil)
+ c.Assert(len(commitData.ParentIndexes), Equals, 0)
+ c.Assert(len(commitData.ParentHashes), Equals, 0)
+
+ // Regular commit
+ nodeIndex, err = index.GetIndexByHash(plumbing.NewHash("e713b52d7e13807e87a002e812041f248db3f643"))
+ c.Assert(err, IsNil)
+ commitData, err = index.GetCommitDataByIndex(nodeIndex)
+ c.Assert(err, IsNil)
+ c.Assert(len(commitData.ParentIndexes), Equals, 1)
+ c.Assert(len(commitData.ParentHashes), Equals, 1)
+ c.Assert(commitData.ParentHashes[0].String(), Equals, "347c91919944a68e9413581a1bc15519550a3afe")
+
+ // Merge commit
+ nodeIndex, err = index.GetIndexByHash(plumbing.NewHash("b29328491a0682c259bcce28741eac71f3499f7d"))
+ c.Assert(err, IsNil)
+ commitData, err = index.GetCommitDataByIndex(nodeIndex)
+ c.Assert(err, IsNil)
+ c.Assert(len(commitData.ParentIndexes), Equals, 2)
+ c.Assert(len(commitData.ParentHashes), Equals, 2)
+ c.Assert(commitData.ParentHashes[0].String(), Equals, "e713b52d7e13807e87a002e812041f248db3f643")
+ c.Assert(commitData.ParentHashes[1].String(), Equals, "03d2c021ff68954cf3ef0a36825e194a4b98f981")
+
+ // Octopus merge commit
+ nodeIndex, err = index.GetIndexByHash(plumbing.NewHash("6f6c5d2be7852c782be1dd13e36496dd7ad39560"))
+ c.Assert(err, IsNil)
+ commitData, err = index.GetCommitDataByIndex(nodeIndex)
+ c.Assert(err, IsNil)
+ c.Assert(len(commitData.ParentIndexes), Equals, 3)
+ c.Assert(len(commitData.ParentHashes), Equals, 3)
+ c.Assert(commitData.ParentHashes[0].String(), Equals, "ce275064ad67d51e99f026084e20827901a8361c")
+ c.Assert(commitData.ParentHashes[1].String(), Equals, "bb13916df33ed23004c3ce9ed3b8487528e655c1")
+ c.Assert(commitData.ParentHashes[2].String(), Equals, "a45273fe2d63300e1962a9e26a6b15c276cd7082")
+
+ // Check all hashes
+ hashes := index.Hashes()
+ c.Assert(len(hashes), Equals, 11)
+ c.Assert(hashes[0].String(), Equals, "03d2c021ff68954cf3ef0a36825e194a4b98f981")
+ c.Assert(hashes[10].String(), Equals, "e713b52d7e13807e87a002e812041f248db3f643")
+}
+
+func (s *CommitgraphSuite) TestDecode(c *C) {
+ fixtures.ByTag("commit-graph").Test(c, func(f *fixtures.Fixture) {
+ dotgit := f.DotGit()
+ testDecodeHelper(c, path.Join(dotgit.Root(), "objects", "info", "commit-graph"))
+ })
+}
+
+func (s *CommitgraphSuite) TestReencode(c *C) {
+ fixtures.ByTag("commit-graph").Test(c, func(f *fixtures.Fixture) {
+ dotgit := f.DotGit()
+
+ reader, err := os.Open(path.Join(dotgit.Root(), "objects", "info", "commit-graph"))
+ c.Assert(err, IsNil)
+ defer reader.Close()
+ index, err := commitgraph.OpenFileIndex(reader)
+ c.Assert(err, IsNil)
+
+ writer, err := ioutil.TempFile(dotgit.Root(), "commit-graph")
+ c.Assert(err, IsNil)
+ tmpName := writer.Name()
+ defer os.Remove(tmpName)
+ encoder := commitgraph.NewEncoder(writer)
+ err = encoder.Encode(index)
+ c.Assert(err, IsNil)
+ writer.Close()
+
+ testDecodeHelper(c, tmpName)
+ })
+}
+
+func (s *CommitgraphSuite) TestReencodeInMemory(c *C) {
+ fixtures.ByTag("commit-graph").Test(c, func(f *fixtures.Fixture) {
+ dotgit := f.DotGit()
+
+ reader, err := os.Open(path.Join(dotgit.Root(), "objects", "info", "commit-graph"))
+ c.Assert(err, IsNil)
+ index, err := commitgraph.OpenFileIndex(reader)
+ c.Assert(err, IsNil)
+ memoryIndex := commitgraph.NewMemoryIndex()
+ for i, hash := range index.Hashes() {
+ commitData, err := index.GetCommitDataByIndex(i)
+ c.Assert(err, IsNil)
+ memoryIndex.Add(hash, commitData)
+ }
+ reader.Close()
+
+ writer, err := ioutil.TempFile(dotgit.Root(), "commit-graph")
+ c.Assert(err, IsNil)
+ tmpName := writer.Name()
+ defer os.Remove(tmpName)
+ encoder := commitgraph.NewEncoder(writer)
+ err = encoder.Encode(memoryIndex)
+ c.Assert(err, IsNil)
+ writer.Close()
+
+ testDecodeHelper(c, tmpName)
+ })
+}
diff --git a/plumbing/format/commitgraph/doc.go b/plumbing/format/commitgraph/doc.go
new file mode 100644
index 0000000..41cd8b1
--- /dev/null
+++ b/plumbing/format/commitgraph/doc.go
@@ -0,0 +1,103 @@
+// Package commitgraph implements encoding and decoding of commit-graph files.
+//
+// Git commit graph format
+// =======================
+//
+// The Git commit graph stores a list of commit OIDs and some associated
+// metadata, including:
+//
+// - The generation number of the commit. Commits with no parents have
+// generation number 1; commits with parents have generation number
+// one more than the maximum generation number of its parents. We
+// reserve zero as special, and can be used to mark a generation
+// number invalid or as "not computed".
+//
+// - The root tree OID.
+//
+// - The commit date.
+//
+// - The parents of the commit, stored using positional references within
+// the graph file.
+//
+// These positional references are stored as unsigned 32-bit integers
+// corresponding to the array position within the list of commit OIDs. Due
+// to some special constants we use to track parents, we can store at most
+// (1 << 30) + (1 << 29) + (1 << 28) - 1 (around 1.8 billion) commits.
+//
+// == Commit graph files have the following format:
+//
+// In order to allow extensions that add extra data to the graph, we organize
+// the body into "chunks" and provide a binary lookup table at the beginning
+// of the body. The header includes certain values, such as number of chunks
+// and hash type.
+//
+// All 4-byte numbers are in network order.
+//
+// HEADER:
+//
+// 4-byte signature:
+// The signature is: {'C', 'G', 'P', 'H'}
+//
+// 1-byte version number:
+// Currently, the only valid version is 1.
+//
+// 1-byte Hash Version (1 = SHA-1)
+// We infer the hash length (H) from this value.
+//
+// 1-byte number (C) of "chunks"
+//
+// 1-byte (reserved for later use)
+// Current clients should ignore this value.
+//
+// CHUNK LOOKUP:
+//
+// (C + 1) * 12 bytes listing the table of contents for the chunks:
+// First 4 bytes describe the chunk id. Value 0 is a terminating label.
+// Other 8 bytes provide the byte-offset in current file for chunk to
+// start. (Chunks are ordered contiguously in the file, so you can infer
+// the length using the next chunk position if necessary.) Each chunk
+// ID appears at most once.
+//
+// The remaining data in the body is described one chunk at a time, and
+// these chunks may be given in any order. Chunks are required unless
+// otherwise specified.
+//
+// CHUNK DATA:
+//
+// OID Fanout (ID: {'O', 'I', 'D', 'F'}) (256 * 4 bytes)
+// The ith entry, F[i], stores the number of OIDs with first
+// byte at most i. Thus F[255] stores the total
+// number of commits (N).
+//
+// OID Lookup (ID: {'O', 'I', 'D', 'L'}) (N * H bytes)
+// The OIDs for all commits in the graph, sorted in ascending order.
+//
+// Commit Data (ID: {'C', 'D', 'A', 'T' }) (N * (H + 16) bytes)
+// * The first H bytes are for the OID of the root tree.
+// * The next 8 bytes are for the positions of the first two parents
+// of the ith commit. Stores value 0x7000000 if no parent in that
+// position. If there are more than two parents, the second value
+// has its most-significant bit on and the other bits store an array
+// position into the Extra Edge List chunk.
+// * The next 8 bytes store the generation number of the commit and
+// the commit time in seconds since EPOCH. The generation number
+// uses the higher 30 bits of the first 4 bytes, while the commit
+// time uses the 32 bits of the second 4 bytes, along with the lowest
+// 2 bits of the lowest byte, storing the 33rd and 34th bit of the
+// commit time.
+//
+// Extra Edge List (ID: {'E', 'D', 'G', 'E'}) [Optional]
+// This list of 4-byte values store the second through nth parents for
+// all octopus merges. The second parent value in the commit data stores
+// an array position within this list along with the most-significant bit
+// on. Starting at that array position, iterate through this list of commit
+// positions for the parents until reaching a value with the most-significant
+// bit on. The other bits correspond to the position of the last parent.
+//
+// TRAILER:
+//
+// H-byte HASH-checksum of all of the above.
+//
+// Source:
+// https://raw.githubusercontent.com/git/git/master/Documentation/technical/commit-graph-format.txt
+package commitgraph
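
To make the layout above concrete, here is a sketch that checks just the 8-byte header (signature, version, hash version, chunk count); the full decoder in file.go below does this and more:

    package example

    import (
        "bytes"
        "errors"
        "io"
    )

    // readHeader validates the fixed commit-graph header and returns the
    // number of chunks declared in it.
    func readHeader(r io.ReaderAt) (chunkCount int, err error) {
        var header [8]byte
        if _, err = r.ReadAt(header[:], 0); err != nil {
            return 0, err
        }
        if !bytes.Equal(header[:4], []byte("CGPH")) {
            return 0, errors.New("not a commit-graph file")
        }
        if header[4] != 1 { // version
            return 0, errors.New("unsupported version")
        }
        if header[5] != 1 { // hash version, 1 = SHA-1
            return 0, errors.New("unsupported hash algorithm")
        }
        return int(header[6]), nil // header[7] is reserved
    }
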
diff --git a/plumbing/format/commitgraph/encoder.go b/plumbing/format/commitgraph/encoder.go
new file mode 100644
index 0000000..a06871c
--- /dev/null
+++ b/plumbing/format/commitgraph/encoder.go
@@ -0,0 +1,190 @@
+package commitgraph
+
+import (
+ "crypto/sha1"
+ "hash"
+ "io"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/utils/binary"
+)
+
+// Encoder writes MemoryIndex structs to an output stream.
+type Encoder struct {
+ io.Writer
+ hash hash.Hash
+}
+
+// NewEncoder returns a new stream encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+ h := sha1.New()
+ mw := io.MultiWriter(w, h)
+ return &Encoder{mw, h}
+}
+
+// Encode writes an index into the commit-graph file
+func (e *Encoder) Encode(idx Index) error {
+ var err error
+
+ // Get all the hashes in the input index
+ hashes := idx.Hashes()
+
+ // Sort the input and prepare the helper structures we'll need for encoding
+ hashToIndex, fanout, extraEdgesCount := e.prepare(idx, hashes)
+
+ chunkSignatures := [][]byte{oidFanoutSignature, oidLookupSignature, commitDataSignature}
+ chunkSizes := []uint64{4 * 256, uint64(len(hashes)) * 20, uint64(len(hashes)) * 36}
+ if extraEdgesCount > 0 {
+ chunkSignatures = append(chunkSignatures, extraEdgeListSignature)
+ chunkSizes = append(chunkSizes, uint64(extraEdgesCount)*4)
+ }
+
+ if err = e.encodeFileHeader(len(chunkSignatures)); err != nil {
+ return err
+ }
+ if err = e.encodeChunkHeaders(chunkSignatures, chunkSizes); err != nil {
+ return err
+ }
+ if err = e.encodeFanout(fanout); err != nil {
+ return err
+ }
+ if err = e.encodeOidLookup(hashes); err != nil {
+ return err
+ }
+ extraEdges, err := e.encodeCommitData(hashes, hashToIndex, idx)
+ if err != nil {
+ return err
+ }
+ if err = e.encodeExtraEdges(extraEdges); err != nil {
+ return err
+ }
+ return e.encodeChecksum()
+}
+
+func (e *Encoder) prepare(idx Index, hashes []plumbing.Hash) (hashToIndex map[plumbing.Hash]uint32, fanout []uint32, extraEdgesCount uint32) {
+ // Sort the hashes and build our index
+ plumbing.HashesSort(hashes)
+ hashToIndex = make(map[plumbing.Hash]uint32)
+ fanout = make([]uint32, 256)
+ for i, hash := range hashes {
+ hashToIndex[hash] = uint32(i)
+ fanout[hash[0]]++
+ }
+
+ // Convert the fanout to cumulative values
+ for i := 1; i <= 0xff; i++ {
+ fanout[i] += fanout[i-1]
+ }
+
+ // Find out if we will need the extra edge table. Each commit with more
+ // than two parents contributes one entry per parent after the first.
+ for i := 0; i < len(hashes); i++ {
+ v, _ := idx.GetCommitDataByIndex(i)
+ if len(v.ParentHashes) > 2 {
+ extraEdgesCount += uint32(len(v.ParentHashes) - 1)
+ }
+ }
+
+ return
+}
+
+func (e *Encoder) encodeFileHeader(chunkCount int) (err error) {
+ if _, err = e.Write(commitFileSignature); err == nil {
+ _, err = e.Write([]byte{1, 1, byte(chunkCount), 0})
+ }
+ return
+}
+
+func (e *Encoder) encodeChunkHeaders(chunkSignatures [][]byte, chunkSizes []uint64) (err error) {
+ // 8 bytes of file header, 12 bytes for each chunk header and 12 bytes for the terminator
+ offset := uint64(8 + len(chunkSignatures)*12 + 12)
+ for i, signature := range chunkSignatures {
+ if _, err = e.Write(signature); err == nil {
+ err = binary.WriteUint64(e, offset)
+ }
+ if err != nil {
+ return
+ }
+ offset += chunkSizes[i]
+ }
+ if _, err = e.Write(lastSignature); err == nil {
+ err = binary.WriteUint64(e, offset)
+ }
+ return
+}
+
+func (e *Encoder) encodeFanout(fanout []uint32) (err error) {
+ for i := 0; i <= 0xff; i++ {
+ if err = binary.WriteUint32(e, fanout[i]); err != nil {
+ return
+ }
+ }
+ return
+}
+
+func (e *Encoder) encodeOidLookup(hashes []plumbing.Hash) (err error) {
+ for _, hash := range hashes {
+ if _, err = e.Write(hash[:]); err != nil {
+ return err
+ }
+ }
+ return
+}
+
+func (e *Encoder) encodeCommitData(hashes []plumbing.Hash, hashToIndex map[plumbing.Hash]uint32, idx Index) (extraEdges []uint32, err error) {
+ for _, hash := range hashes {
+ origIndex, _ := idx.GetIndexByHash(hash)
+ commitData, _ := idx.GetCommitDataByIndex(origIndex)
+ if _, err = e.Write(commitData.TreeHash[:]); err != nil {
+ return
+ }
+
+ var parent1, parent2 uint32
+ if len(commitData.ParentHashes) == 0 {
+ parent1 = parentNone
+ parent2 = parentNone
+ } else if len(commitData.ParentHashes) == 1 {
+ parent1 = hashToIndex[commitData.ParentHashes[0]]
+ parent2 = parentNone
+ } else if len(commitData.ParentHashes) == 2 {
+ parent1 = hashToIndex[commitData.ParentHashes[0]]
+ parent2 = hashToIndex[commitData.ParentHashes[1]]
+ } else if len(commitData.ParentHashes) > 2 {
+ parent1 = hashToIndex[commitData.ParentHashes[0]]
+ parent2 = uint32(len(extraEdges)) | parentOctopusUsed
+ for _, parentHash := range commitData.ParentHashes[1:] {
+ extraEdges = append(extraEdges, hashToIndex[parentHash])
+ }
+ extraEdges[len(extraEdges)-1] |= parentLast
+ }
+
+ if err = binary.WriteUint32(e, parent1); err == nil {
+ err = binary.WriteUint32(e, parent2)
+ }
+ if err != nil {
+ return
+ }
+
+ unixTime := uint64(commitData.When.Unix())
+ unixTime |= uint64(commitData.Generation) << 34
+ if err = binary.WriteUint64(e, unixTime); err != nil {
+ return
+ }
+ }
+ return
+}
+
+func (e *Encoder) encodeExtraEdges(extraEdges []uint32) (err error) {
+ for _, parent := range extraEdges {
+ if err = binary.WriteUint32(e, parent); err != nil {
+ return
+ }
+ }
+ return
+}
+
+func (e *Encoder) encodeChecksum() error {
+ _, err := e.Write(e.hash.Sum(nil)[:20])
+ return err
+}
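
The 8-byte word written at the end of encodeCommitData packs the commit time into the low 34 bits and the generation number into the bits above, matching the decode in file.go below. A round-trip sketch:

    package example

    import "time"

    // packGenAndTime packs a generation number and commit time the way the
    // encoder does: low 34 bits for the Unix time, the rest for generation.
    func packGenAndTime(generation int, when time.Time) uint64 {
        return uint64(when.Unix()) | uint64(generation)<<34
    }

    // unpackGenAndTime reverses the packing, mirroring the decoder.
    func unpackGenAndTime(v uint64) (generation int, when time.Time) {
        return int(v >> 34), time.Unix(int64(v&0x3FFFFFFFF), 0)
    }
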
diff --git a/plumbing/format/commitgraph/file.go b/plumbing/format/commitgraph/file.go
new file mode 100644
index 0000000..175d279
--- /dev/null
+++ b/plumbing/format/commitgraph/file.go
@@ -0,0 +1,259 @@
+package commitgraph
+
+import (
+ "bytes"
+ encbin "encoding/binary"
+ "errors"
+ "io"
+ "time"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/utils/binary"
+)
+
+var (
+ // ErrUnsupportedVersion is returned by OpenFileIndex when the commit graph
+ // file version is not supported.
+ ErrUnsupportedVersion = errors.New("Unsupported version")
+ // ErrUnsupportedHash is returned by OpenFileIndex when the commit graph
+ // hash function is not supported. Currently only SHA-1 is defined and
+ // supported
+ ErrUnsupportedHash = errors.New("Unsupported hash algorithm")
+ // ErrMalformedCommitGraphFile is returned by OpenFileIndex when the commit
+ // graph file is corrupted.
+ ErrMalformedCommitGraphFile = errors.New("Malformed commit graph file")
+
+ commitFileSignature = []byte{'C', 'G', 'P', 'H'}
+ oidFanoutSignature = []byte{'O', 'I', 'D', 'F'}
+ oidLookupSignature = []byte{'O', 'I', 'D', 'L'}
+ commitDataSignature = []byte{'C', 'D', 'A', 'T'}
+ extraEdgeListSignature = []byte{'E', 'D', 'G', 'E'}
+ lastSignature = []byte{0, 0, 0, 0}
+
+ parentNone = uint32(0x70000000)
+ parentOctopusUsed = uint32(0x80000000)
+ parentOctopusMask = uint32(0x7fffffff)
+ parentLast = uint32(0x80000000)
+)
+
+type fileIndex struct {
+ reader io.ReaderAt
+ fanout [256]int
+ oidFanoutOffset int64
+ oidLookupOffset int64
+ commitDataOffset int64
+ extraEdgeListOffset int64
+}
+
+// OpenFileIndex opens a serialized commit graph file in the format described at
+// https://github.com/git/git/blob/master/Documentation/technical/commit-graph-format.txt
+func OpenFileIndex(reader io.ReaderAt) (Index, error) {
+ fi := &fileIndex{reader: reader}
+
+ if err := fi.verifyFileHeader(); err != nil {
+ return nil, err
+ }
+ if err := fi.readChunkHeaders(); err != nil {
+ return nil, err
+ }
+ if err := fi.readFanout(); err != nil {
+ return nil, err
+ }
+
+ return fi, nil
+}
+
+func (fi *fileIndex) verifyFileHeader() error {
+ // Verify file signature
+ var signature = make([]byte, 4)
+ if _, err := fi.reader.ReadAt(signature, 0); err != nil {
+ return err
+ }
+ if !bytes.Equal(signature, commitFileSignature) {
+ return ErrMalformedCommitGraphFile
+ }
+
+ // Read and verify the file header
+ var header = make([]byte, 4)
+ if _, err := fi.reader.ReadAt(header, 4); err != nil {
+ return err
+ }
+ if header[0] != 1 {
+ return ErrUnsupportedVersion
+ }
+ if header[1] != 1 {
+ return ErrUnsupportedHash
+ }
+
+ return nil
+}
+
+func (fi *fileIndex) readChunkHeaders() error {
+ var chunkID = make([]byte, 4)
+ for i := 0; ; i++ {
+ chunkHeader := io.NewSectionReader(fi.reader, 8+(int64(i)*12), 12)
+ if _, err := io.ReadAtLeast(chunkHeader, chunkID, 4); err != nil {
+ return err
+ }
+ chunkOffset, err := binary.ReadUint64(chunkHeader)
+ if err != nil {
+ return err
+ }
+
+ if bytes.Equal(chunkID, oidFanoutSignature) {
+ fi.oidFanoutOffset = int64(chunkOffset)
+ } else if bytes.Equal(chunkID, oidLookupSignature) {
+ fi.oidLookupOffset = int64(chunkOffset)
+ } else if bytes.Equal(chunkID, commitDataSignature) {
+ fi.commitDataOffset = int64(chunkOffset)
+ } else if bytes.Equal(chunkID, extraEdgeListSignature) {
+ fi.extraEdgeListOffset = int64(chunkOffset)
+ } else if bytes.Equal(chunkID, lastSignature) {
+ break
+ }
+ }
+
+ if fi.oidFanoutOffset <= 0 || fi.oidLookupOffset <= 0 || fi.commitDataOffset <= 0 {
+ return ErrMalformedCommitGraphFile
+ }
+
+ return nil
+}
+
+func (fi *fileIndex) readFanout() error {
+ fanoutReader := io.NewSectionReader(fi.reader, fi.oidFanoutOffset, 256*4)
+ for i := 0; i < 256; i++ {
+ fanoutValue, err := binary.ReadUint32(fanoutReader)
+ if err != nil {
+ return err
+ }
+ if fanoutValue > 0x7fffffff {
+ return ErrMalformedCommitGraphFile
+ }
+ fi.fanout[i] = int(fanoutValue)
+ }
+ return nil
+}
+
+func (fi *fileIndex) GetIndexByHash(h plumbing.Hash) (int, error) {
+ var oid plumbing.Hash
+
+ // Find the hash in the oid lookup table
+ var low int
+ if h[0] == 0 {
+ low = 0
+ } else {
+ low = fi.fanout[h[0]-1]
+ }
+ high := fi.fanout[h[0]]
+ for low < high {
+ mid := (low + high) >> 1
+ offset := fi.oidLookupOffset + int64(mid)*20
+ if _, err := fi.reader.ReadAt(oid[:], offset); err != nil {
+ return 0, err
+ }
+ cmp := bytes.Compare(h[:], oid[:])
+ if cmp < 0 {
+ high = mid
+ } else if cmp == 0 {
+ return mid, nil
+ } else {
+ low = mid + 1
+ }
+ }
+
+ return 0, plumbing.ErrObjectNotFound
+}
+
+func (fi *fileIndex) GetCommitDataByIndex(idx int) (*CommitData, error) {
+ if idx >= fi.fanout[0xff] {
+ return nil, plumbing.ErrObjectNotFound
+ }
+
+ offset := fi.commitDataOffset + int64(idx)*36
+ commitDataReader := io.NewSectionReader(fi.reader, offset, 36)
+
+ treeHash, err := binary.ReadHash(commitDataReader)
+ if err != nil {
+ return nil, err
+ }
+ parent1, err := binary.ReadUint32(commitDataReader)
+ if err != nil {
+ return nil, err
+ }
+ parent2, err := binary.ReadUint32(commitDataReader)
+ if err != nil {
+ return nil, err
+ }
+ genAndTime, err := binary.ReadUint64(commitDataReader)
+ if err != nil {
+ return nil, err
+ }
+
+ var parentIndexes []int
+ if parent2&parentOctopusUsed == parentOctopusUsed {
+ // Octopus merge
+ parentIndexes = []int{int(parent1 & parentOctopusMask)}
+ offset := fi.extraEdgeListOffset + 4*int64(parent2&parentOctopusMask)
+ buf := make([]byte, 4)
+ for {
+ _, err := fi.reader.ReadAt(buf, offset)
+ if err != nil {
+ return nil, err
+ }
+
+ parent := encbin.BigEndian.Uint32(buf)
+ offset += 4
+ parentIndexes = append(parentIndexes, int(parent&parentOctopusMask))
+ if parent&parentLast == parentLast {
+ break
+ }
+ }
+ } else if parent2 != parentNone {
+ parentIndexes = []int{int(parent1 & parentOctopusMask), int(parent2 & parentOctopusMask)}
+ } else if parent1 != parentNone {
+ parentIndexes = []int{int(parent1 & parentOctopusMask)}
+ }
+
+ parentHashes, err := fi.getHashesFromIndexes(parentIndexes)
+ if err != nil {
+ return nil, err
+ }
+
+ return &CommitData{
+ TreeHash: treeHash,
+ ParentIndexes: parentIndexes,
+ ParentHashes: parentHashes,
+ Generation: int(genAndTime >> 34),
+ When: time.Unix(int64(genAndTime&0x3FFFFFFFF), 0),
+ }, nil
+}
+
+func (fi *fileIndex) getHashesFromIndexes(indexes []int) ([]plumbing.Hash, error) {
+ hashes := make([]plumbing.Hash, len(indexes))
+
+ for i, idx := range indexes {
+ if idx >= fi.fanout[0xff] {
+ return nil, ErrMalformedCommitGraphFile
+ }
+
+ offset := fi.oidLookupOffset + int64(idx)*20
+ if _, err := fi.reader.ReadAt(hashes[i][:], offset); err != nil {
+ return nil, err
+ }
+ }
+
+ return hashes, nil
+}
+
+// Hashes returns all the hashes that are available in the index
+func (fi *fileIndex) Hashes() []plumbing.Hash {
+ hashes := make([]plumbing.Hash, fi.fanout[0xff])
+ for i := 0; i < int(fi.fanout[0xff]); i++ {
+ offset := fi.oidLookupOffset + int64(i)*20
+ if n, err := fi.reader.ReadAt(hashes[i][:], offset); err != nil || n < 20 {
+ return nil
+ }
+ }
+ return hashes
+}
diff --git a/plumbing/format/commitgraph/memory.go b/plumbing/format/commitgraph/memory.go
new file mode 100644
index 0000000..a4a96e9
--- /dev/null
+++ b/plumbing/format/commitgraph/memory.go
@@ -0,0 +1,72 @@
+package commitgraph
+
+import (
+ "gopkg.in/src-d/go-git.v4/plumbing"
+)
+
+// MemoryIndex provides a way to build the commit-graph in memory
+// for later encoding to file.
+type MemoryIndex struct {
+ commitData []*CommitData
+ indexMap map[plumbing.Hash]int
+}
+
+// NewMemoryIndex creates in-memory commit graph representation
+func NewMemoryIndex() *MemoryIndex {
+ return &MemoryIndex{
+ indexMap: make(map[plumbing.Hash]int),
+ }
+}
+
+// GetIndexByHash gets the index in the commit graph from commit hash, if available
+func (mi *MemoryIndex) GetIndexByHash(h plumbing.Hash) (int, error) {
+ i, ok := mi.indexMap[h]
+ if ok {
+ return i, nil
+ }
+
+ return 0, plumbing.ErrObjectNotFound
+}
+
+// GetCommitDataByIndex gets the commit node from the commit graph using index
+// obtained from child node, if available
+func (mi *MemoryIndex) GetCommitDataByIndex(i int) (*CommitData, error) {
+ if int(i) >= len(mi.commitData) {
+ return nil, plumbing.ErrObjectNotFound
+ }
+
+ commitData := mi.commitData[i]
+
+ // Map parent hashes to parent indexes
+ if commitData.ParentIndexes == nil {
+ parentIndexes := make([]int, len(commitData.ParentHashes))
+ for i, parentHash := range commitData.ParentHashes {
+ var err error
+ if parentIndexes[i], err = mi.GetIndexByHash(parentHash); err != nil {
+ return nil, err
+ }
+ }
+ commitData.ParentIndexes = parentIndexes
+ }
+
+ return commitData, nil
+}
+
+// Hashes returns all the hashes that are available in the index
+func (mi *MemoryIndex) Hashes() []plumbing.Hash {
+ hashes := make([]plumbing.Hash, 0, len(mi.indexMap))
+ for k := range mi.indexMap {
+ hashes = append(hashes, k)
+ }
+ return hashes
+}
+
+// Add adds new node to the memory index
+func (mi *MemoryIndex) Add(hash plumbing.Hash, commitData *CommitData) {
+ // The parent indexes are calculated lazily in GetCommitDataByIndex
+ // which allows adding nodes out of order as long as all parents
+ // are eventually resolved
+ commitData.ParentIndexes = nil
+ mi.indexMap[hash] = len(mi.commitData)
+ mi.commitData = append(mi.commitData, commitData)
+}
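
Putting MemoryIndex and the Encoder together, a minimal sketch of writing a commit-graph file from pre-collected commit data; because parent indexes are resolved lazily, commits can be added in any order:

    package example

    import (
        "io"

        "gopkg.in/src-d/go-git.v4/plumbing"
        "gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph"
    )

    // writeCommitGraph builds an in-memory index and encodes it to w.
    func writeCommitGraph(w io.Writer, commits map[plumbing.Hash]*commitgraph.CommitData) error {
        memoryIndex := commitgraph.NewMemoryIndex()
        for hash, data := range commits {
            memoryIndex.Add(hash, data)
        }
        return commitgraph.NewEncoder(w).Encode(memoryIndex)
    }
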
diff --git a/plumbing/format/gitattributes/attributes.go b/plumbing/format/gitattributes/attributes.go
new file mode 100644
index 0000000..d13c2a9
--- /dev/null
+++ b/plumbing/format/gitattributes/attributes.go
@@ -0,0 +1,214 @@
+package gitattributes
+
+import (
+ "errors"
+ "io"
+ "io/ioutil"
+ "strings"
+)
+
+const (
+ commentPrefix = "#"
+ eol = "\n"
+ macroPrefix = "[attr]"
+)
+
+var (
+ ErrMacroNotAllowed = errors.New("macro not allowed")
+ ErrInvalidAttributeName = errors.New("Invalid attribute name")
+)
+
+type MatchAttribute struct {
+ Name string
+ Pattern Pattern
+ Attributes []Attribute
+}
+
+type attributeState byte
+
+const (
+ attributeUnknown attributeState = 0
+ attributeSet attributeState = 1
+ attributeUnspecified attributeState = '!'
+ attributeUnset attributeState = '-'
+ attributeSetValue attributeState = '='
+)
+
+type Attribute interface {
+ Name() string
+ IsSet() bool
+ IsUnset() bool
+ IsUnspecified() bool
+ IsValueSet() bool
+ Value() string
+ String() string
+}
+
+type attribute struct {
+ name string
+ state attributeState
+ value string
+}
+
+func (a attribute) Name() string {
+ return a.name
+}
+
+func (a attribute) IsSet() bool {
+ return a.state == attributeSet
+}
+
+func (a attribute) IsUnset() bool {
+ return a.state == attributeUnset
+}
+
+func (a attribute) IsUnspecified() bool {
+ return a.state == attributeUnspecified
+}
+
+func (a attribute) IsValueSet() bool {
+ return a.state == attributeSetValue
+}
+
+func (a attribute) Value() string {
+ return a.value
+}
+
+func (a attribute) String() string {
+ switch a.state {
+ case attributeSet:
+ return a.name + ": set"
+ case attributeUnset:
+ return a.name + ": unset"
+ case attributeUnspecified:
+ return a.name + ": unspecified"
+ default:
+ return a.name + ": " + a.value
+ }
+}
+
+// ReadAttributes reads patterns and attributes from the gitattributes format.
+func ReadAttributes(r io.Reader, domain []string, allowMacro bool) (attributes []MatchAttribute, err error) {
+ data, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, line := range strings.Split(string(data), eol) {
+ attribute, err := ParseAttributesLine(line, domain, allowMacro)
+ if err != nil {
+ return attributes, err
+ }
+ if len(attribute.Name) == 0 {
+ continue
+ }
+
+ attributes = append(attributes, attribute)
+ }
+
+ return attributes, nil
+}
+
+// ParseAttributesLine parses a gitattributes line, extracting the path
+// pattern and attributes.
+func ParseAttributesLine(line string, domain []string, allowMacro bool) (m MatchAttribute, err error) {
+ line = strings.TrimSpace(line)
+
+ if strings.HasPrefix(line, commentPrefix) || len(line) == 0 {
+ return
+ }
+
+ name, unquoted := unquote(line)
+ attrs := strings.Fields(unquoted)
+ if len(name) == 0 {
+ name = attrs[0]
+ attrs = attrs[1:]
+ }
+
+ var macro bool
+ macro, name, err = checkMacro(name, allowMacro)
+ if err != nil {
+ return
+ }
+
+ m.Name = name
+ m.Attributes = make([]Attribute, 0, len(attrs))
+
+ for _, attrName := range attrs {
+ attr := attribute{
+ name: attrName,
+ state: attributeSet,
+ }
+
+ // ! and - prefixes
+ state := attributeState(attr.name[0])
+ if state == attributeUnspecified || state == attributeUnset {
+ attr.state = state
+ attr.name = attr.name[1:]
+ }
+
+ kv := strings.SplitN(attrName, "=", 2)
+ if len(kv) == 2 {
+ attr.name = kv[0]
+ attr.value = kv[1]
+ attr.state = attributeSetValue
+ }
+
+ if !validAttributeName(attr.name) {
+ return m, ErrInvalidAttributeName
+ }
+ m.Attributes = append(m.Attributes, attr)
+ }
+
+ if !macro {
+ m.Pattern = ParsePattern(name, domain)
+ }
+ return
+}
+
+func checkMacro(name string, allowMacro bool) (macro bool, macroName string, err error) {
+ if !strings.HasPrefix(name, macroPrefix) {
+ return false, name, nil
+ }
+ if !allowMacro {
+ return true, name, ErrMacroNotAllowed
+ }
+
+ macroName = name[len(macroPrefix):]
+ if !validAttributeName(macroName) {
+ return true, name, ErrInvalidAttributeName
+ }
+ return true, macroName, nil
+}
+
+func validAttributeName(name string) bool {
+ if len(name) == 0 || name[0] == '-' {
+ return false
+ }
+
+ for _, ch := range name {
+ if !(ch == '-' || ch == '.' || ch == '_' ||
+ ('0' <= ch && ch <= '9') ||
+ ('a' <= ch && ch <= 'z') ||
+ ('A' <= ch && ch <= 'Z')) {
+ return false
+ }
+ }
+ return true
+}
+
+func unquote(str string) (string, string) {
+ if str[0] != '"' {
+ return "", str
+ }
+
+ for i := 1; i < len(str); i++ {
+ switch str[i] {
+ case '\\':
+ i++
+ case '"':
+ return str[1:i], str[i+1:]
+ }
+ }
+ return "", str
+}
diff --git a/plumbing/format/gitattributes/attributes_test.go b/plumbing/format/gitattributes/attributes_test.go
new file mode 100644
index 0000000..aea70ba
--- /dev/null
+++ b/plumbing/format/gitattributes/attributes_test.go
@@ -0,0 +1,67 @@
+package gitattributes
+
+import (
+ "strings"
+
+ . "gopkg.in/check.v1"
+)
+
+type AttributesSuite struct{}
+
+var _ = Suite(&AttributesSuite{})
+
+func (s *AttributesSuite) TestAttributes_ReadAttributes(c *C) {
+ lines := []string{
+ "[attr]sub -a",
+ "[attr]add a",
+ "* sub a",
+ "* !a foo=bar -b c",
+ }
+
+ mas, err := ReadAttributes(strings.NewReader(strings.Join(lines, "\n")), nil, true)
+ c.Assert(err, IsNil)
+ c.Assert(len(mas), Equals, 4)
+
+ c.Assert(mas[0].Name, Equals, "sub")
+ c.Assert(mas[0].Pattern, IsNil)
+ c.Assert(mas[0].Attributes[0].IsUnset(), Equals, true)
+
+ c.Assert(mas[1].Name, Equals, "add")
+ c.Assert(mas[1].Pattern, IsNil)
+ c.Assert(mas[1].Attributes[0].IsSet(), Equals, true)
+
+ c.Assert(mas[2].Name, Equals, "*")
+ c.Assert(mas[2].Pattern, NotNil)
+ c.Assert(mas[2].Attributes[0].IsSet(), Equals, true)
+
+ c.Assert(mas[3].Name, Equals, "*")
+ c.Assert(mas[3].Pattern, NotNil)
+ c.Assert(mas[3].Attributes[0].IsUnspecified(), Equals, true)
+ c.Assert(mas[3].Attributes[1].IsValueSet(), Equals, true)
+ c.Assert(mas[3].Attributes[1].Value(), Equals, "bar")
+ c.Assert(mas[3].Attributes[2].IsUnset(), Equals, true)
+ c.Assert(mas[3].Attributes[3].IsSet(), Equals, true)
+ c.Assert(mas[3].Attributes[0].String(), Equals, "a: unspecified")
+ c.Assert(mas[3].Attributes[1].String(), Equals, "foo: bar")
+ c.Assert(mas[3].Attributes[2].String(), Equals, "b: unset")
+ c.Assert(mas[3].Attributes[3].String(), Equals, "c: set")
+}
+
+func (s *AttributesSuite) TestAttributes_ReadAttributesDisallowMacro(c *C) {
+ lines := []string{
+ "[attr]sub -a",
+ "* a add",
+ }
+
+ _, err := ReadAttributes(strings.NewReader(strings.Join(lines, "\n")), nil, false)
+ c.Assert(err, Equals, ErrMacroNotAllowed)
+}
+
+func (s *AttributesSuite) TestAttributes_ReadAttributesInvalidName(c *C) {
+ lines := []string{
+ "[attr]foo!bar -a",
+ }
+
+ _, err := ReadAttributes(strings.NewReader(strings.Join(lines, "\n")), nil, true)
+ c.Assert(err, Equals, ErrInvalidAttributeName)
+}
diff --git a/plumbing/format/gitattributes/dir.go b/plumbing/format/gitattributes/dir.go
new file mode 100644
index 0000000..d5c1e6a
--- /dev/null
+++ b/plumbing/format/gitattributes/dir.go
@@ -0,0 +1,126 @@
+package gitattributes
+
+import (
+ "os"
+ "os/user"
+
+ "gopkg.in/src-d/go-billy.v4"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/config"
+ gioutil "gopkg.in/src-d/go-git.v4/utils/ioutil"
+)
+
+const (
+ coreSection = "core"
+ attributesfile = "attributesfile"
+ gitDir = ".git"
+ gitattributesFile = ".gitattributes"
+ gitconfigFile = ".gitconfig"
+ systemFile = "/etc/gitconfig"
+)
+
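+// ReadAttributesFile reads the attributes file named attributesFile in the
+// directory identified by path. A missing file yields a nil result and no
+// error.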
+func ReadAttributesFile(fs billy.Filesystem, path []string, attributesFile string, allowMacro bool) ([]MatchAttribute, error) {
+ f, err := fs.Open(fs.Join(append(path, attributesFile)...))
+ if os.IsNotExist(err) {
+ return nil, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ return ReadAttributes(f, path, allowMacro)
+}
+
+// ReadPatterns reads gitattributes patterns recursively through the directory
+// structure. The result is in ascending order of priority (the last match
+// wins).
+//
+// Only the .gitattributes file in the root directory may define custom
+// macros; macro definitions found in the .gitattributes files of
+// subdirectories return an error.
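+//
+// A minimal usage sketch (assumes fs is a billy.Filesystem rooted at the
+// working tree; names are illustrative):
+//
+//	attrs, err := ReadPatterns(fs, nil)
+//	if err == nil {
+//		m := NewMatcher(attrs)
+//		results, matched := m.Match([]string{"vendor", "lib.go"}, nil)
+//		fmt.Println(matched, results)
+//	}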
+func ReadPatterns(fs billy.Filesystem, path []string) (attributes []MatchAttribute, err error) {
+ attributes, err = ReadAttributesFile(fs, path, gitattributesFile, true)
+ if err != nil {
+ return
+ }
+
+ attrs, err := walkDirectory(fs, path)
+ return append(attributes, attrs...), err
+}
+
+func walkDirectory(fs billy.Filesystem, root []string) (attributes []MatchAttribute, err error) {
+ fis, err := fs.ReadDir(fs.Join(root...))
+ if err != nil {
+ return attributes, err
+ }
+
+ for _, fi := range fis {
+ if !fi.IsDir() || fi.Name() == ".git" {
+ continue
+ }
+
+ path := append(root, fi.Name())
+
+ dirAttributes, err := ReadAttributesFile(fs, path, gitattributesFile, false)
+ if err != nil {
+ return attributes, err
+ }
+
+ subAttributes, err := walkDirectory(fs, path)
+ if err != nil {
+ return attributes, err
+ }
+
+ attributes = append(attributes, append(dirAttributes, subAttributes...)...)
+ }
+
+ return
+}
+
+func loadPatterns(fs billy.Filesystem, path string) ([]MatchAttribute, error) {
+ f, err := fs.Open(path)
+ if os.IsNotExist(err) {
+ return nil, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer gioutil.CheckClose(f, &err)
+
+ raw := config.New()
+ if err = config.NewDecoder(f).Decode(raw); err != nil {
+ return nil, nil
+ }
+
+ path = raw.Section(coreSection).Options.Get(attributesfile)
+ if path == "" {
+ return nil, nil
+ }
+
+ return ReadAttributesFile(fs, nil, path, true)
+}
+
+// LoadGlobalPatterns loads gitattributes patterns and attributes from the
+// gitattributes file declared in a user's ~/.gitconfig file. If the
+// ~/.gitconfig file does not exist the function will return nil. If the
+// core.attributesfile property is not declared, the function will return nil.
+// If the file pointed to by the core.attributesfile property does not exist,
+// the function will return nil. The function assumes fs is rooted at the root
+// filesystem.
+func LoadGlobalPatterns(fs billy.Filesystem) (attributes []MatchAttribute, err error) {
+ usr, err := user.Current()
+ if err != nil {
+ return
+ }
+
+ return loadPatterns(fs, fs.Join(usr.HomeDir, gitconfigFile))
+}
+
+// LoadSystemPatterns loads gitattributes patterns and attributes from the
+// gitattributes file declared in a system's /etc/gitconfig file. If the
+// /etc/gitconfig file does not exist the function will return nil. If the
+// core.attributesfile property is not declared, the function will return nil.
+// If the file pointed to by the core.attributesfile property does not exist,
+// the function will return nil. The function assumes fs is rooted at the root
+// filesystem.
+func LoadSystemPatterns(fs billy.Filesystem) (attributes []MatchAttribute, err error) {
+ return loadPatterns(fs, systemFile)
+}
diff --git a/plumbing/format/gitattributes/dir_test.go b/plumbing/format/gitattributes/dir_test.go
new file mode 100644
index 0000000..34b915d
--- /dev/null
+++ b/plumbing/format/gitattributes/dir_test.go
@@ -0,0 +1,199 @@
+package gitattributes
+
+import (
+ "os"
+ "os/user"
+ "strconv"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/src-d/go-billy.v4"
+ "gopkg.in/src-d/go-billy.v4/memfs"
+)
+
+type MatcherSuite struct {
+ GFS billy.Filesystem // git repository root
+ RFS billy.Filesystem // root that contains user home
+	MCFS billy.Filesystem // root that contains user home, but missing ~/.gitconfig
+	MEFS billy.Filesystem // root that contains user home, but missing attributesfile entry
+	MIFS billy.Filesystem // root that contains user home, but missing global .gitattributes
+
+	SFS billy.Filesystem // root that contains /etc/gitconfig
+}
+
+var _ = Suite(&MatcherSuite{})
+
+func (s *MatcherSuite) SetUpTest(c *C) {
+ // setup root that contains user home
+ usr, err := user.Current()
+ c.Assert(err, IsNil)
+
+ gitAttributesGlobal := func(fs billy.Filesystem, filename string) {
+ f, err := fs.Create(filename)
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte("# IntelliJ\n"))
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte(".idea/** text\n"))
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte("*.iml -text\n"))
+ c.Assert(err, IsNil)
+ err = f.Close()
+ c.Assert(err, IsNil)
+ }
+
+ // setup generic git repository root
+ fs := memfs.New()
+ f, err := fs.Create(".gitattributes")
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte("vendor/g*/** foo=bar\n"))
+ c.Assert(err, IsNil)
+ err = f.Close()
+ c.Assert(err, IsNil)
+
+ err = fs.MkdirAll("vendor", os.ModePerm)
+ c.Assert(err, IsNil)
+ f, err = fs.Create("vendor/.gitattributes")
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte("github.com/** -foo\n"))
+ c.Assert(err, IsNil)
+ err = f.Close()
+ c.Assert(err, IsNil)
+
+ fs.MkdirAll("another", os.ModePerm)
+ fs.MkdirAll("vendor/github.com", os.ModePerm)
+ fs.MkdirAll("vendor/gopkg.in", os.ModePerm)
+
+ gitAttributesGlobal(fs, fs.Join(usr.HomeDir, ".gitattributes_global"))
+
+ s.GFS = fs
+
+ fs = memfs.New()
+ err = fs.MkdirAll(usr.HomeDir, os.ModePerm)
+ c.Assert(err, IsNil)
+
+ f, err = fs.Create(fs.Join(usr.HomeDir, gitconfigFile))
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte("[core]\n"))
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte(" attributesfile = " + strconv.Quote(fs.Join(usr.HomeDir, ".gitattributes_global")) + "\n"))
+ c.Assert(err, IsNil)
+ err = f.Close()
+ c.Assert(err, IsNil)
+
+ gitAttributesGlobal(fs, fs.Join(usr.HomeDir, ".gitattributes_global"))
+
+ s.RFS = fs
+
+ // root that contains user home, but missing ~/.gitconfig
+ fs = memfs.New()
+ gitAttributesGlobal(fs, fs.Join(usr.HomeDir, ".gitattributes_global"))
+
+ s.MCFS = fs
+
+ // setup root that contains user home, but missing attributesfile entry
+ fs = memfs.New()
+ err = fs.MkdirAll(usr.HomeDir, os.ModePerm)
+ c.Assert(err, IsNil)
+
+ f, err = fs.Create(fs.Join(usr.HomeDir, gitconfigFile))
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte("[core]\n"))
+ c.Assert(err, IsNil)
+ err = f.Close()
+ c.Assert(err, IsNil)
+
+ gitAttributesGlobal(fs, fs.Join(usr.HomeDir, ".gitattributes_global"))
+
+ s.MEFS = fs
+
+ // setup root that contains user home, but missing .gitattributes
+ fs = memfs.New()
+ err = fs.MkdirAll(usr.HomeDir, os.ModePerm)
+ c.Assert(err, IsNil)
+
+ f, err = fs.Create(fs.Join(usr.HomeDir, gitconfigFile))
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte("[core]\n"))
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte(" attributesfile = " + strconv.Quote(fs.Join(usr.HomeDir, ".gitattributes_global")) + "\n"))
+ c.Assert(err, IsNil)
+ err = f.Close()
+ c.Assert(err, IsNil)
+
+ s.MIFS = fs
+
+	// setup root that contains /etc/gitconfig
+ fs = memfs.New()
+ err = fs.MkdirAll("etc", os.ModePerm)
+ c.Assert(err, IsNil)
+
+ f, err = fs.Create(systemFile)
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte("[core]\n"))
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte(" attributesfile = /etc/gitattributes_global\n"))
+ c.Assert(err, IsNil)
+ err = f.Close()
+ c.Assert(err, IsNil)
+
+ gitAttributesGlobal(fs, "/etc/gitattributes_global")
+
+ s.SFS = fs
+}
+
+func (s *MatcherSuite) TestDir_ReadPatterns(c *C) {
+ ps, err := ReadPatterns(s.GFS, nil)
+ c.Assert(err, IsNil)
+ c.Assert(ps, HasLen, 2)
+
+ m := NewMatcher(ps)
+ results, _ := m.Match([]string{"vendor", "gopkg.in", "file"}, nil)
+ c.Assert(results["foo"].Value(), Equals, "bar")
+
+ results, _ = m.Match([]string{"vendor", "github.com", "file"}, nil)
+ c.Assert(results["foo"].IsUnset(), Equals, false)
+}
+
+func (s *MatcherSuite) TestDir_LoadGlobalPatterns(c *C) {
+ ps, err := LoadGlobalPatterns(s.RFS)
+ c.Assert(err, IsNil)
+ c.Assert(ps, HasLen, 2)
+
+ m := NewMatcher(ps)
+
+ results, _ := m.Match([]string{"go-git.v4.iml"}, nil)
+ c.Assert(results["text"].IsUnset(), Equals, true)
+
+ results, _ = m.Match([]string{".idea", "file"}, nil)
+ c.Assert(results["text"].IsSet(), Equals, true)
+}
+
+func (s *MatcherSuite) TestDir_LoadGlobalPatternsMissingGitconfig(c *C) {
+ ps, err := LoadGlobalPatterns(s.MCFS)
+ c.Assert(err, IsNil)
+ c.Assert(ps, HasLen, 0)
+}
+
+func (s *MatcherSuite) TestDir_LoadGlobalPatternsMissingAttributesfile(c *C) {
+ ps, err := LoadGlobalPatterns(s.MEFS)
+ c.Assert(err, IsNil)
+ c.Assert(ps, HasLen, 0)
+}
+
+func (s *MatcherSuite) TestDir_LoadGlobalPatternsMissingGitattributes(c *C) {
+ ps, err := LoadGlobalPatterns(s.MIFS)
+ c.Assert(err, IsNil)
+ c.Assert(ps, HasLen, 0)
+}
+
+func (s *MatcherSuite) TestDir_LoadSystemPatterns(c *C) {
+ ps, err := LoadSystemPatterns(s.SFS)
+ c.Assert(err, IsNil)
+ c.Assert(ps, HasLen, 2)
+
+ m := NewMatcher(ps)
+ results, _ := m.Match([]string{"go-git.v4.iml"}, nil)
+ c.Assert(results["text"].IsUnset(), Equals, true)
+
+ results, _ = m.Match([]string{".idea", "file"}, nil)
+ c.Assert(results["text"].IsSet(), Equals, true)
+}
diff --git a/plumbing/format/gitattributes/matcher.go b/plumbing/format/gitattributes/matcher.go
new file mode 100644
index 0000000..df12864
--- /dev/null
+++ b/plumbing/format/gitattributes/matcher.go
@@ -0,0 +1,78 @@
+package gitattributes
+
+// Matcher defines a global multi-pattern matcher for gitattributes patterns
+type Matcher interface {
+	// Match matches patterns in order of priority.
+ Match(path []string, attributes []string) (map[string]Attribute, bool)
+}
+
+type MatcherOptions struct{}
+
+// NewMatcher constructs a new matcher. Patterns must be given in order of
+// increasing priority: the most generic settings files first, then the
+// contents of the repository root .gitattributes, then the contents of the
+// .gitattributes files further down the path.
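+//
+// A sketch of assembling the stack in that order (variable names are
+// illustrative):
+//
+//	stack := append(systemAttrs, append(globalAttrs, repoAttrs...)...)
+//	m := NewMatcher(stack)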
+func NewMatcher(stack []MatchAttribute) Matcher {
+ m := &matcher{stack: stack}
+ m.init()
+
+ return m
+}
+
+type matcher struct {
+ stack []MatchAttribute
+ macros map[string]MatchAttribute
+}
+
+func (m *matcher) init() {
+ m.macros = make(map[string]MatchAttribute)
+
+ for _, attr := range m.stack {
+ if attr.Pattern == nil {
+ m.macros[attr.Name] = attr
+ }
+ }
+}
+
+// Match matches path against the patterns in gitattributes files and returns
+// the attributes associated with the path.
+//
+// If attributes is non-empty, only those attributes are resolved; otherwise
+// all attributes are returned.
+//
+// Matched is true if the path matched any rule, even if the results map is
+// empty.
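+//
+// A usage sketch, requesting only the "text" attribute (path and attribute
+// names are illustrative):
+//
+//	results, matched := m.Match([]string{"docs", "readme.md"}, []string{"text"})
+//	if matched {
+//		fmt.Println(results["text"])
+//	}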
+func (m *matcher) Match(path []string, attributes []string) (results map[string]Attribute, matched bool) {
+ results = make(map[string]Attribute, len(attributes))
+
+ n := len(m.stack)
+ for i := n - 1; i >= 0; i-- {
+ if len(attributes) > 0 && len(attributes) == len(results) {
+ return
+ }
+
+ pattern := m.stack[i].Pattern
+ if pattern == nil {
+ continue
+ }
+
+ if match := pattern.Match(path); match {
+ matched = true
+ for _, attr := range m.stack[i].Attributes {
+ if attr.IsSet() {
+ m.expandMacro(attr.Name(), results)
+ }
+ results[attr.Name()] = attr
+ }
+ }
+ }
+ return
+}
+
+func (m *matcher) expandMacro(name string, results map[string]Attribute) bool {
+ if macro, ok := m.macros[name]; ok {
+ for _, attr := range macro.Attributes {
+ results[attr.Name()] = attr
+ }
+ }
+ return false
+}
diff --git a/plumbing/format/gitattributes/matcher_test.go b/plumbing/format/gitattributes/matcher_test.go
new file mode 100644
index 0000000..edb71a1
--- /dev/null
+++ b/plumbing/format/gitattributes/matcher_test.go
@@ -0,0 +1,29 @@
+package gitattributes
+
+import (
+ "strings"
+
+ . "gopkg.in/check.v1"
+)
+
+func (s *MatcherSuite) TestMatcher_Match(c *C) {
+ lines := []string{
+ "[attr]binary -diff -merge -text",
+ "**/middle/v[uo]l?ano binary text eol=crlf",
+ "volcano -eol",
+ "foobar diff merge text eol=lf foo=bar",
+ }
+
+ ma, err := ReadAttributes(strings.NewReader(strings.Join(lines, "\n")), nil, true)
+ c.Assert(err, IsNil)
+
+ m := NewMatcher(ma)
+ results, matched := m.Match([]string{"head", "middle", "vulkano"}, nil)
+
+ c.Assert(matched, Equals, true)
+ c.Assert(results["binary"].IsSet(), Equals, true)
+ c.Assert(results["diff"].IsUnset(), Equals, true)
+ c.Assert(results["merge"].IsUnset(), Equals, true)
+ c.Assert(results["text"].IsSet(), Equals, true)
+ c.Assert(results["eol"].Value(), Equals, "crlf")
+}
diff --git a/plumbing/format/gitattributes/pattern.go b/plumbing/format/gitattributes/pattern.go
new file mode 100644
index 0000000..c5ca0c7
--- /dev/null
+++ b/plumbing/format/gitattributes/pattern.go
@@ -0,0 +1,101 @@
+package gitattributes
+
+import (
+ "path/filepath"
+ "strings"
+)
+
+const (
+ patternDirSep = "/"
+ zeroToManyDirs = "**"
+)
+
+// Pattern defines a gitattributes pattern.
+type Pattern interface {
+ // Match matches the given path to the pattern.
+ Match(path []string) bool
+}
+
+type pattern struct {
+ domain []string
+ pattern []string
+}
+
+// ParsePattern parses a gitattributes pattern string into the Pattern
+// structure.
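+//
+// For example, a single-component pattern matches against the final path
+// component only:
+//
+//	p := ParsePattern("*.iml", nil)
+//	p.Match([]string{"vendor", "project.iml"}) // true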
+func ParsePattern(p string, domain []string) Pattern {
+ return &pattern{
+ domain: domain,
+ pattern: strings.Split(p, patternDirSep),
+ }
+}
+
+func (p *pattern) Match(path []string) bool {
+ if len(path) <= len(p.domain) {
+ return false
+ }
+ for i, e := range p.domain {
+ if path[i] != e {
+ return false
+ }
+ }
+
+ if len(p.pattern) == 1 {
+		// for a simple rule, the .gitattributes matching rules differ from
+		// .gitignore: only the last path component is considered.
+ path = path[len(path)-1:]
+ } else {
+ path = path[len(p.domain):]
+ }
+
+ pattern := p.pattern
+ var match, doublestar bool
+ var err error
+ for _, part := range path {
+ // skip empty
+ if pattern[0] == "" {
+ pattern = pattern[1:]
+ }
+
+ // eat doublestar
+ if pattern[0] == zeroToManyDirs {
+ pattern = pattern[1:]
+ if len(pattern) == 0 {
+ return true
+ }
+ doublestar = true
+ }
+
+ switch true {
+ case strings.Contains(pattern[0], "**"):
+ return false
+
+ // keep going down the path until we hit a match
+ case doublestar:
+ match, err = filepath.Match(pattern[0], part)
+ if err != nil {
+ return false
+ }
+
+ if match {
+ doublestar = false
+ pattern = pattern[1:]
+ }
+
+ default:
+ match, err = filepath.Match(pattern[0], part)
+ if err != nil {
+ return false
+ }
+ if !match {
+ return false
+ }
+ pattern = pattern[1:]
+ }
+ }
+
+ if len(pattern) > 0 {
+ return false
+ }
+ return match
+}
diff --git a/plumbing/format/gitattributes/pattern_test.go b/plumbing/format/gitattributes/pattern_test.go
new file mode 100644
index 0000000..f95be6e
--- /dev/null
+++ b/plumbing/format/gitattributes/pattern_test.go
@@ -0,0 +1,229 @@
+package gitattributes
+
+import (
+ "testing"
+
+ . "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type PatternSuite struct{}
+
+var _ = Suite(&PatternSuite{})
+
+func (s *PatternSuite) TestMatch_domainLonger_mismatch(c *C) {
+ p := ParsePattern("value", []string{"head", "middle", "tail"})
+ r := p.Match([]string{"head", "middle"})
+ c.Assert(r, Equals, false)
+}
+
+func (s *PatternSuite) TestMatch_domainSameLength_mismatch(c *C) {
+ p := ParsePattern("value", []string{"head", "middle", "tail"})
+ r := p.Match([]string{"head", "middle", "tail"})
+ c.Assert(r, Equals, false)
+}
+
+func (s *PatternSuite) TestMatch_domainMismatch_mismatch(c *C) {
+ p := ParsePattern("value", []string{"head", "middle", "tail"})
+ r := p.Match([]string{"head", "middle", "_tail_", "value"})
+ c.Assert(r, Equals, false)
+}
+
+func (s *PatternSuite) TestSimpleMatch_match(c *C) {
+ p := ParsePattern("vul?ano", nil)
+ r := p.Match([]string{"value", "vulkano"})
+ c.Assert(r, Equals, true)
+}
+
+func (s *PatternSuite) TestSimpleMatch_withDomain(c *C) {
+ p := ParsePattern("middle/tail", []string{"value", "volcano"})
+ r := p.Match([]string{"value", "volcano", "middle", "tail"})
+ c.Assert(r, Equals, true)
+}
+
+func (s *PatternSuite) TestSimpleMatch_onlyMatchInDomain_mismatch(c *C) {
+ p := ParsePattern("value/volcano", []string{"value", "volcano"})
+ r := p.Match([]string{"value", "volcano", "tail"})
+ c.Assert(r, Equals, false)
+}
+
+func (s *PatternSuite) TestSimpleMatch_atStart(c *C) {
+ p := ParsePattern("value", nil)
+ r := p.Match([]string{"value", "tail"})
+ c.Assert(r, Equals, false)
+}
+
+func (s *PatternSuite) TestSimpleMatch_inTheMiddle(c *C) {
+ p := ParsePattern("value", nil)
+ r := p.Match([]string{"head", "value", "tail"})
+ c.Assert(r, Equals, false)
+}
+
+func (s *PatternSuite) TestSimpleMatch_atEnd(c *C) {
+ p := ParsePattern("value", nil)
+ r := p.Match([]string{"head", "value"})
+ c.Assert(r, Equals, true)
+}
+
+func (s *PatternSuite) TestSimpleMatch_mismatch(c *C) {
+ p := ParsePattern("value", nil)
+ r := p.Match([]string{"head", "val", "tail"})
+ c.Assert(r, Equals, false)
+}
+
+func (s *PatternSuite) TestSimpleMatch_valueLonger_mismatch(c *C) {
+ p := ParsePattern("tai", nil)
+ r := p.Match([]string{"head", "value", "tail"})
+ c.Assert(r, Equals, false)
+}
+
+func (s *PatternSuite) TestSimpleMatch_withAsterisk(c *C) {
+ p := ParsePattern("t*l", nil)
+ r := p.Match([]string{"value", "vulkano", "tail"})
+ c.Assert(r, Equals, true)
+}
+
+func (s *PatternSuite) TestSimpleMatch_withQuestionMark(c *C) {
+ p := ParsePattern("ta?l", nil)
+ r := p.Match([]string{"value", "vulkano", "tail"})
+ c.Assert(r, Equals, true)
+}
+
+func (s *PatternSuite) TestSimpleMatch_magicChars(c *C) {
+ p := ParsePattern("v[ou]l[kc]ano", nil)
+ r := p.Match([]string{"value", "volcano"})
+ c.Assert(r, Equals, true)
+}
+
+func (s *PatternSuite) TestSimpleMatch_wrongPattern_mismatch(c *C) {
+ p := ParsePattern("v[ou]l[", nil)
+ r := p.Match([]string{"value", "vol["})
+ c.Assert(r, Equals, false)
+}
+
+func (s *PatternSuite) TestGlobMatch_fromRootWithSlash(c *C) {
+ p := ParsePattern("/value/vul?ano/tail", nil)
+ r := p.Match([]string{"value", "vulkano", "tail"})
+ c.Assert(r, Equals, true)
+}
+
+func (s *PatternSuite) TestGlobMatch_withDomain(c *C) {
+ p := ParsePattern("middle/tail", []string{"value", "volcano"})
+ r := p.Match([]string{"value", "volcano", "middle", "tail"})
+ c.Assert(r, Equals, true)
+}
+
+func (s *PatternSuite) TestGlobMatch_onlyMatchInDomain_mismatch(c *C) {
+ p := ParsePattern("volcano/tail", []string{"value", "volcano"})
+ r := p.Match([]string{"value", "volcano", "tail"})
+ c.Assert(r, Equals, false)
+}
+
+func (s *PatternSuite) TestGlobMatch_fromRootWithoutSlash(c *C) {
+ p := ParsePattern("value/vul?ano/tail", nil)
+ r := p.Match([]string{"value", "vulkano", "tail"})
+ c.Assert(r, Equals, true)
+}
+
+func (s *PatternSuite) TestGlobMatch_fromRoot_mismatch(c *C) {
+ p := ParsePattern("value/vulkano", nil)
+ r := p.Match([]string{"value", "volcano"})
+ c.Assert(r, Equals, false)
+}
+
+func (s *PatternSuite) TestGlobMatch_fromRoot_tooShort_mismatch(c *C) {
+ p := ParsePattern("value/vul?ano", nil)
+ r := p.Match([]string{"value"})
+ c.Assert(r, Equals, false)
+}
+
+func (s *PatternSuite) TestGlobMatch_fromRoot_notAtRoot_mismatch(c *C) {
+ p := ParsePattern("/value/volcano", nil)
+ r := p.Match([]string{"value", "value", "volcano"})
+ c.Assert(r, Equals, false)
+}
+
+func (s *PatternSuite) TestGlobMatch_leadingAsterisks_atStart(c *C) {
+ p := ParsePattern("**/*lue/vol?ano/ta?l", nil)
+ r := p.Match([]string{"value", "volcano", "tail"})
+ c.Assert(r, Equals, true)
+}
+
+func (s *PatternSuite) TestGlobMatch_leadingAsterisks_notAtStart(c *C) {
+ p := ParsePattern("**/*lue/vol?ano/tail", nil)
+ r := p.Match([]string{"head", "value", "volcano", "tail"})
+ c.Assert(r, Equals, true)
+}
+
+func (s *PatternSuite) TestGlobMatch_leadingAsterisks_mismatch(c *C) {
+ p := ParsePattern("**/*lue/vol?ano/tail", nil)
+ r := p.Match([]string{"head", "value", "Volcano", "tail"})
+ c.Assert(r, Equals, false)
+}
+
+func (s *PatternSuite) TestGlobMatch_tailingAsterisks(c *C) {
+ p := ParsePattern("/*lue/vol?ano/**", nil)
+ r := p.Match([]string{"value", "volcano", "tail", "moretail"})
+ c.Assert(r, Equals, true)
+}
+
+func (s *PatternSuite) TestGlobMatch_tailingAsterisks_single(c *C) {
+ p := ParsePattern("/*lue/**", nil)
+ r := p.Match([]string{"value", "volcano"})
+ c.Assert(r, Equals, true)
+}
+
+func (s *PatternSuite) TestGlobMatch_tailingAsterisks_exactMatch(c *C) {
+ p := ParsePattern("/*lue/vol?ano/**", nil)
+ r := p.Match([]string{"value", "volcano"})
+ c.Assert(r, Equals, false)
+}
+
+func (s *PatternSuite) TestGlobMatch_middleAsterisks_emptyMatch(c *C) {
+ p := ParsePattern("/*lue/**/vol?ano", nil)
+ r := p.Match([]string{"value", "volcano"})
+ c.Assert(r, Equals, true)
+}
+
+func (s *PatternSuite) TestGlobMatch_middleAsterisks_oneMatch(c *C) {
+ p := ParsePattern("/*lue/**/vol?ano", nil)
+ r := p.Match([]string{"value", "middle", "volcano"})
+ c.Assert(r, Equals, true)
+}
+
+func (s *PatternSuite) TestGlobMatch_middleAsterisks_multiMatch(c *C) {
+ p := ParsePattern("/*lue/**/vol?ano", nil)
+ r := p.Match([]string{"value", "middle1", "middle2", "volcano"})
+ c.Assert(r, Equals, true)
+}
+
+func (s *PatternSuite) TestGlobMatch_wrongDoubleAsterisk_mismatch(c *C) {
+ p := ParsePattern("/*lue/**foo/vol?ano/tail", nil)
+ r := p.Match([]string{"value", "foo", "volcano", "tail"})
+ c.Assert(r, Equals, false)
+}
+
+func (s *PatternSuite) TestGlobMatch_magicChars(c *C) {
+ p := ParsePattern("**/head/v[ou]l[kc]ano", nil)
+ r := p.Match([]string{"value", "head", "volcano"})
+ c.Assert(r, Equals, true)
+}
+
+func (s *PatternSuite) TestGlobMatch_wrongPattern_noTraversal_mismatch(c *C) {
+ p := ParsePattern("**/head/v[ou]l[", nil)
+ r := p.Match([]string{"value", "head", "vol["})
+ c.Assert(r, Equals, false)
+}
+
+func (s *PatternSuite) TestGlobMatch_wrongPattern_onTraversal_mismatch(c *C) {
+ p := ParsePattern("/value/**/v[ou]l[", nil)
+ r := p.Match([]string{"value", "head", "vol["})
+ c.Assert(r, Equals, false)
+}
+
+func (s *PatternSuite) TestGlobMatch_issue_923(c *C) {
+ p := ParsePattern("**/android/**/GeneratedPluginRegistrant.java", nil)
+ r := p.Match([]string{"packages", "flutter_tools", "lib", "src", "android", "gradle.dart"})
+ c.Assert(r, Equals, false)
+}
diff --git a/plumbing/format/idxfile/idxfile.go b/plumbing/format/idxfile/idxfile.go
index 5fed278..14b5860 100644
--- a/plumbing/format/idxfile/idxfile.go
+++ b/plumbing/format/idxfile/idxfile.go
@@ -5,8 +5,9 @@ import (
"io"
"sort"
+ encbin "encoding/binary"
+
"gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/utils/binary"
)
const (
@@ -55,7 +56,8 @@ type MemoryIndex struct {
PackfileChecksum [20]byte
IdxChecksum [20]byte
- offsetHash map[int64]plumbing.Hash
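+	// offsetHash is a reverse map from pack offset to object hash. FindOffset
+	// fills it lazily, one entry at a time; genOffsetHash fills it completely
+	// and sets offsetHashIsFull.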
+ offsetHash map[int64]plumbing.Hash
+ offsetHashIsFull bool
}
var _ Index = (*MemoryIndex)(nil)
@@ -121,31 +123,32 @@ func (idx *MemoryIndex) FindOffset(h plumbing.Hash) (int64, error) {
return 0, plumbing.ErrObjectNotFound
}
- return idx.getOffset(k, i)
+ offset := idx.getOffset(k, i)
+
+ if !idx.offsetHashIsFull {
+ // Save the offset for reverse lookup
+ if idx.offsetHash == nil {
+ idx.offsetHash = make(map[int64]plumbing.Hash)
+ }
+ idx.offsetHash[int64(offset)] = h
+ }
+
+ return int64(offset), nil
}
const isO64Mask = uint64(1) << 31
-func (idx *MemoryIndex) getOffset(firstLevel, secondLevel int) (int64, error) {
+func (idx *MemoryIndex) getOffset(firstLevel, secondLevel int) uint64 {
offset := secondLevel << 2
- buf := bytes.NewBuffer(idx.Offset32[firstLevel][offset : offset+4])
- ofs, err := binary.ReadUint32(buf)
- if err != nil {
- return -1, err
- }
+ ofs := encbin.BigEndian.Uint32(idx.Offset32[firstLevel][offset : offset+4])
if (uint64(ofs) & isO64Mask) != 0 {
offset := 8 * (uint64(ofs) & ^isO64Mask)
- buf := bytes.NewBuffer(idx.Offset64[offset : offset+8])
- n, err := binary.ReadUint64(buf)
- if err != nil {
- return -1, err
- }
-
- return int64(n), nil
+ n := encbin.BigEndian.Uint64(idx.Offset64[offset : offset+8])
+ return n
}
- return int64(ofs), nil
+ return uint64(ofs)
}
// FindCRC32 implements the Index interface.
@@ -156,25 +159,34 @@ func (idx *MemoryIndex) FindCRC32(h plumbing.Hash) (uint32, error) {
return 0, plumbing.ErrObjectNotFound
}
- return idx.getCRC32(k, i)
+ return idx.getCRC32(k, i), nil
}
-func (idx *MemoryIndex) getCRC32(firstLevel, secondLevel int) (uint32, error) {
+func (idx *MemoryIndex) getCRC32(firstLevel, secondLevel int) uint32 {
offset := secondLevel << 2
- buf := bytes.NewBuffer(idx.CRC32[firstLevel][offset : offset+4])
- return binary.ReadUint32(buf)
+ return encbin.BigEndian.Uint32(idx.CRC32[firstLevel][offset : offset+4])
}
// FindHash implements the Index interface.
func (idx *MemoryIndex) FindHash(o int64) (plumbing.Hash, error) {
+ var hash plumbing.Hash
+ var ok bool
+
+ if idx.offsetHash != nil {
+ if hash, ok = idx.offsetHash[o]; ok {
+ return hash, nil
+ }
+ }
+
// Lazily generate the reverse offset/hash map if required.
- if idx.offsetHash == nil {
+ if !idx.offsetHashIsFull || idx.offsetHash == nil {
if err := idx.genOffsetHash(); err != nil {
return plumbing.ZeroHash, err
}
+
+ hash, ok = idx.offsetHash[o]
}
- hash, ok := idx.offsetHash[o]
if !ok {
return plumbing.ZeroHash, plumbing.ErrObjectNotFound
}
@@ -190,23 +202,21 @@ func (idx *MemoryIndex) genOffsetHash() error {
}
idx.offsetHash = make(map[int64]plumbing.Hash, count)
-
- iter, err := idx.Entries()
- if err != nil {
- return err
- }
-
- for {
- entry, err := iter.Next()
- if err != nil {
- if err == io.EOF {
- return nil
- }
- return err
+ idx.offsetHashIsFull = true
+
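+	// Walk the fanout table: i counts entries globally, while secondLevel
+	// indexes within each first-level bucket.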
+ var hash plumbing.Hash
+ i := uint32(0)
+ for firstLevel, fanoutValue := range idx.Fanout {
+ mappedFirstLevel := idx.FanoutMapping[firstLevel]
+ for secondLevel := uint32(0); i < fanoutValue; i++ {
+ copy(hash[:], idx.Names[mappedFirstLevel][secondLevel*objectIDLength:])
+ offset := int64(idx.getOffset(mappedFirstLevel, int(secondLevel)))
+ idx.offsetHash[offset] = hash
+ secondLevel++
}
-
- idx.offsetHash[int64(entry.Offset)] = entry.Hash
}
+
+ return nil
}
// Count implements the Index interface.
@@ -275,22 +285,11 @@ func (i *idxfileEntryIter) Next() (*Entry, error) {
continue
}
+ mappedFirstLevel := i.idx.FanoutMapping[i.firstLevel]
entry := new(Entry)
- ofs := i.secondLevel * objectIDLength
- copy(entry.Hash[:], i.idx.Names[i.idx.FanoutMapping[i.firstLevel]][ofs:])
-
- pos := i.idx.FanoutMapping[entry.Hash[0]]
-
- offset, err := i.idx.getOffset(pos, i.secondLevel)
- if err != nil {
- return nil, err
- }
- entry.Offset = uint64(offset)
-
- entry.CRC32, err = i.idx.getCRC32(pos, i.secondLevel)
- if err != nil {
- return nil, err
- }
+ copy(entry.Hash[:], i.idx.Names[mappedFirstLevel][i.secondLevel*objectIDLength:])
+ entry.Offset = i.idx.getOffset(mappedFirstLevel, i.secondLevel)
+ entry.CRC32 = i.idx.getCRC32(mappedFirstLevel, i.secondLevel)
i.secondLevel++
i.total++
diff --git a/plumbing/format/index/decoder.go b/plumbing/format/index/decoder.go
index ac57d08..98f92fd 100644
--- a/plumbing/format/index/decoder.go
+++ b/plumbing/format/index/decoder.go
@@ -1,6 +1,7 @@
package index
import (
+ "bufio"
"bytes"
"crypto/sha1"
"errors"
@@ -42,14 +43,17 @@ type Decoder struct {
r io.Reader
hash hash.Hash
lastEntry *Entry
+
+ extReader *bufio.Reader
}
// NewDecoder returns a new decoder that reads from r.
func NewDecoder(r io.Reader) *Decoder {
h := sha1.New()
return &Decoder{
- r: io.TeeReader(r, h),
- hash: h,
+ r: io.TeeReader(r, h),
+ hash: h,
+ extReader: bufio.NewReader(nil),
}
}
@@ -184,11 +188,9 @@ func (d *Decoder) doReadEntryNameV4() (string, error) {
func (d *Decoder) doReadEntryName(len uint16) (string, error) {
name := make([]byte, len)
- if err := binary.Read(d.r, &name); err != nil {
- return "", err
- }
+ _, err := io.ReadFull(d.r, name[:])
- return string(name), nil
+ return string(name), err
}
// Index entries are padded out to the next 8 byte alignment
@@ -279,20 +281,21 @@ func (d *Decoder) readExtension(idx *Index, header []byte) error {
return nil
}
-func (d *Decoder) getExtensionReader() (io.Reader, error) {
+func (d *Decoder) getExtensionReader() (*bufio.Reader, error) {
len, err := binary.ReadUint32(d.r)
if err != nil {
return nil, err
}
- return &io.LimitedReader{R: d.r, N: int64(len)}, nil
+ d.extReader.Reset(&io.LimitedReader{R: d.r, N: int64(len)})
+ return d.extReader, nil
}
func (d *Decoder) readChecksum(expected []byte, alreadyRead [4]byte) error {
var h plumbing.Hash
copy(h[:4], alreadyRead[:])
- if err := binary.Read(d.r, h[4:]); err != nil {
+ if _, err := io.ReadFull(d.r, h[4:]); err != nil {
return err
}
@@ -326,7 +329,7 @@ func validateHeader(r io.Reader) (version uint32, err error) {
}
type treeExtensionDecoder struct {
- r io.Reader
+ r *bufio.Reader
}
func (d *treeExtensionDecoder) Decode(t *Tree) error {
@@ -386,16 +389,13 @@ func (d *treeExtensionDecoder) readEntry() (*TreeEntry, error) {
}
e.Trees = i
-
- if err := binary.Read(d.r, &e.Hash); err != nil {
- return nil, err
- }
+ _, err = io.ReadFull(d.r, e.Hash[:])
return e, nil
}
type resolveUndoDecoder struct {
- r io.Reader
+ r *bufio.Reader
}
func (d *resolveUndoDecoder) Decode(ru *ResolveUndo) error {
@@ -433,7 +433,7 @@ func (d *resolveUndoDecoder) readEntry() (*ResolveUndoEntry, error) {
for s := range e.Stages {
var hash plumbing.Hash
- if err := binary.Read(d.r, hash[:]); err != nil {
+ if _, err := io.ReadFull(d.r, hash[:]); err != nil {
return nil, err
}
@@ -462,7 +462,7 @@ func (d *resolveUndoDecoder) readStage(e *ResolveUndoEntry, s Stage) error {
}
type endOfIndexEntryDecoder struct {
- r io.Reader
+ r *bufio.Reader
}
func (d *endOfIndexEntryDecoder) Decode(e *EndOfIndexEntry) error {
@@ -472,5 +472,6 @@ func (d *endOfIndexEntryDecoder) Decode(e *EndOfIndexEntry) error {
return err
}
- return binary.Read(d.r, &e.Hash)
+ _, err = io.ReadFull(d.r, e.Hash[:])
+ return err
}
diff --git a/plumbing/format/packfile/common.go b/plumbing/format/packfile/common.go
index 0d9ed54..f82c1ab 100644
--- a/plumbing/format/packfile/common.go
+++ b/plumbing/format/packfile/common.go
@@ -2,6 +2,7 @@ package packfile
import (
"bytes"
+ "compress/zlib"
"io"
"sync"
@@ -66,3 +67,12 @@ var bufPool = sync.Pool{
return bytes.NewBuffer(nil)
},
}
+
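+// zlibInitBytes is a minimal valid zlib stream (header, an empty stored
+// block, and the adler32 checksum of no data). It lets zlib.NewReader
+// succeed so pooled readers can later be Reset onto real streams.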
+var zlibInitBytes = []byte{0x78, 0x9c, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01}
+
+var zlibReaderPool = sync.Pool{
+ New: func() interface{} {
+ r, _ := zlib.NewReader(bytes.NewReader(zlibInitBytes))
+ return r
+ },
+}
diff --git a/plumbing/format/packfile/packfile.go b/plumbing/format/packfile/packfile.go
index 69b6e85..f528073 100644
--- a/plumbing/format/packfile/packfile.go
+++ b/plumbing/format/packfile/packfile.go
@@ -76,20 +76,18 @@ func (p *Packfile) Get(h plumbing.Hash) (plumbing.EncodedObject, error) {
return nil, err
}
- return p.GetByOffset(offset)
+ return p.objectAtOffset(offset, h)
}
-// GetByOffset retrieves the encoded object from the packfile with the given
+// GetByOffset retrieves the encoded object from the packfile at the given
// offset.
func (p *Packfile) GetByOffset(o int64) (plumbing.EncodedObject, error) {
hash, err := p.FindHash(o)
- if err == nil {
- if obj, ok := p.deltaBaseCache.Get(hash); ok {
- return obj, nil
- }
+ if err != nil {
+ return nil, err
}
- return p.objectAtOffset(o)
+ return p.objectAtOffset(o, hash)
}
// GetSizeByOffset retrieves the size of the encoded object from the
@@ -122,6 +120,13 @@ func (p *Packfile) nextObjectHeader() (*ObjectHeader, error) {
return h, err
}
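+// getDeltaObjectSize returns the target (expanded) size encoded in a delta
+// buffer header: two LEB128 varints, source size then target size; only the
+// second is returned.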
+func (p *Packfile) getDeltaObjectSize(buf *bytes.Buffer) int64 {
+ delta := buf.Bytes()
+ _, delta = decodeLEB128(delta) // skip src size
+ sz, _ := decodeLEB128(delta)
+ return int64(sz)
+}
+
func (p *Packfile) getObjectSize(h *ObjectHeader) (int64, error) {
switch h.Type {
case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
@@ -135,10 +140,7 @@ func (p *Packfile) getObjectSize(h *ObjectHeader) (int64, error) {
return 0, err
}
- delta := buf.Bytes()
- _, delta = decodeLEB128(delta) // skip src size
- sz, _ := decodeLEB128(delta)
- return int64(sz), nil
+ return p.getDeltaObjectSize(buf), nil
default:
return 0, ErrInvalidObject.AddDetails("type %q", h.Type)
}
@@ -176,10 +178,16 @@ func (p *Packfile) getObjectType(h *ObjectHeader) (typ plumbing.ObjectType, err
err = ErrInvalidObject.AddDetails("type %q", h.Type)
}
+ p.offsetToType[h.Offset] = typ
+
return
}
-func (p *Packfile) objectAtOffset(offset int64) (plumbing.EncodedObject, error) {
+func (p *Packfile) objectAtOffset(offset int64, hash plumbing.Hash) (plumbing.EncodedObject, error) {
+ if obj, ok := p.cacheGet(hash); ok {
+ return obj, nil
+ }
+
h, err := p.objectHeaderAtOffset(offset)
if err != nil {
if err == io.EOF || isInvalid(err) {
@@ -188,27 +196,54 @@ func (p *Packfile) objectAtOffset(offset int64) (plumbing.EncodedObject, error)
return nil, err
}
+ return p.getNextObject(h, hash)
+}
+
+func (p *Packfile) getNextObject(h *ObjectHeader, hash plumbing.Hash) (plumbing.EncodedObject, error) {
+ var err error
+
// If we have no filesystem, we will return a MemoryObject instead
// of an FSObject.
if p.fs == nil {
- return p.getNextObject(h)
+ return p.getNextMemoryObject(h)
}
- // If the object is not a delta and it's small enough then read it
- // completely into memory now since it is already read from disk
- // into buffer anyway.
- if h.Length <= smallObjectThreshold && h.Type != plumbing.OFSDeltaObject && h.Type != plumbing.REFDeltaObject {
- return p.getNextObject(h)
- }
+ // If the object is small enough then read it completely into memory now since
+ // it is already read from disk into buffer anyway. For delta objects we want
+ // to perform the optimization too, but we have to be careful about applying
+ // small deltas on big objects.
+ var size int64
+ if h.Length <= smallObjectThreshold {
+ if h.Type != plumbing.OFSDeltaObject && h.Type != plumbing.REFDeltaObject {
+ return p.getNextMemoryObject(h)
+ }
- hash, err := p.FindHash(h.Offset)
- if err != nil {
- return nil, err
- }
+ // For delta objects we read the delta data and apply the small object
+ // optimization only if the expanded version of the object still meets
+ // the small object threshold condition.
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ if _, _, err := p.s.NextObject(buf); err != nil {
+ return nil, err
+ }
+ defer bufPool.Put(buf)
- size, err := p.getObjectSize(h)
- if err != nil {
- return nil, err
+ size = p.getDeltaObjectSize(buf)
+ if size <= smallObjectThreshold {
+ var obj = new(plumbing.MemoryObject)
+ obj.SetSize(size)
+ if h.Type == plumbing.REFDeltaObject {
+ err = p.fillREFDeltaObjectContentWithBuffer(obj, h.Reference, buf)
+ } else {
+ err = p.fillOFSDeltaObjectContentWithBuffer(obj, h.OffsetReference, buf)
+ }
+ return obj, err
+ }
+ } else {
+ size, err = p.getObjectSize(h)
+ if err != nil {
+ return nil, err
+ }
}
typ, err := p.getObjectType(h)
@@ -231,25 +266,14 @@ func (p *Packfile) objectAtOffset(offset int64) (plumbing.EncodedObject, error)
}
func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) {
- ref, err := p.FindHash(offset)
- if err == nil {
- obj, ok := p.cacheGet(ref)
- if ok {
- reader, err := obj.Reader()
- if err != nil {
- return nil, err
- }
-
- return reader, nil
- }
- }
-
h, err := p.objectHeaderAtOffset(offset)
if err != nil {
return nil, err
}
- obj, err := p.getNextObject(h)
+ // getObjectContent is called from FSObject, so we have to explicitly
+	// get a memory object here to avoid a recursive cycle
+ obj, err := p.getNextMemoryObject(h)
if err != nil {
return nil, err
}
@@ -257,7 +281,7 @@ func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) {
return obj.Reader()
}
-func (p *Packfile) getNextObject(h *ObjectHeader) (plumbing.EncodedObject, error) {
+func (p *Packfile) getNextMemoryObject(h *ObjectHeader) (plumbing.EncodedObject, error) {
var obj = new(plumbing.MemoryObject)
obj.SetSize(h.Length)
obj.SetType(h.Type)
@@ -278,6 +302,8 @@ func (p *Packfile) getNextObject(h *ObjectHeader) (plumbing.EncodedObject, error
return nil, err
}
+ p.offsetToType[h.Offset] = obj.Type()
+
return obj, nil
}
@@ -300,6 +326,13 @@ func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plu
if err != nil {
return err
}
+ defer bufPool.Put(buf)
+
+ return p.fillREFDeltaObjectContentWithBuffer(obj, ref, buf)
+}
+
+func (p *Packfile) fillREFDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, ref plumbing.Hash, buf *bytes.Buffer) error {
+ var err error
base, ok := p.cacheGet(ref)
if !ok {
@@ -312,30 +345,31 @@ func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plu
obj.SetType(base.Type())
err = ApplyDelta(obj, base, buf.Bytes())
p.cachePut(obj)
- bufPool.Put(buf)
return err
}
func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset int64) error {
- buf := bytes.NewBuffer(nil)
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset()
_, _, err := p.s.NextObject(buf)
if err != nil {
return err
}
+ defer bufPool.Put(buf)
+
+ return p.fillOFSDeltaObjectContentWithBuffer(obj, offset, buf)
+}
- var base plumbing.EncodedObject
- var ok bool
+func (p *Packfile) fillOFSDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, offset int64, buf *bytes.Buffer) error {
hash, err := p.FindHash(offset)
- if err == nil {
- base, ok = p.cacheGet(hash)
+ if err != nil {
+ return err
}
- if !ok {
- base, err = p.GetByOffset(offset)
- if err != nil {
- return err
- }
+ base, err := p.objectAtOffset(offset, hash)
+ if err != nil {
+ return err
}
obj.SetType(base.Type())
@@ -414,6 +448,11 @@ func (p *Packfile) ID() (plumbing.Hash, error) {
return hash, nil
}
+// Scanner returns the packfile's Scanner
+func (p *Packfile) Scanner() *Scanner {
+ return p.s
+}
+
// Close the packfile and its resources.
func (p *Packfile) Close() error {
closer, ok := p.file.(io.Closer)
@@ -437,14 +476,50 @@ func (i *objectIter) Next() (plumbing.EncodedObject, error) {
return nil, err
}
- obj, err := i.p.GetByOffset(int64(e.Offset))
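+		// When iterating a specific object type, avoid fully decoding objects
+		// of other types: consult the offset/type cache first, then the
+		// object cache, and finally just the object header.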
+ if i.typ != plumbing.AnyObject {
+ if typ, ok := i.p.offsetToType[int64(e.Offset)]; ok {
+ if typ != i.typ {
+ continue
+ }
+ } else if obj, ok := i.p.cacheGet(e.Hash); ok {
+ if obj.Type() != i.typ {
+ i.p.offsetToType[int64(e.Offset)] = obj.Type()
+ continue
+ }
+ return obj, nil
+ } else {
+ h, err := i.p.objectHeaderAtOffset(int64(e.Offset))
+ if err != nil {
+ return nil, err
+ }
+
+ if h.Type == plumbing.REFDeltaObject || h.Type == plumbing.OFSDeltaObject {
+ typ, err := i.p.getObjectType(h)
+ if err != nil {
+ return nil, err
+ }
+ if typ != i.typ {
+ i.p.offsetToType[int64(e.Offset)] = typ
+ continue
+ }
+ // getObjectType will seek in the file so we cannot use getNextObject safely
+ return i.p.objectAtOffset(int64(e.Offset), e.Hash)
+ } else {
+ if h.Type != i.typ {
+ i.p.offsetToType[int64(e.Offset)] = h.Type
+ continue
+ }
+ return i.p.getNextObject(h, e.Hash)
+ }
+ }
+ }
+
+ obj, err := i.p.objectAtOffset(int64(e.Offset), e.Hash)
if err != nil {
return nil, err
}
- if i.typ == plumbing.AnyObject || obj.Type() == i.typ {
- return obj, nil
- }
+ return obj, nil
}
}
diff --git a/plumbing/format/packfile/scanner.go b/plumbing/format/packfile/scanner.go
index 614b0d1..7b44192 100644
--- a/plumbing/format/packfile/scanner.go
+++ b/plumbing/format/packfile/scanner.go
@@ -39,8 +39,7 @@ type ObjectHeader struct {
}
type Scanner struct {
- r reader
- zr readerResetter
+ r *scannerReader
crc hash.Hash32
// pendingObject is used to detect if an object has been read, or still
@@ -56,19 +55,27 @@ type Scanner struct {
 // NewScanner returns a new Scanner based on a reader. If the given reader
 // implements io.ReadSeeker, the Scanner will also be seekable.
func NewScanner(r io.Reader) *Scanner {
- seeker, ok := r.(io.ReadSeeker)
- if !ok {
- seeker = &trackableReader{Reader: r}
- }
+ _, ok := r.(io.ReadSeeker)
crc := crc32.NewIEEE()
return &Scanner{
- r: newTeeReader(newByteReadSeeker(seeker), crc),
+ r: newScannerReader(r, crc),
crc: crc,
IsSeekable: ok,
}
}
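+
+// Reset discards all internal state and makes the Scanner read from r,
+// allowing a single Scanner to be reused across packfiles.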
+func (s *Scanner) Reset(r io.Reader) {
+ _, ok := r.(io.ReadSeeker)
+
+ s.r.Reset(r)
+ s.crc.Reset()
+ s.IsSeekable = ok
+ s.pendingObject = nil
+ s.version = 0
+ s.objects = 0
+}
+
// Header reads the whole packfile header (signature, version and object count).
// It returns the version and the object count and performs checks on the
// validity of the signature and the version fields.
@@ -182,8 +189,7 @@ func (s *Scanner) NextObjectHeader() (*ObjectHeader, error) {
// nextObjectHeader returns the ObjectHeader for the next object in the reader
// without the Offset field
func (s *Scanner) nextObjectHeader() (*ObjectHeader, error) {
- defer s.Flush()
-
+ s.r.Flush()
s.crc.Reset()
h := &ObjectHeader{}
@@ -304,35 +310,29 @@ func (s *Scanner) readLength(first byte) (int64, error) {
// NextObject writes the content of the next object into the reader, returns
// the number of bytes written, the CRC32 of the content and an error, if any
func (s *Scanner) NextObject(w io.Writer) (written int64, crc32 uint32, err error) {
- defer s.crc.Reset()
-
s.pendingObject = nil
written, err = s.copyObject(w)
- s.Flush()
+
+ s.r.Flush()
crc32 = s.crc.Sum32()
+ s.crc.Reset()
+
return
}
 // copyObject reads and writes a non-deltified object from its zlib stream
 // in an object entry in the packfile.
func (s *Scanner) copyObject(w io.Writer) (n int64, err error) {
- if s.zr == nil {
- var zr io.ReadCloser
- zr, err = zlib.NewReader(s.r)
- if err != nil {
- return 0, fmt.Errorf("zlib initialization error: %s", err)
- }
+ zr := zlibReaderPool.Get().(io.ReadCloser)
+ defer zlibReaderPool.Put(zr)
- s.zr = zr.(readerResetter)
- } else {
- if err = s.zr.Reset(s.r, nil); err != nil {
- return 0, fmt.Errorf("zlib reset error: %s", err)
- }
+ if err = zr.(zlib.Resetter).Reset(s.r, nil); err != nil {
+ return 0, fmt.Errorf("zlib reset error: %s", err)
}
- defer ioutil.CheckClose(s.zr, &err)
+ defer ioutil.CheckClose(zr, &err)
buf := byteSlicePool.Get().([]byte)
- n, err = io.CopyBuffer(w, s.zr, buf)
+ n, err = io.CopyBuffer(w, zr, buf)
byteSlicePool.Put(buf)
return
}
@@ -378,110 +378,89 @@ func (s *Scanner) Close() error {
return err
}
-// Flush finishes writing the buffer to crc hasher in case we are using
-// a teeReader. Otherwise it is a no-op.
+// Flush is a no-op (deprecated)
func (s *Scanner) Flush() error {
- tee, ok := s.r.(*teeReader)
- if ok {
- return tee.Flush()
- }
return nil
}
-type trackableReader struct {
- count int64
- io.Reader
+// scannerReader has the following characteristics:
+// - Provides an io.ReadSeeker impl for bufio.Reader, when the underlying
+//   reader supports it.
+// - Keeps track of the current read position, for when the underlying reader
+//   isn't an io.ReadSeeker, but we still want to know the current offset.
+// - Writes to the hash writer what it reads, with the aid of a small buffer.
+//   The buffer helps avoid a performance penalty for performing small writes
+//   to the crc32 hash writer.
+type scannerReader struct {
+ reader io.Reader
+ crc io.Writer
+ rbuf *bufio.Reader
+ wbuf *bufio.Writer
+ offset int64
}
-// Read reads up to len(p) bytes into p.
-func (r *trackableReader) Read(p []byte) (n int, err error) {
- n, err = r.Reader.Read(p)
- r.count += int64(n)
-
- return
-}
-
-// Seek only supports io.SeekCurrent, any other operation fails
-func (r *trackableReader) Seek(offset int64, whence int) (int64, error) {
- if whence != io.SeekCurrent {
- return -1, ErrSeekNotSupported
+func newScannerReader(r io.Reader, h io.Writer) *scannerReader {
+ sr := &scannerReader{
+ rbuf: bufio.NewReader(nil),
+ wbuf: bufio.NewWriterSize(nil, 64),
+ crc: h,
}
+ sr.Reset(r)
- return r.count, nil
+ return sr
}
-func newByteReadSeeker(r io.ReadSeeker) *bufferedSeeker {
- return &bufferedSeeker{
- r: r,
- Reader: *bufio.NewReader(r),
- }
-}
+func (r *scannerReader) Reset(reader io.Reader) {
+ r.reader = reader
+ r.rbuf.Reset(r.reader)
+ r.wbuf.Reset(r.crc)
-type bufferedSeeker struct {
- r io.ReadSeeker
- bufio.Reader
-}
-
-func (r *bufferedSeeker) Seek(offset int64, whence int) (int64, error) {
- if whence == io.SeekCurrent && offset == 0 {
- current, err := r.r.Seek(offset, whence)
- if err != nil {
- return current, err
- }
-
- return current - int64(r.Buffered()), nil
+ r.offset = 0
+ if seeker, ok := r.reader.(io.ReadSeeker); ok {
+ r.offset, _ = seeker.Seek(0, io.SeekCurrent)
}
-
- defer r.Reader.Reset(r.r)
- return r.r.Seek(offset, whence)
}
-type readerResetter interface {
- io.ReadCloser
- zlib.Resetter
-}
+func (r *scannerReader) Read(p []byte) (n int, err error) {
+ n, err = r.rbuf.Read(p)
-type reader interface {
- io.Reader
- io.ByteReader
- io.Seeker
+ r.offset += int64(n)
+ if _, err := r.wbuf.Write(p[:n]); err != nil {
+ return n, err
+ }
+ return
}
-type teeReader struct {
- reader
- w hash.Hash32
- bufWriter *bufio.Writer
+func (r *scannerReader) ReadByte() (b byte, err error) {
+ b, err = r.rbuf.ReadByte()
+ if err == nil {
+ r.offset++
+ return b, r.wbuf.WriteByte(b)
+ }
+ return
}
-func newTeeReader(r reader, h hash.Hash32) *teeReader {
- return &teeReader{
- reader: r,
- w: h,
- bufWriter: bufio.NewWriter(h),
- }
+func (r *scannerReader) Flush() error {
+ return r.wbuf.Flush()
}
-func (r *teeReader) Read(p []byte) (n int, err error) {
- r.Flush()
+// Seek seeks to a location. If the underlying reader is not an io.ReadSeeker,
+// then only whence=io.SeekCurrent is supported; any other operation fails.
+func (r *scannerReader) Seek(offset int64, whence int) (int64, error) {
+ var err error
- n, err = r.reader.Read(p)
- if n > 0 {
- if n, err := r.w.Write(p[:n]); err != nil {
- return n, err
+ if seeker, ok := r.reader.(io.ReadSeeker); !ok {
+ if whence != io.SeekCurrent || offset != 0 {
+ return -1, ErrSeekNotSupported
+ }
+ } else {
+ if whence == io.SeekCurrent && offset == 0 {
+ return r.offset, nil
}
- }
- return
-}
-func (r *teeReader) ReadByte() (b byte, err error) {
- b, err = r.reader.ReadByte()
- if err == nil {
- return b, r.bufWriter.WriteByte(b)
+ r.offset, err = seeker.Seek(offset, whence)
+ r.rbuf.Reset(r.reader)
}
- return
-}
-
-func (r *teeReader) Flush() (err error) {
- return r.bufWriter.Flush()
+ return r.offset, err
}
diff --git a/plumbing/format/packfile/scanner_test.go b/plumbing/format/packfile/scanner_test.go
index 091b457..a401d6d 100644
--- a/plumbing/format/packfile/scanner_test.go
+++ b/plumbing/format/packfile/scanner_test.go
@@ -135,6 +135,55 @@ func (s *ScannerSuite) TestSeekObjectHeaderNonSeekable(c *C) {
c.Assert(err, Equals, ErrSeekNotSupported)
}
+func (s *ScannerSuite) TestReaderReset(c *C) {
+ r := fixtures.Basic().One().Packfile()
+ p := NewScanner(r)
+
+	version, objects, err := p.Header()
+	c.Assert(err, IsNil)
+	c.Assert(version, Equals, VersionSupported)
+	c.Assert(objects, Equals, uint32(31))
+
+ h, err := p.SeekObjectHeader(expectedHeadersOFS[0].Offset)
+ c.Assert(err, IsNil)
+ c.Assert(h, DeepEquals, &expectedHeadersOFS[0])
+
+ p.Reset(r)
+ c.Assert(p.pendingObject, IsNil)
+ c.Assert(p.version, Equals, uint32(0))
+ c.Assert(p.objects, Equals, uint32(0))
+ c.Assert(p.r.reader, Equals, r)
+ c.Assert(p.r.offset > expectedHeadersOFS[0].Offset, Equals, true)
+
+ p.Reset(bytes.NewReader(nil))
+ c.Assert(p.r.offset, Equals, int64(0))
+}
+
+func (s *ScannerSuite) TestReaderResetSeeks(c *C) {
+ r := fixtures.Basic().One().Packfile()
+
+ // seekable
+ p := NewScanner(r)
+ c.Assert(p.IsSeekable, Equals, true)
+ h, err := p.SeekObjectHeader(expectedHeadersOFS[0].Offset)
+ c.Assert(err, IsNil)
+ c.Assert(h, DeepEquals, &expectedHeadersOFS[0])
+
+ // reset with seekable
+ p.Reset(r)
+ c.Assert(p.IsSeekable, Equals, true)
+ h, err = p.SeekObjectHeader(expectedHeadersOFS[1].Offset)
+ c.Assert(err, IsNil)
+ c.Assert(h, DeepEquals, &expectedHeadersOFS[1])
+
+ // reset with non-seekable
+ f := fixtures.Basic().ByTag("ref-delta").One()
+ p.Reset(io.MultiReader(f.Packfile()))
+ c.Assert(p.IsSeekable, Equals, false)
+
+ _, err = p.SeekObjectHeader(expectedHeadersOFS[4].Offset)
+ c.Assert(err, Equals, ErrSeekNotSupported)
+}
+
var expectedHeadersOFS = []ObjectHeader{
{Type: plumbing.CommitObject, Offset: 12, Length: 254},
{Type: plumbing.OFSDeltaObject, Offset: 186, Length: 93, OffsetReference: 12},
diff --git a/plumbing/object/commit.go b/plumbing/object/commit.go
index e254342..6b50934 100644
--- a/plumbing/object/commit.go
+++ b/plumbing/object/commit.go
@@ -76,8 +76,8 @@ func (c *Commit) Tree() (*Tree, error) {
return GetTree(c.s, c.TreeHash)
}
-// Patch returns the Patch between the actual commit and the provided one.
-// Error will be return if context expires. Provided context must be non-nil
+// PatchContext returns the Patch between the current commit and the provided one.
+// An error will be returned if the context expires. The provided context must be non-nil.
func (c *Commit) PatchContext(ctx context.Context, to *Commit) (*Patch, error) {
fromTree, err := c.Tree()
if err != nil {
@@ -171,7 +171,9 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
}
defer ioutil.CheckClose(reader, &err)
- r := bufio.NewReader(reader)
+ r := bufPool.Get().(*bufio.Reader)
+ defer bufPool.Put(r)
+ r.Reset(reader)
var message bool
var pgpsig bool
@@ -233,6 +235,11 @@ func (b *Commit) Encode(o plumbing.EncodedObject) error {
return b.encode(o, true)
}
+// EncodeWithoutSignature exports a Commit into a plumbing.EncodedObject without the signature (this corresponds to the payload of the PGP signature).
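+//
+// A sketch of obtaining the signed payload (c is a *Commit; this mirrors
+// what Verify does below):
+//
+//	encoded := &plumbing.MemoryObject{}
+//	if err := c.EncodeWithoutSignature(encoded); err == nil {
+//		er, _ := encoded.Reader()
+//		// er now yields the exact payload the PGP signature covers
+//	}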
+func (b *Commit) EncodeWithoutSignature(o plumbing.EncodedObject) error {
+ return b.encode(o, false)
+}
+
func (b *Commit) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
o.SetType(plumbing.CommitObject)
w, err := o.Writer()
@@ -291,25 +298,33 @@ func (b *Commit) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
return err
}
-// Stats shows the status of commit.
+// Stats returns the stats of a commit.
func (c *Commit) Stats() (FileStats, error) {
- // Get the previous commit.
- ci := c.Parents()
- parentCommit, err := ci.Next()
+ return c.StatsContext(context.Background())
+}
+
+// StatsContext returns the stats of a commit. An error will be returned if the
+// context expires. The provided context must be non-nil.
+func (c *Commit) StatsContext(ctx context.Context) (FileStats, error) {
+ fromTree, err := c.Tree()
if err != nil {
- if err == io.EOF {
- emptyNoder := treeNoder{}
- parentCommit = &Commit{
- Hash: emptyNoder.hash,
- // TreeHash: emptyNoder.parent.Hash,
- s: c.s,
- }
- } else {
+ return nil, err
+ }
+
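+	// For a root commit the patch is computed against an empty tree;
+	// otherwise against the tree of the first parent.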
+ toTree := &Tree{}
+ if c.NumParents() != 0 {
+ firstParent, err := c.Parents().Next()
+ if err != nil {
+ return nil, err
+ }
+
+ toTree, err = firstParent.Tree()
+ if err != nil {
return nil, err
}
}
- patch, err := parentCommit.Patch(c)
+ patch, err := toTree.PatchContext(ctx, fromTree)
if err != nil {
return nil, err
}
@@ -339,7 +354,7 @@ func (c *Commit) Verify(armoredKeyRing string) (*openpgp.Entity, error) {
encoded := &plumbing.MemoryObject{}
// Encode commit components, excluding signature and get a reader object.
- if err := c.encode(encoded, false); err != nil {
+ if err := c.EncodeWithoutSignature(encoded); err != nil {
return nil, err
}
er, err := encoded.Reader()
diff --git a/plumbing/object/commit_stats_test.go b/plumbing/object/commit_stats_test.go
new file mode 100644
index 0000000..2fb3f08
--- /dev/null
+++ b/plumbing/object/commit_stats_test.go
@@ -0,0 +1,95 @@
+package object_test
+
+import (
+ "context"
+ "time"
+
+ "gopkg.in/src-d/go-git.v4"
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/object"
+ "gopkg.in/src-d/go-git.v4/storage/memory"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/src-d/go-billy.v4/memfs"
+ "gopkg.in/src-d/go-billy.v4/util"
+ "gopkg.in/src-d/go-git-fixtures.v3"
+)
+
+type CommitStatsSuite struct {
+ fixtures.Suite
+}
+
+var _ = Suite(&CommitStatsSuite{})
+
+func (s *CommitStatsSuite) TestStats(c *C) {
+	r, hash := s.writeHistory(c, []byte("foo\n"), []byte("foo\nbar\n"))
+
+ aCommit, err := r.CommitObject(hash)
+ c.Assert(err, IsNil)
+
+ fileStats, err := aCommit.StatsContext(context.Background())
+ c.Assert(err, IsNil)
+
+ c.Assert(fileStats[0].Name, Equals, "foo")
+ c.Assert(fileStats[0].Addition, Equals, 1)
+ c.Assert(fileStats[0].Deletion, Equals, 0)
+ c.Assert(fileStats[0].String(), Equals, " foo | 1 +\n")
+}
+
+func (s *CommitStatsSuite) TestStats_RootCommit(c *C) {
+	r, hash := s.writeHistory(c, []byte("foo\n"))
+
+ aCommit, err := r.CommitObject(hash)
+ c.Assert(err, IsNil)
+
+ fileStats, err := aCommit.Stats()
+ c.Assert(err, IsNil)
+
+ c.Assert(fileStats, HasLen, 1)
+ c.Assert(fileStats[0].Name, Equals, "foo")
+ c.Assert(fileStats[0].Addition, Equals, 1)
+ c.Assert(fileStats[0].Deletion, Equals, 0)
+ c.Assert(fileStats[0].String(), Equals, " foo | 1 +\n")
+}
+
+func (s *CommitStatsSuite) TestStats_WithoutNewLine(c *C) {
+	r, hash := s.writeHistory(c, []byte("foo\nbar"), []byte("foo\nbar\n"))
+
+ aCommit, err := r.CommitObject(hash)
+ c.Assert(err, IsNil)
+
+ fileStats, err := aCommit.Stats()
+ c.Assert(err, IsNil)
+
+ c.Assert(fileStats[0].Name, Equals, "foo")
+ c.Assert(fileStats[0].Addition, Equals, 1)
+ c.Assert(fileStats[0].Deletion, Equals, 1)
+ c.Assert(fileStats[0].String(), Equals, " foo | 2 +-\n")
+}
+
+func (s *CommitStatsSuite) writeHistory(c *C, files ...[]byte) (*git.Repository, plumbing.Hash) {
+ cm := &git.CommitOptions{
+ Author: &object.Signature{Name: "Foo", Email: "foo@example.local", When: time.Now()},
+ }
+
+ fs := memfs.New()
+ r, err := git.Init(memory.NewStorage(), fs)
+ c.Assert(err, IsNil)
+
+ w, err := r.Worktree()
+ c.Assert(err, IsNil)
+
+ var hash plumbing.Hash
+ for _, content := range files {
+ util.WriteFile(fs, "foo", content, 0644)
+
+ _, err = w.Add("foo")
+ c.Assert(err, IsNil)
+
+ hash, err = w.Commit("foo\n", cm)
+ c.Assert(err, IsNil)
+
+ }
+
+ return r, hash
+}
diff --git a/plumbing/object/commit_test.go b/plumbing/object/commit_test.go
index c9acf42..957e7d6 100644
--- a/plumbing/object/commit_test.go
+++ b/plumbing/object/commit_test.go
@@ -4,14 +4,15 @@ import (
"bytes"
"context"
"io"
+ "io/ioutil"
"strings"
"time"
+ fixtures "gopkg.in/src-d/go-git-fixtures.v3"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/cache"
. "gopkg.in/check.v1"
- "gopkg.in/src-d/go-git-fixtures.v3"
"gopkg.in/src-d/go-git.v4/storage/filesystem"
)
@@ -495,3 +496,23 @@ func (s *SuiteCommit) TestMalformedHeader(c *C) {
err = decoded.Decode(encoded)
c.Assert(err, IsNil)
}
+
+func (s *SuiteCommit) TestEncodeWithoutSignature(c *C) {
+ //Similar to TestString since no signature
+ encoded := &plumbing.MemoryObject{}
+ err := s.Commit.EncodeWithoutSignature(encoded)
+ c.Assert(err, IsNil)
+ er, err := encoded.Reader()
+ c.Assert(err, IsNil)
+ payload, err := ioutil.ReadAll(er)
+ c.Assert(err, IsNil)
+
+ c.Assert(string(payload), Equals, ""+
+ "tree eba74343e2f15d62adedfd8c883ee0262b5c8021\n"+
+ "parent 35e85108805c84807bc66a02d91535e1e24b38b9\n"+
+ "parent a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69\n"+
+ "author Máximo Cuadros Ortiz <mcuadros@gmail.com> 1427802494 +0200\n"+
+ "committer Máximo Cuadros Ortiz <mcuadros@gmail.com> 1427802494 +0200\n"+
+ "\n"+
+ "Merge branch 'master' of github.com:tyba/git-fixture\n")
+}
diff --git a/plumbing/object/commitgraph/commitnode.go b/plumbing/object/commitgraph/commitnode.go
new file mode 100644
index 0000000..e218d32
--- /dev/null
+++ b/plumbing/object/commitgraph/commitnode.go
@@ -0,0 +1,98 @@
+package commitgraph
+
+import (
+ "io"
+ "time"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/object"
+ "gopkg.in/src-d/go-git.v4/plumbing/storer"
+)
+
+// CommitNode is a generic interface encapsulating a lightweight commit object
+// retrieved from a CommitNodeIndex
+type CommitNode interface {
+ // ID returns the Commit object id referenced by the commit graph node.
+ ID() plumbing.Hash
+ // Tree returns the Tree referenced by the commit graph node.
+ Tree() (*object.Tree, error)
+	// CommitTime returns the Committer.When time of the Commit referenced by the commit graph node.
+ CommitTime() time.Time
+ // NumParents returns the number of parents in a commit.
+ NumParents() int
+	// ParentNodes returns a CommitNodeIter for the parents of the specified node.
+ ParentNodes() CommitNodeIter
+ // ParentNode returns the ith parent of a commit.
+ ParentNode(i int) (CommitNode, error)
+	// ParentHashes returns the hashes of the parent commits for the specified node.
+ ParentHashes() []plumbing.Hash
+ // Generation returns the generation of the commit for reachability analysis.
+ // Objects with newer generation are not reachable from objects of older generation.
+ Generation() uint64
+ // Commit returns the full commit object from the node
+ Commit() (*object.Commit, error)
+}
+
+// CommitNodeIndex is a generic interface encapsulating an index of CommitNode objects.
+type CommitNodeIndex interface {
+ // Get returns a commit node from a commit hash
+ Get(hash plumbing.Hash) (CommitNode, error)
+}
+
+// CommitNodeIter is a generic closable interface for iterating over commit nodes.
+type CommitNodeIter interface {
+ Next() (CommitNode, error)
+ ForEach(func(CommitNode) error) error
+ Close()
+}
+
+// parentCommitNodeIter provides an iterator for parent commits from associated CommitNodeIndex.
+type parentCommitNodeIter struct {
+ node CommitNode
+ i int
+}
+
+func newParentgraphCommitNodeIter(node CommitNode) CommitNodeIter {
+ return &parentCommitNodeIter{node, 0}
+}
+
+// Next moves the iterator to the next commit and returns a pointer to it. If
+// there are no more commits, it returns io.EOF.
+func (iter *parentCommitNodeIter) Next() (CommitNode, error) {
+ obj, err := iter.node.ParentNode(iter.i)
+ if err == object.ErrParentNotFound {
+ return nil, io.EOF
+ }
+ if err == nil {
+ iter.i++
+ }
+
+ return obj, err
+}
+
+// ForEach calls the cb function for each commit contained in this iter until
+// an error happens or the end of the iter is reached. If ErrStop is returned
+// by cb, the iteration is stopped but no error is returned. The iterator is closed.
+func (iter *parentCommitNodeIter) ForEach(cb func(CommitNode) error) error {
+ for {
+ obj, err := iter.Next()
+ if err != nil {
+ if err == io.EOF {
+ return nil
+ }
+
+ return err
+ }
+
+ if err := cb(obj); err != nil {
+ if err == storer.ErrStop {
+ return nil
+ }
+
+ return err
+ }
+ }
+}
+
+func (iter *parentCommitNodeIter) Close() {
+}
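A minimal usage sketch of these interfaces, assuming the repository lives in the current directory and using the object-backed index constructor defined in commitnode_object.go below:

    package main

    import (
    	"fmt"

    	git "gopkg.in/src-d/go-git.v4"
    	"gopkg.in/src-d/go-git.v4/plumbing/object/commitgraph"
    )

    func main() {
    	r, err := git.PlainOpen(".") // repository path is an assumption
    	if err != nil {
    		panic(err)
    	}

    	ref, err := r.Head()
    	if err != nil {
    		panic(err)
    	}

    	// Build an index backed purely by the object store.
    	nodeIndex := commitgraph.NewObjectCommitNodeIndex(r.Storer)

    	head, err := nodeIndex.Get(ref.Hash())
    	if err != nil {
    		panic(err)
    	}

    	// Iterate the direct parents of HEAD through CommitNodeIter.
    	err = head.ParentNodes().ForEach(func(p commitgraph.CommitNode) error {
    		fmt.Println(p.ID(), p.CommitTime())
    		return nil
    	})
    	if err != nil {
    		panic(err)
    	}
    }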
diff --git a/plumbing/object/commitgraph/commitnode_graph.go b/plumbing/object/commitgraph/commitnode_graph.go
new file mode 100644
index 0000000..bd54e18
--- /dev/null
+++ b/plumbing/object/commitgraph/commitnode_graph.go
@@ -0,0 +1,131 @@
+package commitgraph
+
+import (
+ "fmt"
+ "time"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph"
+ "gopkg.in/src-d/go-git.v4/plumbing/object"
+ "gopkg.in/src-d/go-git.v4/plumbing/storer"
+)
+
+// graphCommitNode is a reduced representation of Commit as presented in the commit
+// graph file (commitgraph.Node). It is merely useful as an optimization for walking
+// the commit graphs.
+//
+// graphCommitNode implements the CommitNode interface.
+type graphCommitNode struct {
+ // Hash for the Commit object
+ hash plumbing.Hash
+ // Index of the node in the commit graph file
+ index int
+
+ commitData *commitgraph.CommitData
+ gci *graphCommitNodeIndex
+}
+
+// graphCommitNodeIndex is an index that can load CommitNode objects from both the commit
+// graph files and the object store.
+//
+// graphCommitNodeIndex implements the CommitNodeIndex interface
+type graphCommitNodeIndex struct {
+ commitGraph commitgraph.Index
+ s storer.EncodedObjectStorer
+}
+
+// NewGraphCommitNodeIndex returns a CommitNodeIndex implementation that uses
+// commit-graph files as backing storage and falls back to object storage when
+// necessary.
+func NewGraphCommitNodeIndex(commitGraph commitgraph.Index, s storer.EncodedObjectStorer) CommitNodeIndex {
+ return &graphCommitNodeIndex{commitGraph, s}
+}
+
+func (gci *graphCommitNodeIndex) Get(hash plumbing.Hash) (CommitNode, error) {
+ // Check the commit graph first
+ parentIndex, err := gci.commitGraph.GetIndexByHash(hash)
+ if err == nil {
+ parent, err := gci.commitGraph.GetCommitDataByIndex(parentIndex)
+ if err != nil {
+ return nil, err
+ }
+
+ return &graphCommitNode{
+ hash: hash,
+ index: parentIndex,
+ commitData: parent,
+ gci: gci,
+ }, nil
+ }
+
+ // Fallback to loading full commit object
+ commit, err := object.GetCommit(gci.s, hash)
+ if err != nil {
+ return nil, err
+ }
+
+ return &objectCommitNode{
+ nodeIndex: gci,
+ commit: commit,
+ }, nil
+}
+
+func (c *graphCommitNode) ID() plumbing.Hash {
+ return c.hash
+}
+
+func (c *graphCommitNode) Tree() (*object.Tree, error) {
+ return object.GetTree(c.gci.s, c.commitData.TreeHash)
+}
+
+func (c *graphCommitNode) CommitTime() time.Time {
+ return c.commitData.When
+}
+
+func (c *graphCommitNode) NumParents() int {
+ return len(c.commitData.ParentIndexes)
+}
+
+func (c *graphCommitNode) ParentNodes() CommitNodeIter {
+ return newParentgraphCommitNodeIter(c)
+}
+
+func (c *graphCommitNode) ParentNode(i int) (CommitNode, error) {
+ if i < 0 || i >= len(c.commitData.ParentIndexes) {
+ return nil, object.ErrParentNotFound
+ }
+
+ parent, err := c.gci.commitGraph.GetCommitDataByIndex(c.commitData.ParentIndexes[i])
+ if err != nil {
+ return nil, err
+ }
+
+ return &graphCommitNode{
+ hash: c.commitData.ParentHashes[i],
+ index: c.commitData.ParentIndexes[i],
+ commitData: parent,
+ gci: c.gci,
+ }, nil
+}
+
+func (c *graphCommitNode) ParentHashes() []plumbing.Hash {
+ return c.commitData.ParentHashes
+}
+
+func (c *graphCommitNode) Generation() uint64 {
+	// If the commit-graph file was generated with an older Git version that
+	// set the generation to zero for every commit, the generation assumption
+	// is still valid. It is just less useful.
+ return uint64(c.commitData.Generation)
+}
+
+func (c *graphCommitNode) Commit() (*object.Commit, error) {
+ return object.GetCommit(c.gci.s, c.hash)
+}
+
+func (c *graphCommitNode) String() string {
+ return fmt.Sprintf(
+ "%s %s\nDate: %s",
+ plumbing.CommitObject, c.ID(),
+ c.CommitTime().Format(object.DateFormat),
+ )
+}
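A sketch of wiring this index up, mirroring the commitnode test below. The .git path and the commit hash are assumptions, and the opened file must stay open while the index is in use, because commit data is read from it lazily:

    package main

    import (
    	"fmt"
    	"path"

    	"gopkg.in/src-d/go-billy.v4/osfs"
    	"gopkg.in/src-d/go-git.v4/plumbing"
    	"gopkg.in/src-d/go-git.v4/plumbing/cache"
    	formatcg "gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph"
    	"gopkg.in/src-d/go-git.v4/plumbing/object/commitgraph"
    	"gopkg.in/src-d/go-git.v4/storage/filesystem"
    )

    func main() {
    	// Hypothetical .git location.
    	st := filesystem.NewStorage(osfs.New("/path/to/repo/.git"), cache.NewObjectLRUDefault())

    	reader, err := st.Filesystem().Open(path.Join("objects", "info", "commit-graph"))
    	if err != nil {
    		panic(err)
    	}
    	defer reader.Close()

    	fileIndex, err := formatcg.OpenFileIndex(reader)
    	if err != nil {
    		panic(err)
    	}

    	// Commits missing from the commit-graph file fall back to st.
    	nodeIndex := commitgraph.NewGraphCommitNodeIndex(fileIndex, st)

    	// Hypothetical commit hash.
    	node, err := nodeIndex.Get(plumbing.NewHash("b9d69064b190e7aedccf84731ca1d917871f8a1c"))
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(node.ID(), node.Generation())
    }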
diff --git a/plumbing/object/commitgraph/commitnode_object.go b/plumbing/object/commitgraph/commitnode_object.go
new file mode 100644
index 0000000..2779a54
--- /dev/null
+++ b/plumbing/object/commitgraph/commitnode_object.go
@@ -0,0 +1,90 @@
+package commitgraph
+
+import (
+ "math"
+ "time"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/object"
+ "gopkg.in/src-d/go-git.v4/plumbing/storer"
+)
+
+// objectCommitNode is a representation of Commit as presented in the Git object format.
+//
+// objectCommitNode implements the CommitNode interface.
+type objectCommitNode struct {
+ nodeIndex CommitNodeIndex
+ commit *object.Commit
+}
+
+// NewObjectCommitNodeIndex returns a CommitNodeIndex implementation that uses
+// only the object storage to load the nodes.
+func NewObjectCommitNodeIndex(s storer.EncodedObjectStorer) CommitNodeIndex {
+ return &objectCommitNodeIndex{s}
+}
+
+func (oci *objectCommitNodeIndex) Get(hash plumbing.Hash) (CommitNode, error) {
+ commit, err := object.GetCommit(oci.s, hash)
+ if err != nil {
+ return nil, err
+ }
+
+ return &objectCommitNode{
+ nodeIndex: oci,
+ commit: commit,
+ }, nil
+}
+
+// objectCommitNodeIndex is an index that can load CommitNode objects only from the
+// object store.
+//
+// objectCommitNodeIndex implements the CommitNodeIndex interface
+type objectCommitNodeIndex struct {
+ s storer.EncodedObjectStorer
+}
+
+func (c *objectCommitNode) CommitTime() time.Time {
+ return c.commit.Committer.When
+}
+
+func (c *objectCommitNode) ID() plumbing.Hash {
+ return c.commit.ID()
+}
+
+func (c *objectCommitNode) Tree() (*object.Tree, error) {
+ return c.commit.Tree()
+}
+
+func (c *objectCommitNode) NumParents() int {
+ return c.commit.NumParents()
+}
+
+func (c *objectCommitNode) ParentNodes() CommitNodeIter {
+ return newParentgraphCommitNodeIter(c)
+}
+
+func (c *objectCommitNode) ParentNode(i int) (CommitNode, error) {
+ if i < 0 || i >= len(c.commit.ParentHashes) {
+ return nil, object.ErrParentNotFound
+ }
+
+ // Note: It's necessary to go through CommitNodeIndex here to ensure
+ // that if the commit-graph file covers only part of the history we
+ // start using it when that part is reached.
+ return c.nodeIndex.Get(c.commit.ParentHashes[i])
+}
+
+func (c *objectCommitNode) ParentHashes() []plumbing.Hash {
+ return c.commit.ParentHashes
+}
+
+func (c *objectCommitNode) Generation() uint64 {
+ // Commit nodes representing objects outside of the commit graph can never
+ // be reached by objects from the commit-graph thus we return the highest
+ // possible value.
+ return math.MaxUint64
+}
+
+func (c *objectCommitNode) Commit() (*object.Commit, error) {
+ return c.commit, nil
+}
diff --git a/plumbing/object/commitgraph/commitnode_test.go b/plumbing/object/commitgraph/commitnode_test.go
new file mode 100644
index 0000000..954f873
--- /dev/null
+++ b/plumbing/object/commitgraph/commitnode_test.go
@@ -0,0 +1,147 @@
+package commitgraph
+
+import (
+ "path"
+ "testing"
+
+ . "gopkg.in/check.v1"
+ fixtures "gopkg.in/src-d/go-git-fixtures.v3"
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/cache"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
+ "gopkg.in/src-d/go-git.v4/storage/filesystem"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type CommitNodeSuite struct {
+ fixtures.Suite
+}
+
+var _ = Suite(&CommitNodeSuite{})
+
+func unpackRepository(f *fixtures.Fixture) *filesystem.Storage {
+ storer := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault())
+ p := f.Packfile()
+ defer p.Close()
+ packfile.UpdateObjectStorage(storer, p)
+ return storer
+}
+
+func testWalker(c *C, nodeIndex CommitNodeIndex) {
+ head, err := nodeIndex.Get(plumbing.NewHash("b9d69064b190e7aedccf84731ca1d917871f8a1c"))
+ c.Assert(err, IsNil)
+
+ iter := NewCommitNodeIterCTime(
+ head,
+ nil,
+ nil,
+ )
+
+ var commits []CommitNode
+ iter.ForEach(func(c CommitNode) error {
+ commits = append(commits, c)
+ return nil
+ })
+
+ c.Assert(commits, HasLen, 9)
+
+ expected := []string{
+ "b9d69064b190e7aedccf84731ca1d917871f8a1c",
+ "6f6c5d2be7852c782be1dd13e36496dd7ad39560",
+ "a45273fe2d63300e1962a9e26a6b15c276cd7082",
+ "c0edf780dd0da6a65a7a49a86032fcf8a0c2d467",
+ "bb13916df33ed23004c3ce9ed3b8487528e655c1",
+ "03d2c021ff68954cf3ef0a36825e194a4b98f981",
+ "ce275064ad67d51e99f026084e20827901a8361c",
+ "e713b52d7e13807e87a002e812041f248db3f643",
+ "347c91919944a68e9413581a1bc15519550a3afe",
+ }
+ for i, commit := range commits {
+ c.Assert(commit.ID().String(), Equals, expected[i])
+ }
+}
+
+func testParents(c *C, nodeIndex CommitNodeIndex) {
+ merge3, err := nodeIndex.Get(plumbing.NewHash("6f6c5d2be7852c782be1dd13e36496dd7ad39560"))
+ c.Assert(err, IsNil)
+
+ var parents []CommitNode
+ merge3.ParentNodes().ForEach(func(c CommitNode) error {
+ parents = append(parents, c)
+ return nil
+ })
+
+ c.Assert(parents, HasLen, 3)
+
+ expected := []string{
+ "ce275064ad67d51e99f026084e20827901a8361c",
+ "bb13916df33ed23004c3ce9ed3b8487528e655c1",
+ "a45273fe2d63300e1962a9e26a6b15c276cd7082",
+ }
+ for i, parent := range parents {
+ c.Assert(parent.ID().String(), Equals, expected[i])
+ }
+}
+
+func testCommitAndTree(c *C, nodeIndex CommitNodeIndex) {
+ merge3node, err := nodeIndex.Get(plumbing.NewHash("6f6c5d2be7852c782be1dd13e36496dd7ad39560"))
+ c.Assert(err, IsNil)
+ merge3commit, err := merge3node.Commit()
+ c.Assert(err, IsNil)
+ c.Assert(merge3node.ID().String(), Equals, merge3commit.ID().String())
+ tree, err := merge3node.Tree()
+ c.Assert(err, IsNil)
+ c.Assert(tree.ID().String(), Equals, merge3commit.TreeHash.String())
+}
+
+func (s *CommitNodeSuite) TestObjectGraph(c *C) {
+ f := fixtures.ByTag("commit-graph").One()
+	storer := unpackRepository(f)
+
+ nodeIndex := NewObjectCommitNodeIndex(storer)
+ testWalker(c, nodeIndex)
+ testParents(c, nodeIndex)
+ testCommitAndTree(c, nodeIndex)
+}
+
+func (s *CommitNodeSuite) TestCommitGraph(c *C) {
+ f := fixtures.ByTag("commit-graph").One()
+	storer := unpackRepository(f)
+ reader, err := storer.Filesystem().Open(path.Join("objects", "info", "commit-graph"))
+ c.Assert(err, IsNil)
+ defer reader.Close()
+ index, err := commitgraph.OpenFileIndex(reader)
+ c.Assert(err, IsNil)
+
+ nodeIndex := NewGraphCommitNodeIndex(index, storer)
+ testWalker(c, nodeIndex)
+ testParents(c, nodeIndex)
+ testCommitAndTree(c, nodeIndex)
+}
+
+func (s *CommitNodeSuite) TestMixedGraph(c *C) {
+ f := fixtures.ByTag("commit-graph").One()
+	storer := unpackRepository(f)
+
+	// Take the commit-graph file and copy it to a memory index without the last commit
+ reader, err := storer.Filesystem().Open(path.Join("objects", "info", "commit-graph"))
+ c.Assert(err, IsNil)
+ defer reader.Close()
+ fileIndex, err := commitgraph.OpenFileIndex(reader)
+ c.Assert(err, IsNil)
+ memoryIndex := commitgraph.NewMemoryIndex()
+ for i, hash := range fileIndex.Hashes() {
+ if hash.String() != "b9d69064b190e7aedccf84731ca1d917871f8a1c" {
+ node, err := fileIndex.GetCommitDataByIndex(i)
+ c.Assert(err, IsNil)
+ memoryIndex.Add(hash, node)
+ }
+ }
+
+ nodeIndex := NewGraphCommitNodeIndex(memoryIndex, storer)
+ testWalker(c, nodeIndex)
+ testParents(c, nodeIndex)
+ testCommitAndTree(c, nodeIndex)
+}
diff --git a/plumbing/object/commitgraph/commitnode_walker_ctime.go b/plumbing/object/commitgraph/commitnode_walker_ctime.go
new file mode 100644
index 0000000..f6a1b6a
--- /dev/null
+++ b/plumbing/object/commitgraph/commitnode_walker_ctime.go
@@ -0,0 +1,105 @@
+package commitgraph
+
+import (
+ "io"
+
+ "github.com/emirpasic/gods/trees/binaryheap"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/storer"
+)
+
+type commitNodeIteratorByCTime struct {
+ heap *binaryheap.Heap
+ seenExternal map[plumbing.Hash]bool
+ seen map[plumbing.Hash]bool
+}
+
+// NewCommitNodeIterCTime returns a CommitNodeIter that walks the commit
+// history, starting at the given commit and visiting its parents in
+// committer-time order; this appears to be the closest order to `git log`.
+// Each commit is visited only once. Commits whose hashes appear in ignore or
+// seenExternal are skipped. Errors might be returned if the history cannot
+// be traversed (e.g. missing objects).
+func NewCommitNodeIterCTime(
+ c CommitNode,
+ seenExternal map[plumbing.Hash]bool,
+ ignore []plumbing.Hash,
+) CommitNodeIter {
+ seen := make(map[plumbing.Hash]bool)
+ for _, h := range ignore {
+ seen[h] = true
+ }
+
+ heap := binaryheap.NewWith(func(a, b interface{}) int {
+ if a.(CommitNode).CommitTime().Before(b.(CommitNode).CommitTime()) {
+ return 1
+ }
+ return -1
+ })
+
+ heap.Push(c)
+
+ return &commitNodeIteratorByCTime{
+ heap: heap,
+ seenExternal: seenExternal,
+ seen: seen,
+ }
+}
+
+func (w *commitNodeIteratorByCTime) Next() (CommitNode, error) {
+ var c CommitNode
+ for {
+ cIn, ok := w.heap.Pop()
+ if !ok {
+ return nil, io.EOF
+ }
+ c = cIn.(CommitNode)
+ cID := c.ID()
+
+ if w.seen[cID] || w.seenExternal[cID] {
+ continue
+ }
+
+ w.seen[cID] = true
+
+ for i, h := range c.ParentHashes() {
+ if w.seen[h] || w.seenExternal[h] {
+ continue
+ }
+ pc, err := c.ParentNode(i)
+ if err != nil {
+ return nil, err
+ }
+ w.heap.Push(pc)
+ }
+
+ return c, nil
+ }
+}
+
+func (w *commitNodeIteratorByCTime) ForEach(cb func(CommitNode) error) error {
+ for {
+ c, err := w.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return err
+ }
+
+ err = cb(c)
+ if err == storer.ErrStop {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (w *commitNodeIteratorByCTime) Close() {}
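A sketch of the iterator in use; the package name and parameters are illustrative, and nodeIndex can be built with either constructor shown above:

    package sketch

    import (
    	"fmt"

    	"gopkg.in/src-d/go-git.v4/plumbing"
    	"gopkg.in/src-d/go-git.v4/plumbing/object/commitgraph"
    )

    // printHistory walks the history reachable from headHash in committer-time
    // order, visiting each commit exactly once.
    func printHistory(nodeIndex commitgraph.CommitNodeIndex, headHash plumbing.Hash) error {
    	head, err := nodeIndex.Get(headHash)
    	if err != nil {
    		return err
    	}

    	iter := commitgraph.NewCommitNodeIterCTime(head, nil, nil)
    	defer iter.Close()

    	return iter.ForEach(func(n commitgraph.CommitNode) error {
    		fmt.Println(n.ID(), n.CommitTime())
    		return nil
    	})
    }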
diff --git a/plumbing/object/commitgraph/doc.go b/plumbing/object/commitgraph/doc.go
new file mode 100644
index 0000000..0a55ad5
--- /dev/null
+++ b/plumbing/object/commitgraph/doc.go
@@ -0,0 +1,7 @@
+// Package commitgraph provides an interface for efficient traversal over the
+// Git commit graph, either through the regular object storage or, optionally,
+// with the index stored in the commit-graph file (Git 2.18+).
+//
+// The API and functionality of this package are considered EXPERIMENTAL and
+// are not yet stable or production ready.
+package commitgraph
diff --git a/plumbing/object/common.go b/plumbing/object/common.go
new file mode 100644
index 0000000..3591f5f
--- /dev/null
+++ b/plumbing/object/common.go
@@ -0,0 +1,12 @@
+package object
+
+import (
+ "bufio"
+ "sync"
+)
+
+var bufPool = sync.Pool{
+ New: func() interface{} {
+ return bufio.NewReader(nil)
+ },
+}
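The pool is consumed with the get/reset/put pattern that the Tag and Tree decoder hunks below adopt; a standalone sketch of the pattern (the readAllLines helper is hypothetical):

    package object

    import (
    	"bufio"
    	"io"
    )

    // readAllLines drains reader line by line through a pooled bufio.Reader,
    // saving one bufio.Reader allocation per decode.
    func readAllLines(reader io.Reader) error {
    	r := bufPool.Get().(*bufio.Reader)
    	defer bufPool.Put(r)
    	r.Reset(reader) // drop any state left by the previous user

    	for {
    		_, err := r.ReadBytes('\n')
    		if err == io.EOF {
    			return nil
    		}
    		if err != nil {
    			return err
    		}
    	}
    }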
diff --git a/plumbing/object/patch.go b/plumbing/object/patch.go
index adeaccb..1efd0b1 100644
--- a/plumbing/object/patch.go
+++ b/plumbing/object/patch.go
@@ -320,11 +320,22 @@ func getFileStatsFromFilePatches(filePatches []fdiff.FilePatch) FileStats {
}
for _, chunk := range fp.Chunks() {
+ s := chunk.Content()
+ if len(s) == 0 {
+ continue
+ }
+
switch chunk.Type() {
case fdiff.Add:
- cs.Addition += strings.Count(chunk.Content(), "\n")
+ cs.Addition += strings.Count(s, "\n")
+ if s[len(s)-1] != '\n' {
+ cs.Addition++
+ }
case fdiff.Delete:
- cs.Deletion += strings.Count(chunk.Content(), "\n")
+ cs.Deletion += strings.Count(s, "\n")
+ if s[len(s)-1] != '\n' {
+ cs.Deletion++
+ }
}
}
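The new trailing-byte check matters because strings.Count only counts terminated lines: for the chunk content "foo\nbar" it returns 1 even though two lines changed. The rule in isolation, as a hypothetical helper:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // countLines counts newlines, adding one when the content does not end
    // with '\n', matching the chunk accounting above.
    func countLines(s string) int {
    	if len(s) == 0 {
    		return 0
    	}
    	n := strings.Count(s, "\n")
    	if s[len(s)-1] != '\n' {
    		n++
    	}
    	return n
    }

    func main() {
    	fmt.Println(countLines("foo\nbar"))   // 2
    	fmt.Println(countLines("foo\nbar\n")) // 2
    }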
diff --git a/plumbing/object/tag.go b/plumbing/object/tag.go
index 03749f9..9ee5509 100644
--- a/plumbing/object/tag.go
+++ b/plumbing/object/tag.go
@@ -93,7 +93,9 @@ func (t *Tag) Decode(o plumbing.EncodedObject) (err error) {
}
defer ioutil.CheckClose(reader, &err)
- r := bufio.NewReader(reader)
+ r := bufPool.Get().(*bufio.Reader)
+ defer bufPool.Put(r)
+ r.Reset(reader)
for {
var line []byte
line, err = r.ReadBytes('\n')
@@ -141,7 +143,7 @@ func (t *Tag) Decode(o plumbing.EncodedObject) (err error) {
if pgpsig {
if bytes.Contains(l, []byte(endpgp)) {
t.PGPSignature += endpgp + "\n"
- pgpsig = false
+ break
} else {
t.PGPSignature += string(l) + "\n"
}
@@ -169,6 +171,11 @@ func (t *Tag) Encode(o plumbing.EncodedObject) error {
return t.encode(o, true)
}
+// EncodeWithoutSignature exports a Tag into a plumbing.EncodedObject without the signature (corresponding to the payload of the PGP signature).
+func (t *Tag) EncodeWithoutSignature(o plumbing.EncodedObject) error {
+ return t.encode(o, false)
+}
+
func (t *Tag) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
o.SetType(plumbing.TagObject)
w, err := o.Writer()
@@ -289,7 +296,7 @@ func (t *Tag) Verify(armoredKeyRing string) (*openpgp.Entity, error) {
encoded := &plumbing.MemoryObject{}
// Encode tag components, excluding signature and get a reader object.
- if err := t.encode(encoded, false); err != nil {
+ if err := t.EncodeWithoutSignature(encoded); err != nil {
return nil, err
}
er, err := encoded.Reader()
diff --git a/plumbing/object/tag_test.go b/plumbing/object/tag_test.go
index 59c28b0..addec8d 100644
--- a/plumbing/object/tag_test.go
+++ b/plumbing/object/tag_test.go
@@ -3,16 +3,17 @@ package object
import (
"fmt"
"io"
+ "io/ioutil"
"strings"
"time"
+ fixtures "gopkg.in/src-d/go-git-fixtures.v3"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/cache"
"gopkg.in/src-d/go-git.v4/storage/filesystem"
"gopkg.in/src-d/go-git.v4/storage/memory"
. "gopkg.in/check.v1"
- "gopkg.in/src-d/go-git-fixtures.v3"
)
type TagSuite struct {
@@ -375,3 +376,96 @@ sYyf9RfOnw/KUFAQbdtvLx3ikODQC+D3KBtuKI9ISHQfgw==
_, ok := e.Identities["Sunny <me@darkowlzz.space>"]
c.Assert(ok, Equals, true)
}
+
+func (s *TagSuite) TestDecodeAndVerify(c *C) {
+ objectText := `object 7dba2f128d1298e385b28b56a7e1c579779eac82
+type commit
+tag v1.6
+tagger Filip Navara <filip.navara@gmail.com> 1555269936 +0200
+
+Hello
+
+world
+
+boo
+-----BEGIN PGP SIGNATURE-----
+
+iQEzBAABCAAdFiEEdRIEYXeoLk1t7PBDqeqoMkraaZ4FAlyziT4ACgkQqeqoMkra
+aZ502wgAxG4+69l8PYfq45u1R3CCf4x0m5WwcYwvaa4ang0S9mExh/C32NHnpM/V
+DbqMpAlFvBlixOsZ8FNWaM8VXnvRWyx64E6WnInxjx9+Wgv2fy5P1N5rtpvi+S2V
+iGc0RQJlIloqXr7qPYDrwcbgg6AFg9EPhgJxLyizglu9nYvNsH1InaPXMjzgGX8+
+3irnIYEMIrLcKPrCyHo4Q6gdBjEEBF8hFclPJ8OwXBPc6uNYjnDYx0me9TTQYqoG
+oGgO/rADU9fy4c/Q1ZQpocba/ca6abRJ9LAx9VXFOSlQrMKLgHCYfqU/MAZXKcZM
+6XXOL4+8Z3FJN6CapZKX7cdYB8LJnw==
+=t5Px
+-----END PGP SIGNATURE-----
+
+`
+
+ armoredKeyRing := `
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mQENBFyzedYBCADN3lVNUNkrjn0kfwKAxGQOI8a1977UaIq9ktFg+Uv4Jyq2Y59L
+ZVx2WYk1iDaRhxhv203HV//CA/Hr4IoPjK53qAkg2bPyi8UuDbL+gU+4Z+IiSeXd
+18ZcAbcYt188PWoUq9/82ofO8EiaBbUEEZJjEegLDtX8gxBDG0aI3Yj4Txj73mno
+w6+E5HDkgPElmH3oNQcr8iK9U2Kuj+ZAHkzbWL++gDCPiLl2eWf0Cr1nlVsv6YLa
+Fsn5vjMGT3dMJFc78ZqCHOeyYK7KHjW1EjzgqeG2eJVay+ZQ5zEx4Fp/dL0RdUSV
+U7zslRiraaPxshdhYOjQ0o72RpSkP1G6+8OhABEBAAG0JUZpbGlwIE5hdmFyYSA8
+ZmlsaXAubmF2YXJhQGdtYWlsLmNvbT6JAVQEEwEIAD4WIQR1EgRhd6guTW3s8EOp
+6qgyStppngUCXLN51gIbAwUJA8JnAAULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAK
+CRCp6qgyStppnlzjB/sFu7HqJrTRsnHsoWo2+nDeicXnR0VAhiLvv7uRRw4i90FJ
+0zDwjAmIH+po6vPffWRMcWOFVvAwZCX7/XcvDNF9OupFj/aold334+VVN0ha47IQ
+g44bJZie9mvLagEsqUXggpKQjd414Tk08aUucfaN9RFJIOGCwF05j2eXOBGR2HTe
+FLq3obeObryEPf0c8N/nw4RQ8OOcq98gxiHx5Gk+nLCcJCTvOlc9ULqpJ2a6cZry
+kxgSOI9dd74ilRQdpfPvoEeEGSqkY+daf+dhgSMT2mII0UJ6qQeY0DpCZZNsL8dr
+PxR4SPRlzLBuJIpnHY21ebOqwOPOLjzR+J2RBufkuQENBFyzedYBCADTCglXrST6
+DRz7Uq3zrrrzdCchHH0/+LgYOEoGs82UvdFfigQYGTydmXz27bHKfWNfGIa9IlLF
+MhasFueCnKnmfVxnlINRdyAXv7Tmx4mSjuCEmGkvM1nPpdhxWXptnVMqhQMddiMO
+N55bElDK2ftPc2s4dBmTItXXbet2kFZiv7MZBZpA4eRAHj5DDSwl8pnQArU50RDZ
+q3qYKvAP/z2SLjekcOFtMhZ9BXMvwAW4FWV0ztpfP3LvUUb0T7fSo5cXlm/0eqwa
+MUrUlbbwJMDg1/wJ3pbKhZlP+xXNLj5UE86TtfqNqaohOcIBdCsdTUQgbkLVlibP
+JmZH7lGDhvi3ABEBAAGJATwEGAEIACYWIQR1EgRhd6guTW3s8EOp6qgyStppngUC
+XLN51gIbDAUJA8JnAAAKCRCp6qgyStppntq1B/9bmw4XjEm5KyXwWnlAVGr8skXY
+KIJr6drUOOwQzl7rxsJRjUsFdX0IjaZwx303G/23eQMIvVkoaWpHrT0Y7EsTQ55x
++GSuANhEzobks4spzQ66VW9FHRlRr5wg5PTwWnGtV/5QVSTY/zeC9R/AFUJFsDWe
+tgHlNrb6MWx5EtypZDpAkubAMvD/QoZHX0oPXYAA2CugD4uSdzjf6Ys3xUuwjKKG
+5hvimAg1/Hympq71Znb6Ec1m4ZM22Br7dcWHIX2GWfDPyRG+rYPu4Fk9KKAD4FRz
+HdzbB2ak/HxIeCqmHVlmUqa+WfTMUJcsgOm3/ZFPCSoL6l0bz9Z1XVbiyD03
+=+gC9
+-----END PGP PUBLIC KEY BLOCK-----
+`
+
+ tagEncodedObject := &plumbing.MemoryObject{}
+
+ _, err := tagEncodedObject.Write([]byte(objectText))
+ tagEncodedObject.SetType(plumbing.TagObject)
+ c.Assert(err, IsNil)
+
+ tag := &Tag{}
+ err = tag.Decode(tagEncodedObject)
+ c.Assert(err, IsNil)
+
+ _, err = tag.Verify(armoredKeyRing)
+ c.Assert(err, IsNil)
+}
+
+func (s *TagSuite) TestEncodeWithoutSignature(c *C) {
+	// Similar to TestString, since there is no signature
+ encoded := &plumbing.MemoryObject{}
+ tag := s.tag(c, plumbing.NewHash("b742a2a9fa0afcfa9a6fad080980fbc26b007c69"))
+ err := tag.EncodeWithoutSignature(encoded)
+ c.Assert(err, IsNil)
+ er, err := encoded.Reader()
+ c.Assert(err, IsNil)
+ payload, err := ioutil.ReadAll(er)
+ c.Assert(err, IsNil)
+
+ c.Assert(string(payload), Equals, ""+
+ "object f7b877701fbf855b44c0a9e86f3fdce2c298b07f\n"+
+ "type commit\n"+
+ "tag annotated-tag\n"+
+ "tagger Máximo Cuadros <mcuadros@gmail.com> 1474485215 +0200\n"+
+ "\n"+
+ "example annotated tag\n",
+ )
+}
diff --git a/plumbing/object/tree.go b/plumbing/object/tree.go
index 1f9ea26..d30cf6e 100644
--- a/plumbing/object/tree.go
+++ b/plumbing/object/tree.go
@@ -230,7 +230,9 @@ func (t *Tree) Decode(o plumbing.EncodedObject) (err error) {
}
defer ioutil.CheckClose(reader, &err)
- r := bufio.NewReader(reader)
+ r := bufPool.Get().(*bufio.Reader)
+ defer bufPool.Put(r)
+ r.Reset(reader)
for {
str, err := r.ReadString(' ')
if err != nil {
@@ -383,7 +385,7 @@ func NewTreeWalker(t *Tree, recursive bool, seen map[plumbing.Hash]bool) *TreeWa
// underlying repository will be skipped automatically. It is possible that this
// may change in future versions.
func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
- var obj Object
+ var obj *Tree
for {
current := len(w.stack) - 1
if current < 0 {
@@ -403,7 +405,7 @@ func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
// Finished with the current tree, move back up to the parent
w.stack = w.stack[:current]
w.base, _ = path.Split(w.base)
- w.base = path.Clean(w.base) // Remove trailing slash
+ w.base = strings.TrimSuffix(w.base, "/")
continue
}
@@ -419,7 +421,7 @@ func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
obj, err = GetTree(w.s, entry.Hash)
}
- name = path.Join(w.base, entry.Name)
+ name = simpleJoin(w.base, entry.Name)
if err != nil {
err = io.EOF
@@ -433,9 +435,9 @@ func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
return
}
- if t, ok := obj.(*Tree); ok {
- w.stack = append(w.stack, &treeEntryIter{t, 0})
- w.base = path.Join(w.base, entry.Name)
+ if obj != nil {
+ w.stack = append(w.stack, &treeEntryIter{obj, 0})
+ w.base = simpleJoin(w.base, entry.Name)
}
return
@@ -509,3 +511,10 @@ func (iter *TreeIter) ForEach(cb func(*Tree) error) error {
return cb(t)
})
}
+
+func simpleJoin(parent, child string) string {
+ if len(parent) > 0 {
+ return parent + "/" + child
+ }
+ return child
+}
\ No newline at end of file
diff --git a/plumbing/transport/ssh/common.go b/plumbing/transport/ssh/common.go
index e4a3d18..d320d43 100644
--- a/plumbing/transport/ssh/common.go
+++ b/plumbing/transport/ssh/common.go
@@ -2,6 +2,7 @@
package ssh
import (
+ "context"
"fmt"
"reflect"
"strconv"
@@ -11,6 +12,7 @@ import (
"github.com/kevinburke/ssh_config"
"golang.org/x/crypto/ssh"
+ "golang.org/x/net/proxy"
)
// DefaultClient is the default SSH client.
@@ -115,7 +117,7 @@ func (c *command) connect() error {
overrideConfig(c.config, config)
- c.client, err = ssh.Dial("tcp", c.getHostWithPort(), config)
+ c.client, err = dial("tcp", c.getHostWithPort(), config)
if err != nil {
return err
}
@@ -130,6 +132,29 @@ func (c *command) connect() error {
return nil
}
+func dial(network, addr string, config *ssh.ClientConfig) (*ssh.Client, error) {
+ var (
+ ctx = context.Background()
+ cancel context.CancelFunc
+ )
+ if config.Timeout > 0 {
+ ctx, cancel = context.WithTimeout(ctx, config.Timeout)
+ } else {
+ ctx, cancel = context.WithCancel(ctx)
+ }
+ defer cancel()
+
+ conn, err := proxy.Dial(ctx, network, addr)
+ if err != nil {
+ return nil, err
+ }
+ c, chans, reqs, err := ssh.NewClientConn(conn, addr, config)
+ if err != nil {
+ return nil, err
+ }
+ return ssh.NewClient(c, chans, reqs), nil
+}
+
func (c *command) getHostWithPort() string {
if addr, found := c.doGetHostWithPortFromSSHConfig(); found {
return addr
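Because proxy.Dial picks the proxy up from the environment, callers opt in by setting ALL_PROXY before dialing, just as the new proxy test does; a sketch with the proxy address and repository URL as assumptions:

    package main

    import (
    	"fmt"
    	"os"

    	git "gopkg.in/src-d/go-git.v4"
    	"gopkg.in/src-d/go-git.v4/storage/memory"
    )

    func main() {
    	// Hypothetical SOCKS5 proxy; resolved by golang.org/x/net/proxy.
    	os.Setenv("ALL_PROXY", "socks5://localhost:1080")

    	r, err := git.Clone(memory.NewStorage(), nil, &git.CloneOptions{
    		URL: "git@github.com:src-d/go-git.git",
    	})
    	if err != nil {
    		panic(err)
    	}

    	ref, err := r.Head()
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println("cloned, HEAD at", ref.Hash())
    }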
diff --git a/plumbing/transport/ssh/proxy_test.go b/plumbing/transport/ssh/proxy_test.go
new file mode 100644
index 0000000..3caf1ff
--- /dev/null
+++ b/plumbing/transport/ssh/proxy_test.go
@@ -0,0 +1,36 @@
+package ssh
+
+import (
+ "fmt"
+ "log"
+ "net"
+ "os"
+
+ "github.com/armon/go-socks5"
+ . "gopkg.in/check.v1"
+)
+
+type ProxySuite struct {
+ UploadPackSuite
+}
+
+var _ = Suite(&ProxySuite{})
+
+func (s *ProxySuite) SetUpSuite(c *C) {
+ s.UploadPackSuite.SetUpSuite(c)
+
+ l, err := net.Listen("tcp", "localhost:0")
+ c.Assert(err, IsNil)
+
+ server, err := socks5.New(&socks5.Config{})
+ c.Assert(err, IsNil)
+
+ port := l.Addr().(*net.TCPAddr).Port
+
+ err = os.Setenv("ALL_PROXY", fmt.Sprintf("socks5://localhost:%d", port))
+ c.Assert(err, IsNil)
+
+ go func() {
+ log.Fatal(server.Serve(l))
+ }()
+}
diff --git a/repository.go b/repository.go
index e5b12b0..a94dc2f 100644
--- a/repository.go
+++ b/repository.go
@@ -1306,16 +1306,6 @@ func (r *Repository) Worktree() (*Worktree, error) {
return &Worktree{r: r, Filesystem: r.wt}, nil
}
-func countTrue(vals ...bool) int {
- sum := 0
- for _, v := range vals {
- if v {
- sum++
- }
- }
- return sum
-}
-
// ResolveRevision resolves revision to corresponding hash. It will always
// resolve to a commit hash, not a tree or annotated tag.
//
@@ -1336,54 +1326,57 @@ func (r *Repository) ResolveRevision(rev plumbing.Revision) (*plumbing.Hash, err
switch item.(type) {
case revision.Ref:
revisionRef := item.(revision.Ref)
- var ref *plumbing.Reference
- var hashCommit, refCommit, tagCommit *object.Commit
- var rErr, hErr, tErr error
+
+ var tryHashes []plumbing.Hash
+
+ maybeHash := plumbing.NewHash(string(revisionRef))
+
+ if !maybeHash.IsZero() {
+ tryHashes = append(tryHashes, maybeHash)
+ }
for _, rule := range append([]string{"%s"}, plumbing.RefRevParseRules...) {
- ref, err = storer.ResolveReference(r.Storer, plumbing.ReferenceName(fmt.Sprintf(rule, revisionRef)))
+ ref, err := storer.ResolveReference(r.Storer, plumbing.ReferenceName(fmt.Sprintf(rule, revisionRef)))
if err == nil {
+ tryHashes = append(tryHashes, ref.Hash())
break
}
}
- if ref != nil {
- tag, tObjErr := r.TagObject(ref.Hash())
- if tObjErr != nil {
- tErr = tObjErr
- } else {
- tagCommit, tErr = tag.Commit()
+ // in ambiguous cases, `git rev-parse` will emit a warning, but
+ // will always return the oid in preference to a ref; we don't have
+ // the ability to emit a warning here, so (for speed purposes)
+ // don't bother to detect the ambiguity either, just return in the
+ // priority that git would.
+ gotOne := false
+ for _, hash := range tryHashes {
+ commitObj, err := r.CommitObject(hash)
+ if err == nil {
+ commit = commitObj
+ gotOne = true
+ break
}
- refCommit, rErr = r.CommitObject(ref.Hash())
- } else {
- rErr = plumbing.ErrReferenceNotFound
- tErr = plumbing.ErrReferenceNotFound
- }
- maybeHash := plumbing.NewHash(string(revisionRef)).String() == string(revisionRef)
- if maybeHash {
- hashCommit, hErr = r.CommitObject(plumbing.NewHash(string(revisionRef)))
- } else {
- hErr = plumbing.ErrReferenceNotFound
+ tagObj, err := r.TagObject(hash)
+ if err == nil {
+ // If the tag target lookup fails here, this most likely
+ // represents some sort of repo corruption, so let the
+ // error bubble up.
+ tagCommit, err := tagObj.Commit()
+ if err != nil {
+ return &plumbing.ZeroHash, err
+ }
+ commit = tagCommit
+ gotOne = true
+ break
+ }
}
- isTag := tErr == nil
- isCommit := rErr == nil
- isHash := hErr == nil
-
- switch {
- case countTrue(isTag, isCommit, isHash) > 1:
- return &plumbing.ZeroHash, fmt.Errorf(`refname "%s" is ambiguous`, revisionRef)
- case isTag:
- commit = tagCommit
- case isCommit:
- commit = refCommit
- case isHash:
- commit = hashCommit
- default:
+ if !gotOne {
return &plumbing.ZeroHash, plumbing.ErrReferenceNotFound
}
+
case revision.CaretPath:
depth := item.(revision.CaretPath).Depth
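After this refactor a full hex revision is tried as an object id before any ref-rule expansion, matching `git rev-parse` priority, and annotated tags still resolve through to their target commit. A usage sketch; the revisions come from the updated tests below:

    package main

    import (
    	"fmt"

    	git "gopkg.in/src-d/go-git.v4"
    	"gopkg.in/src-d/go-git.v4/plumbing"
    )

    func main() {
    	r, err := git.PlainOpen(".") // hypothetical repository path
    	if err != nil {
    		panic(err)
    	}

    	revs := []string{
    		"refs/tags/annotated-tag",
    		"b742a2a9fa0afcfa9a6fad080980fbc26b007c69", // the tag object id itself
    	}
    	for _, rev := range revs {
    		// Always resolves to a commit hash, never a tag or tree.
    		h, err := r.ResolveRevision(plumbing.Revision(rev))
    		if err != nil {
    			panic(err)
    		}
    		fmt.Println(rev, "->", h)
    	}
    }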
diff --git a/repository_test.go b/repository_test.go
index ccbe29b..0148c78 100644
--- a/repository_test.go
+++ b/repository_test.go
@@ -2415,7 +2415,7 @@ func (s *RepositorySuite) TestResolveRevision(c *C) {
for rev, hash := range datas {
h, err := r.ResolveRevision(plumbing.Revision(rev))
- c.Assert(err, IsNil)
+ c.Assert(err, IsNil, Commentf("while checking %s", rev))
c.Check(h.String(), Equals, hash, Commentf("while checking %s", rev))
}
}
@@ -2427,13 +2427,14 @@ func (s *RepositorySuite) TestResolveRevisionAnnotated(c *C) {
c.Assert(err, IsNil)
datas := map[string]string{
- "refs/tags/annotated-tag": "f7b877701fbf855b44c0a9e86f3fdce2c298b07f",
+ "refs/tags/annotated-tag": "f7b877701fbf855b44c0a9e86f3fdce2c298b07f",
+ "b742a2a9fa0afcfa9a6fad080980fbc26b007c69": "f7b877701fbf855b44c0a9e86f3fdce2c298b07f",
}
for rev, hash := range datas {
h, err := r.ResolveRevision(plumbing.Revision(rev))
- c.Assert(err, IsNil)
+ c.Assert(err, IsNil, Commentf("while checking %s", rev))
c.Check(h.String(), Equals, hash, Commentf("while checking %s", rev))
}
}
@@ -2459,12 +2460,11 @@ func (s *RepositorySuite) TestResolveRevisionWithErrors(c *C) {
"HEAD^3": `Revision invalid : "3" found must be 0, 1 or 2 after "^"`,
"HEAD^{/whatever}": `No commit message match regexp : "whatever"`,
"4e1243bd22c66e76c2ba9eddc1f91394e57f9f83": "reference not found",
- "918c48b83bd081e863dbe1b80f8998f058cd8294": `refname "918c48b83bd081e863dbe1b80f8998f058cd8294" is ambiguous`,
}
for rev, rerr := range datas {
_, err := r.ResolveRevision(plumbing.Revision(rev))
-
+ c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, rerr)
}
}
diff --git a/storage/filesystem/dotgit/dotgit.go b/storage/filesystem/dotgit/dotgit.go
index ba9667e..111769b 100644
--- a/storage/filesystem/dotgit/dotgit.go
+++ b/storage/filesystem/dotgit/dotgit.go
@@ -83,7 +83,7 @@ type DotGit struct {
packList []plumbing.Hash
packMap map[plumbing.Hash]struct{}
- files map[string]billy.File
+ files map[plumbing.Hash]billy.File
}
// New returns a DotGit value ready to be used. The path argument must
@@ -245,8 +245,15 @@ func (d *DotGit) objectPackPath(hash plumbing.Hash, extension string) string {
}
func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.File, error) {
- if d.files == nil {
- d.files = make(map[string]billy.File)
+ if d.options.KeepDescriptors && extension == "pack" {
+ if d.files == nil {
+ d.files = make(map[plumbing.Hash]billy.File)
+ }
+
+ f, ok := d.files[hash]
+ if ok {
+ return f, nil
+ }
}
err := d.hasPack(hash)
@@ -255,11 +262,6 @@ func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.Fil
}
path := d.objectPackPath(hash, extension)
- f, ok := d.files[path]
- if ok {
- return f, nil
- }
-
pack, err := d.fs.Open(path)
if err != nil {
if os.IsNotExist(err) {
@@ -270,7 +272,7 @@ func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.Fil
}
if d.options.KeepDescriptors && extension == "pack" {
- d.files[path] = pack
+ d.files[hash] = pack
}
return pack, nil
diff --git a/storage/filesystem/index.go b/storage/filesystem/index.go
index 2ebf57e..d04195c 100644
--- a/storage/filesystem/index.go
+++ b/storage/filesystem/index.go
@@ -1,6 +1,7 @@
package filesystem
import (
+ "bufio"
"os"
"gopkg.in/src-d/go-git.v4/plumbing/format/index"
@@ -41,7 +42,7 @@ func (s *IndexStorage) Index() (i *index.Index, err error) {
defer ioutil.CheckClose(f, &err)
- d := index.NewDecoder(f)
+ d := index.NewDecoder(bufio.NewReader(f))
err = d.Decode(idx)
return idx, err
}
diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go
index 3eb62a2..ad5d8d0 100644
--- a/storage/filesystem/object.go
+++ b/storage/filesystem/object.go
@@ -26,6 +26,10 @@ type ObjectStorage struct {
dir *dotgit.DotGit
index map[plumbing.Hash]idxfile.Index
+
+ packList []plumbing.Hash
+ packListIdx int
+ packfiles map[plumbing.Hash]*packfile.Packfile
}
// NewObjectStorage creates a new ObjectStorage with the given .git directory and cache.
@@ -187,6 +191,73 @@ func (s *ObjectStorage) encodedObjectSizeFromUnpacked(h plumbing.Hash) (
return size, err
}
+func (s *ObjectStorage) packfile(idx idxfile.Index, pack plumbing.Hash) (*packfile.Packfile, error) {
+ if p := s.packfileFromCache(pack); p != nil {
+ return p, nil
+ }
+
+ f, err := s.dir.ObjectPack(pack)
+ if err != nil {
+ return nil, err
+ }
+
+ var p *packfile.Packfile
+ if s.objectCache != nil {
+ p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
+ } else {
+ p = packfile.NewPackfile(idx, s.dir.Fs(), f)
+ }
+
+ return p, s.storePackfileInCache(pack, p)
+}
+
+func (s *ObjectStorage) packfileFromCache(hash plumbing.Hash) *packfile.Packfile {
+ if s.packfiles == nil {
+ if s.options.KeepDescriptors {
+ s.packfiles = make(map[plumbing.Hash]*packfile.Packfile)
+ } else if s.options.MaxOpenDescriptors > 0 {
+ s.packList = make([]plumbing.Hash, s.options.MaxOpenDescriptors)
+ s.packfiles = make(map[plumbing.Hash]*packfile.Packfile, s.options.MaxOpenDescriptors)
+ }
+ }
+
+ return s.packfiles[hash]
+}
+
+func (s *ObjectStorage) storePackfileInCache(hash plumbing.Hash, p *packfile.Packfile) error {
+ if s.options.KeepDescriptors {
+ s.packfiles[hash] = p
+ return nil
+ }
+
+ if s.options.MaxOpenDescriptors <= 0 {
+ return nil
+ }
+
+ // start over as the limit of packList is hit
+ if s.packListIdx >= len(s.packList) {
+ s.packListIdx = 0
+ }
+
+ // close the existing packfile if open
+ if next := s.packList[s.packListIdx]; !next.IsZero() {
+ open := s.packfiles[next]
+ delete(s.packfiles, next)
+ if open != nil {
+ if err := open.Close(); err != nil {
+ return err
+ }
+ }
+ }
+
+ // cache newly open packfile
+ s.packList[s.packListIdx] = hash
+ s.packfiles[hash] = p
+ s.packListIdx++
+
+ return nil
+}
+
func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
size int64, err error) {
if err := s.requireIndex(); err != nil {
@@ -198,12 +269,6 @@ func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
return 0, plumbing.ErrObjectNotFound
}
- f, err := s.dir.ObjectPack(pack)
- if err != nil {
- return 0, err
- }
- defer ioutil.CheckClose(f, &err)
-
idx := s.index[pack]
hash, err := idx.FindHash(offset)
if err == nil {
@@ -215,11 +280,13 @@ func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
return 0, err
}
- var p *packfile.Packfile
- if s.objectCache != nil {
- p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
- } else {
- p = packfile.NewPackfile(idx, s.dir.Fs(), f)
+ p, err := s.packfile(idx, pack)
+ if err != nil {
+ return 0, err
+ }
+
+ if !s.options.KeepDescriptors && s.options.MaxOpenDescriptors == 0 {
+ defer ioutil.CheckClose(p, &err)
}
return p.GetSizeByOffset(offset)
@@ -361,29 +428,28 @@ func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) (
return nil, plumbing.ErrObjectNotFound
}
- f, err := s.dir.ObjectPack(pack)
+ idx := s.index[pack]
+ p, err := s.packfile(idx, pack)
if err != nil {
return nil, err
}
- if !s.options.KeepDescriptors {
- defer ioutil.CheckClose(f, &err)
+ if !s.options.KeepDescriptors && s.options.MaxOpenDescriptors == 0 {
+ defer ioutil.CheckClose(p, &err)
}
- idx := s.index[pack]
if canBeDelta {
- return s.decodeDeltaObjectAt(f, idx, offset, hash)
+ return s.decodeDeltaObjectAt(p, offset, hash)
}
- return s.decodeObjectAt(f, idx, offset)
+ return s.decodeObjectAt(p, offset)
}
func (s *ObjectStorage) decodeObjectAt(
- f billy.File,
- idx idxfile.Index,
+ p *packfile.Packfile,
offset int64,
) (plumbing.EncodedObject, error) {
- hash, err := idx.FindHash(offset)
+ hash, err := p.FindHash(offset)
if err == nil {
obj, ok := s.objectCache.Get(hash)
if ok {
@@ -395,28 +461,16 @@ func (s *ObjectStorage) decodeObjectAt(
return nil, err
}
- var p *packfile.Packfile
- if s.objectCache != nil {
- p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
- } else {
- p = packfile.NewPackfile(idx, s.dir.Fs(), f)
- }
-
return p.GetByOffset(offset)
}
func (s *ObjectStorage) decodeDeltaObjectAt(
- f billy.File,
- idx idxfile.Index,
+ p *packfile.Packfile,
offset int64,
hash plumbing.Hash,
) (plumbing.EncodedObject, error) {
- if _, err := f.Seek(0, io.SeekStart); err != nil {
- return nil, err
- }
-
- p := packfile.NewScanner(f)
- header, err := p.SeekObjectHeader(offset)
+ scan := p.Scanner()
+ header, err := scan.SeekObjectHeader(offset)
if err != nil {
return nil, err
}
@@ -429,12 +483,12 @@ func (s *ObjectStorage) decodeDeltaObjectAt(
case plumbing.REFDeltaObject:
base = header.Reference
case plumbing.OFSDeltaObject:
- base, err = idx.FindHash(header.OffsetReference)
+ base, err = p.FindHash(header.OffsetReference)
if err != nil {
return nil, err
}
default:
- return s.decodeObjectAt(f, idx, offset)
+ return s.decodeObjectAt(p, offset)
}
obj := &plumbing.MemoryObject{}
@@ -444,7 +498,7 @@ func (s *ObjectStorage) decodeDeltaObjectAt(
return nil, err
}
- if _, _, err := p.NextObject(w); err != nil {
+ if _, _, err := scan.NextObject(w); err != nil {
return nil, err
}
@@ -515,7 +569,20 @@ func (s *ObjectStorage) buildPackfileIters(
// Close closes all opened files.
func (s *ObjectStorage) Close() error {
- return s.dir.Close()
+ var firstError error
+ if s.options.KeepDescriptors || s.options.MaxOpenDescriptors > 0 {
+ for _, packfile := range s.packfiles {
+ err := packfile.Close()
+ if firstError == nil && err != nil {
+ firstError = err
+ }
+ }
+ }
+
+ s.packfiles = nil
+ s.dir.Close()
+
+ return firstError
}
type lazyPackfilesIter struct {
diff --git a/storage/filesystem/object_test.go b/storage/filesystem/object_test.go
index 5cfb227..c2461db 100644
--- a/storage/filesystem/object_test.go
+++ b/storage/filesystem/object_test.go
@@ -86,6 +86,24 @@ func (s *FsSuite) TestGetFromPackfileKeepDescriptors(c *C) {
})
}
+func (s *FsSuite) TestGetFromPackfileMaxOpenDescriptors(c *C) {
+ fs := fixtures.ByTag(".git").ByTag("multi-packfile").One().DotGit()
+ o := NewObjectStorageWithOptions(dotgit.New(fs), cache.NewObjectLRUDefault(), Options{MaxOpenDescriptors: 1})
+
+ expected := plumbing.NewHash("8d45a34641d73851e01d3754320b33bb5be3c4d3")
+ obj, err := o.getFromPackfile(expected, false)
+ c.Assert(err, IsNil)
+ c.Assert(obj.Hash(), Equals, expected)
+
+ expected = plumbing.NewHash("e9cfa4c9ca160546efd7e8582ec77952a27b17db")
+ obj, err = o.getFromPackfile(expected, false)
+ c.Assert(err, IsNil)
+ c.Assert(obj.Hash(), Equals, expected)
+
+ err = o.Close()
+ c.Assert(err, IsNil)
+}
+
func (s *FsSuite) TestGetSizeOfObjectFile(c *C) {
fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit()
o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault())
diff --git a/storage/filesystem/storage.go b/storage/filesystem/storage.go
index 370f7bd..88d1ed4 100644
--- a/storage/filesystem/storage.go
+++ b/storage/filesystem/storage.go
@@ -31,6 +31,9 @@ type Options struct {
// KeepDescriptors makes the file descriptors be reused, but they will
// need to be manually closed by calling Close().
KeepDescriptors bool
+ // MaxOpenDescriptors is the max number of file descriptors to keep
+ // open. If KeepDescriptors is true, all file descriptors will remain open.
+ MaxOpenDescriptors int
}
// NewStorage returns a new Storage backed by a given `fs.Filesystem` and cache.
@@ -43,7 +46,6 @@ func NewStorage(fs billy.Filesystem, cache cache.Object) *Storage {
func NewStorageWithOptions(fs billy.Filesystem, cache cache.Object, ops Options) *Storage {
dirOps := dotgit.Options{
ExclusiveAccess: ops.ExclusiveAccess,
- KeepDescriptors: ops.KeepDescriptors,
}
dir := dotgit.NewWithOptions(fs, dirOps)
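A sketch of enabling the new limit from the public API (path and limit are assumptions): at most MaxOpenDescriptors packfiles stay open, older ones are closed round-robin, and Close releases whatever is still cached:

    package main

    import (
    	"fmt"

    	"gopkg.in/src-d/go-billy.v4/osfs"
    	git "gopkg.in/src-d/go-git.v4"
    	"gopkg.in/src-d/go-git.v4/plumbing/cache"
    	"gopkg.in/src-d/go-git.v4/storage/filesystem"
    )

    func main() {
    	fs := osfs.New("/path/to/repo/.git") // hypothetical path
    	st := filesystem.NewStorageWithOptions(fs, cache.NewObjectLRUDefault(), filesystem.Options{
    		MaxOpenDescriptors: 4, // keep at most four packfiles open at once
    	})
    	defer st.Close()

    	r, err := git.Open(st, nil) // nil worktree: open as bare
    	if err != nil {
    		panic(err)
    	}

    	ref, err := r.Head()
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(ref.Hash())
    }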
diff --git a/utils/binary/read.go b/utils/binary/read.go
index 50da1ff..12e57c3 100644
--- a/utils/binary/read.go
+++ b/utils/binary/read.go
@@ -25,6 +25,10 @@ func Read(r io.Reader, data ...interface{}) error {
// ReadUntil reads from r until delim is found
func ReadUntil(r io.Reader, delim byte) ([]byte, error) {
+ if bufr, ok := r.(*bufio.Reader); ok {
+ return ReadUntilFromBufioReader(bufr, delim)
+ }
+
var buf [1]byte
value := make([]byte, 0, 16)
for {
@@ -44,6 +48,17 @@ func ReadUntil(r io.Reader, delim byte) ([]byte, error) {
}
}
+// ReadUntilFromBufioReader is like bufio.ReadBytes but drops the delimiter
+// from the result.
+func ReadUntilFromBufioReader(r *bufio.Reader, delim byte) ([]byte, error) {
+ value, err := r.ReadBytes(delim)
+ if err != nil || len(value) == 0 {
+ return nil, err
+ }
+
+ return value[:len(value)-1], nil
+}
+
// ReadVariableWidthInt reads and returns an int in Git VLQ special format:
//
// Ordinary VLQ has some redundancies, example: the number 358 can be
diff --git a/utils/binary/read_test.go b/utils/binary/read_test.go
index 5674653..22867c2 100644
--- a/utils/binary/read_test.go
+++ b/utils/binary/read_test.go
@@ -1,6 +1,7 @@
package binary
import (
+ "bufio"
"bytes"
"encoding/binary"
"testing"
@@ -39,6 +40,15 @@ func (s *BinarySuite) TestReadUntil(c *C) {
c.Assert(string(b), Equals, "foo")
}
+func (s *BinarySuite) TestReadUntilFromBufioReader(c *C) {
+ buf := bufio.NewReader(bytes.NewBuffer([]byte("foo bar")))
+
+ b, err := ReadUntilFromBufioReader(buf, ' ')
+ c.Assert(err, IsNil)
+ c.Assert(b, HasLen, 3)
+ c.Assert(string(b), Equals, "foo")
+}
+
func (s *BinarySuite) TestReadVariableWidthInt(c *C) {
buf := bytes.NewBuffer([]byte{129, 110})
diff --git a/worktree.go b/worktree.go
index dae40a3..1b10449 100644
--- a/worktree.go
+++ b/worktree.go
@@ -160,6 +160,8 @@ func (w *Worktree) Checkout(opts *CheckoutOptions) error {
ro := &ResetOptions{Commit: c, Mode: MergeReset}
if opts.Force {
ro.Mode = HardReset
+ } else if opts.Keep {
+ ro.Mode = SoftReset
}
if !opts.Hash.IsZero() && !opts.Create {
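A sketch of the new option, assuming w is a *git.Worktree with changes already staged: Keep downgrades the reset to a soft one, so the staging area survives the branch switch (the helper is hypothetical):

    package sketch

    import (
    	git "gopkg.in/src-d/go-git.v4"
    	"gopkg.in/src-d/go-git.v4/plumbing"
    )

    // switchKeepingIndex checks out master while keeping staged changes.
    func switchKeepingIndex(w *git.Worktree) error {
    	return w.Checkout(&git.CheckoutOptions{
    		Branch: plumbing.Master, // refs/heads/master
    		Keep:   true,            // keep index contents across the switch
    	})
    }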
diff --git a/worktree_test.go b/worktree_test.go
index afedc91..045a76d 100644
--- a/worktree_test.go
+++ b/worktree_test.go
@@ -314,6 +314,46 @@ func (s *WorktreeSuite) TestCheckoutForce(c *C) {
c.Assert(entries, HasLen, 8)
}
+func (s *WorktreeSuite) TestCheckoutKeep(c *C) {
+ w := &Worktree{
+ r: s.Repository,
+ Filesystem: memfs.New(),
+ }
+
+ err := w.Checkout(&CheckoutOptions{
+ Force: true,
+ })
+ c.Assert(err, IsNil)
+
+ // Create a new branch and create a new file.
+ err = w.Checkout(&CheckoutOptions{
+ Branch: plumbing.NewBranchReferenceName("new-branch"),
+ Create: true,
+ })
+ c.Assert(err, IsNil)
+
+ w.Filesystem = memfs.New()
+ f, err := w.Filesystem.Create("new-file.txt")
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte("DUMMY"))
+ c.Assert(err, IsNil)
+ c.Assert(f.Close(), IsNil)
+
+ // Add the file to staging.
+ _, err = w.Add("new-file.txt")
+ c.Assert(err, IsNil)
+
+ // Switch branch to master, and verify that the new file was kept in staging.
+ err = w.Checkout(&CheckoutOptions{
+ Keep: true,
+ })
+ c.Assert(err, IsNil)
+
+ fi, err := w.Filesystem.Stat("new-file.txt")
+ c.Assert(err, IsNil)
+ c.Assert(fi.Size(), Equals, int64(5))
+}
+
func (s *WorktreeSuite) TestCheckoutSymlink(c *C) {
if runtime.GOOS == "windows" {
c.Skip("git doesn't support symlinks by default in windows")