-rw-r--r--  blame.go                               |  21
-rw-r--r--  blame_test.go                          |  26
-rw-r--r--  config/config_test.go                  |  15
-rw-r--r--  config/refspec.go                      |   2
-rw-r--r--  options.go                             |   4
-rw-r--r--  plumbing/format/index/decoder.go       |   2
-rw-r--r--  plumbing/format/packfile/packfile.go   |  18
-rw-r--r--  plumbing/object/commit_walker_file.go  | 115
-rw-r--r--  plumbing/object/tree.go                |  11
-rw-r--r--  plumbing/object/tree_test.go           |   6
-rw-r--r--  plumbing/storer/object.go              |   2
-rw-r--r--  plumbing/storer/object_test.go         |  10
-rw-r--r--  plumbing/transport/common_test.go      |  20
-rw-r--r--  prune.go                               |   2
-rw-r--r--  references.go                          |   4
-rw-r--r--  references_test.go                     |  14
-rw-r--r--  repository.go                          |  75
-rw-r--r--  repository_test.go                     | 172
-rw-r--r--  status.go                              |   2
-rw-r--r--  storage/filesystem/object.go           | 105
-rw-r--r--  storage/filesystem/object_test.go      |  82
-rw-r--r--  storage/memory/storage.go              |  10
22 files changed, 667 insertions, 51 deletions
@@ -123,14 +123,25 @@ func newLine(author, text string, date time.Time, hash plumbing.Hash) *Line {
 }
 
 func newLines(contents []string, commits []*object.Commit) ([]*Line, error) {
-	if len(contents) != len(commits) {
-		return nil, errors.New("contents and commits have different length")
+	lcontents := len(contents)
+	lcommits := len(commits)
+
+	if lcontents != lcommits {
+		if lcontents == lcommits-1 && contents[lcontents-1] != "\n" {
+			contents = append(contents, "\n")
+		} else {
+			return nil, errors.New("contents and commits have different length")
+		}
 	}
-	result := make([]*Line, 0, len(contents))
+
+	result := make([]*Line, 0, lcontents)
 	for i := range contents {
-		l := newLine(commits[i].Author.Email, contents[i], commits[i].Author.When, commits[i].Hash)
-		result = append(result, l)
+		result = append(result, newLine(
+			commits[i].Author.Email, contents[i],
+			commits[i].Author.When, commits[i].Hash,
+		))
 	}
+
 	return result, nil
 }
 
diff --git a/blame_test.go b/blame_test.go
index 92911b1..e0ac129 100644
--- a/blame_test.go
+++ b/blame_test.go
@@ -2,6 +2,7 @@ package git
 
 import (
 	"gopkg.in/src-d/go-git.v4/plumbing"
+	"gopkg.in/src-d/go-git.v4/plumbing/object"
 
 	. "gopkg.in/check.v1"
 	"gopkg.in/src-d/go-git-fixtures.v3"
@@ -13,6 +14,31 @@ type BlameSuite struct {
 
 var _ = Suite(&BlameSuite{})
 
+func (s *BlameSuite) TestNewLines(c *C) {
+	h := plumbing.NewHash("ce9f123d790717599aaeb76bc62510de437761be")
+	lines, err := newLines([]string{"foo"}, []*object.Commit{{
+		Hash:    h,
+		Message: "foo",
+	}})
+
+	c.Assert(err, IsNil)
+	c.Assert(lines, HasLen, 1)
+	c.Assert(lines[0].Text, Equals, "foo")
+	c.Assert(lines[0].Hash, Equals, h)
+}
+
+func (s *BlameSuite) TestNewLinesWithNewLine(c *C) {
+	lines, err := newLines([]string{"foo"}, []*object.Commit{
+		{Message: "foo"},
+		{Message: "bar"},
+	})
+
+	c.Assert(err, IsNil)
+	c.Assert(lines, HasLen, 2)
+	c.Assert(lines[0].Text, Equals, "foo")
+	c.Assert(lines[1].Text, Equals, "\n")
+}
+
 type blameTest struct {
 	repo  string
 	rev   string
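The newLines change above is internal to the blame computation; callers reach it through the exported Blame API. A minimal sketch of that call path, assuming a repository in the current directory and a file name chosen purely for illustration:

package main

import (
	"fmt"
	"log"

	git "gopkg.in/src-d/go-git.v4"
)

func main() {
	// Open an existing repository; "." is an assumption for illustration.
	r, err := git.PlainOpen(".")
	if err != nil {
		log.Fatal(err)
	}

	ref, err := r.Head()
	if err != nil {
		log.Fatal(err)
	}

	commit, err := r.CommitObject(ref.Hash())
	if err != nil {
		log.Fatal(err)
	}

	// Blame a file at HEAD; internally the per-line result is built by newLines.
	result, err := git.Blame(commit, "blame.go")
	if err != nil {
		log.Fatal(err)
	}

	for _, line := range result.Lines {
		fmt.Printf("%s %s %s\n", line.Hash, line.Author, line.Text)
	}
}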
"https://github.com/foo/qux.git") @@ -69,6 +73,8 @@ func (s *ConfigSuite) TestMarshall(c *C) { fetch = +refs/pull/*:refs/remotes/origin/pull/* [remote "origin"] url = git@github.com:mcuadros/go-git.git +[remote "win-local"] + url = "X:\\Git\\" [submodule "qux"] url = https://github.com/foo/qux.git [branch "master"] @@ -91,6 +97,11 @@ func (s *ConfigSuite) TestMarshall(c *C) { Fetch: []RefSpec{"+refs/heads/*:refs/remotes/origin/*", "+refs/pull/*:refs/remotes/origin/pull/*"}, } + cfg.Remotes["win-local"] = &RemoteConfig{ + Name: "win-local", + URLs: []string{"X:\\Git\\"}, + } + cfg.Submodules["qux"] = &Submodule{ Name: "qux", URL: "https://github.com/foo/qux.git", @@ -119,6 +130,8 @@ func (s *ConfigSuite) TestUnmarshallMarshall(c *C) { url = git@github.com:mcuadros/go-git.git fetch = +refs/heads/*:refs/remotes/origin/* mirror = true +[remote "win-local"] + url = "X:\\Git\\" [branch "master"] remote = origin merge = refs/heads/master diff --git a/config/refspec.go b/config/refspec.go index c9b9d52..391705c 100644 --- a/config/refspec.go +++ b/config/refspec.go @@ -15,7 +15,7 @@ const ( var ( ErrRefSpecMalformedSeparator = errors.New("malformed refspec, separators are wrong") - ErrRefSpecMalformedWildcard = errors.New("malformed refspec, missmatched number of wildcards") + ErrRefSpecMalformedWildcard = errors.New("malformed refspec, mismatched number of wildcards") ) // RefSpec is a mapping from local branches to remote references @@ -330,6 +330,10 @@ type LogOptions struct { // set Order=LogOrderCommitterTime for ordering by committer time (more compatible with `git log`) // set Order=LogOrderBSF for Breadth-first search Order LogOrder + + // Show only those commits in which the specified file was inserted/updated. + // It is equivalent to running `git log -- <file-name>`. + FileName *string } var ( diff --git a/plumbing/format/index/decoder.go b/plumbing/format/index/decoder.go index 1a58128..df25530 100644 --- a/plumbing/format/index/decoder.go +++ b/plumbing/format/index/decoder.go @@ -21,7 +21,7 @@ var ( // ErrMalformedSignature is returned by Decode when the index header file is // malformed ErrMalformedSignature = errors.New("malformed index signature file") - // ErrInvalidChecksum is returned by Decode if the SHA1 hash missmatch with + // ErrInvalidChecksum is returned by Decode if the SHA1 hash mismatch with // the read content ErrInvalidChecksum = errors.New("invalid checksum") diff --git a/plumbing/format/packfile/packfile.go b/plumbing/format/packfile/packfile.go index 852a834..0d13066 100644 --- a/plumbing/format/packfile/packfile.go +++ b/plumbing/format/packfile/packfile.go @@ -90,6 +90,24 @@ func (p *Packfile) GetByOffset(o int64) (plumbing.EncodedObject, error) { return p.nextObject() } +// GetSizeByOffset retrieves the size of the encoded object from the +// packfile with the given offset. 
diff --git a/plumbing/format/index/decoder.go b/plumbing/format/index/decoder.go
index 1a58128..df25530 100644
--- a/plumbing/format/index/decoder.go
+++ b/plumbing/format/index/decoder.go
@@ -21,7 +21,7 @@ var (
 	// ErrMalformedSignature is returned by Decode when the index header file is
 	// malformed
 	ErrMalformedSignature = errors.New("malformed index signature file")
-	// ErrInvalidChecksum is returned by Decode if the SHA1 hash missmatch with
+	// ErrInvalidChecksum is returned by Decode if the SHA1 hash mismatch with
 	// the read content
 	ErrInvalidChecksum = errors.New("invalid checksum")
 
diff --git a/plumbing/format/packfile/packfile.go b/plumbing/format/packfile/packfile.go
index 852a834..0d13066 100644
--- a/plumbing/format/packfile/packfile.go
+++ b/plumbing/format/packfile/packfile.go
@@ -90,6 +90,24 @@ func (p *Packfile) GetByOffset(o int64) (plumbing.EncodedObject, error) {
 	return p.nextObject()
 }
 
+// GetSizeByOffset retrieves the size of the encoded object from the
+// packfile with the given offset.
+func (p *Packfile) GetSizeByOffset(o int64) (size int64, err error) {
+	if _, err := p.s.SeekFromStart(o); err != nil {
+		if err == io.EOF || isInvalid(err) {
+			return 0, plumbing.ErrObjectNotFound
+		}
+
+		return 0, err
+	}
+
+	h, err := p.nextObjectHeader()
+	if err != nil {
+		return 0, err
+	}
+	return h.Length, nil
+}
+
 func (p *Packfile) nextObjectHeader() (*ObjectHeader, error) {
 	h, err := p.s.NextObjectHeader()
 	p.s.pendingObject = nil
diff --git a/plumbing/object/commit_walker_file.go b/plumbing/object/commit_walker_file.go
new file mode 100644
index 0000000..84e738a
--- /dev/null
+++ b/plumbing/object/commit_walker_file.go
@@ -0,0 +1,115 @@
+package object
+
+import (
+	"gopkg.in/src-d/go-git.v4/plumbing/storer"
+	"io"
+)
+
+type commitFileIter struct {
+	fileName      string
+	sourceIter    CommitIter
+	currentCommit *Commit
+}
+
+// NewCommitFileIterFromIter returns a commit iterator which performs diffTree between
+// successive trees returned from the commit iterator from the argument. The purpose of this is
+// to find the commits that explain how the files that match the path came to be.
+func NewCommitFileIterFromIter(fileName string, commitIter CommitIter) CommitIter {
+	iterator := new(commitFileIter)
+	iterator.sourceIter = commitIter
+	iterator.fileName = fileName
+	return iterator
+}
+
+func (c *commitFileIter) Next() (*Commit, error) {
+	if c.currentCommit == nil {
+		var err error
+		c.currentCommit, err = c.sourceIter.Next()
+		if err != nil {
+			return nil, err
+		}
+	}
+	commit, commitErr := c.getNextFileCommit()
+
+	// Setting current-commit to nil to prevent unwanted states when errors are raised
+	if commitErr != nil {
+		c.currentCommit = nil
+	}
+	return commit, commitErr
+}
+
+func (c *commitFileIter) getNextFileCommit() (*Commit, error) {
+	for {
+		// Parent-commit can be nil if the current-commit is the initial commit
+		parentCommit, parentCommitErr := c.sourceIter.Next()
+		if parentCommitErr != nil {
+			// If the parent-commit is beyond the initial commit, keep it nil
+			if parentCommitErr != io.EOF {
+				return nil, parentCommitErr
+			}
+			parentCommit = nil
+		}
+
+		// Fetch the trees of the current and parent commits
+		currentTree, currTreeErr := c.currentCommit.Tree()
+		if currTreeErr != nil {
+			return nil, currTreeErr
+		}
+
+		var parentTree *Tree
+		if parentCommit != nil {
+			var parentTreeErr error
+			parentTree, parentTreeErr = parentCommit.Tree()
+			if parentTreeErr != nil {
+				return nil, parentTreeErr
+			}
+		}
+
+		// Find diff between current and parent trees
+		changes, diffErr := DiffTree(currentTree, parentTree)
+		if diffErr != nil {
+			return nil, diffErr
+		}
+
+		foundChangeForFile := false
+		for _, change := range changes {
+			if change.name() == c.fileName {
+				foundChangeForFile = true
+				break
+			}
+		}
+
+		// Storing the current-commit in-case a change is found, and
+		// Updating the current-commit for the next-iteration
+		prevCommit := c.currentCommit
+		c.currentCommit = parentCommit
+
+		if foundChangeForFile == true {
+			return prevCommit, nil
+		}
+
+		// If not matches found and if parent-commit is beyond the initial commit, then return with EOF
+		if parentCommit == nil {
+			return nil, io.EOF
+		}
+	}
+}
+
+func (c *commitFileIter) ForEach(cb func(*Commit) error) error {
+	for {
+		commit, nextErr := c.Next()
+		if nextErr != nil {
+			return nextErr
+		}
+		err := cb(commit)
+		if err == storer.ErrStop {
+			return nil
+		} else if err != nil {
+			return err
+		}
+	}
+}
+
+func (c *commitFileIter) Close() {
+	c.sourceIter.Close()
+}
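The iterator added above can also be driven directly, wrapping any existing CommitIter rather than going through Repository.Log. A sketch under the assumption that the caller already has a starting commit; the helper name is illustrative:

package example

import (
	"io"

	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

// commitsTouching drains a commit iterator filtered to a single path, which
// is what Repository.Log does when LogOptions.FileName is set.
func commitsTouching(start *object.Commit, path string) ([]*object.Commit, error) {
	// Wrap a plain pre-order walk with the file filter introduced above.
	iter := object.NewCommitFileIterFromIter(
		path, object.NewCommitPreorderIter(start, nil, nil))
	defer iter.Close()

	var result []*object.Commit
	for {
		c, err := iter.Next()
		if err == io.EOF {
			return result, nil
		}
		if err != nil {
			return nil, err
		}
		result = append(result, c)
	}
}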
diff --git a/plumbing/object/tree.go b/plumbing/object/tree.go
index c36a137..78d61a1 100644
--- a/plumbing/object/tree.go
+++ b/plumbing/object/tree.go
@@ -87,6 +87,17 @@ func (t *Tree) File(path string) (*File, error) {
 	return NewFile(path, e.Mode, blob), nil
 }
 
+// Size returns the plaintext size of an object, without reading it
+// into memory.
+func (t *Tree) Size(path string) (int64, error) {
+	e, err := t.FindEntry(path)
+	if err != nil {
+		return 0, ErrEntryNotFound
+	}
+
+	return t.s.EncodedObjectSize(e.Hash)
+}
+
 // Tree returns the tree identified by the `path` argument.
 // The path is interpreted as relative to the tree receiver.
 func (t *Tree) Tree(path string) (*Tree, error) {
diff --git a/plumbing/object/tree_test.go b/plumbing/object/tree_test.go
index 7366421..889c63a 100644
--- a/plumbing/object/tree_test.go
+++ b/plumbing/object/tree_test.go
@@ -98,6 +98,12 @@ func (s *TreeSuite) TestFileFailsWithExistingTrees(c *C) {
 	c.Assert(err, Equals, ErrFileNotFound)
 }
 
+func (s *TreeSuite) TestSize(c *C) {
+	size, err := s.Tree.Size("LICENSE")
+	c.Assert(err, IsNil)
+	c.Assert(size, Equals, int64(1072))
+}
+
 func (s *TreeSuite) TestFiles(c *C) {
 	var count int
 	err := s.Tree.Files().ForEach(func(f *File) error {
diff --git a/plumbing/storer/object.go b/plumbing/storer/object.go
index 92aa629..2ac9b09 100644
--- a/plumbing/storer/object.go
+++ b/plumbing/storer/object.go
@@ -40,6 +40,8 @@ type EncodedObjectStorer interface {
 	// HasEncodedObject returns ErrObjNotFound if the object doesn't
 	// exist. If the object does exist, it returns nil.
 	HasEncodedObject(plumbing.Hash) error
+	// EncodedObjectSize returns the plaintext size of the encoded object.
+	EncodedObjectSize(plumbing.Hash) (int64, error)
 }
 
 // DeltaObjectStorer is an EncodedObjectStorer that can return delta
diff --git a/plumbing/storer/object_test.go b/plumbing/storer/object_test.go
index 6b4fe0f..bc22f7b 100644
--- a/plumbing/storer/object_test.go
+++ b/plumbing/storer/object_test.go
@@ -141,6 +141,16 @@ func (o *MockObjectStorage) HasEncodedObject(h plumbing.Hash) error {
 	return plumbing.ErrObjectNotFound
 }
 
+func (o *MockObjectStorage) EncodedObjectSize(h plumbing.Hash) (
+	size int64, err error) {
+	for _, o := range o.db {
+		if o.Hash() == h {
+			return o.Size(), nil
+		}
+	}
+	return 0, plumbing.ErrObjectNotFound
+}
+
 func (o *MockObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) {
 	for _, o := range o.db {
 		if o.Hash() == h {
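Together, Tree.Size and the new EncodedObjectSize storer method make it possible to ask for a blob's plaintext size without decoding its contents. A sketch of the intended use; the helper name is an assumption for illustration:

package example

import "gopkg.in/src-d/go-git.v4/plumbing/object"

// blobSize reports the uncompressed size of a path at a given commit without
// loading the blob contents.
func blobSize(commit *object.Commit, path string) (int64, error) {
	tree, err := commit.Tree()
	if err != nil {
		return 0, err
	}

	// Tree.Size delegates to EncodedObjectSize on the underlying storer, so
	// only the object header is read for both loose and packed objects.
	return tree.Size(path)
}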
"http://person@mail.com:%20%21%22%23$%25&%27%28%29%2A+%2C-.%2F:%3B%3C=%3E%3F@%5B%5C%5D%5E_%60%7B%7C%7D~@github.com/user/repository.git") } func (s *SuiteCommon) TestNewEndpointInvalidURL(c *C) { @@ -49,7 +49,7 @@ func (r *Repository) Prune(opt PruneOptions) error { } // Otherwise it is a candidate for pruning. // Check out for too new objects next. - if opt.OnlyObjectsOlderThan != (time.Time{}) { + if !opt.OnlyObjectsOlderThan.IsZero() { // Errors here are non-fatal. The object may be e.g. packed. // Or concurrently deleted. Skip such objects. t, err := los.LooseObjectTime(hash) diff --git a/references.go b/references.go index a1872a5..5673ac1 100644 --- a/references.go +++ b/references.go @@ -47,7 +47,9 @@ func (s commitSorterer) Len() int { } func (s commitSorterer) Less(i, j int) bool { - return s.l[i].Committer.When.Before(s.l[j].Committer.When) + return s.l[i].Committer.When.Before(s.l[j].Committer.When) || + s.l[i].Committer.When.Equal(s.l[j].Committer.When) && + s.l[i].Author.When.Before(s.l[j].Author.When) } func (s commitSorterer) Swap(i, j int) { diff --git a/references_test.go b/references_test.go index cefc7a2..6e75563 100644 --- a/references_test.go +++ b/references_test.go @@ -163,7 +163,19 @@ var referencesTests = [...]struct { "1e14f94bcf82694fdc7e2dcbbfdbbed58db0f4d9", "1e3d328a2cabda5d0aaddc5dec65271343e0dc37", }}, - + {"https://github.com/spinnaker/spinnaker.git", "f39d86f59a0781f130e8de6b2115329c1fbe9545", "README.adoc", []string{ + "638f61b3331695f46f1a88095e26dea0f09f176b", + "bd42370d3fe8d410e78acb96f81cb3d838ad1c21", + "d6905eab6fec1841c7cf8e4484499f5c8d7d423e", + "c0a70a0f5aa494f0ae01c55ba191f2325556489a", + "811795c8a185e88f5d269195cb68b29c8d0fe170", + "d6e6fe0194447cc280f942d6a2e0521b68ea7796", + "174bdbf9edfb0ca88415dd4a673852d5b22e7036", + "9944d6cf72b8f82d622d85dad7434472bc8f397d", + "e805183c72f0426fb073728c01901c2fd2db1da6", + "8ef83dd443a05e9122681950399edaa58a38d466", + "d73f9cee49a5ad27a42a6e18af7c49a8f28ad8a8", + }}, // FAILS /* // this contains an empty move diff --git a/repository.go b/repository.go index 08b822c..507ff44 100644 --- a/repository.go +++ b/repository.go @@ -649,11 +649,12 @@ func (r *Repository) clone(ctx context.Context, o *CloneOptions) error { } ref, err := r.fetchAndUpdateReferences(ctx, &FetchOptions{ - RefSpecs: r.cloneRefSpec(o, c), - Depth: o.Depth, - Auth: o.Auth, - Progress: o.Progress, - Tags: o.Tags, + RefSpecs: r.cloneRefSpec(o, c), + Depth: o.Depth, + Auth: o.Auth, + Progress: o.Progress, + Tags: o.Tags, + RemoteName: o.RemoteName, }, o.ReferenceName) if err != nil { return err @@ -960,19 +961,26 @@ func (r *Repository) Log(o *LogOptions) (object.CommitIter, error) { return nil, err } + var commitIter object.CommitIter switch o.Order { case LogOrderDefault: - return object.NewCommitPreorderIter(commit, nil, nil), nil + commitIter = object.NewCommitPreorderIter(commit, nil, nil) case LogOrderDFS: - return object.NewCommitPreorderIter(commit, nil, nil), nil + commitIter = object.NewCommitPreorderIter(commit, nil, nil) case LogOrderDFSPost: - return object.NewCommitPostorderIter(commit, nil), nil + commitIter = object.NewCommitPostorderIter(commit, nil) case LogOrderBSF: - return object.NewCommitIterBSF(commit, nil, nil), nil + commitIter = object.NewCommitIterBSF(commit, nil, nil) case LogOrderCommitterTime: - return object.NewCommitIterCTime(commit, nil, nil), nil + commitIter = object.NewCommitIterCTime(commit, nil, nil) + default: + return nil, fmt.Errorf("invalid Order=%v", o.Order) + } + + if o.FileName == nil { + return 
diff --git a/repository.go b/repository.go
index 08b822c..507ff44 100644
--- a/repository.go
+++ b/repository.go
@@ -649,11 +649,12 @@ func (r *Repository) clone(ctx context.Context, o *CloneOptions) error {
 	}
 
 	ref, err := r.fetchAndUpdateReferences(ctx, &FetchOptions{
-		RefSpecs: r.cloneRefSpec(o, c),
-		Depth:    o.Depth,
-		Auth:     o.Auth,
-		Progress: o.Progress,
-		Tags:     o.Tags,
+		RefSpecs:   r.cloneRefSpec(o, c),
+		Depth:      o.Depth,
+		Auth:       o.Auth,
+		Progress:   o.Progress,
+		Tags:       o.Tags,
+		RemoteName: o.RemoteName,
 	}, o.ReferenceName)
 	if err != nil {
 		return err
@@ -960,19 +961,26 @@ func (r *Repository) Log(o *LogOptions) (object.CommitIter, error) {
 		return nil, err
 	}
 
+	var commitIter object.CommitIter
 	switch o.Order {
 	case LogOrderDefault:
-		return object.NewCommitPreorderIter(commit, nil, nil), nil
+		commitIter = object.NewCommitPreorderIter(commit, nil, nil)
 	case LogOrderDFS:
-		return object.NewCommitPreorderIter(commit, nil, nil), nil
+		commitIter = object.NewCommitPreorderIter(commit, nil, nil)
 	case LogOrderDFSPost:
-		return object.NewCommitPostorderIter(commit, nil), nil
+		commitIter = object.NewCommitPostorderIter(commit, nil)
 	case LogOrderBSF:
-		return object.NewCommitIterBSF(commit, nil, nil), nil
+		commitIter = object.NewCommitIterBSF(commit, nil, nil)
 	case LogOrderCommitterTime:
-		return object.NewCommitIterCTime(commit, nil, nil), nil
+		commitIter = object.NewCommitIterCTime(commit, nil, nil)
+	default:
+		return nil, fmt.Errorf("invalid Order=%v", o.Order)
+	}
+
+	if o.FileName == nil {
+		return commitIter, nil
 	}
-	return nil, fmt.Errorf("invalid Order=%v", o.Order)
+	return object.NewCommitFileIterFromIter(*o.FileName, commitIter), nil
 }
 
 // Tags returns all the tag References in a repository.
@@ -1158,7 +1166,18 @@ func (r *Repository) Worktree() (*Worktree, error) {
 	return &Worktree{r: r, Filesystem: r.wt}, nil
 }
 
-// ResolveRevision resolves revision to corresponding hash.
+func countTrue(vals ...bool) int {
+	sum := 0
+	for _, v := range vals {
+		if v {
+			sum++
+		}
+	}
+	return sum
+}
+
+// ResolveRevision resolves revision to corresponding hash. It will always
+// resolve to a commit hash, not a tree or annotated tag.
 //
 // Implemented resolvers : HEAD, branch, tag, heads/branch, refs/heads/branch,
 // refs/tags/tag, refs/remotes/origin/branch, refs/remotes/origin/HEAD, tilde and caret (HEAD~1, master~^, tag~2, ref/heads/master~1, ...), selection by text (HEAD^{/fix nasty bug})
@@ -1178,8 +1197,8 @@ func (r *Repository) ResolveRevision(rev plumbing.Revision) (*plumbing.Hash, err
 	case revision.Ref:
 		revisionRef := item.(revision.Ref)
 		var ref *plumbing.Reference
-		var hashCommit, refCommit *object.Commit
-		var rErr, hErr error
+		var hashCommit, refCommit, tagCommit *object.Commit
+		var rErr, hErr, tErr error
 
 		for _, rule := range append([]string{"%s"}, plumbing.RefRevParseRules...) {
 			ref, err = storer.ResolveReference(r.Storer, plumbing.ReferenceName(fmt.Sprintf(rule, revisionRef)))
@@ -1190,24 +1209,38 @@ func (r *Repository) ResolveRevision(rev plumbing.Revision) (*plumbing.Hash, err
 		}
 
 		if ref != nil {
+			tag, tObjErr := r.TagObject(ref.Hash())
+			if tObjErr != nil {
+				tErr = tObjErr
+			} else {
+				tagCommit, tErr = tag.Commit()
+			}
 			refCommit, rErr = r.CommitObject(ref.Hash())
 		} else {
 			rErr = plumbing.ErrReferenceNotFound
+			tErr = plumbing.ErrReferenceNotFound
 		}
 
-		isHash := plumbing.NewHash(string(revisionRef)).String() == string(revisionRef)
-
-		if isHash {
+		maybeHash := plumbing.NewHash(string(revisionRef)).String() == string(revisionRef)
+		if maybeHash {
 			hashCommit, hErr = r.CommitObject(plumbing.NewHash(string(revisionRef)))
+		} else {
+			hErr = plumbing.ErrReferenceNotFound
 		}
 
+		isTag := tErr == nil
+		isCommit := rErr == nil
+		isHash := hErr == nil
+
 		switch {
-		case rErr == nil && !isHash:
+		case countTrue(isTag, isCommit, isHash) > 1:
+			return &plumbing.ZeroHash, fmt.Errorf(`refname "%s" is ambiguous`, revisionRef)
+		case isTag:
+			commit = tagCommit
+		case isCommit:
 			commit = refCommit
-		case rErr != nil && isHash && hErr == nil:
+		case isHash:
 			commit = hashCommit
-		case rErr == nil && isHash && hErr == nil:
-			return &plumbing.ZeroHash, fmt.Errorf(`refname "%s" is ambiguous`, revisionRef)
 		default:
 			return &plumbing.ZeroHash, plumbing.ErrReferenceNotFound
 		}
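With the ResolveRevision changes above, a revision naming an annotated tag is peeled to the tagged commit instead of failing, and genuinely ambiguous names are reported as such. A sketch of calling it; the revision strings a caller passes (for example "refs/tags/annotated-tag") are illustrative:

package example

import (
	"fmt"

	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing"
)

// printResolved resolves each revision to a commit hash and prints it.
func printResolved(r *git.Repository, revs ...string) error {
	for _, rev := range revs {
		h, err := r.ResolveRevision(plumbing.Revision(rev))
		if err != nil {
			return fmt.Errorf("resolving %q: %v", rev, err)
		}
		fmt.Printf("%s -> %s\n", rev, h)
	}
	return nil
}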
diff --git a/repository_test.go b/repository_test.go
index 3179d51..07c3570 100644
--- a/repository_test.go
+++ b/repository_test.go
@@ -568,6 +568,19 @@ func (s *RepositorySuite) TestPlainClone(c *C) {
 	c.Assert(cfg.Branches["master"].Name, Equals, "master")
 }
 
+func (s *RepositorySuite) TestPlainCloneWithRemoteName(c *C) {
+	r, err := PlainClone(c.MkDir(), false, &CloneOptions{
+		URL:        s.GetBasicLocalRepositoryURL(),
+		RemoteName: "test",
+	})
+
+	c.Assert(err, IsNil)
+
+	remote, err := r.Remote("test")
+	c.Assert(err, IsNil)
+	c.Assert(remote, NotNil)
+}
+
 func (s *RepositorySuite) TestPlainCloneContext(c *C) {
 	ctx, cancel := context.WithCancel(context.Background())
 	cancel()
@@ -1143,6 +1156,145 @@ func (s *RepositorySuite) TestLogError(c *C) {
 	c.Assert(err, NotNil)
 }
 
+func (s *RepositorySuite) TestLogFileNext(c *C) {
+	r, _ := Init(memory.NewStorage(), nil)
+	err := r.clone(context.Background(), &CloneOptions{
+		URL: s.GetBasicLocalRepositoryURL(),
+	})
+
+	c.Assert(err, IsNil)
+
+	fileName := "vendor/foo.go"
+	cIter, err := r.Log(&LogOptions{FileName: &fileName})
+
+	c.Assert(err, IsNil)
+
+	commitOrder := []plumbing.Hash{
+		plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"),
+	}
+
+	for _, o := range commitOrder {
+		commit, err := cIter.Next()
+		c.Assert(err, IsNil)
+		c.Assert(commit.Hash, Equals, o)
+	}
+	_, err = cIter.Next()
+	c.Assert(err, Equals, io.EOF)
+}
+
+func (s *RepositorySuite) TestLogFileForEach(c *C) {
+	r, _ := Init(memory.NewStorage(), nil)
+	err := r.clone(context.Background(), &CloneOptions{
+		URL: s.GetBasicLocalRepositoryURL(),
+	})
+
+	c.Assert(err, IsNil)
+
+	fileName := "php/crappy.php"
+	cIter, err := r.Log(&LogOptions{FileName: &fileName})
+
+	c.Assert(err, IsNil)
+
+	commitOrder := []plumbing.Hash{
+		plumbing.NewHash("918c48b83bd081e863dbe1b80f8998f058cd8294"),
+	}
+
+	expectedIndex := 0
+	cIter.ForEach(func(commit *object.Commit) error {
+		expectedCommitHash := commitOrder[expectedIndex]
+		c.Assert(commit.Hash.String(), Equals, expectedCommitHash.String())
+		expectedIndex += 1
+		return nil
+	})
+	c.Assert(expectedIndex, Equals, 1)
+}
+
+func (s *RepositorySuite) TestLogInvalidFile(c *C) {
+	r, _ := Init(memory.NewStorage(), nil)
+	err := r.clone(context.Background(), &CloneOptions{
+		URL: s.GetBasicLocalRepositoryURL(),
+	})
+	c.Assert(err, IsNil)
+
+	// Throwing in a file that does not exist
+	fileName := "vendor/foo12.go"
+	cIter, err := r.Log(&LogOptions{FileName: &fileName})
+	// Not raising an error since `git log -- vendor/foo12.go` responds silently
+	c.Assert(err, IsNil)
+
+	_, err = cIter.Next()
+	c.Assert(err, Equals, io.EOF)
+}
+
+func (s *RepositorySuite) TestLogFileInitialCommit(c *C) {
+	r, _ := Init(memory.NewStorage(), nil)
+	err := r.clone(context.Background(), &CloneOptions{
+		URL: s.GetBasicLocalRepositoryURL(),
+	})
+	c.Assert(err, IsNil)
+
+	fileName := "LICENSE"
+	cIter, err := r.Log(&LogOptions{
+		Order:    LogOrderCommitterTime,
+		FileName: &fileName,
+	})
+
+	c.Assert(err, IsNil)
+
+	commitOrder := []plumbing.Hash{
+		plumbing.NewHash("b029517f6300c2da0f4b651b8642506cd6aaf45d"),
+	}
+
+	expectedIndex := 0
+	cIter.ForEach(func(commit *object.Commit) error {
+		expectedCommitHash := commitOrder[expectedIndex]
+		c.Assert(commit.Hash.String(), Equals, expectedCommitHash.String())
+		expectedIndex += 1
+		return nil
+	})
+	c.Assert(expectedIndex, Equals, 1)
+}
+
+func (s *RepositorySuite) TestLogFileWithOtherParamsFail(c *C) {
+	r, _ := Init(memory.NewStorage(), nil)
+	err := r.clone(context.Background(), &CloneOptions{
+		URL: s.GetBasicLocalRepositoryURL(),
+	})
+	c.Assert(err, IsNil)
+
+	fileName := "vendor/foo.go"
+	cIter, err := r.Log(&LogOptions{
+		Order:    LogOrderCommitterTime,
+		FileName: &fileName,
+		From:     plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9"),
+	})
+	c.Assert(err, IsNil)
+	_, iterErr := cIter.Next()
+	c.Assert(iterErr, Equals, io.EOF)
+}
+
+func (s *RepositorySuite) TestLogFileWithOtherParamsPass(c *C) {
+	r, _ := Init(memory.NewStorage(), nil)
+	err := r.clone(context.Background(), &CloneOptions{
+		URL: s.GetBasicLocalRepositoryURL(),
+	})
+	c.Assert(err, IsNil)
+
+	fileName := "LICENSE"
+	cIter, err := r.Log(&LogOptions{
+		Order:    LogOrderCommitterTime,
+		FileName: &fileName,
+		From:     plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9"),
+	})
+	c.Assert(err, IsNil)
+	commitVal, iterErr := cIter.Next()
+	c.Assert(iterErr, Equals, nil)
+	c.Assert(commitVal.Hash.String(), Equals, "b029517f6300c2da0f4b651b8642506cd6aaf45d")
+
+	_, iterErr = cIter.Next()
+	c.Assert(iterErr, Equals, io.EOF)
+}
+
 func (s *RepositorySuite) TestCommit(c *C) {
 	r, _ := Init(memory.NewStorage(), nil)
 	err := r.clone(context.Background(), &CloneOptions{
@@ -1913,7 +2065,25 @@ func (s *RepositorySuite) TestResolveRevision(c *C) {
 		h, err := r.ResolveRevision(plumbing.Revision(rev))
 
 		c.Assert(err, IsNil)
-		c.Assert(h.String(), Equals, hash)
+		c.Check(h.String(), Equals, hash, Commentf("while checking %s", rev))
+	}
+}
+
+func (s *RepositorySuite) TestResolveRevisionAnnotated(c *C) {
+	f := fixtures.ByURL("https://github.com/git-fixtures/tags.git").One()
+	sto := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault())
+	r, err := Open(sto, f.DotGit())
+	c.Assert(err, IsNil)
+
+	datas := map[string]string{
+		"refs/tags/annotated-tag": "f7b877701fbf855b44c0a9e86f3fdce2c298b07f",
+	}
+
+	for rev, hash := range datas {
+		h, err := r.ResolveRevision(plumbing.Revision(rev))
+
+		c.Assert(err, IsNil)
+		c.Check(h.String(), Equals, hash, Commentf("while checking %s", rev))
 	}
 }
 
diff --git a/status.go b/status.go
--- a/status.go
+++ b/status.go
@@ -26,7 +26,7 @@ func (s Status) IsUntracked(path string) bool {
 	return ok && stat.Worktree == Untracked
 }
 
-// IsClean returns true if all the files aren't in Unmodified status.
+// IsClean returns true if all the files are in Unmodified status.
 func (s Status) IsClean() bool {
 	for _, status := range s {
 		if status.Worktree != Unmodified || status.Staging != Unmodified {
diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go
index 9eb085f..6cd2d4c 100644
--- a/storage/filesystem/object.go
+++ b/storage/filesystem/object.go
@@ -160,6 +160,79 @@ func (s *ObjectStorage) HasEncodedObject(h plumbing.Hash) (err error) {
 	return nil
 }
 
+func (s *ObjectStorage) encodedObjectSizeFromUnpacked(h plumbing.Hash) (
+	size int64, err error) {
+	f, err := s.dir.Object(h)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return 0, plumbing.ErrObjectNotFound
+		}
+
+		return 0, err
+	}
+
+	r, err := objfile.NewReader(f)
+	if err != nil {
+		return 0, err
+	}
+	defer ioutil.CheckClose(r, &err)
+
+	_, size, err = r.Header()
+	return size, err
+}
+
+func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
+	size int64, err error) {
+	if err := s.requireIndex(); err != nil {
+		return 0, err
+	}
+
+	pack, _, offset := s.findObjectInPackfile(h)
+	if offset == -1 {
+		return 0, plumbing.ErrObjectNotFound
+	}
+
+	f, err := s.dir.ObjectPack(pack)
+	if err != nil {
+		return 0, err
+	}
+	defer ioutil.CheckClose(f, &err)
+
+	idx := s.index[pack]
+	hash, err := idx.FindHash(offset)
+	if err == nil {
+		obj, ok := s.deltaBaseCache.Get(hash)
+		if ok {
+			return obj.Size(), nil
+		}
+	} else if err != nil && err != plumbing.ErrObjectNotFound {
+		return 0, err
+	}
+
+	var p *packfile.Packfile
+	if s.deltaBaseCache != nil {
+		p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.deltaBaseCache)
+	} else {
+		p = packfile.NewPackfile(idx, s.dir.Fs(), f)
+	}
+
+	return p.GetSizeByOffset(offset)
+}
+
+// EncodedObjectSize returns the plaintext size of the given object,
+// without actually reading the full object data from storage.
+func (s *ObjectStorage) EncodedObjectSize(h plumbing.Hash) (
+	size int64, err error) {
+	size, err = s.encodedObjectSizeFromUnpacked(h)
+	if err != nil && err != plumbing.ErrObjectNotFound {
+		return 0, err
+	} else if err == nil {
+		return size, nil
+	}
+
+	return s.encodedObjectSizeFromPackfile(h)
+}
+
 // EncodedObject returns the object with the given hash, by searching for it in
 // the packfile and the git object directories.
 func (s *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) {
@@ -396,7 +469,10 @@ func (s *ObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (storer.Encode
 	return storer.NewMultiEncodedObjectIter(iters), nil
 }
 
-func (s *ObjectStorage) buildPackfileIters(t plumbing.ObjectType, seen map[plumbing.Hash]struct{}) (storer.EncodedObjectIter, error) {
+func (s *ObjectStorage) buildPackfileIters(
+	t plumbing.ObjectType,
+	seen map[plumbing.Hash]struct{},
+) (storer.EncodedObjectIter, error) {
 	if err := s.requireIndex(); err != nil {
 		return nil, err
 	}
@@ -412,7 +488,10 @@ func (s *ObjectStorage) buildPackfileIters(t plumbing.ObjectType, seen map[plumb
 			if err != nil {
 				return nil, err
 			}
-			return newPackfileIter(s.dir.Fs(), pack, t, seen, s.index[h], s.deltaBaseCache)
+			return newPackfileIter(
+				s.dir.Fs(), pack, t, seen, s.index[h],
+				s.deltaBaseCache, s.options.KeepDescriptors,
+			)
 		},
 	}, nil
 }
@@ -473,16 +552,21 @@ type packfileIter struct {
 	pack billy.File
 	iter storer.EncodedObjectIter
 	seen map[plumbing.Hash]struct{}
+
+	// tells whether the pack file should be left open after iteration or not
+	keepPack bool
 }
 
 // NewPackfileIter returns a new EncodedObjectIter for the provided packfile
 // and object type. Packfile and index file will be closed after they're
-// used.
+// used. If keepPack is true the packfile won't be closed after the iteration
+// finished.
 func NewPackfileIter(
 	fs billy.Filesystem,
 	f billy.File,
 	idxFile billy.File,
 	t plumbing.ObjectType,
+	keepPack bool,
 ) (storer.EncodedObjectIter, error) {
 	idx := idxfile.NewMemoryIndex()
 	if err := idxfile.NewDecoder(idxFile).Decode(idx); err != nil {
@@ -493,7 +577,8 @@ func NewPackfileIter(
 		return nil, err
 	}
 
-	return newPackfileIter(fs, f, t, make(map[plumbing.Hash]struct{}), idx, nil)
+	seen := make(map[plumbing.Hash]struct{})
+	return newPackfileIter(fs, f, t, seen, idx, nil, keepPack)
 }
 
 func newPackfileIter(
@@ -503,6 +588,7 @@ func newPackfileIter(
 	seen map[plumbing.Hash]struct{},
 	index idxfile.Index,
 	cache cache.Object,
+	keepPack bool,
 ) (storer.EncodedObjectIter, error) {
 	var p *packfile.Packfile
 	if cache != nil {
@@ -517,9 +603,10 @@ func newPackfileIter(
 	}
 
 	return &packfileIter{
-		pack: f,
-		iter: iter,
-		seen: seen,
+		pack:     f,
+		iter:     iter,
+		seen:     seen,
+		keepPack: keepPack,
 	}, nil
 }
 
@@ -557,7 +644,9 @@ func (iter *packfileIter) ForEach(cb func(plumbing.EncodedObject) error) error {
 
 func (iter *packfileIter) Close() {
 	iter.iter.Close()
-	_ = iter.pack.Close()
+	if !iter.keepPack {
+		_ = iter.pack.Close()
+	}
 }
 
 type objectsIter struct {
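The keepPack flag threaded through newPackfileIter leaves the pack descriptor open after iteration, pairing with dotgit's KeepDescriptors option. A sketch of iterating a repository's packfiles twice over the same open descriptors, mirroring the new test below; error handling is kept minimal on purpose:

package example

import (
	"gopkg.in/src-d/go-billy.v4"
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/storage/filesystem"
	"gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
)

// iterateTwice walks the commits of every packfile in a .git filesystem two
// times over the same descriptors, relying on keepPack=true so the first
// pass does not close the pack file.
func iterateTwice(fs billy.Filesystem) error {
	dg := dotgit.NewWithOptions(fs, dotgit.Options{KeepDescriptors: true})

	packs, err := dg.ObjectPacks()
	if err != nil {
		return err
	}

	for _, h := range packs {
		pack, err := dg.ObjectPack(h)
		if err != nil {
			return err
		}

		idx, err := dg.ObjectPackIdx(h)
		if err != nil {
			return err
		}

		iter, err := filesystem.NewPackfileIter(fs, pack, idx, plumbing.CommitObject, true)
		if err != nil {
			return err
		}

		for i := 0; i < 2; i++ {
			// The second pass only confirms the descriptor is still usable,
			// as TestPackfileIterKeepDescriptors does.
			if err := iter.ForEach(func(obj plumbing.EncodedObject) error {
				_ = obj.Hash()
				return nil
			}); err != nil {
				return err
			}
		}

		// With keepPack=true the iterator does not close the pack file, so
		// the caller owns the descriptor.
		iter.Close()
		if err := pack.Close(); err != nil {
			return err
		}
	}

	return nil
}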
diff --git a/storage/filesystem/object_test.go b/storage/filesystem/object_test.go
index bd4a94b..4e6bbfb 100644
--- a/storage/filesystem/object_test.go
+++ b/storage/filesystem/object_test.go
@@ -83,6 +83,44 @@ func (s *FsSuite) TestGetFromPackfileKeepDescriptors(c *C) {
 	})
 }
 
+func (s *FsSuite) TestGetSizeOfObjectFile(c *C) {
+	fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit()
+	o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault())
+
+	// Get the size of `tree_walker.go`.
+	expected := plumbing.NewHash("cbd81c47be12341eb1185b379d1c82675aeded6a")
+	size, err := o.EncodedObjectSize(expected)
+	c.Assert(err, IsNil)
+	c.Assert(size, Equals, int64(2412))
+}
+
+func (s *FsSuite) TestGetSizeFromPackfile(c *C) {
+	fixtures.Basic().ByTag(".git").Test(c, func(f *fixtures.Fixture) {
+		fs := f.DotGit()
+		o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault())
+
+		// Get the size of `binary.jpg`.
+		expected := plumbing.NewHash("d5c0f4ab811897cadf03aec358ae60d21f91c50d")
+		size, err := o.EncodedObjectSize(expected)
+		c.Assert(err, IsNil)
+		c.Assert(size, Equals, int64(76110))
+	})
+}
+
+func (s *FsSuite) TestGetSizeOfAllObjectFiles(c *C) {
+	fs := fixtures.ByTag(".git").One().DotGit()
+	o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault())
+
+	// Get the size of `tree_walker.go`.
+	err := o.ForEachObjectHash(func(h plumbing.Hash) error {
+		size, err := o.EncodedObjectSize(h)
+		c.Assert(err, IsNil)
+		c.Assert(size, Not(Equals), int64(0))
+		return nil
+	})
+	c.Assert(err, IsNil)
+}
+
 func (s *FsSuite) TestGetFromPackfileMultiplePackfiles(c *C) {
 	fs := fixtures.ByTag(".git").ByTag("multi-packfile").One().DotGit()
 	o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault())
@@ -153,18 +191,54 @@ func (s *FsSuite) TestPackfileIter(c *C) {
 			idxf, err := dg.ObjectPackIdx(h)
 			c.Assert(err, IsNil)
 
-			iter, err := NewPackfileIter(fs, f, idxf, t)
+			iter, err := NewPackfileIter(fs, f, idxf, t, false)
 			c.Assert(err, IsNil)
+
 			err = iter.ForEach(func(o plumbing.EncodedObject) error {
 				c.Assert(o.Type(), Equals, t)
 				return nil
 			})
-			c.Assert(err, IsNil)
 			}
 		}
 	})
+}
+
+func (s *FsSuite) TestPackfileIterKeepDescriptors(c *C) {
+	fixtures.ByTag(".git").Test(c, func(f *fixtures.Fixture) {
+		fs := f.DotGit()
+		ops := dotgit.Options{KeepDescriptors: true}
+		dg := dotgit.NewWithOptions(fs, ops)
+		for _, t := range objectTypes {
+			ph, err := dg.ObjectPacks()
+			c.Assert(err, IsNil)
+
+			for _, h := range ph {
+				f, err := dg.ObjectPack(h)
+				c.Assert(err, IsNil)
+
+				idxf, err := dg.ObjectPackIdx(h)
+				c.Assert(err, IsNil)
+
+				iter, err := NewPackfileIter(fs, f, idxf, t, true)
+				c.Assert(err, IsNil)
+
+				err = iter.ForEach(func(o plumbing.EncodedObject) error {
+					c.Assert(o.Type(), Equals, t)
+					return nil
+				})
+				c.Assert(err, IsNil)
+
+				// test twice to check that packfiles are not closed
+				err = iter.ForEach(func(o plumbing.EncodedObject) error {
+					c.Assert(o.Type(), Equals, t)
+					return nil
+				})
+				c.Assert(err, IsNil)
+			}
+		}
+	})
 }
 
 func BenchmarkPackfileIter(b *testing.B) {
@@ -201,7 +275,7 @@ func BenchmarkPackfileIter(b *testing.B) {
 					b.Fatal(err)
 				}
 
-				iter, err := NewPackfileIter(fs, f, idxf, t)
+				iter, err := NewPackfileIter(fs, f, idxf, t, false)
 				if err != nil {
 					b.Fatal(err)
 				}
@@ -257,7 +331,7 @@ func BenchmarkPackfileIterReadContent(b *testing.B) {
 					b.Fatal(err)
 				}
 
-				iter, err := NewPackfileIter(fs, f, idxf, t)
+				iter, err := NewPackfileIter(fs, f, idxf, t, false)
 				if err != nil {
 					b.Fatal(err)
 				}
diff --git a/storage/memory/storage.go b/storage/memory/storage.go
index 2e32509..6e11742 100644
--- a/storage/memory/storage.go
+++ b/storage/memory/storage.go
@@ -122,6 +122,16 @@ func (o *ObjectStorage) HasEncodedObject(h plumbing.Hash) (err error) {
 	return nil
 }
 
+func (o *ObjectStorage) EncodedObjectSize(h plumbing.Hash) (
+	size int64, err error) {
+	obj, ok := o.Objects[h]
+	if !ok {
+		return 0, plumbing.ErrObjectNotFound
+	}
+
+	return obj.Size(), nil
+}
+
 func (o *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) {
 	obj, ok := o.Objects[h]
 	if !ok || (plumbing.AnyObject != t && obj.Type() != t) {