-rw-r--r--  .gitignore | 3
-rw-r--r--  .travis.yml | 4
-rw-r--r--  _examples/branch/main.go | 2
-rw-r--r--  blame.go | 11
-rw-r--r--  blame_test.go | 6
-rw-r--r--  common_test.go | 20
-rw-r--r--  config/branch.go | 71
-rw-r--r--  config/branch_test.go | 76
-rw-r--r--  config/config.go | 83
-rw-r--r--  config/config_test.go | 93
-rw-r--r--  config/modules.go | 20
-rw-r--r--  config/modules_test.go | 26
-rw-r--r--  config/refspec.go | 10
-rw-r--r--  config/refspec_test.go | 19
-rw-r--r--  example_test.go | 10
-rw-r--r--  go.mod | 29
-rw-r--r--  go.sum | 57
-rw-r--r--  object_walker.go | 2
-rw-r--r--  options.go | 71
-rw-r--r--  plumbing/cache/buffer_lru.go | 98
-rw-r--r--  plumbing/cache/buffer_test.go | 151
-rw-r--r--  plumbing/cache/common.go | 13
-rw-r--r--  plumbing/cache/object_lru.go | 24
-rw-r--r--  plumbing/cache/object_test.go | 19
-rw-r--r--  plumbing/cache/queue.go | 46
-rw-r--r--  plumbing/format/diff/unified_encoder.go | 16
-rw-r--r--  plumbing/format/diff/unified_encoder_test.go | 74
-rw-r--r--  plumbing/format/gitignore/dir.go | 87
-rw-r--r--  plumbing/format/gitignore/dir_test.go | 169
-rw-r--r--  plumbing/format/gitignore/pattern.go | 3
-rw-r--r--  plumbing/format/gitignore/pattern_test.go | 6
-rw-r--r--  plumbing/format/idxfile/decoder.go | 109
-rw-r--r--  plumbing/format/idxfile/decoder_test.go | 106
-rw-r--r--  plumbing/format/idxfile/encoder.go | 101
-rw-r--r--  plumbing/format/idxfile/encoder_test.go | 21
-rw-r--r--  plumbing/format/idxfile/idxfile.go | 345
-rw-r--r--  plumbing/format/idxfile/idxfile_test.go | 169
-rw-r--r--  plumbing/format/idxfile/writer.go | 186
-rw-r--r--  plumbing/format/idxfile/writer_test.go | 98
-rw-r--r--  plumbing/format/index/decoder.go | 2
-rw-r--r--  plumbing/format/index/index.go | 32
-rw-r--r--  plumbing/format/index/index_test.go | 37
-rw-r--r--  plumbing/format/index/match.go | 186
-rw-r--r--  plumbing/format/packfile/common.go | 21
-rw-r--r--  plumbing/format/packfile/decoder.go | 495
-rw-r--r--  plumbing/format/packfile/decoder_test.go | 396
-rw-r--r--  plumbing/format/packfile/delta_selector.go | 8
-rw-r--r--  plumbing/format/packfile/delta_test.go | 21
-rw-r--r--  plumbing/format/packfile/diff_delta.go | 2
-rw-r--r--  plumbing/format/packfile/encoder.go | 1
-rw-r--r--  plumbing/format/packfile/encoder_advanced_test.go | 57
-rw-r--r--  plumbing/format/packfile/encoder_test.go | 135
-rw-r--r--  plumbing/format/packfile/fsobject.go | 116
-rw-r--r--  plumbing/format/packfile/index.go | 82
-rw-r--r--  plumbing/format/packfile/index_test.go | 122
-rw-r--r--  plumbing/format/packfile/object_pack.go | 42
-rw-r--r--  plumbing/format/packfile/packfile.go | 520
-rw-r--r--  plumbing/format/packfile/packfile_test.go | 279
-rw-r--r--  plumbing/format/packfile/parser.go | 489
-rw-r--r--  plumbing/format/packfile/parser_test.go | 195
-rw-r--r--  plumbing/format/packfile/patch_delta.go | 2
-rw-r--r--  plumbing/format/packfile/scanner.go | 45
-rw-r--r--  plumbing/format/packfile/scanner_test.go | 79
-rw-r--r--  plumbing/format/pktline/encoder.go | 3
-rw-r--r--  plumbing/format/pktline/scanner.go | 2
-rw-r--r--  plumbing/format/pktline/scanner_test.go | 16
-rw-r--r--  plumbing/memory.go | 8
-rw-r--r--  plumbing/object/blob.go | 2
-rw-r--r--  plumbing/object/blob_test.go | 23
-rw-r--r--  plumbing/object/change.go | 21
-rw-r--r--  plumbing/object/change_adaptor_test.go | 4
-rw-r--r--  plumbing/object/change_test.go | 68
-rw-r--r--  plumbing/object/commit.go | 70
-rw-r--r--  plumbing/object/commit_test.go | 136
-rw-r--r--  plumbing/object/commit_walker_bfs.go | 100
-rw-r--r--  plumbing/object/commit_walker_ctime.go | 103
-rw-r--r--  plumbing/object/commit_walker_file.go | 115
-rw-r--r--  plumbing/object/commit_walker_test.go | 96
-rw-r--r--  plumbing/object/difftree.go | 13
-rw-r--r--  plumbing/object/difftree_test.go | 20
-rw-r--r--  plumbing/object/file.go | 2
-rw-r--r--  plumbing/object/file_test.go | 17
-rw-r--r--  plumbing/object/object.go | 6
-rw-r--r--  plumbing/object/object_test.go | 9
-rw-r--r--  plumbing/object/patch.go | 32
-rw-r--r--  plumbing/object/patch_test.go | 5
-rw-r--r--  plumbing/object/tag.go | 20
-rw-r--r--  plumbing/object/tag_test.go | 7
-rw-r--r--  plumbing/object/tree.go | 56
-rw-r--r--  plumbing/object/tree_test.go | 10
-rw-r--r--  plumbing/protocol/packp/advrefs.go | 108
-rw-r--r--  plumbing/protocol/packp/advrefs_test.go | 73
-rw-r--r--  plumbing/revlist/revlist_test.go | 12
-rw-r--r--  plumbing/storer/object.go | 2
-rw-r--r--  plumbing/transport/common.go | 4
-rw-r--r--  plumbing/transport/common_test.go | 20
-rw-r--r--  plumbing/transport/http/common.go | 65
-rw-r--r--  plumbing/transport/http/common_test.go | 13
-rw-r--r--  plumbing/transport/http/receive_pack.go | 2
-rw-r--r--  plumbing/transport/http/upload_pack.go | 2
-rw-r--r--  plumbing/transport/http/upload_pack_test.go | 28
-rw-r--r--  plumbing/transport/internal/common/common.go | 5
-rw-r--r--  plumbing/transport/internal/common/common_test.go | 78
-rw-r--r--  plumbing/transport/server/loader.go | 3
-rw-r--r--  plumbing/transport/server/server.go | 11
-rw-r--r--  plumbing/transport/server/server_test.go | 4
-rw-r--r--  plumbing/transport/ssh/auth_method.go | 21
-rw-r--r--  plumbing/transport/ssh/auth_method_test.go | 68
-rw-r--r--  plumbing/transport/ssh/common_test.go | 4
-rw-r--r--  plumbing/transport/test/receive_pack.go | 50
-rw-r--r--  plumbing/transport/test/upload_pack.go | 5
-rw-r--r--  prune.go | 2
-rw-r--r--  prune_test.go | 4
-rw-r--r--  remote.go | 109
-rw-r--r--  remote_test.go | 101
-rw-r--r--  repository.go | 380
-rw-r--r--  repository_test.go | 914
-rw-r--r--  status.go | 15
-rw-r--r--  storage/filesystem/config.go | 10
-rw-r--r--  storage/filesystem/config_test.go | 2
-rw-r--r--  storage/filesystem/dotgit/dotgit.go (renamed from storage/filesystem/internal/dotgit/dotgit.go) | 328
-rw-r--r--  storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go | 81
-rw-r--r--  storage/filesystem/dotgit/dotgit_setref.go (renamed from storage/filesystem/internal/dotgit/dotgit_setref.go) | 2
-rw-r--r--  storage/filesystem/dotgit/dotgit_setref_norwfs.go (renamed from storage/filesystem/internal/dotgit/dotgit_setref_norwfs.go) | 0
-rw-r--r--  storage/filesystem/dotgit/dotgit_test.go (renamed from storage/filesystem/internal/dotgit/dotgit_test.go) | 129
-rw-r--r--  storage/filesystem/dotgit/writers.go (renamed from storage/filesystem/internal/dotgit/writers.go) | 30
-rw-r--r--  storage/filesystem/dotgit/writers_test.go (renamed from storage/filesystem/internal/dotgit/writers_test.go) | 3
-rw-r--r--  storage/filesystem/index.go | 6
-rw-r--r--  storage/filesystem/internal/dotgit/dotgit_rewrite_packed_refs_nix.go | 17
-rw-r--r--  storage/filesystem/internal/dotgit/dotgit_rewrite_packed_refs_norwfs.go | 34
-rw-r--r--  storage/filesystem/internal/dotgit/dotgit_rewrite_packed_refs_windows.go | 42
-rw-r--r--  storage/filesystem/module.go | 5
-rw-r--r--  storage/filesystem/object.go | 306
-rw-r--r--  storage/filesystem/object_test.go | 270
-rw-r--r--  storage/filesystem/reference.go | 2
-rw-r--r--  storage/filesystem/shallow.go | 6
-rw-r--r--  storage/filesystem/storage.go | 40
-rw-r--r--  storage/filesystem/storage_test.go | 27
-rw-r--r--  storage/memory/storage.go | 2
-rw-r--r--  submodule_test.go | 32
-rw-r--r--  utils/diff/diff.go | 4
-rw-r--r--  utils/merkletrie/difftree.go | 21
-rw-r--r--  utils/merkletrie/difftree_test.go | 61
-rw-r--r--  worktree.go | 91
-rw-r--r--  worktree_bsd.go (renamed from worktree_darwin.go) | 4
-rw-r--r--  worktree_commit.go | 41
-rw-r--r--  worktree_commit_test.go | 194
-rw-r--r--  worktree_linux.go | 4
-rw-r--r--  worktree_status.go | 297
-rw-r--r--  worktree_test.go | 357
-rw-r--r--  worktree_windows.go | 15
151 files changed, 9223 insertions(+), 2125 deletions(-)
diff --git a/.gitignore b/.gitignore
index 2d83068..038dd9f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,4 @@
coverage.out
+*~
+coverage.txt
+profile.out
diff --git a/.travis.yml b/.travis.yml
index ee975e4..c68b5f4 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,8 +1,8 @@
language: go
go:
- - 1.8.x
- - 1.9.x
+ - "1.10"
+ - "1.11"
go_import_path: gopkg.in/src-d/go-git.v4
diff --git a/_examples/branch/main.go b/_examples/branch/main.go
index fa1ad01..ff33ead 100644
--- a/_examples/branch/main.go
+++ b/_examples/branch/main.go
@@ -28,7 +28,7 @@ func main() {
// Create a new plumbing.HashReference object with the name of the branch
// and the hash from the HEAD. The reference name should be a full reference
- // name and now a abbreviated one, as is used on the git cli.
+ // name and not an abbreviated one, as is used on the git cli.
//
// For tags we should use `refs/tags/%s` instead of `refs/heads/%s` used
// for branches.
diff --git a/blame.go b/blame.go
index df112ca..349cdd9 100644
--- a/blame.go
+++ b/blame.go
@@ -6,6 +6,7 @@ import (
"fmt"
"strconv"
"strings"
+ "time"
"unicode/utf8"
"gopkg.in/src-d/go-git.v4/plumbing"
@@ -106,12 +107,18 @@ type Line struct {
Author string
// Text is the original text of the line.
Text string
+ // Date is when the original text of the line was introduced
+ Date time.Time
+ // Hash is the commit hash that introduced the original line
+ Hash plumbing.Hash
}
-func newLine(author, text string) *Line {
+func newLine(author, text string, date time.Time, hash plumbing.Hash) *Line {
return &Line{
Author: author,
Text: text,
+ Hash: hash,
+ Date: date,
}
}
@@ -121,7 +128,7 @@ func newLines(contents []string, commits []*object.Commit) ([]*Line, error) {
}
result := make([]*Line, 0, len(contents))
for i := range contents {
- l := newLine(commits[i].Author.Email, contents[i])
+ l := newLine(commits[i].Author.Email, contents[i], commits[i].Author.When, commits[i].Hash)
result = append(result, l)
}
return result, nil
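
A minimal usage sketch (not part of this diff) of the new Date and Hash fields on blamed lines; it assumes an *object.Commit resolved elsewhere and a file named "CHANGELOG":

    package main

    import (
        "fmt"
        "log"
        "time"

        git "gopkg.in/src-d/go-git.v4"
        "gopkg.in/src-d/go-git.v4/plumbing/object"
    )

    // printBlame prints the commit hash and date that introduced each line.
    func printBlame(commit *object.Commit) {
        result, err := git.Blame(commit, "CHANGELOG")
        if err != nil {
            log.Fatal(err)
        }
        for _, line := range result.Lines {
            fmt.Printf("%s %s %s\n", line.Hash, line.Date.Format(time.RFC822), line.Author)
        }
    }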
diff --git a/blame_test.go b/blame_test.go
index 5374610..92911b1 100644
--- a/blame_test.go
+++ b/blame_test.go
@@ -32,6 +32,10 @@ func (s *BlameSuite) TestBlame(c *C) {
obt, err := Blame(commit, t.path)
c.Assert(err, IsNil)
c.Assert(obt, DeepEquals, exp)
+
+ for i, l := range obt.Lines {
+ c.Assert(l.Hash.String(), Equals, t.blames[i])
+ }
}
}
@@ -53,6 +57,8 @@ func (s *BlameSuite) mockBlame(c *C, t blameTest, r *Repository) (blame *BlameRe
l := &Line{
Author: commit.Author.Email,
Text: lines[i],
+ Date: commit.Author.When,
+ Hash: commit.Hash,
}
blamedLines = append(blamedLines, l)
}
diff --git a/common_test.go b/common_test.go
index f8f4e61..dad0a37 100644
--- a/common_test.go
+++ b/common_test.go
@@ -4,6 +4,7 @@ import (
"testing"
"gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/cache"
"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
"gopkg.in/src-d/go-git.v4/plumbing/transport"
"gopkg.in/src-d/go-git.v4/storage/filesystem"
@@ -59,10 +60,7 @@ func (s *BaseSuite) NewRepository(f *fixtures.Fixture) *Repository {
dotgit = f.DotGit()
worktree = memfs.New()
- st, err := filesystem.NewStorage(dotgit)
- if err != nil {
- panic(err)
- }
+ st := filesystem.NewStorage(dotgit, cache.NewObjectLRUDefault())
r, err := Open(st, worktree)
if err != nil {
@@ -89,10 +87,7 @@ func (s *BaseSuite) NewRepositoryWithEmptyWorktree(f *fixtures.Fixture) *Reposit
worktree := memfs.New()
- st, err := filesystem.NewStorage(dotgit)
- if err != nil {
- panic(err)
- }
+ st := filesystem.NewStorage(dotgit, cache.NewObjectLRUDefault())
r, err := Open(st, worktree)
if err != nil {
@@ -113,14 +108,7 @@ func (s *BaseSuite) NewRepositoryFromPackfile(f *fixtures.Fixture) *Repository {
p := f.Packfile()
defer p.Close()
- n := packfile.NewScanner(p)
- d, err := packfile.NewDecoder(n, storer)
- if err != nil {
- panic(err)
- }
-
- _, err = d.Decode()
- if err != nil {
+ if err := packfile.UpdateObjectStorage(storer, p); err != nil {
panic(err)
}
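
With this change filesystem.NewStorage takes an object cache and no longer returns an error. A hedged sketch of the new call site (paths are illustrative):

    package main

    import (
        "gopkg.in/src-d/go-billy.v4/osfs"
        git "gopkg.in/src-d/go-git.v4"
        "gopkg.in/src-d/go-git.v4/plumbing/cache"
        "gopkg.in/src-d/go-git.v4/storage/filesystem"
    )

    func openRepo() (*git.Repository, error) {
        // NewStorage now takes an object cache and returns a single value.
        st := filesystem.NewStorage(osfs.New("/tmp/repo/.git"), cache.NewObjectLRUDefault())
        return git.Open(st, osfs.New("/tmp/repo"))
    }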
diff --git a/config/branch.go b/config/branch.go
new file mode 100644
index 0000000..e18073c
--- /dev/null
+++ b/config/branch.go
@@ -0,0 +1,71 @@
+package config
+
+import (
+ "errors"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ format "gopkg.in/src-d/go-git.v4/plumbing/format/config"
+)
+
+var (
+ errBranchEmptyName = errors.New("branch config: empty name")
+ errBranchInvalidMerge = errors.New("branch config: invalid merge")
+)
+
+// Branch contains information on the
+// local branches and which remote to track
+type Branch struct {
+ // Name of branch
+ Name string
+ // Remote name of remote to track
+ Remote string
+ // Merge is the local refspec for the branch
+ Merge plumbing.ReferenceName
+
+ raw *format.Subsection
+}
+
+// Validate validates fields of branch
+func (b *Branch) Validate() error {
+ if b.Name == "" {
+ return errBranchEmptyName
+ }
+
+ if b.Merge != "" && !b.Merge.IsBranch() {
+ return errBranchInvalidMerge
+ }
+
+ return nil
+}
+
+func (b *Branch) marshal() *format.Subsection {
+ if b.raw == nil {
+ b.raw = &format.Subsection{}
+ }
+
+ b.raw.Name = b.Name
+
+ if b.Remote == "" {
+ b.raw.RemoveOption(remoteSection)
+ } else {
+ b.raw.SetOption(remoteSection, b.Remote)
+ }
+
+ if b.Merge == "" {
+ b.raw.RemoveOption(mergeKey)
+ } else {
+ b.raw.SetOption(mergeKey, string(b.Merge))
+ }
+
+ return b.raw
+}
+
+func (b *Branch) unmarshal(s *format.Subsection) error {
+ b.raw = s
+
+ b.Name = b.raw.Name
+ b.Remote = b.raw.Options.Get(remoteSection)
+ b.Merge = plumbing.ReferenceName(b.raw.Options.Get(mergeKey))
+
+ return b.Validate()
+}
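
A hedged sketch of wiring branch tracking through the new Branch type; it assumes a *git.Repository whose storer implements config.ConfigStorer, with a hypothetical branch name "feature":

    package main

    import (
        git "gopkg.in/src-d/go-git.v4"
        "gopkg.in/src-d/go-git.v4/config"
        "gopkg.in/src-d/go-git.v4/plumbing"
    )

    // trackBranch records that the local "feature" branch tracks origin.
    func trackBranch(r *git.Repository) error {
        cfg, err := r.Config()
        if err != nil {
            return err
        }
        cfg.Branches["feature"] = &config.Branch{
            Name:   "feature",
            Remote: "origin",
            Merge:  plumbing.ReferenceName("refs/heads/feature"),
        }
        return r.Storer.SetConfig(cfg)
    }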
diff --git a/config/branch_test.go b/config/branch_test.go
new file mode 100644
index 0000000..d74122e
--- /dev/null
+++ b/config/branch_test.go
@@ -0,0 +1,76 @@
+package config
+
+import (
+ . "gopkg.in/check.v1"
+ "gopkg.in/src-d/go-git.v4/plumbing"
+)
+
+type BranchSuite struct{}
+
+var _ = Suite(&BranchSuite{})
+
+func (b *BranchSuite) TestValidateName(c *C) {
+ goodBranch := Branch{
+ Name: "master",
+ Remote: "some_remote",
+ Merge: "refs/heads/master",
+ }
+ badBranch := Branch{
+ Remote: "some_remote",
+ Merge: "refs/heads/master",
+ }
+ c.Assert(goodBranch.Validate(), IsNil)
+ c.Assert(badBranch.Validate(), NotNil)
+}
+
+func (b *BranchSuite) TestValidateMerge(c *C) {
+ goodBranch := Branch{
+ Name: "master",
+ Remote: "some_remote",
+ Merge: "refs/heads/master",
+ }
+ badBranch := Branch{
+ Name: "master",
+ Remote: "some_remote",
+ Merge: "blah",
+ }
+ c.Assert(goodBranch.Validate(), IsNil)
+ c.Assert(badBranch.Validate(), NotNil)
+}
+
+func (b *BranchSuite) TestMarshall(c *C) {
+ expected := []byte(`[core]
+ bare = false
+[branch "branch-tracking-on-clone"]
+ remote = fork
+ merge = refs/heads/branch-tracking-on-clone
+`)
+
+ cfg := NewConfig()
+ cfg.Branches["branch-tracking-on-clone"] = &Branch{
+ Name: "branch-tracking-on-clone",
+ Remote: "fork",
+ Merge: plumbing.ReferenceName("refs/heads/branch-tracking-on-clone"),
+ }
+
+ actual, err := cfg.Marshal()
+ c.Assert(err, IsNil)
+ c.Assert(string(actual), Equals, string(expected))
+}
+
+func (b *BranchSuite) TestUnmarshall(c *C) {
+ input := []byte(`[core]
+ bare = false
+[branch "branch-tracking-on-clone"]
+ remote = fork
+ merge = refs/heads/branch-tracking-on-clone
+`)
+
+ cfg := NewConfig()
+ err := cfg.Unmarshal(input)
+ c.Assert(err, IsNil)
+ branch := cfg.Branches["branch-tracking-on-clone"]
+ c.Assert(branch.Name, Equals, "branch-tracking-on-clone")
+ c.Assert(branch.Remote, Equals, "fork")
+ c.Assert(branch.Merge, Equals, plumbing.ReferenceName("refs/heads/branch-tracking-on-clone"))
+}
diff --git a/config/config.go b/config/config.go
index 87a847d..a637f6d 100644
--- a/config/config.go
+++ b/config/config.go
@@ -25,7 +25,7 @@ type ConfigStorer interface {
}
var (
- ErrInvalid = errors.New("config invalid remote")
+ ErrInvalid = errors.New("config invalid key in remote or branch")
ErrRemoteConfigNotFound = errors.New("remote config not found")
ErrRemoteConfigEmptyURL = errors.New("remote config: empty URL")
ErrRemoteConfigEmptyName = errors.New("remote config: empty name")
@@ -40,6 +40,9 @@ type Config struct {
IsBare bool
// Worktree is the path to the root of the working tree.
Worktree string
+ // CommentChar is the character indicating the start of a
+ // comment for commands like commit and tag
+ CommentChar string
}
Pack struct {
@@ -55,7 +58,9 @@ type Config struct {
// Submodules list of repository submodules, the key of the map is the name
// of the submodule, should equal to Submodule.Name.
Submodules map[string]*Submodule
-
+ // Branches list of branches, the key is the branch name and should
+ // equal Branch.Name
+ Branches map[string]*Branch
// Raw contains the raw information of a config file. The main goal is
// preserve the parsed information from the original format, to avoid
// dropping unsupported fields.
@@ -67,6 +72,7 @@ func NewConfig() *Config {
config := &Config{
Remotes: make(map[string]*RemoteConfig),
Submodules: make(map[string]*Submodule),
+ Branches: make(map[string]*Branch),
Raw: format.New(),
}
@@ -87,19 +93,32 @@ func (c *Config) Validate() error {
}
}
+ for name, b := range c.Branches {
+ if b.Name != name {
+ return ErrInvalid
+ }
+
+ if err := b.Validate(); err != nil {
+ return err
+ }
+ }
+
return nil
}
const (
remoteSection = "remote"
submoduleSection = "submodule"
+ branchSection = "branch"
coreSection = "core"
packSection = "pack"
fetchKey = "fetch"
urlKey = "url"
bareKey = "bare"
worktreeKey = "worktree"
+ commentCharKey = "commentChar"
windowKey = "window"
+ mergeKey = "merge"
// DefaultPackWindow holds the number of previous objects used to
// generate deltas. The value 10 is the same used by git command.
@@ -120,7 +139,12 @@ func (c *Config) Unmarshal(b []byte) error {
if err := c.unmarshalPack(); err != nil {
return err
}
- c.unmarshalSubmodules()
+ unmarshalSubmodules(c.Raw, c.Submodules)
+
+ if err := c.unmarshalBranches(); err != nil {
+ return err
+ }
+
return c.unmarshalRemotes()
}
@@ -131,6 +155,7 @@ func (c *Config) unmarshalCore() {
}
c.Core.Worktree = s.Options.Get(worktreeKey)
+ c.Core.CommentChar = s.Options.Get(commentCharKey)
}
func (c *Config) unmarshalPack() error {
@@ -162,22 +187,41 @@ func (c *Config) unmarshalRemotes() error {
return nil
}
-func (c *Config) unmarshalSubmodules() {
- s := c.Raw.Section(submoduleSection)
+func unmarshalSubmodules(fc *format.Config, submodules map[string]*Submodule) {
+ s := fc.Section(submoduleSection)
for _, sub := range s.Subsections {
m := &Submodule{}
m.unmarshal(sub)
- c.Submodules[m.Name] = m
+ if m.Validate() == ErrModuleBadPath {
+ continue
+ }
+
+ submodules[m.Name] = m
}
}
+func (c *Config) unmarshalBranches() error {
+ bs := c.Raw.Section(branchSection)
+ for _, sub := range bs.Subsections {
+ b := &Branch{}
+
+ if err := b.unmarshal(sub); err != nil {
+ return err
+ }
+
+ c.Branches[b.Name] = b
+ }
+ return nil
+}
+
// Marshal returns Config encoded as a git-config file.
func (c *Config) Marshal() ([]byte, error) {
c.marshalCore()
c.marshalPack()
c.marshalRemotes()
c.marshalSubmodules()
+ c.marshalBranches()
buf := bytes.NewBuffer(nil)
if err := format.NewEncoder(buf).Encode(c.Raw); err != nil {
@@ -245,6 +289,33 @@ func (c *Config) marshalSubmodules() {
}
}
+func (c *Config) marshalBranches() {
+ s := c.Raw.Section(branchSection)
+ newSubsections := make(format.Subsections, 0, len(c.Branches))
+ added := make(map[string]bool)
+ for _, subsection := range s.Subsections {
+ if branch, ok := c.Branches[subsection.Name]; ok {
+ newSubsections = append(newSubsections, branch.marshal())
+ added[subsection.Name] = true
+ }
+ }
+
+ branchNames := make([]string, 0, len(c.Branches))
+ for name := range c.Branches {
+ branchNames = append(branchNames, name)
+ }
+
+ sort.Strings(branchNames)
+
+ for _, name := range branchNames {
+ if !added[name] {
+ newSubsections = append(newSubsections, c.Branches[name].marshal())
+ }
+ }
+
+ s.Subsections = newSubsections
+}
+
// RemoteConfig contains the configuration for a given remote repository.
type RemoteConfig struct {
// Name of the remote
diff --git a/config/config_test.go b/config/config_test.go
index 1f120c0..db0932c 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -1,6 +1,9 @@
package config
-import . "gopkg.in/check.v1"
+import (
+ . "gopkg.in/check.v1"
+ "gopkg.in/src-d/go-git.v4/plumbing"
+)
type ConfigSuite struct{}
@@ -10,6 +13,7 @@ func (s *ConfigSuite) TestUnmarshall(c *C) {
input := []byte(`[core]
bare = true
worktree = foo
+ commentchar = bar
[pack]
window = 20
[remote "origin"]
@@ -20,6 +24,8 @@ func (s *ConfigSuite) TestUnmarshall(c *C) {
url = git@github.com:src-d/go-git.git
fetch = +refs/heads/*:refs/remotes/origin/*
fetch = +refs/pull/*:refs/remotes/origin/pull/*
+[remote "win-local"]
+ url = X:\\Git\\
[submodule "qux"]
path = qux
url = https://github.com/foo/qux.git
@@ -35,19 +41,23 @@ func (s *ConfigSuite) TestUnmarshall(c *C) {
c.Assert(cfg.Core.IsBare, Equals, true)
c.Assert(cfg.Core.Worktree, Equals, "foo")
+ c.Assert(cfg.Core.CommentChar, Equals, "bar")
c.Assert(cfg.Pack.Window, Equals, uint(20))
- c.Assert(cfg.Remotes, HasLen, 2)
+ c.Assert(cfg.Remotes, HasLen, 3)
c.Assert(cfg.Remotes["origin"].Name, Equals, "origin")
c.Assert(cfg.Remotes["origin"].URLs, DeepEquals, []string{"git@github.com:mcuadros/go-git.git"})
c.Assert(cfg.Remotes["origin"].Fetch, DeepEquals, []RefSpec{"+refs/heads/*:refs/remotes/origin/*"})
c.Assert(cfg.Remotes["alt"].Name, Equals, "alt")
c.Assert(cfg.Remotes["alt"].URLs, DeepEquals, []string{"git@github.com:mcuadros/go-git.git", "git@github.com:src-d/go-git.git"})
c.Assert(cfg.Remotes["alt"].Fetch, DeepEquals, []RefSpec{"+refs/heads/*:refs/remotes/origin/*", "+refs/pull/*:refs/remotes/origin/pull/*"})
+ c.Assert(cfg.Remotes["win-local"].Name, Equals, "win-local")
+ c.Assert(cfg.Remotes["win-local"].URLs, DeepEquals, []string{"X:\\Git\\"})
c.Assert(cfg.Submodules, HasLen, 1)
c.Assert(cfg.Submodules["qux"].Name, Equals, "qux")
c.Assert(cfg.Submodules["qux"].URL, Equals, "https://github.com/foo/qux.git")
c.Assert(cfg.Submodules["qux"].Branch, Equals, "bar")
-
+ c.Assert(cfg.Branches["master"].Remote, Equals, "origin")
+ c.Assert(cfg.Branches["master"].Merge, Equals, plumbing.ReferenceName("refs/heads/master"))
}
func (s *ConfigSuite) TestMarshall(c *C) {
@@ -63,8 +73,13 @@ func (s *ConfigSuite) TestMarshall(c *C) {
fetch = +refs/pull/*:refs/remotes/origin/pull/*
[remote "origin"]
url = git@github.com:mcuadros/go-git.git
+[remote "win-local"]
+ url = "X:\\Git\\"
[submodule "qux"]
url = https://github.com/foo/qux.git
+[branch "master"]
+ remote = origin
+ merge = refs/heads/master
`)
cfg := NewConfig()
@@ -82,11 +97,22 @@ func (s *ConfigSuite) TestMarshall(c *C) {
Fetch: []RefSpec{"+refs/heads/*:refs/remotes/origin/*", "+refs/pull/*:refs/remotes/origin/pull/*"},
}
+ cfg.Remotes["win-local"] = &RemoteConfig{
+ Name: "win-local",
+ URLs: []string{"X:\\Git\\"},
+ }
+
cfg.Submodules["qux"] = &Submodule{
Name: "qux",
URL: "https://github.com/foo/qux.git",
}
+ cfg.Branches["master"] = &Branch{
+ Name: "master",
+ Remote: "origin",
+ Merge: "refs/heads/master",
+ }
+
b, err := cfg.Marshal()
c.Assert(err, IsNil)
@@ -104,6 +130,8 @@ func (s *ConfigSuite) TestUnmarshallMarshall(c *C) {
url = git@github.com:mcuadros/go-git.git
fetch = +refs/heads/*:refs/remotes/origin/*
mirror = true
+[remote "win-local"]
+ url = "X:\\Git\\"
[branch "master"]
remote = origin
merge = refs/heads/master
@@ -118,6 +146,29 @@ func (s *ConfigSuite) TestUnmarshallMarshall(c *C) {
c.Assert(string(output), DeepEquals, string(input))
}
+func (s *ConfigSuite) TestValidateConfig(c *C) {
+ config := &Config{
+ Remotes: map[string]*RemoteConfig{
+ "bar": {
+ Name: "bar",
+ URLs: []string{"http://foo/bar"},
+ },
+ },
+ Branches: map[string]*Branch{
+ "bar": {
+ Name: "bar",
+ },
+ "foo": {
+ Name: "foo",
+ Remote: "origin",
+ Merge: plumbing.ReferenceName("refs/heads/foo"),
+ },
+ },
+ }
+
+ c.Assert(config.Validate(), IsNil)
+}
+
func (s *ConfigSuite) TestValidateInvalidRemote(c *C) {
config := &Config{
Remotes: map[string]*RemoteConfig{
@@ -128,7 +179,7 @@ func (s *ConfigSuite) TestValidateInvalidRemote(c *C) {
c.Assert(config.Validate(), Equals, ErrRemoteConfigEmptyURL)
}
-func (s *ConfigSuite) TestValidateInvalidKey(c *C) {
+func (s *ConfigSuite) TestValidateInvalidRemoteKey(c *C) {
config := &Config{
Remotes: map[string]*RemoteConfig{
"bar": {Name: "foo"},
@@ -157,10 +208,44 @@ func (s *ConfigSuite) TestRemoteConfigValidateDefault(c *C) {
c.Assert(fetch[0].String(), Equals, "+refs/heads/*:refs/remotes/foo/*")
}
+func (s *ConfigSuite) TestValidateInvalidBranchKey(c *C) {
+ config := &Config{
+ Branches: map[string]*Branch{
+ "foo": {
+ Name: "bar",
+ Remote: "origin",
+ Merge: plumbing.ReferenceName("refs/heads/bar"),
+ },
+ },
+ }
+
+ c.Assert(config.Validate(), Equals, ErrInvalid)
+}
+
+func (s *ConfigSuite) TestValidateInvalidBranch(c *C) {
+ config := &Config{
+ Branches: map[string]*Branch{
+ "bar": {
+ Name: "bar",
+ Remote: "origin",
+ Merge: plumbing.ReferenceName("refs/heads/bar"),
+ },
+ "foo": {
+ Name: "foo",
+ Remote: "origin",
+ Merge: plumbing.ReferenceName("baz"),
+ },
+ },
+ }
+
+ c.Assert(config.Validate(), Equals, errBranchInvalidMerge)
+}
+
func (s *ConfigSuite) TestRemoteConfigDefaultValues(c *C) {
config := NewConfig()
c.Assert(config.Remotes, HasLen, 0)
+ c.Assert(config.Branches, HasLen, 0)
c.Assert(config.Submodules, HasLen, 0)
c.Assert(config.Raw, NotNil)
c.Assert(config.Pack.Window, Equals, DefaultPackWindow)
diff --git a/config/modules.go b/config/modules.go
index b208984..90758d9 100644
--- a/config/modules.go
+++ b/config/modules.go
@@ -3,6 +3,7 @@ package config
import (
"bytes"
"errors"
+ "regexp"
format "gopkg.in/src-d/go-git.v4/plumbing/format/config"
)
@@ -10,6 +11,12 @@ import (
var (
ErrModuleEmptyURL = errors.New("module config: empty URL")
ErrModuleEmptyPath = errors.New("module config: empty path")
+ ErrModuleBadPath = errors.New("submodule has an invalid path")
+)
+
+var (
+ // Matches module paths with dotdot ".." components.
+ dotdotPath = regexp.MustCompile(`(^|[/\\])\.\.([/\\]|$)`)
)
// Modules defines the submodules properties, represents a .gitmodules file
@@ -44,14 +51,7 @@ func (m *Modules) Unmarshal(b []byte) error {
return err
}
- s := m.raw.Section(submoduleSection)
- for _, sub := range s.Subsections {
- mod := &Submodule{}
- mod.unmarshal(sub)
-
- m.Submodules[mod.Path] = mod
- }
-
+ unmarshalSubmodules(m.raw, m.Submodules)
return nil
}
@@ -102,6 +102,10 @@ func (m *Submodule) Validate() error {
return ErrModuleEmptyURL
}
+ if dotdotPath.MatchString(m.Path) {
+ return ErrModuleBadPath
+ }
+
return nil
}
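
The new dotdotPath check makes Validate reject submodule paths that could escape the repository root; a small sketch with illustrative values:

    package main

    import (
        "fmt"

        "gopkg.in/src-d/go-git.v4/config"
    )

    func main() {
        m := &config.Submodule{
            Name: "escape",
            Path: "../../outside", // ".." component now fails validation
            URL:  "https://example.com/repo.git",
        }
        fmt.Println(m.Validate() == config.ErrModuleBadPath) // true
    }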
diff --git a/config/modules_test.go b/config/modules_test.go
index 36cd93f..8e10d70 100644
--- a/config/modules_test.go
+++ b/config/modules_test.go
@@ -11,6 +11,29 @@ func (s *ModulesSuite) TestValidateMissingURL(c *C) {
c.Assert(m.Validate(), Equals, ErrModuleEmptyURL)
}
+func (s *ModulesSuite) TestValidateBadPath(c *C) {
+ input := []string{
+ `..`,
+ `../`,
+ `../bar`,
+
+ `/..`,
+ `/../bar`,
+
+ `foo/..`,
+ `foo/../`,
+ `foo/../bar`,
+ }
+
+ for _, p := range input {
+ m := &Submodule{
+ Path: p,
+ URL: "https://example.com/",
+ }
+ c.Assert(m.Validate(), Equals, ErrModuleBadPath)
+ }
+}
+
func (s *ModulesSuite) TestValidateMissingName(c *C) {
m := &Submodule{URL: "bar"}
c.Assert(m.Validate(), Equals, ErrModuleEmptyPath)
@@ -39,6 +62,9 @@ func (s *ModulesSuite) TestUnmarshall(c *C) {
path = foo/bar
url = https://github.com/foo/bar.git
branch = dev
+[submodule "suspicious"]
+ path = ../../foo/bar
+ url = https://github.com/foo/bar.git
`)
cfg := NewModules()
diff --git a/config/refspec.go b/config/refspec.go
index af7e732..391705c 100644
--- a/config/refspec.go
+++ b/config/refspec.go
@@ -15,7 +15,7 @@ const (
var (
ErrRefSpecMalformedSeparator = errors.New("malformed refspec, separators are wrong")
- ErrRefSpecMalformedWildcard = errors.New("malformed refspec, missmatched number of wildcards")
+ ErrRefSpecMalformedWildcard = errors.New("malformed refspec, mismatched number of wildcards")
)
// RefSpec is a mapping from local branches to remote references
@@ -62,7 +62,13 @@ func (s RefSpec) IsDelete() bool {
// Src return the src side.
func (s RefSpec) Src() string {
spec := string(s)
- start := strings.Index(spec, refSpecForce) + 1
+
+ var start int
+ if s.IsForceUpdate() {
+ start = 1
+ } else {
+ start = 0
+ }
end := strings.Index(spec, refSpecSeparator)
return spec[start:end]
diff --git a/config/refspec_test.go b/config/refspec_test.go
index 5ee6108..675e075 100644
--- a/config/refspec_test.go
+++ b/config/refspec_test.go
@@ -62,8 +62,17 @@ func (s *RefSpecSuite) TestRefSpecSrc(c *C) {
spec := RefSpec("refs/heads/*:refs/remotes/origin/*")
c.Assert(spec.Src(), Equals, "refs/heads/*")
+ spec = RefSpec("+refs/heads/*:refs/remotes/origin/*")
+ c.Assert(spec.Src(), Equals, "refs/heads/*")
+
spec = RefSpec(":refs/heads/master")
c.Assert(spec.Src(), Equals, "")
+
+ spec = RefSpec("refs/heads/love+hate:refs/heads/love+hate")
+ c.Assert(spec.Src(), Equals, "refs/heads/love+hate")
+
+ spec = RefSpec("+refs/heads/love+hate:refs/heads/love+hate")
+ c.Assert(spec.Src(), Equals, "refs/heads/love+hate")
}
func (s *RefSpecSuite) TestRefSpecMatch(c *C) {
@@ -71,9 +80,19 @@ func (s *RefSpecSuite) TestRefSpecMatch(c *C) {
c.Assert(spec.Match(plumbing.ReferenceName("refs/heads/foo")), Equals, false)
c.Assert(spec.Match(plumbing.ReferenceName("refs/heads/master")), Equals, true)
+ spec = RefSpec("+refs/heads/master:refs/remotes/origin/master")
+ c.Assert(spec.Match(plumbing.ReferenceName("refs/heads/foo")), Equals, false)
+ c.Assert(spec.Match(plumbing.ReferenceName("refs/heads/master")), Equals, true)
+
spec = RefSpec(":refs/heads/master")
c.Assert(spec.Match(plumbing.ReferenceName("")), Equals, true)
c.Assert(spec.Match(plumbing.ReferenceName("refs/heads/master")), Equals, false)
+
+ spec = RefSpec("refs/heads/love+hate:heads/love+hate")
+ c.Assert(spec.Match(plumbing.ReferenceName("refs/heads/love+hate")), Equals, true)
+
+ spec = RefSpec("+refs/heads/love+hate:heads/love+hate")
+ c.Assert(spec.Match(plumbing.ReferenceName("refs/heads/love+hate")), Equals, true)
}
func (s *RefSpecSuite) TestRefSpecMatchGlob(c *C) {
diff --git a/example_test.go b/example_test.go
index e9d8e8b..ef7e3d3 100644
--- a/example_test.go
+++ b/example_test.go
@@ -24,12 +24,18 @@ func ExampleClone() {
// Clones the repository into the worktree (fs) and stores all the .git
// content into the storer
- _, _ = git.Clone(storer, fs, &git.CloneOptions{
+ _, err := git.Clone(storer, fs, &git.CloneOptions{
URL: "https://github.com/git-fixtures/basic.git",
})
+ if err != nil {
+ log.Fatal(err)
+ }
// Prints the content of the CHANGELOG file from the cloned repository
- changelog, _ := fs.Open("CHANGELOG")
+ changelog, err := fs.Open("CHANGELOG")
+ if err != nil {
+ log.Fatal(err)
+ }
io.Copy(os.Stdout, changelog)
// Output: Initial changelog
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..e269350
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,29 @@
+module gopkg.in/src-d/go-git.v4
+
+require (
+ github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 // indirect
+ github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/emirpasic/gods v1.9.0
+ github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
+ github.com/gliderlabs/ssh v0.1.1
+ github.com/google/go-cmp v0.2.0
+ github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99
+ github.com/jessevdk/go-flags v1.4.0
+ github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e
+ github.com/mitchellh/go-homedir v1.0.0
+ github.com/pelletier/go-buffruneio v0.2.0 // indirect
+ github.com/pkg/errors v0.8.0 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/sergi/go-diff v1.0.0
+ github.com/src-d/gcfg v1.3.0
+ github.com/stretchr/testify v1.2.2 // indirect
+ github.com/xanzy/ssh-agent v0.2.0
+ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793
+ golang.org/x/net v0.0.0-20180906233101-161cd47e91fd // indirect
+ golang.org/x/text v0.3.0
+ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127
+ gopkg.in/src-d/go-billy.v4 v4.2.1
+ gopkg.in/src-d/go-git-fixtures.v3 v3.1.1
+ gopkg.in/warnings.v0 v0.1.2 // indirect
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..e262a66
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,57 @@
+github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=
+github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
+github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
+github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/emirpasic/gods v1.9.0 h1:rUF4PuzEjMChMiNsVjdI+SyLu7rEqpQ5reNFnhC7oFo=
+github.com/emirpasic/gods v1.9.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
+github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
+github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
+github.com/gliderlabs/ssh v0.1.1 h1:j3L6gSLQalDETeEg/Jg0mGY0/y/N6zI2xX1978P0Uqw=
+github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
+github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
+github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
+github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA=
+github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e h1:RgQk53JHp/Cjunrr1WlsXSZpqXn+uREuHvUVcK82CV8=
+github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/pelletier/go-buffruneio v0.2.0 h1:U4t4R6YkofJ5xHm3dJzuRpPZ0mr5MMCoAWooScCR7aA=
+github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
+github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/src-d/gcfg v1.3.0 h1:2BEDr8r0I0b8h/fOqwtxCEiq2HJu8n2JGZJQFGXWLjg=
+github.com/src-d/gcfg v1.3.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
+github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/xanzy/ssh-agent v0.2.0 h1:Adglfbi5p9Z0BmK2oKU9nTG+zKfniSfnaMYB+ULd+Ro=
+github.com/xanzy/ssh-agent v0.2.0/go.mod h1:0NyE30eGUDliuLEHJgYte/zncp2zdTStcOnWhgSqHD8=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9 h1:lkiLiLBHGoH3XnqSLUIaBsilGMUjI+Uy2Xu2JLUtTas=
+golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/src-d/go-billy.v4 v4.2.1 h1:omN5CrMrMcQ+4I8bJ0wEhOBPanIRWzFC953IiXKdYzo=
+gopkg.in/src-d/go-billy.v4 v4.2.1/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk=
+gopkg.in/src-d/go-git-fixtures.v3 v3.1.1 h1:XWW/s5W18RaJpmo1l0IYGqXKuJITWRFuA45iOf1dKJs=
+gopkg.in/src-d/go-git-fixtures.v3 v3.1.1/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
+gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
+gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
diff --git a/object_walker.go b/object_walker.go
index 4cbbcca..f8b19cd 100644
--- a/object_walker.go
+++ b/object_walker.go
@@ -94,6 +94,8 @@ func (p *objectWalker) walkObjectTree(hash plumbing.Hash) error {
return err
}
}
+ case *object.Tag:
+ return p.walkObjectTree(obj.Target)
default:
// Error out on unhandled object types.
return fmt.Errorf("Unknown object %X %s %T\n", obj.ID(), obj.Type(), obj)
diff --git a/options.go b/options.go
index a87b497..b8bc1e9 100644
--- a/options.go
+++ b/options.go
@@ -3,7 +3,9 @@ package git
import (
"errors"
"regexp"
+ "strings"
+ "golang.org/x/crypto/openpgp"
"gopkg.in/src-d/go-git.v4/config"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/object"
@@ -307,12 +309,31 @@ func (o *ResetOptions) Validate(r *Repository) error {
return nil
}
+type LogOrder int8
+
+const (
+ LogOrderDefault LogOrder = iota
+ LogOrderDFS
+ LogOrderDFSPost
+ LogOrderBSF
+ LogOrderCommitterTime
+)
+
// LogOptions describes how a log action should be performed.
type LogOptions struct {
// When the From option is set the log will only contain commits
// reachable from it. If this option is not set, HEAD will be used as
// the default From.
From plumbing.Hash
+
+ // The default traversal algorithm is depth-first search.
+ // Set Order=LogOrderCommitterTime for ordering by committer time (more compatible with `git log`).
+ // Set Order=LogOrderBSF for breadth-first search.
+ Order LogOrder
+
+ // Show only those commits in which the specified file was inserted/updated.
+ // It is equivalent to running `git log -- <file-name>`.
+ FileName *string
}
var (
@@ -332,6 +353,10 @@ type CommitOptions struct {
// Parents are the parents commits for the new commit, by default when
// len(Parents) is zero, the hash of HEAD reference is used.
Parents []plumbing.Hash
+ // SignKey denotes a key to sign the commit with. A nil value here means the
+ // commit will not be signed. The private key must be present and already
+ // decrypted.
+ SignKey *openpgp.Entity
}
// Validate validates the fields and sets the default values.
@@ -358,6 +383,41 @@ func (o *CommitOptions) Validate(r *Repository) error {
return nil
}
+var (
+ ErrMissingName = errors.New("name field is required")
+ ErrMissingTagger = errors.New("tagger field is required")
+ ErrMissingMessage = errors.New("message field is required")
+)
+
+// CreateTagOptions describes how a tag object should be created.
+type CreateTagOptions struct {
+ // Tagger defines the signature of the tag creator.
+ Tagger *object.Signature
+ // Message defines the annotation of the tag. It is canonicalized during
+ // validation into the format expected by git - no leading whitespace and
+ // ending in a newline.
+ Message string
+ // SignKey denotes a key to sign the tag with. A nil value here means the tag
+ // will not be signed. The private key must be present and already decrypted.
+ SignKey *openpgp.Entity
+}
+
+// Validate validates the fields and sets the default values.
+func (o *CreateTagOptions) Validate(r *Repository, hash plumbing.Hash) error {
+ if o.Tagger == nil {
+ return ErrMissingTagger
+ }
+
+ if o.Message == "" {
+ return ErrMissingMessage
+ }
+
+ // Canonicalize the message into the expected message format.
+ o.Message = strings.TrimSpace(o.Message) + "\n"
+
+ return nil
+}
+
// ListOptions describes how a remote list should be performed.
type ListOptions struct {
// Auth credentials, if required, to use with the remote repository.
@@ -405,3 +465,14 @@ func (o *GrepOptions) Validate(w *Worktree) error {
return nil
}
+
+// PlainOpenOptions describes how opening a plain repository should be
+// performed.
+type PlainOpenOptions struct {
+ // DetectDotGit defines whether parent directories should be
+ // walked until a .git directory or file is found.
+ DetectDotGit bool
+}
+
+// Validate validates the fields and sets the default values.
+func (o *PlainOpenOptions) Validate() error { return nil }
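
A hedged sketch combining the new log options added above, committer-time ordering plus per-file filtering; the repository and file name are illustrative:

    package main

    import (
        "fmt"
        "log"

        git "gopkg.in/src-d/go-git.v4"
        "gopkg.in/src-d/go-git.v4/plumbing/object"
    )

    // logFile walks commits touching README.md in committer-time order.
    func logFile(repo *git.Repository) {
        fileName := "README.md"
        iter, err := repo.Log(&git.LogOptions{
            Order:    git.LogOrderCommitterTime,
            FileName: &fileName,
        })
        if err != nil {
            log.Fatal(err)
        }
        err = iter.ForEach(func(c *object.Commit) error {
            fmt.Println(c.Hash, c.Committer.When)
            return nil
        })
        if err != nil {
            log.Fatal(err)
        }
    }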
diff --git a/plumbing/cache/buffer_lru.go b/plumbing/cache/buffer_lru.go
new file mode 100644
index 0000000..acaf195
--- /dev/null
+++ b/plumbing/cache/buffer_lru.go
@@ -0,0 +1,98 @@
+package cache
+
+import (
+ "container/list"
+ "sync"
+)
+
+// BufferLRU implements an object cache with an LRU eviction policy and a
+// maximum size (measured in object size).
+type BufferLRU struct {
+ MaxSize FileSize
+
+ actualSize FileSize
+ ll *list.List
+ cache map[int64]*list.Element
+ mut sync.Mutex
+}
+
+// NewBufferLRU creates a new BufferLRU with the given maximum size. The maximum
+// size will never be exceeded.
+func NewBufferLRU(maxSize FileSize) *BufferLRU {
+ return &BufferLRU{MaxSize: maxSize}
+}
+
+// NewBufferLRUDefault creates a new BufferLRU with the default cache size.
+func NewBufferLRUDefault() *BufferLRU {
+ return &BufferLRU{MaxSize: DefaultMaxSize}
+}
+
+type buffer struct {
+ Key int64
+ Slice []byte
+}
+
+// Put puts a buffer into the cache. If the buffer is already in the cache, it
+// will be marked as used. Otherwise, it will be inserted. Buffers might
+// be evicted to make room for the new one.
+func (c *BufferLRU) Put(key int64, slice []byte) {
+ c.mut.Lock()
+ defer c.mut.Unlock()
+
+ if c.cache == nil {
+ c.actualSize = 0
+ c.cache = make(map[int64]*list.Element, 1000)
+ c.ll = list.New()
+ }
+
+ bufSize := FileSize(len(slice))
+ if ee, ok := c.cache[key]; ok {
+ oldBuf := ee.Value.(buffer)
+ // in this case bufSize is a delta: new size - old size
+ bufSize -= FileSize(len(oldBuf.Slice))
+ c.ll.MoveToFront(ee)
+ ee.Value = buffer{key, slice}
+ } else {
+ if bufSize > c.MaxSize {
+ return
+ }
+ ee := c.ll.PushFront(buffer{key, slice})
+ c.cache[key] = ee
+ }
+
+ c.actualSize += bufSize
+ for c.actualSize > c.MaxSize {
+ last := c.ll.Back()
+ lastObj := last.Value.(buffer)
+ lastSize := FileSize(len(lastObj.Slice))
+
+ c.ll.Remove(last)
+ delete(c.cache, lastObj.Key)
+ c.actualSize -= lastSize
+ }
+}
+
+// Get returns a buffer by its key. It marks the buffer as used. If the buffer
+// is not in the cache, (nil, false) will be returned.
+func (c *BufferLRU) Get(key int64) ([]byte, bool) {
+ c.mut.Lock()
+ defer c.mut.Unlock()
+
+ ee, ok := c.cache[key]
+ if !ok {
+ return nil, false
+ }
+
+ c.ll.MoveToFront(ee)
+ return ee.Value.(buffer).Slice, true
+}
+
+// Clear the content of this buffer cache.
+func (c *BufferLRU) Clear() {
+ c.mut.Lock()
+ defer c.mut.Unlock()
+
+ c.ll = nil
+ c.cache = nil
+ c.actualSize = 0
+}
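
A short sketch of the new buffer cache in isolation; keys are arbitrary int64s (in the packfile reader they are typically offsets), and the size is illustrative:

    package main

    import (
        "fmt"

        "gopkg.in/src-d/go-git.v4/plumbing/cache"
    )

    func main() {
        bc := cache.NewBufferLRU(4 * cache.KiByte)
        bc.Put(1024, []byte("inflated object bytes"))
        if b, ok := bc.Get(1024); ok {
            fmt.Println(len(b)) // Get also marks the buffer most recently used
        }
    }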
diff --git a/plumbing/cache/buffer_test.go b/plumbing/cache/buffer_test.go
new file mode 100644
index 0000000..3e3adc2
--- /dev/null
+++ b/plumbing/cache/buffer_test.go
@@ -0,0 +1,151 @@
+package cache
+
+import (
+ "bytes"
+ "sync"
+
+ . "gopkg.in/check.v1"
+)
+
+type BufferSuite struct {
+ c map[string]Buffer
+ aBuffer []byte
+ bBuffer []byte
+ cBuffer []byte
+ dBuffer []byte
+ eBuffer []byte
+}
+
+var _ = Suite(&BufferSuite{})
+
+func (s *BufferSuite) SetUpTest(c *C) {
+ s.aBuffer = []byte("a")
+ s.bBuffer = []byte("bbb")
+ s.cBuffer = []byte("c")
+ s.dBuffer = []byte("d")
+ s.eBuffer = []byte("ee")
+
+ s.c = make(map[string]Buffer)
+ s.c["two_bytes"] = NewBufferLRU(2 * Byte)
+ s.c["default_lru"] = NewBufferLRUDefault()
+}
+
+func (s *BufferSuite) TestPutSameBuffer(c *C) {
+ for _, o := range s.c {
+ o.Put(1, s.aBuffer)
+ o.Put(1, s.aBuffer)
+ _, ok := o.Get(1)
+ c.Assert(ok, Equals, true)
+ }
+}
+
+func (s *BufferSuite) TestPutSameBufferWithDifferentSize(c *C) {
+ aBuffer := []byte("a")
+ bBuffer := []byte("bbb")
+ cBuffer := []byte("ccccc")
+ dBuffer := []byte("ddddddd")
+
+ cache := NewBufferLRU(7 * Byte)
+ cache.Put(1, aBuffer)
+ cache.Put(1, bBuffer)
+ cache.Put(1, cBuffer)
+ cache.Put(1, dBuffer)
+
+ c.Assert(cache.MaxSize, Equals, 7*Byte)
+ c.Assert(cache.actualSize, Equals, 7*Byte)
+ c.Assert(cache.ll.Len(), Equals, 1)
+
+ buf, ok := cache.Get(1)
+ c.Assert(bytes.Equal(buf, dBuffer), Equals, true)
+ c.Assert(FileSize(len(buf)), Equals, 7*Byte)
+ c.Assert(ok, Equals, true)
+}
+
+func (s *BufferSuite) TestPutBigBuffer(c *C) {
+ for _, o := range s.c {
+ o.Put(1, s.bBuffer)
+ _, ok := o.Get(2)
+ c.Assert(ok, Equals, false)
+ }
+}
+
+func (s *BufferSuite) TestPutCacheOverflow(c *C) {
+ // this test only works with a specific size
+ o := s.c["two_bytes"]
+
+ o.Put(1, s.aBuffer)
+ o.Put(2, s.cBuffer)
+ o.Put(3, s.dBuffer)
+
+ obj, ok := o.Get(1)
+ c.Assert(ok, Equals, false)
+ c.Assert(obj, IsNil)
+ obj, ok = o.Get(2)
+ c.Assert(ok, Equals, true)
+ c.Assert(obj, NotNil)
+ obj, ok = o.Get(3)
+ c.Assert(ok, Equals, true)
+ c.Assert(obj, NotNil)
+}
+
+func (s *BufferSuite) TestEvictMultipleBuffers(c *C) {
+ o := s.c["two_bytes"]
+
+ o.Put(1, s.cBuffer)
+ o.Put(2, s.dBuffer) // now cache is full with two objects
+ o.Put(3, s.eBuffer) // this put should evict all previous objects
+
+ obj, ok := o.Get(1)
+ c.Assert(ok, Equals, false)
+ c.Assert(obj, IsNil)
+ obj, ok = o.Get(2)
+ c.Assert(ok, Equals, false)
+ c.Assert(obj, IsNil)
+ obj, ok = o.Get(3)
+ c.Assert(ok, Equals, true)
+ c.Assert(obj, NotNil)
+}
+
+func (s *BufferSuite) TestClear(c *C) {
+ for _, o := range s.c {
+ o.Put(1, s.aBuffer)
+ o.Clear()
+ obj, ok := o.Get(1)
+ c.Assert(ok, Equals, false)
+ c.Assert(obj, IsNil)
+ }
+}
+
+func (s *BufferSuite) TestConcurrentAccess(c *C) {
+ for _, o := range s.c {
+ var wg sync.WaitGroup
+
+ for i := 0; i < 1000; i++ {
+ wg.Add(3)
+ go func(i int) {
+ o.Put(int64(i), []byte{00})
+ wg.Done()
+ }(i)
+
+ go func(i int) {
+ if i%30 == 0 {
+ o.Clear()
+ }
+ wg.Done()
+ }(i)
+
+ go func(i int) {
+ o.Get(int64(i))
+ wg.Done()
+ }(i)
+ }
+
+ wg.Wait()
+ }
+}
+
+func (s *BufferSuite) TestDefaultLRU(c *C) {
+ defaultLRU := s.c["default_lru"].(*BufferLRU)
+
+ c.Assert(defaultLRU.MaxSize, Equals, DefaultMaxSize)
+}
diff --git a/plumbing/cache/common.go b/plumbing/cache/common.go
index e77baf0..2b7f36a 100644
--- a/plumbing/cache/common.go
+++ b/plumbing/cache/common.go
@@ -24,3 +24,16 @@ type Object interface {
// Clear clears every object from the cache.
Clear()
}
+
+// Buffer is an interface to a buffer cache.
+type Buffer interface {
+ // Put puts a buffer into the cache. If the buffer is already in the cache,
+ // it will be marked as used. Otherwise, it will be inserted. Buffers might
+ // be evicted to make room for the new one.
+ Put(key int64, slice []byte)
+ // Get returns a buffer by its key. It marks the buffer as used. If the
+ // buffer is not in the cache, (nil, false) will be returned.
+ Get(key int64) ([]byte, bool)
+ // Clear clears every object from the cache.
+ Clear()
+}
diff --git a/plumbing/cache/object_lru.go b/plumbing/cache/object_lru.go
index 0494539..53d8b02 100644
--- a/plumbing/cache/object_lru.go
+++ b/plumbing/cache/object_lru.go
@@ -42,20 +42,24 @@ func (c *ObjectLRU) Put(obj plumbing.EncodedObject) {
c.ll = list.New()
}
+ objSize := FileSize(obj.Size())
key := obj.Hash()
if ee, ok := c.cache[key]; ok {
+ oldObj := ee.Value.(plumbing.EncodedObject)
+ // in this case objSize is a delta: new size - old size
+ objSize -= FileSize(oldObj.Size())
c.ll.MoveToFront(ee)
ee.Value = obj
- return
- }
-
- objSize := FileSize(obj.Size())
-
- if objSize > c.MaxSize {
- return
+ } else {
+ if objSize > c.MaxSize {
+ return
+ }
+ ee := c.ll.PushFront(obj)
+ c.cache[key] = ee
}
- for c.actualSize+objSize > c.MaxSize {
+ c.actualSize += objSize
+ for c.actualSize > c.MaxSize {
last := c.ll.Back()
lastObj := last.Value.(plumbing.EncodedObject)
lastSize := FileSize(lastObj.Size())
@@ -64,10 +68,6 @@ func (c *ObjectLRU) Put(obj plumbing.EncodedObject) {
delete(c.cache, lastObj.Hash())
c.actualSize -= lastSize
}
-
- ee := c.ll.PushFront(obj)
- c.cache[key] = ee
- c.actualSize += objSize
}
// Get returns an object by its hash. It marks the object as used. If the object
diff --git a/plumbing/cache/object_test.go b/plumbing/cache/object_test.go
index ac3f0a3..b3e5f79 100644
--- a/plumbing/cache/object_test.go
+++ b/plumbing/cache/object_test.go
@@ -45,6 +45,25 @@ func (s *ObjectSuite) TestPutSameObject(c *C) {
}
}
+func (s *ObjectSuite) TestPutSameObjectWithDifferentSize(c *C) {
+ const hash = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+
+ cache := NewObjectLRU(7 * Byte)
+ cache.Put(newObject(hash, 1*Byte))
+ cache.Put(newObject(hash, 3*Byte))
+ cache.Put(newObject(hash, 5*Byte))
+ cache.Put(newObject(hash, 7*Byte))
+
+ c.Assert(cache.MaxSize, Equals, 7*Byte)
+ c.Assert(cache.actualSize, Equals, 7*Byte)
+ c.Assert(cache.ll.Len(), Equals, 1)
+
+ obj, ok := cache.Get(plumbing.NewHash(hash))
+ c.Assert(obj.Hash(), Equals, plumbing.NewHash(hash))
+ c.Assert(FileSize(obj.Size()), Equals, 7*Byte)
+ c.Assert(ok, Equals, true)
+}
+
func (s *ObjectSuite) TestPutBigObject(c *C) {
for _, o := range s.c {
o.Put(s.bObject)
diff --git a/plumbing/cache/queue.go b/plumbing/cache/queue.go
deleted file mode 100644
index 85413e0..0000000
--- a/plumbing/cache/queue.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package cache
-
-import "gopkg.in/src-d/go-git.v4/plumbing"
-
-// queue is a basic FIFO queue based on a circular list that resize as needed.
-type queue struct {
- elements []plumbing.Hash
- size int
- head int
- tail int
- count int
-}
-
-// newQueue returns a queue with the specified initial size
-func newQueue(size int) *queue {
- return &queue{
- elements: make([]plumbing.Hash, size),
- size: size,
- }
-}
-
-// Push adds a node to the queue.
-func (q *queue) Push(h plumbing.Hash) {
- if q.head == q.tail && q.count > 0 {
- elements := make([]plumbing.Hash, len(q.elements)+q.size)
- copy(elements, q.elements[q.head:])
- copy(elements[len(q.elements)-q.head:], q.elements[:q.head])
- q.head = 0
- q.tail = len(q.elements)
- q.elements = elements
- }
- q.elements[q.tail] = h
- q.tail = (q.tail + 1) % len(q.elements)
- q.count++
-}
-
-// Pop removes and returns a Hash from the queue in first to last order.
-func (q *queue) Pop() plumbing.Hash {
- if q.count == 0 {
- return plumbing.ZeroHash
- }
- node := q.elements[q.head]
- q.head = (q.head + 1) % len(q.elements)
- q.count--
- return node
-}
diff --git a/plumbing/format/diff/unified_encoder.go b/plumbing/format/diff/unified_encoder.go
index cf2a34b..8bd6d8a 100644
--- a/plumbing/format/diff/unified_encoder.go
+++ b/plumbing/format/diff/unified_encoder.go
@@ -237,9 +237,13 @@ func (c *hunksGenerator) addLineNumbers(la, lb int, linesBefore int, i int, op O
// we need to search for a reference for the next diff
switch {
case linesBefore != 0 && c.ctxLines != 0:
- clb = lb - c.ctxLines + 1
+ if lb > c.ctxLines {
+ clb = lb - c.ctxLines + 1
+ } else {
+ clb = 1
+ }
case c.ctxLines == 0:
- clb = lb - c.ctxLines
+ clb = lb
case i != len(c.chunks)-1:
next := c.chunks[i+1]
if next.Type() == op || next.Type() == Equal {
@@ -262,11 +266,15 @@ func (c *hunksGenerator) processEqualsLines(ls []string, i int) {
c.current.AddOp(Equal, c.afterContext...)
c.afterContext = nil
} else {
- c.current.AddOp(Equal, c.afterContext[:c.ctxLines]...)
+ ctxLines := c.ctxLines
+ if ctxLines > len(c.afterContext) {
+ ctxLines = len(c.afterContext)
+ }
+ c.current.AddOp(Equal, c.afterContext[:ctxLines]...)
c.hunks = append(c.hunks, c.current)
c.current = nil
- c.beforeContext = c.afterContext[c.ctxLines:]
+ c.beforeContext = c.afterContext[ctxLines:]
c.afterContext = nil
}
}
diff --git a/plumbing/format/diff/unified_encoder_test.go b/plumbing/format/diff/unified_encoder_test.go
index 6e12070..7736af1 100644
--- a/plumbing/format/diff/unified_encoder_test.go
+++ b/plumbing/format/diff/unified_encoder_test.go
@@ -155,6 +155,43 @@ var fixtures []*fixture = []*fixture{{
filePatches: []testFilePatch{{
from: &testFile{
mode: filemode.Regular,
+ path: "README.md",
+ seed: "hello\nworld\n",
+ },
+ to: &testFile{
+ mode: filemode.Regular,
+ path: "README.md",
+ seed: "hello\nbug\n",
+ },
+ chunks: []testChunk{{
+ content: "hello",
+ op: Equal,
+ }, {
+ content: "world",
+ op: Delete,
+ }, {
+ content: "bug",
+ op: Add,
+ }},
+ }},
+ },
+ desc: "positive negative number",
+ context: 2,
+ diff: `diff --git a/README.md b/README.md
+index 94954abda49de8615a048f8d2e64b5de848e27a1..f3dad9514629b9ff9136283ae331ad1fc95748a8 100644
+--- a/README.md
++++ b/README.md
+@@ -1,2 +1,2 @@
+ hello
+-world
++bug
+`,
+}, {
+ patch: testPatch{
+ message: "",
+ filePatches: []testFilePatch{{
+ from: &testFile{
+ mode: filemode.Regular,
path: "test.txt",
seed: "test",
},
@@ -476,6 +513,43 @@ index ab5eed5d4a2c33aeef67e0188ee79bed666bde6f..0adddcde4fd38042c354518351820eb0
W
`,
}, {
+ patch: oneChunkPatch,
+ desc: "modified deleting lines file with context to 6",
+ context: 6,
+ diff: `diff --git a/onechunk.txt b/onechunk.txt
+index ab5eed5d4a2c33aeef67e0188ee79bed666bde6f..0adddcde4fd38042c354518351820eb06c417c82 100644
+--- a/onechunk.txt
++++ b/onechunk.txt
+@@ -1,27 +1,23 @@
+-A
+ B
+ C
+ D
+ E
+ F
+ G
+-H
+ I
+ J
+ K
+ L
+ M
+ N
+-Ñ
+ O
+ P
+ Q
+ R
+ S
+ T
+-U
+ V
+ W
+ X
+ Y
+ Z
+`,
+}, {
patch: oneChunkPatch,
desc: "modified deleting lines file with context to 3",
diff --git a/plumbing/format/gitignore/dir.go b/plumbing/format/gitignore/dir.go
index 41dd624..1e88970 100644
--- a/plumbing/format/gitignore/dir.go
+++ b/plumbing/format/gitignore/dir.go
@@ -1,24 +1,31 @@
package gitignore
import (
+ "bytes"
"io/ioutil"
"os"
+ "os/user"
"strings"
"gopkg.in/src-d/go-billy.v4"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/config"
+ gioutil "gopkg.in/src-d/go-git.v4/utils/ioutil"
)
const (
commentPrefix = "#"
+ coreSection = "core"
eol = "\n"
+ excludesfile = "excludesfile"
gitDir = ".git"
gitignoreFile = ".gitignore"
+ gitconfigFile = ".gitconfig"
+ systemFile = "/etc/gitconfig"
)
-// ReadPatterns reads gitignore patterns recursively traversing through the directory
-// structure. The result is in the ascending order of priority (last higher).
-func ReadPatterns(fs billy.Filesystem, path []string) (ps []Pattern, err error) {
- f, err := fs.Open(fs.Join(append(path, gitignoreFile)...))
+// readIgnoreFile reads a specific git ignore file.
+func readIgnoreFile(fs billy.Filesystem, path []string, ignoreFile string) (ps []Pattern, err error) {
+ f, err := fs.Open(fs.Join(append(path, ignoreFile)...))
if err == nil {
defer f.Close()
@@ -33,6 +40,14 @@ func ReadPatterns(fs billy.Filesystem, path []string) (ps []Pattern, err error)
return nil, err
}
+ return
+}
+
+// ReadPatterns reads gitignore patterns recursively traversing through the directory
+// structure. The result is in the ascending order of priority (last higher).
+func ReadPatterns(fs billy.Filesystem, path []string) (ps []Pattern, err error) {
+ ps, _ = readIgnoreFile(fs, path, gitignoreFile)
+
var fis []os.FileInfo
fis, err = fs.ReadDir(fs.Join(path...))
if err != nil {
@@ -55,3 +70,67 @@ func ReadPatterns(fs billy.Filesystem, path []string) (ps []Pattern, err error)
return
}
+
+func loadPatterns(fs billy.Filesystem, path string) (ps []Pattern, err error) {
+ f, err := fs.Open(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil, nil
+ }
+ return nil, err
+ }
+
+ defer gioutil.CheckClose(f, &err)
+
+ b, err := ioutil.ReadAll(f)
+ if err != nil {
+ return
+ }
+
+ d := config.NewDecoder(bytes.NewBuffer(b))
+
+ raw := config.New()
+ if err = d.Decode(raw); err != nil {
+ return
+ }
+
+ s := raw.Section(coreSection)
+ efo := s.Options.Get(excludesfile)
+ if efo == "" {
+ return nil, nil
+ }
+
+ ps, err = readIgnoreFile(fs, nil, efo)
+ if os.IsNotExist(err) {
+ return nil, nil
+ }
+
+ return
+}
+
+// LoadGlobalPatterns loads gitignore patterns from the gitignore file
+// declared in a user's ~/.gitconfig file. If the ~/.gitconfig file does not
+// exist the function will return nil. If the core.excludesfile property
+// is not declared, the function will return nil. If the file pointed to by
+// the core.excludesfile property does not exist, the function will return nil.
+//
+// The function assumes fs is rooted at the root filesystem.
+func LoadGlobalPatterns(fs billy.Filesystem) (ps []Pattern, err error) {
+ usr, err := user.Current()
+ if err != nil {
+ return
+ }
+
+ return loadPatterns(fs, fs.Join(usr.HomeDir, gitconfigFile))
+}
+
+// LoadSystemPatterns loads gitignore patterns from the gitignore file
+// declared in a system's /etc/gitconfig file. If the /etc/gitconfig file does
+// not exist the function will return nil. If the core.excludesfile property
+// is not declared, the function will return nil. If the file pointed to by
+// the core.excludesfile property does not exist, the function will return nil.
+//
+// The function assumes fs is rooted at the root filesystem.
+func LoadSystemPatterns(fs billy.Filesystem) (ps []Pattern, err error) {
+ return loadPatterns(fs, systemFile)
+}
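
A note on wiring these together: the following is a minimal sketch (the repository path is a hypothetical placeholder, and osfs from go-billy is used for the real filesystem) that stacks system, global and repository patterns in ascending priority before building a matcher:

```go
package main

import (
	"fmt"

	"gopkg.in/src-d/go-billy.v4/osfs"
	"gopkg.in/src-d/go-git.v4/plumbing/format/gitignore"
)

func main() {
	root := osfs.New("/")             // Load*Patterns assume an fs rooted at "/"
	repo := osfs.New("/path/to/repo") // ReadPatterns walks the worktree; hypothetical path

	var ps []gitignore.Pattern
	if sys, err := gitignore.LoadSystemPatterns(root); err == nil {
		ps = append(ps, sys...)
	}
	if global, err := gitignore.LoadGlobalPatterns(root); err == nil {
		ps = append(ps, global...)
	}
	// repository patterns come last: the result is in ascending order of priority
	if local, err := gitignore.ReadPatterns(repo, nil); err == nil {
		ps = append(ps, local...)
	}

	m := gitignore.NewMatcher(ps)
	fmt.Println(m.Match([]string{"vendor", "gopkg.in"}, true))
}
```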
diff --git a/plumbing/format/gitignore/dir_test.go b/plumbing/format/gitignore/dir_test.go
index b8a5453..13e2d82 100644
--- a/plumbing/format/gitignore/dir_test.go
+++ b/plumbing/format/gitignore/dir_test.go
@@ -2,6 +2,8 @@ package gitignore
import (
"os"
+ "os/user"
+ "strconv"
. "gopkg.in/check.v1"
"gopkg.in/src-d/go-billy.v4"
@@ -9,12 +11,19 @@ import (
)
type MatcherSuite struct {
- FS billy.Filesystem
+ GFS billy.Filesystem // git repository root
+ RFS billy.Filesystem // root that contains user home
+ MCFS billy.Filesystem // root that contains user home, but missing ~/.gitconfig
+ MEFS billy.Filesystem // root that contains user home, but missing excludesfile entry
+ MIFS billy.Filesystem // root that contains user home, but missing global .gitignore
+
+ SFS billy.Filesystem // root that contains /etc/gitconfig
}
var _ = Suite(&MatcherSuite{})
func (s *MatcherSuite) SetUpTest(c *C) {
+ // setup generic git repository root
fs := memfs.New()
f, err := fs.Create(".gitignore")
c.Assert(err, IsNil)
@@ -36,11 +45,127 @@ func (s *MatcherSuite) SetUpTest(c *C) {
fs.MkdirAll("vendor/github.com", os.ModePerm)
fs.MkdirAll("vendor/gopkg.in", os.ModePerm)
- s.FS = fs
+ s.GFS = fs
+
+ // setup root that contains user home
+ usr, err := user.Current()
+ c.Assert(err, IsNil)
+
+ fs = memfs.New()
+ err = fs.MkdirAll(usr.HomeDir, os.ModePerm)
+ c.Assert(err, IsNil)
+
+ f, err = fs.Create(fs.Join(usr.HomeDir, gitconfigFile))
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte("[core]\n"))
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte(" excludesfile = " + strconv.Quote(fs.Join(usr.HomeDir, ".gitignore_global")) + "\n"))
+ c.Assert(err, IsNil)
+ err = f.Close()
+ c.Assert(err, IsNil)
+
+ f, err = fs.Create(fs.Join(usr.HomeDir, ".gitignore_global"))
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte("# IntelliJ\n"))
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte(".idea/\n"))
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte("*.iml\n"))
+ c.Assert(err, IsNil)
+ err = f.Close()
+ c.Assert(err, IsNil)
+
+ s.RFS = fs
+
+ // root that contains user home, but missing ~/.gitconfig
+ fs = memfs.New()
+ err = fs.MkdirAll(usr.HomeDir, os.ModePerm)
+ c.Assert(err, IsNil)
+
+ f, err = fs.Create(fs.Join(usr.HomeDir, ".gitignore_global"))
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte("# IntelliJ\n"))
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte(".idea/\n"))
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte("*.iml\n"))
+ c.Assert(err, IsNil)
+ err = f.Close()
+ c.Assert(err, IsNil)
+
+ s.MCFS = fs
+
+ // setup root that contains user home, but missing excludesfile entry
+ fs = memfs.New()
+ err = fs.MkdirAll(usr.HomeDir, os.ModePerm)
+ c.Assert(err, IsNil)
+
+ f, err = fs.Create(fs.Join(usr.HomeDir, gitconfigFile))
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte("[core]\n"))
+ c.Assert(err, IsNil)
+ err = f.Close()
+ c.Assert(err, IsNil)
+
+ f, err = fs.Create(fs.Join(usr.HomeDir, ".gitignore_global"))
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte("# IntelliJ\n"))
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte(".idea/\n"))
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte("*.iml\n"))
+ c.Assert(err, IsNil)
+ err = f.Close()
+ c.Assert(err, IsNil)
+
+ s.MEFS = fs
+
+ // setup root that contains user home, but missing global .gitignore
+ fs = memfs.New()
+ err = fs.MkdirAll(usr.HomeDir, os.ModePerm)
+ c.Assert(err, IsNil)
+
+ f, err = fs.Create(fs.Join(usr.HomeDir, gitconfigFile))
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte("[core]\n"))
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte(" excludesfile = " + strconv.Quote(fs.Join(usr.HomeDir, ".gitignore_global")) + "\n"))
+ c.Assert(err, IsNil)
+ err = f.Close()
+ c.Assert(err, IsNil)
+
+ s.MIFS = fs
+
+ // setup root that contains /etc/gitconfig
+ fs = memfs.New()
+ err = fs.MkdirAll("etc", os.ModePerm)
+ c.Assert(err, IsNil)
+
+ f, err = fs.Create(systemFile)
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte("[core]\n"))
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte(" excludesfile = /etc/gitignore_global\n"))
+ c.Assert(err, IsNil)
+ err = f.Close()
+ c.Assert(err, IsNil)
+
+ f, err = fs.Create("/etc/gitignore_global")
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte("# IntelliJ\n"))
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte(".idea/\n"))
+ c.Assert(err, IsNil)
+ _, err = f.Write([]byte("*.iml\n"))
+ c.Assert(err, IsNil)
+ err = f.Close()
+ c.Assert(err, IsNil)
+
+ s.SFS = fs
}
func (s *MatcherSuite) TestDir_ReadPatterns(c *C) {
- ps, err := ReadPatterns(s.FS, nil)
+ ps, err := ReadPatterns(s.GFS, nil)
c.Assert(err, IsNil)
c.Assert(ps, HasLen, 2)
@@ -48,3 +173,41 @@ func (s *MatcherSuite) TestDir_ReadPatterns(c *C) {
c.Assert(m.Match([]string{"vendor", "gopkg.in"}, true), Equals, true)
c.Assert(m.Match([]string{"vendor", "github.com"}, true), Equals, false)
}
+
+func (s *MatcherSuite) TestDir_LoadGlobalPatterns(c *C) {
+ ps, err := LoadGlobalPatterns(s.RFS)
+ c.Assert(err, IsNil)
+ c.Assert(ps, HasLen, 2)
+
+ m := NewMatcher(ps)
+ c.Assert(m.Match([]string{"go-git.v4.iml"}, true), Equals, true)
+ c.Assert(m.Match([]string{".idea"}, true), Equals, true)
+}
+
+func (s *MatcherSuite) TestDir_LoadGlobalPatternsMissingGitconfig(c *C) {
+ ps, err := LoadGlobalPatterns(s.MCFS)
+ c.Assert(err, IsNil)
+ c.Assert(ps, HasLen, 0)
+}
+
+func (s *MatcherSuite) TestDir_LoadGlobalPatternsMissingExcludesfile(c *C) {
+ ps, err := LoadGlobalPatterns(s.MEFS)
+ c.Assert(err, IsNil)
+ c.Assert(ps, HasLen, 0)
+}
+
+func (s *MatcherSuite) TestDir_LoadGlobalPatternsMissingGitignore(c *C) {
+ ps, err := LoadGlobalPatterns(s.MIFS)
+ c.Assert(err, IsNil)
+ c.Assert(ps, HasLen, 0)
+}
+
+func (s *MatcherSuite) TestDir_LoadSystemPatterns(c *C) {
+ ps, err := LoadSystemPatterns(s.SFS)
+ c.Assert(err, IsNil)
+ c.Assert(ps, HasLen, 2)
+
+ m := NewMatcher(ps)
+ c.Assert(m.Match([]string{"go-git.v4.iml"}, true), Equals, true)
+ c.Assert(m.Match([]string{".idea"}, true), Equals, true)
+}
diff --git a/plumbing/format/gitignore/pattern.go b/plumbing/format/gitignore/pattern.go
index 2603352..098cb50 100644
--- a/plumbing/format/gitignore/pattern.go
+++ b/plumbing/format/gitignore/pattern.go
@@ -133,6 +133,9 @@ func (p *pattern) globMatch(path []string, isDir bool) bool {
} else if match {
matched = true
break
+ } else if len(path) == 0 {
+ // if there is nothing left to match, fail
+ matched = false
}
}
} else {
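
The extra branch matters for patterns chaining `**` segments: once the path components are exhausted, the loop must record a non-match instead of carrying over a stale `matched` value. A small sketch of the now-correct behavior, mirroring the regression test below:

```go
p := gitignore.ParsePattern("**/android/**/GeneratedPluginRegistrant.java", nil)

// The path runs out before the final pattern segment can match, so this is
// NoMatch; previously the inner "**" could report a spurious match here.
res := p.Match([]string{"lib", "src", "android", "gradle.dart"}, false)
fmt.Println(res == gitignore.NoMatch) // true
```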
diff --git a/plumbing/format/gitignore/pattern_test.go b/plumbing/format/gitignore/pattern_test.go
index f94cef3..c410442 100644
--- a/plumbing/format/gitignore/pattern_test.go
+++ b/plumbing/format/gitignore/pattern_test.go
@@ -281,3 +281,9 @@ func (s *PatternSuite) TestGlobMatch_wrongPattern_onTraversal_mismatch(c *C) {
r := p.Match([]string{"value", "head", "vol["}, false)
c.Assert(r, Equals, NoMatch)
}
+
+func (s *PatternSuite) TestGlobMatch_issue_923(c *C) {
+ p := ParsePattern("**/android/**/GeneratedPluginRegistrant.java", nil)
+ r := p.Match([]string{"packages", "flutter_tools", "lib", "src", "android", "gradle.dart"}, false)
+ c.Assert(r, Equals, NoMatch)
+}
diff --git a/plumbing/format/idxfile/decoder.go b/plumbing/format/idxfile/decoder.go
index f361213..5b92782 100644
--- a/plumbing/format/idxfile/decoder.go
+++ b/plumbing/format/idxfile/decoder.go
@@ -6,7 +6,6 @@ import (
"errors"
"io"
- "gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/utils/binary"
)
@@ -18,6 +17,11 @@ var (
ErrMalformedIdxFile = errors.New("Malformed IDX file")
)
+const (
+ fanout = 256
+ objectIDLength = 20
+)
+
// Decoder reads and decodes idx files from an input stream.
type Decoder struct {
*bufio.Reader
@@ -28,13 +32,13 @@ func NewDecoder(r io.Reader) *Decoder {
return &Decoder{bufio.NewReader(r)}
}
-// Decode reads from the stream and decode the content into the Idxfile struct.
-func (d *Decoder) Decode(idx *Idxfile) error {
+// Decode reads from the stream and decodes the content into the MemoryIndex struct.
+func (d *Decoder) Decode(idx *MemoryIndex) error {
if err := validateHeader(d); err != nil {
return err
}
- flow := []func(*Idxfile, io.Reader) error{
+ flow := []func(*MemoryIndex, io.Reader) error{
readVersion,
readFanout,
readObjectNames,
@@ -49,10 +53,6 @@ func (d *Decoder) Decode(idx *Idxfile) error {
}
}
- if !idx.isValid() {
- return ErrMalformedIdxFile
- }
-
return nil
}
@@ -69,7 +69,7 @@ func validateHeader(r io.Reader) error {
return nil
}
-func readVersion(idx *Idxfile, r io.Reader) error {
+func readVersion(idx *MemoryIndex, r io.Reader) error {
v, err := binary.ReadUint32(r)
if err != nil {
return err
@@ -83,73 +83,92 @@ func readVersion(idx *Idxfile, r io.Reader) error {
return nil
}
-func readFanout(idx *Idxfile, r io.Reader) error {
- var err error
- for i := 0; i < 255; i++ {
- idx.Fanout[i], err = binary.ReadUint32(r)
+func readFanout(idx *MemoryIndex, r io.Reader) error {
+ for k := 0; k < fanout; k++ {
+ n, err := binary.ReadUint32(r)
if err != nil {
return err
}
+
+ idx.Fanout[k] = n
+ idx.FanoutMapping[k] = noMapping
}
- idx.ObjectCount, err = binary.ReadUint32(r)
- return err
+ return nil
}
-func readObjectNames(idx *Idxfile, r io.Reader) error {
- c := int(idx.ObjectCount)
- for i := 0; i < c; i++ {
- var ref plumbing.Hash
- if _, err := io.ReadFull(r, ref[:]); err != nil {
+func readObjectNames(idx *MemoryIndex, r io.Reader) error {
+ for k := 0; k < fanout; k++ {
+ var buckets uint32
+ if k == 0 {
+ buckets = idx.Fanout[k]
+ } else if idx.Fanout[k] >= idx.Fanout[k-1] {
+ buckets = idx.Fanout[k] - idx.Fanout[k-1]
+ } else {
+ // the fanout table is cumulative, so a decreasing entry is malformed;
+ // this must be checked before subtracting, since buckets is a uint32
+ // and a `buckets < 0` check could never fire
+ return ErrMalformedIdxFile
+ }
+
+ if buckets == 0 {
+ continue
+ }
+
+ idx.FanoutMapping[k] = len(idx.Names)
+
+ nameLen := int(buckets * objectIDLength)
+ bin := make([]byte, nameLen)
+ if _, err := io.ReadFull(r, bin); err != nil {
return err
}
- idx.Entries = append(idx.Entries, &Entry{Hash: ref})
+ idx.Names = append(idx.Names, bin)
+ idx.Offset32 = append(idx.Offset32, make([]byte, buckets*4))
+ idx.CRC32 = append(idx.CRC32, make([]byte, buckets*4))
}
return nil
}
-func readCRC32(idx *Idxfile, r io.Reader) error {
- c := int(idx.ObjectCount)
- for i := 0; i < c; i++ {
- if err := binary.Read(r, &idx.Entries[i].CRC32); err != nil {
- return err
+func readCRC32(idx *MemoryIndex, r io.Reader) error {
+ for k := 0; k < fanout; k++ {
+ if pos := idx.FanoutMapping[k]; pos != noMapping {
+ if _, err := io.ReadFull(r, idx.CRC32[pos]); err != nil {
+ return err
+ }
}
}
return nil
}
-func readOffsets(idx *Idxfile, r io.Reader) error {
- c := int(idx.ObjectCount)
-
- for i := 0; i < c; i++ {
- o, err := binary.ReadUint32(r)
- if err != nil {
- return err
+func readOffsets(idx *MemoryIndex, r io.Reader) error {
+ var o64cnt int
+ for k := 0; k < fanout; k++ {
+ if pos := idx.FanoutMapping[k]; pos != noMapping {
+ if _, err := io.ReadFull(r, idx.Offset32[pos]); err != nil {
+ return err
+ }
+
+ for p := 0; p < len(idx.Offset32[pos]); p += 4 {
+ if idx.Offset32[pos][p]&(byte(1)<<7) > 0 {
+ o64cnt++
+ }
+ }
}
-
- idx.Entries[i].Offset = uint64(o)
}
- for i := 0; i < c; i++ {
- if idx.Entries[i].Offset <= offsetLimit {
- continue
- }
-
- o, err := binary.ReadUint64(r)
- if err != nil {
+ if o64cnt > 0 {
+ idx.Offset64 = make([]byte, o64cnt*8)
+ if _, err := io.ReadFull(r, idx.Offset64); err != nil {
return err
}
-
- idx.Entries[i].Offset = o
}
return nil
}
-func readChecksums(idx *Idxfile, r io.Reader) error {
+func readChecksums(idx *MemoryIndex, r io.Reader) error {
if _, err := io.ReadFull(r, idx.PackfileChecksum[:]); err != nil {
return err
}
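
For reference, decoding an existing .idx into the new MemoryIndex and querying it looks roughly like this (a sketch; the pack file name is a placeholder):

```go
package main

import (
	"fmt"
	"os"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
)

func main() {
	// placeholder path: any version 2 idx file works
	f, err := os.Open(".git/objects/pack/pack-example.idx")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	idx := new(idxfile.MemoryIndex)
	if err := idxfile.NewDecoder(f).Decode(idx); err != nil {
		panic(err)
	}

	count, _ := idx.Count()
	fmt.Println("objects:", count)

	h := plumbing.NewHash("1669dce138d9b841a518c64b10914d88f5e488ea")
	if off, err := idx.FindOffset(h); err == nil {
		fmt.Println("offset:", off)
	}
}
```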
diff --git a/plumbing/format/idxfile/decoder_test.go b/plumbing/format/idxfile/decoder_test.go
index 20d6859..b43d7c5 100644
--- a/plumbing/format/idxfile/decoder_test.go
+++ b/plumbing/format/idxfile/decoder_test.go
@@ -4,11 +4,12 @@ import (
"bytes"
"encoding/base64"
"fmt"
+ "io"
+ "io/ioutil"
"testing"
+ "gopkg.in/src-d/go-git.v4/plumbing"
. "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
- "gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
- "gopkg.in/src-d/go-git.v4/storage/memory"
. "gopkg.in/check.v1"
"gopkg.in/src-d/go-git-fixtures.v3"
@@ -26,51 +27,34 @@ func (s *IdxfileSuite) TestDecode(c *C) {
f := fixtures.Basic().One()
d := NewDecoder(f.Idx())
- idx := &Idxfile{}
+ idx := new(MemoryIndex)
err := d.Decode(idx)
c.Assert(err, IsNil)
- c.Assert(idx.Entries, HasLen, 31)
- c.Assert(idx.Entries[0].Hash.String(), Equals, "1669dce138d9b841a518c64b10914d88f5e488ea")
- c.Assert(idx.Entries[0].Offset, Equals, uint64(615))
- c.Assert(idx.Entries[0].CRC32, Equals, uint32(3645019190))
+ count, _ := idx.Count()
+ c.Assert(count, Equals, int64(31))
- c.Assert(fmt.Sprintf("%x", idx.IdxChecksum), Equals, "fb794f1ec720b9bc8e43257451bd99c4be6fa1c9")
- c.Assert(fmt.Sprintf("%x", idx.PackfileChecksum), Equals, f.PackfileHash.String())
-}
-
-func (s *IdxfileSuite) TestDecodeCRCs(c *C) {
- f := fixtures.Basic().ByTag("ofs-delta").One()
-
- scanner := packfile.NewScanner(f.Packfile())
- storage := memory.NewStorage()
-
- pd, err := packfile.NewDecoder(scanner, storage)
+ hash := plumbing.NewHash("1669dce138d9b841a518c64b10914d88f5e488ea")
+ ok, err := idx.Contains(hash)
c.Assert(err, IsNil)
- _, err = pd.Decode()
- c.Assert(err, IsNil)
-
- i := pd.Index().ToIdxFile()
- i.Version = VersionSupported
+ c.Assert(ok, Equals, true)
- buf := bytes.NewBuffer(nil)
- e := NewEncoder(buf)
- _, err = e.Encode(i)
+ offset, err := idx.FindOffset(hash)
c.Assert(err, IsNil)
+ c.Assert(offset, Equals, int64(615))
- idx := &Idxfile{}
-
- d := NewDecoder(buf)
- err = d.Decode(idx)
+ crc32, err := idx.FindCRC32(hash)
c.Assert(err, IsNil)
+ c.Assert(crc32, Equals, uint32(3645019190))
- c.Assert(idx.Entries, DeepEquals, i.Entries)
+ c.Assert(fmt.Sprintf("%x", idx.IdxChecksum), Equals, "fb794f1ec720b9bc8e43257451bd99c4be6fa1c9")
+ c.Assert(fmt.Sprintf("%x", idx.PackfileChecksum), Equals, f.PackfileHash.String())
}
func (s *IdxfileSuite) TestDecode64bitsOffsets(c *C) {
f := bytes.NewBufferString(fixtureLarge4GB)
- idx := &Idxfile{}
+ idx := new(MemoryIndex)
d := NewDecoder(base64.NewDecoder(base64.StdEncoding, f))
err := d.Decode(idx)
@@ -88,29 +72,22 @@ func (s *IdxfileSuite) TestDecode64bitsOffsets(c *C) {
"35858be9c6f5914cbe6768489c41eb6809a2bceb": 5924278919,
}
- for _, e := range idx.Entries {
- c.Assert(expected[e.Hash.String()], Equals, e.Offset)
- }
-}
-
-func (s *IdxfileSuite) TestDecode64bitsOffsetsIdempotent(c *C) {
- f := bytes.NewBufferString(fixtureLarge4GB)
-
- expected := &Idxfile{}
-
- d := NewDecoder(base64.NewDecoder(base64.StdEncoding, f))
- err := d.Decode(expected)
+ iter, err := idx.Entries()
c.Assert(err, IsNil)
- buf := bytes.NewBuffer(nil)
- _, err = NewEncoder(buf).Encode(expected)
- c.Assert(err, IsNil)
+ var entries int
+ for {
+ e, err := iter.Next()
+ if err == io.EOF {
+ break
+ }
+ c.Assert(err, IsNil)
+ entries++
- idx := &Idxfile{}
- err = NewDecoder(buf).Decode(idx)
- c.Assert(err, IsNil)
+ c.Assert(expected[e.Hash.String()], Equals, e.Offset)
+ }
- c.Assert(idx.Entries, DeepEquals, expected.Entries)
+ c.Assert(entries, Equals, len(expected))
}
const fixtureLarge4GB = `/3RPYwAAAAIAAAAAAAAAAAAAAAAAAAABAAAAAQAAAAEAAAABAAAAAQAAAAEAAAABAAAAAQAAAAEA
@@ -139,3 +116,30 @@ AAAAAAAMgAAAAQAAAI6AAAACgAAAA4AAAASAAAAFAAAAAV9Qam8AAAABYR1ShwAAAACdxfYxAAAA
ANz1Di4AAAABPUnxJAAAAADNxzlGr6vCJpIFz4XaG/fi/f9C9zgQ8ptKSQpfQ1NMJBGTDTxxYGGp
ch2xUA==
`
+
+func BenchmarkDecode(b *testing.B) {
+ if err := fixtures.Init(); err != nil {
+ b.Errorf("unexpected error initializing fixtures: %s", err)
+ }
+
+ f := fixtures.Basic().One()
+ fixture, err := ioutil.ReadAll(f.Idx())
+ if err != nil {
+ b.Errorf("unexpected error reading idx file: %s", err)
+ }
+
+ defer func() {
+ if err := fixtures.Clean(); err != nil {
+ b.Errorf("unexpected error cleaning fixtures: %s", err)
+ }
+ }()
+
+ for i := 0; i < b.N; i++ {
+ f := bytes.NewBuffer(fixture)
+ idx := new(MemoryIndex)
+ d := NewDecoder(f)
+ if err := d.Decode(idx); err != nil {
+ b.Errorf("unexpected error decoding: %s", err)
+ }
+ }
+}
diff --git a/plumbing/format/idxfile/encoder.go b/plumbing/format/idxfile/encoder.go
index 40abfb8..e479511 100644
--- a/plumbing/format/idxfile/encoder.go
+++ b/plumbing/format/idxfile/encoder.go
@@ -4,12 +4,11 @@ import (
"crypto/sha1"
"hash"
"io"
- "sort"
"gopkg.in/src-d/go-git.v4/utils/binary"
)
-// Encoder writes Idxfile structs to an output stream.
+// Encoder writes MemoryIndex structs to an output stream.
type Encoder struct {
io.Writer
hash hash.Hash
@@ -22,11 +21,9 @@ func NewEncoder(w io.Writer) *Encoder {
return &Encoder{mw, h}
}
-// Encode encodes an Idxfile to the encoder writer.
-func (e *Encoder) Encode(idx *Idxfile) (int, error) {
- idx.Entries.Sort()
-
- flow := []func(*Idxfile) (int, error){
+// Encode encodes a MemoryIndex to the encoder writer.
+func (e *Encoder) Encode(idx *MemoryIndex) (int, error) {
+ flow := []func(*MemoryIndex) (int, error){
e.encodeHeader,
e.encodeFanout,
e.encodeHashes,
@@ -48,7 +45,7 @@ func (e *Encoder) Encode(idx *Idxfile) (int, error) {
return sz, nil
}
-func (e *Encoder) encodeHeader(idx *Idxfile) (int, error) {
+func (e *Encoder) encodeHeader(idx *MemoryIndex) (int, error) {
c, err := e.Write(idxHeader)
if err != nil {
return c, err
@@ -57,75 +54,81 @@ func (e *Encoder) encodeHeader(idx *Idxfile) (int, error) {
return c + 4, binary.WriteUint32(e, idx.Version)
}
-func (e *Encoder) encodeFanout(idx *Idxfile) (int, error) {
- fanout := idx.calculateFanout()
- for _, c := range fanout {
+func (e *Encoder) encodeFanout(idx *MemoryIndex) (int, error) {
+ for _, c := range idx.Fanout {
if err := binary.WriteUint32(e, c); err != nil {
return 0, err
}
}
- return 1024, nil
+ return fanout * 4, nil
}
-func (e *Encoder) encodeHashes(idx *Idxfile) (int, error) {
- sz := 0
- for _, ent := range idx.Entries {
- i, err := e.Write(ent.Hash[:])
- sz += i
+func (e *Encoder) encodeHashes(idx *MemoryIndex) (int, error) {
+ var size int
+ for k := 0; k < fanout; k++ {
+ pos := idx.FanoutMapping[k]
+ if pos == noMapping {
+ continue
+ }
+ n, err := e.Write(idx.Names[pos])
if err != nil {
- return sz, err
+ return size, err
}
+ size += n
}
-
- return sz, nil
+ return size, nil
}
-func (e *Encoder) encodeCRC32(idx *Idxfile) (int, error) {
- sz := 0
- for _, ent := range idx.Entries {
- err := binary.Write(e, ent.CRC32)
- sz += 4
+func (e *Encoder) encodeCRC32(idx *MemoryIndex) (int, error) {
+ var size int
+ for k := 0; k < fanout; k++ {
+ pos := idx.FanoutMapping[k]
+ if pos == noMapping {
+ continue
+ }
+ n, err := e.Write(idx.CRC32[pos])
if err != nil {
- return sz, err
+ return size, err
}
+
+ size += n
}
- return sz, nil
+ return size, nil
}
-func (e *Encoder) encodeOffsets(idx *Idxfile) (int, error) {
- sz := 0
-
- var o64bits []uint64
- for _, ent := range idx.Entries {
- o := ent.Offset
- if o > offsetLimit {
- o64bits = append(o64bits, o)
- o = offsetLimit + uint64(len(o64bits))
+func (e *Encoder) encodeOffsets(idx *MemoryIndex) (int, error) {
+ var size int
+ for k := 0; k < fanout; k++ {
+ pos := idx.FanoutMapping[k]
+ if pos == noMapping {
+ continue
}
- if err := binary.WriteUint32(e, uint32(o)); err != nil {
- return sz, err
+ n, err := e.Write(idx.Offset32[pos])
+ if err != nil {
+ return size, err
}
- sz += 4
+ size += n
}
- for _, o := range o64bits {
- if err := binary.WriteUint64(e, o); err != nil {
- return sz, err
+ if len(idx.Offset64) > 0 {
+ n, err := e.Write(idx.Offset64)
+ if err != nil {
+ return size, err
}
- sz += 8
+ size += n
}
- return sz, nil
+ return size, nil
}
-func (e *Encoder) encodeChecksums(idx *Idxfile) (int, error) {
+func (e *Encoder) encodeChecksums(idx *MemoryIndex) (int, error) {
if _, err := e.Write(idx.PackfileChecksum[:]); err != nil {
return 0, err
}
@@ -137,11 +140,3 @@ func (e *Encoder) encodeChecksums(idx *Idxfile) (int, error) {
return 40, nil
}
-
-// EntryList implements sort.Interface allowing sorting in increasing order.
-type EntryList []*Entry
-
-func (p EntryList) Len() int { return len(p) }
-func (p EntryList) Less(i, j int) bool { return p[i].Hash.String() < p[j].Hash.String() }
-func (p EntryList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-func (p EntryList) Sort() { sort.Sort(p) }
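
Since every section now reports the number of bytes actually written, the total returned by Encode can be sanity-checked against the v2 idx layout. A back-of-the-envelope helper (an illustration, not part of the package):

```go
// expectedIdxSize computes the on-disk size of a version 2 idx file with
// count objects, n64 of which need 64-bit offsets.
func expectedIdxSize(count, n64 int) int {
	const (
		header    = 4 + 4   // magic + version
		fanoutTbl = 256 * 4 // one uint32 per fanout slot
		trailer   = 20 + 20 // packfile checksum + idx checksum
	)
	// per object: 20-byte hash, 4-byte CRC32, 4-byte offset slot
	return header + fanoutTbl + count*(20+4+4) + n64*8 + trailer
}
```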
diff --git a/plumbing/format/idxfile/encoder_test.go b/plumbing/format/idxfile/encoder_test.go
index e5b96b7..e8deeea 100644
--- a/plumbing/format/idxfile/encoder_test.go
+++ b/plumbing/format/idxfile/encoder_test.go
@@ -4,37 +4,18 @@ import (
"bytes"
"io/ioutil"
- "gopkg.in/src-d/go-git.v4/plumbing"
. "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
. "gopkg.in/check.v1"
"gopkg.in/src-d/go-git-fixtures.v3"
)
-func (s *IdxfileSuite) TestEncode(c *C) {
- expected := &Idxfile{}
- expected.Add(plumbing.NewHash("4bfc730165c370df4a012afbb45ba3f9c332c0d4"), 82, 82)
- expected.Add(plumbing.NewHash("8fa2238efdae08d83c12ee176fae65ff7c99af46"), 42, 42)
-
- buf := bytes.NewBuffer(nil)
- e := NewEncoder(buf)
- _, err := e.Encode(expected)
- c.Assert(err, IsNil)
-
- idx := &Idxfile{}
- d := NewDecoder(buf)
- err = d.Decode(idx)
- c.Assert(err, IsNil)
-
- c.Assert(idx.Entries, DeepEquals, expected.Entries)
-}
-
func (s *IdxfileSuite) TestDecodeEncode(c *C) {
fixtures.ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
expected, err := ioutil.ReadAll(f.Idx())
c.Assert(err, IsNil)
- idx := &Idxfile{}
+ idx := new(MemoryIndex)
d := NewDecoder(bytes.NewBuffer(expected))
err = d.Decode(idx)
c.Assert(err, IsNil)
diff --git a/plumbing/format/idxfile/idxfile.go b/plumbing/format/idxfile/idxfile.go
index 6b05eaa..5fed278 100644
--- a/plumbing/format/idxfile/idxfile.go
+++ b/plumbing/format/idxfile/idxfile.go
@@ -1,30 +1,307 @@
package idxfile
-import "gopkg.in/src-d/go-git.v4/plumbing"
+import (
+ "bytes"
+ "io"
+ "sort"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/utils/binary"
+)
const (
// VersionSupported is the only idx version supported.
VersionSupported = 2
- offsetLimit = 0x7fffffff
+ noMapping = -1
)
var (
idxHeader = []byte{255, 't', 'O', 'c'}
)
-// Idxfile is the in memory representation of an idx file.
-type Idxfile struct {
- Version uint32
- Fanout [255]uint32
- ObjectCount uint32
- Entries EntryList
+// Index represents an index of a packfile.
+type Index interface {
+ // Contains checks whether the given hash is in the index.
+ Contains(h plumbing.Hash) (bool, error)
+ // FindOffset finds the offset in the packfile for the object with
+ // the given hash.
+ FindOffset(h plumbing.Hash) (int64, error)
+ // FindCRC32 finds the CRC32 of the object with the given hash.
+ FindCRC32(h plumbing.Hash) (uint32, error)
+ // FindHash finds the hash for the object with the given offset.
+ FindHash(o int64) (plumbing.Hash, error)
+ // Count returns the number of entries in the index.
+ Count() (int64, error)
+ // Entries returns an iterator to retrieve all index entries.
+ Entries() (EntryIter, error)
+ // EntriesByOffset returns an iterator to retrieve all index entries ordered
+ // by offset.
+ EntriesByOffset() (EntryIter, error)
+}
+
+// MemoryIndex is the in memory representation of an idx file.
+type MemoryIndex struct {
+ Version uint32
+ Fanout [256]uint32
+ // FanoutMapping maps the position in the fanout table to the position
+ // in the Names, Offset32 and CRC32 slices. This reduces memory usage by
+ // avoiding sparse arrays with mostly empty slots.
+ FanoutMapping [256]int
+ Names [][]byte
+ Offset32 [][]byte
+ CRC32 [][]byte
+ Offset64 []byte
PackfileChecksum [20]byte
IdxChecksum [20]byte
+
+ offsetHash map[int64]plumbing.Hash
+}
+
+var _ Index = (*MemoryIndex)(nil)
+
+// NewMemoryIndex returns an instance of a new MemoryIndex.
+func NewMemoryIndex() *MemoryIndex {
+ return &MemoryIndex{}
+}
+
+func (idx *MemoryIndex) findHashIndex(h plumbing.Hash) (int, bool) {
+ k := idx.FanoutMapping[h[0]]
+ if k == noMapping {
+ return 0, false
+ }
+
+ if len(idx.Names) <= k {
+ return 0, false
+ }
+
+ data := idx.Names[k]
+ high := uint64(len(idx.Offset32[k])) >> 2
+ if high == 0 {
+ return 0, false
+ }
+
+ low := uint64(0)
+ for {
+ mid := (low + high) >> 1
+ offset := mid * objectIDLength
+
+ cmp := bytes.Compare(h[:], data[offset:offset+objectIDLength])
+ if cmp < 0 {
+ high = mid
+ } else if cmp == 0 {
+ return int(mid), true
+ } else {
+ low = mid + 1
+ }
+
+ if low >= high {
+ break
+ }
+ }
+
+ return 0, false
+}
+
+// Contains implements the Index interface.
+func (idx *MemoryIndex) Contains(h plumbing.Hash) (bool, error) {
+ _, ok := idx.findHashIndex(h)
+ return ok, nil
+}
+
+// FindOffset implements the Index interface.
+func (idx *MemoryIndex) FindOffset(h plumbing.Hash) (int64, error) {
+ if len(idx.FanoutMapping) <= int(h[0]) {
+ return 0, plumbing.ErrObjectNotFound
+ }
+
+ k := idx.FanoutMapping[h[0]]
+ i, ok := idx.findHashIndex(h)
+ if !ok {
+ return 0, plumbing.ErrObjectNotFound
+ }
+
+ return idx.getOffset(k, i)
+}
+
+const isO64Mask = uint64(1) << 31
+
+func (idx *MemoryIndex) getOffset(firstLevel, secondLevel int) (int64, error) {
+ offset := secondLevel << 2
+ buf := bytes.NewBuffer(idx.Offset32[firstLevel][offset : offset+4])
+ ofs, err := binary.ReadUint32(buf)
+ if err != nil {
+ return -1, err
+ }
+
+ if (uint64(ofs) & isO64Mask) != 0 {
+ offset := 8 * (uint64(ofs) & ^isO64Mask)
+ buf := bytes.NewBuffer(idx.Offset64[offset : offset+8])
+ n, err := binary.ReadUint64(buf)
+ if err != nil {
+ return -1, err
+ }
+
+ return int64(n), nil
+ }
+
+ return int64(ofs), nil
+}
+
+// FindCRC32 implements the Index interface.
+func (idx *MemoryIndex) FindCRC32(h plumbing.Hash) (uint32, error) {
+ k := idx.FanoutMapping[h[0]]
+ i, ok := idx.findHashIndex(h)
+ if !ok {
+ return 0, plumbing.ErrObjectNotFound
+ }
+
+ return idx.getCRC32(k, i)
+}
+
+func (idx *MemoryIndex) getCRC32(firstLevel, secondLevel int) (uint32, error) {
+ offset := secondLevel << 2
+ buf := bytes.NewBuffer(idx.CRC32[firstLevel][offset : offset+4])
+ return binary.ReadUint32(buf)
+}
+
+// FindHash implements the Index interface.
+func (idx *MemoryIndex) FindHash(o int64) (plumbing.Hash, error) {
+ // Lazily generate the reverse offset/hash map if required.
+ if idx.offsetHash == nil {
+ if err := idx.genOffsetHash(); err != nil {
+ return plumbing.ZeroHash, err
+ }
+ }
+
+ hash, ok := idx.offsetHash[o]
+ if !ok {
+ return plumbing.ZeroHash, plumbing.ErrObjectNotFound
+ }
+
+ return hash, nil
+}
+
+// genOffsetHash generates the offset/hash mapping for reverse search.
+func (idx *MemoryIndex) genOffsetHash() error {
+ count, err := idx.Count()
+ if err != nil {
+ return err
+ }
+
+ idx.offsetHash = make(map[int64]plumbing.Hash, count)
+
+ iter, err := idx.Entries()
+ if err != nil {
+ return err
+ }
+
+ for {
+ entry, err := iter.Next()
+ if err != nil {
+ if err == io.EOF {
+ return nil
+ }
+ return err
+ }
+
+ idx.offsetHash[int64(entry.Offset)] = entry.Hash
+ }
+}
+
+// Count implements the Index interface.
+func (idx *MemoryIndex) Count() (int64, error) {
+ return int64(idx.Fanout[fanout-1]), nil
}
-func NewIdxfile() *Idxfile {
- return &Idxfile{}
+// Entries implements the Index interface.
+func (idx *MemoryIndex) Entries() (EntryIter, error) {
+ return &idxfileEntryIter{idx, 0, 0, 0}, nil
+}
+
+// EntriesByOffset implements the Index interface.
+func (idx *MemoryIndex) EntriesByOffset() (EntryIter, error) {
+ count, err := idx.Count()
+ if err != nil {
+ return nil, err
+ }
+
+ iter := &idxfileEntryOffsetIter{
+ entries: make(entriesByOffset, count),
+ }
+
+ entries, err := idx.Entries()
+ if err != nil {
+ return nil, err
+ }
+
+ for pos := 0; int64(pos) < count; pos++ {
+ entry, err := entries.Next()
+ if err != nil {
+ return nil, err
+ }
+
+ iter.entries[pos] = entry
+ }
+
+ sort.Sort(iter.entries)
+
+ return iter, nil
+}
+
+// EntryIter is an iterator that will return the entries in a packfile index.
+type EntryIter interface {
+ // Next returns the next entry in the packfile index.
+ Next() (*Entry, error)
+ // Close closes the iterator.
+ Close() error
+}
+
+type idxfileEntryIter struct {
+ idx *MemoryIndex
+ total int
+ firstLevel, secondLevel int
+}
+
+func (i *idxfileEntryIter) Next() (*Entry, error) {
+ for {
+ if i.firstLevel >= fanout {
+ return nil, io.EOF
+ }
+
+ if i.total >= int(i.idx.Fanout[i.firstLevel]) {
+ i.firstLevel++
+ i.secondLevel = 0
+ continue
+ }
+
+ entry := new(Entry)
+ ofs := i.secondLevel * objectIDLength
+ copy(entry.Hash[:], i.idx.Names[i.idx.FanoutMapping[i.firstLevel]][ofs:])
+
+ pos := i.idx.FanoutMapping[entry.Hash[0]]
+
+ offset, err := i.idx.getOffset(pos, i.secondLevel)
+ if err != nil {
+ return nil, err
+ }
+ entry.Offset = uint64(offset)
+
+ entry.CRC32, err = i.idx.getCRC32(pos, i.secondLevel)
+ if err != nil {
+ return nil, err
+ }
+
+ i.secondLevel++
+ i.total++
+
+ return entry, nil
+ }
+}
+
+func (i *idxfileEntryIter) Close() error {
+ i.firstLevel = fanout
+ return nil
}
// Entry is the in memory representation of an object entry in the idx file.
@@ -34,35 +311,37 @@ type Entry struct {
Offset uint64
}
-// Add adds a new Entry with the given values to the Idxfile.
-func (idx *Idxfile) Add(h plumbing.Hash, offset uint64, crc32 uint32) {
- idx.Entries = append(idx.Entries, &Entry{
- Hash: h,
- Offset: offset,
- CRC32: crc32,
- })
+type idxfileEntryOffsetIter struct {
+ entries entriesByOffset
+ pos int
}
-func (idx *Idxfile) isValid() bool {
- fanout := idx.calculateFanout()
- for k, c := range idx.Fanout {
- if fanout[k] != c {
- return false
- }
+func (i *idxfileEntryOffsetIter) Next() (*Entry, error) {
+ if i.pos >= len(i.entries) {
+ return nil, io.EOF
}
- return true
+ entry := i.entries[i.pos]
+ i.pos++
+
+ return entry, nil
}
-func (idx *Idxfile) calculateFanout() [256]uint32 {
- fanout := [256]uint32{}
- for _, e := range idx.Entries {
- fanout[e.Hash[0]]++
- }
+func (i *idxfileEntryOffsetIter) Close() error {
+ i.pos = len(i.entries) + 1
+ return nil
+}
- for i := 1; i < 256; i++ {
- fanout[i] += fanout[i-1]
- }
+type entriesByOffset []*Entry
+
+func (o entriesByOffset) Len() int {
+ return len(o)
+}
+
+func (o entriesByOffset) Less(i int, j int) bool {
+ return o[i].Offset < o[j].Offset
+}
- return fanout
+func (o entriesByOffset) Swap(i int, j int) {
+ o[i], o[j] = o[j], o[i]
}
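
The Offset32/Offset64 split follows the idx v2 format: each 32-bit slot either holds the offset directly or, when its most significant bit is set, indexes into the 64-bit table with its low 31 bits. A standalone sketch of what getOffset does, using the standard encoding/binary package in place of the internal utils/binary helper:

```go
// resolveOffset interprets one big-endian uint32 slot from Offset32. If the
// MSB is set, the remaining 31 bits index into offset64 (8 bytes per entry).
func resolveOffset(ofs32 uint32, offset64 []byte) int64 {
	const isO64Mask = uint32(1) << 31
	if ofs32&isO64Mask != 0 {
		pos := 8 * uint64(ofs32&^isO64Mask)
		return int64(binary.BigEndian.Uint64(offset64[pos : pos+8]))
	}
	return int64(ofs32)
}
```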
diff --git a/plumbing/format/idxfile/idxfile_test.go b/plumbing/format/idxfile/idxfile_test.go
new file mode 100644
index 0000000..0e0ca2a
--- /dev/null
+++ b/plumbing/format/idxfile/idxfile_test.go
@@ -0,0 +1,169 @@
+package idxfile_test
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "testing"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/src-d/go-git-fixtures.v3"
+)
+
+func BenchmarkFindOffset(b *testing.B) {
+ idx, err := fixtureIndex()
+ if err != nil {
+ b.Fatalf(err.Error())
+ }
+
+ for i := 0; i < b.N; i++ {
+ for _, h := range fixtureHashes {
+ _, err := idx.FindOffset(h)
+ if err != nil {
+ b.Fatalf("error getting offset: %s", err)
+ }
+ }
+ }
+}
+
+func BenchmarkFindCRC32(b *testing.B) {
+ idx, err := fixtureIndex()
+ if err != nil {
+ b.Fatalf(err.Error())
+ }
+
+ for i := 0; i < b.N; i++ {
+ for _, h := range fixtureHashes {
+ _, err := idx.FindCRC32(h)
+ if err != nil {
+ b.Fatalf("error getting crc32: %s", err)
+ }
+ }
+ }
+}
+
+func BenchmarkContains(b *testing.B) {
+ idx, err := fixtureIndex()
+ if err != nil {
+ b.Fatalf(err.Error())
+ }
+
+ for i := 0; i < b.N; i++ {
+ for _, h := range fixtureHashes {
+ ok, err := idx.Contains(h)
+ if err != nil {
+ b.Fatalf("error checking if hash is in index: %s", err)
+ }
+
+ if !ok {
+ b.Error("expected hash to be in index")
+ }
+ }
+ }
+}
+
+func BenchmarkEntries(b *testing.B) {
+ idx, err := fixtureIndex()
+ if err != nil {
+ b.Fatalf(err.Error())
+ }
+
+ for i := 0; i < b.N; i++ {
+ iter, err := idx.Entries()
+ if err != nil {
+ b.Fatalf("unexpected error getting entries: %s", err)
+ }
+
+ var entries int
+ for {
+ _, err := iter.Next()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+
+ b.Errorf("unexpected error getting entry: %s", err)
+ }
+
+ entries++
+ }
+
+ if entries != len(fixtureHashes) {
+ b.Errorf("expecting entries to be %d, got %d", len(fixtureHashes), entries)
+ }
+ }
+}
+
+type IndexSuite struct {
+ fixtures.Suite
+}
+
+var _ = Suite(&IndexSuite{})
+
+func (s *IndexSuite) TestFindHash(c *C) {
+ idx, err := fixtureIndex()
+ c.Assert(err, IsNil)
+
+ for i, pos := range fixtureOffsets {
+ hash, err := idx.FindHash(pos)
+ c.Assert(err, IsNil)
+ c.Assert(hash, Equals, fixtureHashes[i])
+ }
+}
+
+func (s *IndexSuite) TestEntriesByOffset(c *C) {
+ idx, err := fixtureIndex()
+ c.Assert(err, IsNil)
+
+ entries, err := idx.EntriesByOffset()
+ c.Assert(err, IsNil)
+
+ for _, pos := range fixtureOffsets {
+ e, err := entries.Next()
+ c.Assert(err, IsNil)
+
+ c.Assert(e.Offset, Equals, uint64(pos))
+ }
+}
+
+var fixtureHashes = []plumbing.Hash{
+ plumbing.NewHash("303953e5aa461c203a324821bc1717f9b4fff895"),
+ plumbing.NewHash("5296768e3d9f661387ccbff18c4dea6c997fd78c"),
+ plumbing.NewHash("03fc8d58d44267274edef4585eaeeb445879d33f"),
+ plumbing.NewHash("8f3ceb4ea4cb9e4a0f751795eb41c9a4f07be772"),
+ plumbing.NewHash("e0d1d625010087f79c9e01ad9d8f95e1628dda02"),
+ plumbing.NewHash("90eba326cdc4d1d61c5ad25224ccbf08731dd041"),
+ plumbing.NewHash("bab53055add7bc35882758a922c54a874d6b1272"),
+ plumbing.NewHash("1b8995f51987d8a449ca5ea4356595102dc2fbd4"),
+ plumbing.NewHash("35858be9c6f5914cbe6768489c41eb6809a2bceb"),
+}
+
+var fixtureOffsets = []int64{
+ 12,
+ 142,
+ 1601322837,
+ 2646996529,
+ 3452385606,
+ 3707047470,
+ 5323223332,
+ 5894072943,
+ 5924278919,
+}
+
+func fixtureIndex() (*idxfile.MemoryIndex, error) {
+ f := bytes.NewBufferString(fixtureLarge4GB)
+
+ idx := new(idxfile.MemoryIndex)
+
+ d := idxfile.NewDecoder(base64.NewDecoder(base64.StdEncoding, f))
+ err := d.Decode(idx)
+ if err != nil {
+ return nil, fmt.Errorf("unexpected error decoding index: %s", err)
+ }
+
+ return idx, nil
+}
diff --git a/plumbing/format/idxfile/writer.go b/plumbing/format/idxfile/writer.go
new file mode 100644
index 0000000..aa919e7
--- /dev/null
+++ b/plumbing/format/idxfile/writer.go
@@ -0,0 +1,186 @@
+package idxfile
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "sort"
+ "sync"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/utils/binary"
+)
+
+// objects implements sort.Interface and uses hash as sorting key.
+type objects []Entry
+
+// Writer implements the packfile Observer interface and is used to generate
+// indexes.
+type Writer struct {
+ m sync.Mutex
+
+ count uint32
+ checksum plumbing.Hash
+ objects objects
+ offset64 uint32
+ finished bool
+ index *MemoryIndex
+ added map[plumbing.Hash]struct{}
+}
+
+// Index returns a previously created MemoryIndex or creates a new one if
+// needed.
+func (w *Writer) Index() (*MemoryIndex, error) {
+ w.m.Lock()
+ defer w.m.Unlock()
+
+ if w.index == nil {
+ return w.createIndex()
+ }
+
+ return w.index, nil
+}
+
+// Add appends new object data.
+func (w *Writer) Add(h plumbing.Hash, pos uint64, crc uint32) {
+ w.m.Lock()
+ defer w.m.Unlock()
+
+ if w.added == nil {
+ w.added = make(map[plumbing.Hash]struct{})
+ }
+
+ if _, ok := w.added[h]; !ok {
+ w.added[h] = struct{}{}
+ w.objects = append(w.objects, Entry{h, crc, pos})
+ }
+
+}
+
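+// Finished reports whether OnFooter has been called and the index can be built.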
+func (w *Writer) Finished() bool {
+ return w.finished
+}
+
+// OnHeader implements packfile.Observer interface.
+func (w *Writer) OnHeader(count uint32) error {
+ w.count = count
+ w.objects = make(objects, 0, count)
+ return nil
+}
+
+// OnInflatedObjectHeader implements packfile.Observer interface.
+func (w *Writer) OnInflatedObjectHeader(t plumbing.ObjectType, objSize int64, pos int64) error {
+ return nil
+}
+
+// OnInflatedObjectContent implements packfile.Observer interface.
+func (w *Writer) OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, _ []byte) error {
+ w.Add(h, uint64(pos), crc)
+ return nil
+}
+
+// OnFooter implements packfile.Observer interface.
+func (w *Writer) OnFooter(h plumbing.Hash) error {
+ w.checksum = h
+ w.finished = true
+ _, err := w.createIndex()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// createIndex returns a filled MemoryIndex with the information filled by
+// the observer callbacks.
+func (w *Writer) createIndex() (*MemoryIndex, error) {
+ if !w.finished {
+ return nil, fmt.Errorf("the index still hasn't finished building")
+ }
+
+ idx := new(MemoryIndex)
+ w.index = idx
+
+ sort.Sort(w.objects)
+
+ // unmap all fans by default
+ for i := range idx.FanoutMapping {
+ idx.FanoutMapping[i] = noMapping
+ }
+
+ buf := new(bytes.Buffer)
+
+ last := -1
+ bucket := -1
+ for i, o := range w.objects {
+ fan := o.Hash[0]
+
+ // fill the gaps between fans
+ for j := last + 1; j < int(fan); j++ {
+ idx.Fanout[j] = uint32(i)
+ }
+
+ // update the number of objects for this position
+ idx.Fanout[fan] = uint32(i + 1)
+
+ // we move from one bucket to another, update counters and allocate
+ // memory
+ if last != int(fan) {
+ bucket++
+ idx.FanoutMapping[fan] = bucket
+ last = int(fan)
+
+ idx.Names = append(idx.Names, make([]byte, 0))
+ idx.Offset32 = append(idx.Offset32, make([]byte, 0))
+ idx.CRC32 = append(idx.CRC32, make([]byte, 0))
+ }
+
+ idx.Names[bucket] = append(idx.Names[bucket], o.Hash[:]...)
+
+ offset := o.Offset
+ if offset > math.MaxInt32 {
+ offset = w.addOffset64(offset)
+ }
+
+ buf.Truncate(0)
+ binary.WriteUint32(buf, uint32(offset))
+ idx.Offset32[bucket] = append(idx.Offset32[bucket], buf.Bytes()...)
+
+ buf.Truncate(0)
+ binary.WriteUint32(buf, uint32(o.CRC32))
+ idx.CRC32[bucket] = append(idx.CRC32[bucket], buf.Bytes()...)
+ }
+
+ for j := last + 1; j < 256; j++ {
+ idx.Fanout[j] = uint32(len(w.objects))
+ }
+
+ idx.Version = VersionSupported
+ idx.PackfileChecksum = w.checksum
+
+ return idx, nil
+}
+
+func (w *Writer) addOffset64(pos uint64) uint64 {
+ buf := new(bytes.Buffer)
+ binary.WriteUint64(buf, pos)
+ w.index.Offset64 = append(w.index.Offset64, buf.Bytes()...)
+
+ index := uint64(w.offset64 | (1 << 31))
+ w.offset64++
+
+ return index
+}
+
+func (o objects) Len() int {
+ return len(o)
+}
+
+func (o objects) Less(i int, j int) bool {
+ cmp := bytes.Compare(o[i].Hash[:], o[j].Hash[:])
+ return cmp < 0
+}
+
+func (o objects) Swap(i int, j int) {
+ o[i], o[j] = o[j], o[i]
+}
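
The trickiest part of createIndex is the fanout bookkeeping. Distilled out of the loop above into an illustrative standalone version: with entries sorted by hash, Fanout[b] ends up holding the number of objects whose first hash byte is less than or equal to b.

```go
// buildFanout shows the invariant maintained by createIndex: for hash-sorted
// entries, fan[b] counts the objects whose first hash byte is <= b.
func buildFanout(sorted []idxfile.Entry) [256]uint32 {
	var fan [256]uint32
	last := -1
	for i, e := range sorted {
		b := int(e.Hash[0])
		for j := last + 1; j < b; j++ {
			fan[j] = uint32(i) // fill the gap between buckets
		}
		fan[b] = uint32(i + 1)
		last = b
	}
	for j := last + 1; j < 256; j++ {
		fan[j] = uint32(len(sorted))
	}
	return fan
}
```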
diff --git a/plumbing/format/idxfile/writer_test.go b/plumbing/format/idxfile/writer_test.go
new file mode 100644
index 0000000..912211d
--- /dev/null
+++ b/plumbing/format/idxfile/writer_test.go
@@ -0,0 +1,98 @@
+package idxfile_test
+
+import (
+ "bytes"
+ "encoding/base64"
+ "io/ioutil"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/src-d/go-git-fixtures.v3"
+)
+
+type WriterSuite struct {
+ fixtures.Suite
+}
+
+var _ = Suite(&WriterSuite{})
+
+func (s *WriterSuite) TestWriter(c *C) {
+ f := fixtures.Basic().One()
+ scanner := packfile.NewScanner(f.Packfile())
+
+ obs := new(idxfile.Writer)
+ parser, err := packfile.NewParser(scanner, obs)
+ c.Assert(err, IsNil)
+
+ _, err = parser.Parse()
+ c.Assert(err, IsNil)
+
+ idx, err := obs.Index()
+ c.Assert(err, IsNil)
+
+ idxFile := f.Idx()
+ expected, err := ioutil.ReadAll(idxFile)
+ c.Assert(err, IsNil)
+ idxFile.Close()
+
+ buf := new(bytes.Buffer)
+ encoder := idxfile.NewEncoder(buf)
+ n, err := encoder.Encode(idx)
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, len(expected))
+
+ c.Assert(buf.Bytes(), DeepEquals, expected)
+}
+
+func (s *WriterSuite) TestWriterLarge(c *C) {
+ writer := new(idxfile.Writer)
+ err := writer.OnHeader(uint32(len(fixture4GbEntries)))
+ c.Assert(err, IsNil)
+
+ for _, o := range fixture4GbEntries {
+ err = writer.OnInflatedObjectContent(plumbing.NewHash(o.hash), o.offset, o.crc, nil)
+ c.Assert(err, IsNil)
+ }
+
+ err = writer.OnFooter(fixture4GbChecksum)
+ c.Assert(err, IsNil)
+
+ idx, err := writer.Index()
+ c.Assert(err, IsNil)
+
+ // load fixture index
+ f := bytes.NewBufferString(fixtureLarge4GB)
+ expected, err := ioutil.ReadAll(base64.NewDecoder(base64.StdEncoding, f))
+ c.Assert(err, IsNil)
+
+ buf := new(bytes.Buffer)
+ encoder := idxfile.NewEncoder(buf)
+ n, err := encoder.Encode(idx)
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, len(expected))
+
+ c.Assert(buf.Bytes(), DeepEquals, expected)
+}
+
+var (
+ fixture4GbChecksum = plumbing.NewHash("afabc2269205cf85da1bf7e2fdff42f73810f29b")
+
+ fixture4GbEntries = []struct {
+ offset int64
+ hash string
+ crc uint32
+ }{
+ {12, "303953e5aa461c203a324821bc1717f9b4fff895", 0xbc347c4c},
+ {142, "5296768e3d9f661387ccbff18c4dea6c997fd78c", 0xcdc22842},
+ {1601322837, "03fc8d58d44267274edef4585eaeeb445879d33f", 0x929dfaaa},
+ {2646996529, "8f3ceb4ea4cb9e4a0f751795eb41c9a4f07be772", 0xa61def8a},
+ {3452385606, "e0d1d625010087f79c9e01ad9d8f95e1628dda02", 0x06bea180},
+ {3707047470, "90eba326cdc4d1d61c5ad25224ccbf08731dd041", 0x7193f3ba},
+ {5323223332, "bab53055add7bc35882758a922c54a874d6b1272", 0xac269b8e},
+ {5894072943, "1b8995f51987d8a449ca5ea4356595102dc2fbd4", 0x2187c056},
+ {5924278919, "35858be9c6f5914cbe6768489c41eb6809a2bceb", 0x9c89d9d2},
+ }
+)
diff --git a/plumbing/format/index/decoder.go b/plumbing/format/index/decoder.go
index 1a58128..df25530 100644
--- a/plumbing/format/index/decoder.go
+++ b/plumbing/format/index/decoder.go
@@ -21,7 +21,7 @@ var (
// ErrMalformedSignature is returned by Decode when the index header file is
// malformed
ErrMalformedSignature = errors.New("malformed index signature file")
- // ErrInvalidChecksum is returned by Decode if the SHA1 hash missmatch with
+ // ErrInvalidChecksum is returned by Decode if the SHA1 hash does not match
// the read content
ErrInvalidChecksum = errors.New("invalid checksum")
diff --git a/plumbing/format/index/index.go b/plumbing/format/index/index.go
index 9de4230..fc7b8cd 100644
--- a/plumbing/format/index/index.go
+++ b/plumbing/format/index/index.go
@@ -4,6 +4,7 @@ import (
"bytes"
"errors"
"fmt"
+ "path/filepath"
"time"
"gopkg.in/src-d/go-git.v4/plumbing"
@@ -51,8 +52,20 @@ type Index struct {
ResolveUndo *ResolveUndo
}
+// Add creates a new Entry and returns it. The caller should first check that
+// another entry with the same path does not exist.
+func (i *Index) Add(path string) *Entry {
+ e := &Entry{
+ Name: filepath.ToSlash(path),
+ }
+
+ i.Entries = append(i.Entries, e)
+ return e
+}
+
// Entry returns the entry that matches the given path, if any.
func (i *Index) Entry(path string) (*Entry, error) {
+ path = filepath.ToSlash(path)
for _, e := range i.Entries {
if e.Name == path {
return e, nil
@@ -64,6 +77,7 @@ func (i *Index) Entry(path string) (*Entry, error) {
// Remove removes the entry that matches the given path and returns the deleted entry.
func (i *Index) Remove(path string) (*Entry, error) {
+ path = filepath.ToSlash(path)
for index, e := range i.Entries {
if e.Name == path {
i.Entries = append(i.Entries[:index], i.Entries[index+1:]...)
@@ -74,6 +88,24 @@ func (i *Index) Remove(path string) (*Entry, error) {
return nil, ErrEntryNotFound
}
+// Glob returns all entries matching pattern, or nil if there is no matching
+// entry. The syntax of patterns is the same as in filepath.Glob.
+func (i *Index) Glob(pattern string) (matches []*Entry, err error) {
+ pattern = filepath.ToSlash(pattern)
+ for _, e := range i.Entries {
+ m, err := match(pattern, e.Name)
+ if err != nil {
+ return nil, err
+ }
+
+ if m {
+ matches = append(matches, e)
+ }
+ }
+
+ return
+}
+
// String is equivalent to `git ls-files --stage --debug`
func (i *Index) String() string {
buf := bytes.NewBuffer(nil)
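
Both new methods normalize input through filepath.ToSlash, so callers can pass OS-native paths. A quick sketch:

```go
idx := &index.Index{}
e := idx.Add(filepath.Join("foo", "bar")) // stored as "foo/bar", even on Windows
e.Size = 42

matches, err := idx.Glob("foo/*")
if err == nil && len(matches) == 1 {
	fmt.Println(matches[0].Name) // foo/bar
}
```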
diff --git a/plumbing/format/index/index_test.go b/plumbing/format/index/index_test.go
index cad5f9c..ecf3c0d 100644
--- a/plumbing/format/index/index_test.go
+++ b/plumbing/format/index/index_test.go
@@ -1,9 +1,22 @@
package index
import (
+ "path/filepath"
+
. "gopkg.in/check.v1"
)
+func (s *IndexSuite) TestIndexAdd(c *C) {
+ idx := &Index{}
+ e := idx.Add("foo")
+ e.Size = 42
+
+ e, err := idx.Entry("foo")
+ c.Assert(err, IsNil)
+ c.Assert(e.Name, Equals, "foo")
+ c.Assert(e.Size, Equals, uint32(42))
+}
+
func (s *IndexSuite) TestIndexEntry(c *C) {
idx := &Index{
Entries: []*Entry{
@@ -37,3 +50,27 @@ func (s *IndexSuite) TestIndexRemove(c *C) {
c.Assert(e, IsNil)
c.Assert(err, Equals, ErrEntryNotFound)
}
+
+func (s *IndexSuite) TestIndexGlob(c *C) {
+ idx := &Index{
+ Entries: []*Entry{
+ {Name: "foo/bar/bar", Size: 42},
+ {Name: "foo/baz/qux", Size: 42},
+ {Name: "fux", Size: 82},
+ },
+ }
+
+ m, err := idx.Glob(filepath.Join("foo", "b*"))
+ c.Assert(err, IsNil)
+ c.Assert(m, HasLen, 2)
+ c.Assert(m[0].Name, Equals, "foo/bar/bar")
+ c.Assert(m[1].Name, Equals, "foo/baz/qux")
+
+ m, err = idx.Glob("f*")
+ c.Assert(err, IsNil)
+ c.Assert(m, HasLen, 3)
+
+ m, err = idx.Glob("f*/baz/q*")
+ c.Assert(err, IsNil)
+ c.Assert(m, HasLen, 1)
+}
diff --git a/plumbing/format/index/match.go b/plumbing/format/index/match.go
new file mode 100644
index 0000000..2891d7d
--- /dev/null
+++ b/plumbing/format/index/match.go
@@ -0,0 +1,186 @@
+package index
+
+import (
+ "path/filepath"
+ "runtime"
+ "unicode/utf8"
+)
+
+// match is filepath.Match with support for matching full paths, not only filenames
+// code from:
+// https://github.com/golang/go/blob/39852bf4cce6927e01d0136c7843f65a801738cb/src/path/filepath/match.go#L44-L224
+func match(pattern, name string) (matched bool, err error) {
+Pattern:
+ for len(pattern) > 0 {
+ var star bool
+ var chunk string
+ star, chunk, pattern = scanChunk(pattern)
+
+ // Look for match at current position.
+ t, ok, err := matchChunk(chunk, name)
+ // if we're the last chunk, make sure we've exhausted the name
+ // otherwise we'll give a false result even if we could still match
+ // using the star
+ if ok && (len(t) == 0 || len(pattern) > 0) {
+ name = t
+ continue
+ }
+ if err != nil {
+ return false, err
+ }
+ if star {
+ // Look for match skipping i+1 bytes.
+ // Unlike filepath.Match, '*' may also skip '/' here, so patterns
+ // can span multiple path components.
+ for i := 0; i < len(name); i++ {
+ t, ok, err := matchChunk(chunk, name[i+1:])
+ if ok {
+ // if we're the last chunk, make sure we exhausted the name
+ if len(pattern) == 0 && len(t) > 0 {
+ continue
+ }
+ name = t
+ continue Pattern
+ }
+ if err != nil {
+ return false, err
+ }
+ }
+ }
+ return false, nil
+ }
+ return len(name) == 0, nil
+}
+
+// scanChunk gets the next segment of pattern, which is a non-star string
+// possibly preceded by a star.
+func scanChunk(pattern string) (star bool, chunk, rest string) {
+ for len(pattern) > 0 && pattern[0] == '*' {
+ pattern = pattern[1:]
+ star = true
+ }
+ inrange := false
+ var i int
+Scan:
+ for i = 0; i < len(pattern); i++ {
+ switch pattern[i] {
+ case '\\':
+ if runtime.GOOS != "windows" {
+ // error check handled in matchChunk: bad pattern.
+ if i+1 < len(pattern) {
+ i++
+ }
+ }
+ case '[':
+ inrange = true
+ case ']':
+ inrange = false
+ case '*':
+ if !inrange {
+ break Scan
+ }
+ }
+ }
+ return star, pattern[0:i], pattern[i:]
+}
+
+// matchChunk checks whether chunk matches the beginning of s.
+// If so, it returns the remainder of s (after the match).
+// Chunk is all single-character operators: literals, char classes, and ?.
+func matchChunk(chunk, s string) (rest string, ok bool, err error) {
+ for len(chunk) > 0 {
+ if len(s) == 0 {
+ return
+ }
+ switch chunk[0] {
+ case '[':
+ // character class
+ r, n := utf8.DecodeRuneInString(s)
+ s = s[n:]
+ chunk = chunk[1:]
+ // We can't end right after '[', we're expecting at least
+ // a closing bracket and possibly a caret.
+ if len(chunk) == 0 {
+ err = filepath.ErrBadPattern
+ return
+ }
+ // possibly negated
+ negated := chunk[0] == '^'
+ if negated {
+ chunk = chunk[1:]
+ }
+ // parse all ranges
+ match := false
+ nrange := 0
+ for {
+ if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 {
+ chunk = chunk[1:]
+ break
+ }
+ var lo, hi rune
+ if lo, chunk, err = getEsc(chunk); err != nil {
+ return
+ }
+ hi = lo
+ if chunk[0] == '-' {
+ if hi, chunk, err = getEsc(chunk[1:]); err != nil {
+ return
+ }
+ }
+ if lo <= r && r <= hi {
+ match = true
+ }
+ nrange++
+ }
+ if match == negated {
+ return
+ }
+
+ case '?':
+ _, n := utf8.DecodeRuneInString(s)
+ s = s[n:]
+ chunk = chunk[1:]
+
+ case '\\':
+ if runtime.GOOS != "windows" {
+ chunk = chunk[1:]
+ if len(chunk) == 0 {
+ err = filepath.ErrBadPattern
+ return
+ }
+ }
+ fallthrough
+
+ default:
+ if chunk[0] != s[0] {
+ return
+ }
+ s = s[1:]
+ chunk = chunk[1:]
+ }
+ }
+ return s, true, nil
+}
+
+// getEsc gets a possibly-escaped character from chunk, for a character class.
+func getEsc(chunk string) (r rune, nchunk string, err error) {
+ if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' {
+ err = filepath.ErrBadPattern
+ return
+ }
+ if chunk[0] == '\\' && runtime.GOOS != "windows" {
+ chunk = chunk[1:]
+ if len(chunk) == 0 {
+ err = filepath.ErrBadPattern
+ return
+ }
+ }
+ r, n := utf8.DecodeRuneInString(chunk)
+ if r == utf8.RuneError && n == 1 {
+ err = filepath.ErrBadPattern
+ }
+ nchunk = chunk[n:]
+ if len(nchunk) == 0 {
+ err = filepath.ErrBadPattern
+ }
+ return
+}
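
The one behavioral difference from the stdlib copy is that '*' here may cross path separators, which is what lets Index.Glob patterns span directories. Inside the package (match is unexported), the contrast looks like:

```go
ok, _ := match("f*", "foo/bar")           // true: '*' may cross '/'
ok2, _ := filepath.Match("f*", "foo/bar") // false: stdlib '*' stops at separators
```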
diff --git a/plumbing/format/packfile/common.go b/plumbing/format/packfile/common.go
index 7dad1f6..2b4aceb 100644
--- a/plumbing/format/packfile/common.go
+++ b/plumbing/format/packfile/common.go
@@ -23,25 +23,28 @@ const (
maskType = uint8(112) // 0111 0000
)
-// UpdateObjectStorage updates the given storer.EncodedObjectStorer with the contents of the
+// UpdateObjectStorage updates the storer with the objects in the given
// packfile.
-func UpdateObjectStorage(s storer.EncodedObjectStorer, packfile io.Reader) error {
- if sw, ok := s.(storer.PackfileWriter); ok {
- return writePackfileToObjectStorage(sw, packfile)
+func UpdateObjectStorage(s storer.Storer, packfile io.Reader) error {
+ if pw, ok := s.(storer.PackfileWriter); ok {
+ return WritePackfileToObjectStorage(pw, packfile)
}
- stream := NewScanner(packfile)
- d, err := NewDecoder(stream, s)
+ p, err := NewParserWithStorage(NewScanner(packfile), s)
if err != nil {
return err
}
- _, err = d.Decode()
+ _, err = p.Parse()
return err
}
-func writePackfileToObjectStorage(sw storer.PackfileWriter, packfile io.Reader) error {
- var err error
+// WritePackfileToObjectStorage writes all the packfile objects into the given
+// object storage.
+func WritePackfileToObjectStorage(
+ sw storer.PackfileWriter,
+ packfile io.Reader,
+) (err error) {
w, err := sw.PackfileWriter()
if err != nil {
return err
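
Callers that previously built a Decoder by hand now just pass the storer and the stream to UpdateObjectStorage; the parser path is chosen automatically whenever the storer cannot write packfiles directly. A sketch using the in-memory storer from storage/memory (the pack path is a placeholder):

```go
storage := memory.NewStorage() // has no PackfileWriter, so the parser path runs

f, err := os.Open(".git/objects/pack/pack-example.pack")
if err != nil {
	panic(err)
}
defer f.Close()

if err := packfile.UpdateObjectStorage(storage, f); err != nil {
	panic(err)
}
```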
diff --git a/plumbing/format/packfile/decoder.go b/plumbing/format/packfile/decoder.go
deleted file mode 100644
index f706e5d..0000000
--- a/plumbing/format/packfile/decoder.go
+++ /dev/null
@@ -1,495 +0,0 @@
-package packfile
-
-import (
- "bytes"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/cache"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
-)
-
-// Format specifies if the packfile uses ref-deltas or ofs-deltas.
-type Format int
-
-// Possible values of the Format type.
-const (
- UnknownFormat Format = iota
- OFSDeltaFormat
- REFDeltaFormat
-)
-
-var (
- // ErrMaxObjectsLimitReached is returned by Decode when the number
- // of objects in the packfile is higher than
- // Decoder.MaxObjectsLimit.
- ErrMaxObjectsLimitReached = NewError("max. objects limit reached")
- // ErrInvalidObject is returned by Decode when an invalid object is
- // found in the packfile.
- ErrInvalidObject = NewError("invalid git object")
- // ErrPackEntryNotFound is returned by Decode when a reference in
- // the packfile references and unknown object.
- ErrPackEntryNotFound = NewError("can't find a pack entry")
- // ErrZLib is returned by Decode when there was an error unzipping
- // the packfile contents.
- ErrZLib = NewError("zlib reading error")
- // ErrCannotRecall is returned by RecallByOffset or RecallByHash if the object
- // to recall cannot be returned.
- ErrCannotRecall = NewError("cannot recall object")
- // ErrResolveDeltasNotSupported is returned if a NewDecoder is used with a
- // non-seekable scanner and without a plumbing.ObjectStorage
- ErrResolveDeltasNotSupported = NewError("resolve delta is not supported")
- // ErrNonSeekable is returned if a ReadObjectAt method is called without a
- // seekable scanner
- ErrNonSeekable = NewError("non-seekable scanner")
- // ErrRollback error making Rollback over a transaction after an error
- ErrRollback = NewError("rollback error, during set error")
- // ErrAlreadyDecoded is returned if NewDecoder is called for a second time
- ErrAlreadyDecoded = NewError("packfile was already decoded")
-)
-
-// Decoder reads and decodes packfiles from an input Scanner, if an ObjectStorer
-// was provided the decoded objects are store there. If not the decode object
-// is destroyed. The Offsets and CRCs are calculated whether an
-// ObjectStorer was provided or not.
-type Decoder struct {
- deltaBaseCache cache.Object
-
- s *Scanner
- o storer.EncodedObjectStorer
- tx storer.Transaction
-
- isDecoded bool
-
- // hasBuiltIndex indicates if the index is fully built or not. If it is not,
- // will be built incrementally while decoding.
- hasBuiltIndex bool
- idx *Index
-
- offsetToType map[int64]plumbing.ObjectType
- decoderType plumbing.ObjectType
-}
-
-// NewDecoder returns a new Decoder that decodes a Packfile using the given
-// Scanner and stores the objects in the provided EncodedObjectStorer. ObjectStorer can be nil, in this
-// If the passed EncodedObjectStorer is nil, objects are not stored, but
-// offsets on the Packfile and CRCs are calculated.
-//
-// If EncodedObjectStorer is nil and the Scanner is not Seekable, ErrNonSeekable is
-// returned.
-//
-// If the ObjectStorer implements storer.Transactioner, a transaction is created
-// during the Decode execution. If anything fails, Rollback is called
-func NewDecoder(s *Scanner, o storer.EncodedObjectStorer) (*Decoder, error) {
- return NewDecoderForType(s, o, plumbing.AnyObject,
- cache.NewObjectLRUDefault())
-}
-
-// NewDecoderWithCache is a version of NewDecoder where cache can be specified.
-func NewDecoderWithCache(s *Scanner, o storer.EncodedObjectStorer,
- cacheObject cache.Object) (*Decoder, error) {
-
- return NewDecoderForType(s, o, plumbing.AnyObject, cacheObject)
-}
-
-// NewDecoderForType returns a new Decoder but in this case for a specific object type.
-// When an object is read using this Decoder instance and it is not of the same type of
-// the specified one, nil will be returned. This is intended to avoid the content
-// deserialization of all the objects.
-//
-// cacheObject is a cache.Object implementation that is used to speed up the
-// process. If cache is not needed you can pass nil. To create an LRU cache
-// object with the default size you can use the helper cache.ObjectLRUDefault().
-func NewDecoderForType(s *Scanner, o storer.EncodedObjectStorer,
- t plumbing.ObjectType, cacheObject cache.Object) (*Decoder, error) {
-
- if t == plumbing.OFSDeltaObject ||
- t == plumbing.REFDeltaObject ||
- t == plumbing.InvalidObject {
- return nil, plumbing.ErrInvalidType
- }
-
- if !canResolveDeltas(s, o) {
- return nil, ErrResolveDeltasNotSupported
- }
-
- return &Decoder{
- s: s,
- o: o,
- deltaBaseCache: cacheObject,
-
- idx: NewIndex(0),
- offsetToType: make(map[int64]plumbing.ObjectType),
- decoderType: t,
- }, nil
-}
-
-func canResolveDeltas(s *Scanner, o storer.EncodedObjectStorer) bool {
- return s.IsSeekable || o != nil
-}
-
-// Decode reads a packfile and stores it in the value pointed to by s. The
-// offsets and the CRCs are calculated by this method
-func (d *Decoder) Decode() (checksum plumbing.Hash, err error) {
- defer func() { d.isDecoded = true }()
-
- if d.isDecoded {
- return plumbing.ZeroHash, ErrAlreadyDecoded
- }
-
- if err := d.doDecode(); err != nil {
- return plumbing.ZeroHash, err
- }
-
- return d.s.Checksum()
-}
-
-func (d *Decoder) doDecode() error {
- _, count, err := d.s.Header()
- if err != nil {
- return err
- }
-
- if !d.hasBuiltIndex {
- d.idx = NewIndex(int(count))
- }
- defer func() { d.hasBuiltIndex = true }()
-
- _, isTxStorer := d.o.(storer.Transactioner)
- switch {
- case d.o == nil:
- return d.decodeObjects(int(count))
- case isTxStorer:
- return d.decodeObjectsWithObjectStorerTx(int(count))
- default:
- return d.decodeObjectsWithObjectStorer(int(count))
- }
-}
-
-func (d *Decoder) decodeObjects(count int) error {
- for i := 0; i < count; i++ {
- if _, err := d.DecodeObject(); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (d *Decoder) decodeObjectsWithObjectStorer(count int) error {
- for i := 0; i < count; i++ {
- obj, err := d.DecodeObject()
- if err != nil {
- return err
- }
-
- if _, err := d.o.SetEncodedObject(obj); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (d *Decoder) decodeObjectsWithObjectStorerTx(count int) error {
- d.tx = d.o.(storer.Transactioner).Begin()
-
- for i := 0; i < count; i++ {
- obj, err := d.DecodeObject()
- if err != nil {
- return err
- }
-
- if _, err := d.tx.SetEncodedObject(obj); err != nil {
- if rerr := d.tx.Rollback(); rerr != nil {
- return ErrRollback.AddDetails(
- "error: %s, during tx.Set error: %s", rerr, err,
- )
- }
-
- return err
- }
-
- }
-
- return d.tx.Commit()
-}
-
-// DecodeObject reads the next object from the scanner and returns it. This
-// method can be used instead of the Decode method to work in an incremental
-// way. If the decoder was created with the NewDecoderForType constructor and
-// the decoded object is not of the specified type, nil is returned.
-func (d *Decoder) DecodeObject() (plumbing.EncodedObject, error) {
- return d.doDecodeObject(d.decoderType)
-}
-
-func (d *Decoder) doDecodeObject(t plumbing.ObjectType) (plumbing.EncodedObject, error) {
- h, err := d.s.NextObjectHeader()
- if err != nil {
- return nil, err
- }
-
- if t == plumbing.AnyObject {
- return d.decodeByHeader(h)
- }
-
- return d.decodeIfSpecificType(h)
-}
-
-func (d *Decoder) decodeIfSpecificType(h *ObjectHeader) (plumbing.EncodedObject, error) {
- var (
- obj plumbing.EncodedObject
- realType plumbing.ObjectType
- err error
- )
- switch h.Type {
- case plumbing.OFSDeltaObject:
- realType, err = d.ofsDeltaType(h.OffsetReference)
- case plumbing.REFDeltaObject:
- realType, err = d.refDeltaType(h.Reference)
- if err == plumbing.ErrObjectNotFound {
- obj, err = d.decodeByHeader(h)
- if err == nil {
- realType = obj.Type()
- }
- }
- default:
- realType = h.Type
- }
-
- if err != nil {
- return nil, err
- }
-
- d.offsetToType[h.Offset] = realType
-
- if d.decoderType == realType {
- if obj != nil {
- return obj, nil
- }
-
- return d.decodeByHeader(h)
- }
-
- return nil, nil
-}
-
-func (d *Decoder) ofsDeltaType(offset int64) (plumbing.ObjectType, error) {
- t, ok := d.offsetToType[offset]
- if !ok {
- return plumbing.InvalidObject, plumbing.ErrObjectNotFound
- }
-
- return t, nil
-}
-
-func (d *Decoder) refDeltaType(ref plumbing.Hash) (plumbing.ObjectType, error) {
- e, ok := d.idx.LookupHash(ref)
- if !ok {
- return plumbing.InvalidObject, plumbing.ErrObjectNotFound
- }
-
- return d.ofsDeltaType(int64(e.Offset))
-}
-
-func (d *Decoder) decodeByHeader(h *ObjectHeader) (plumbing.EncodedObject, error) {
- obj := d.newObject()
- obj.SetSize(h.Length)
- obj.SetType(h.Type)
-
- var crc uint32
- var err error
- switch h.Type {
- case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
- crc, err = d.fillRegularObjectContent(obj)
- case plumbing.REFDeltaObject:
- crc, err = d.fillREFDeltaObjectContent(obj, h.Reference)
- case plumbing.OFSDeltaObject:
- crc, err = d.fillOFSDeltaObjectContent(obj, h.OffsetReference)
- default:
- err = ErrInvalidObject.AddDetails("type %q", h.Type)
- }
-
- if err != nil {
- return obj, err
- }
-
- if !d.hasBuiltIndex {
- d.idx.Add(obj.Hash(), uint64(h.Offset), crc)
- }
-
- return obj, nil
-}
-
-func (d *Decoder) newObject() plumbing.EncodedObject {
- if d.o == nil {
- return &plumbing.MemoryObject{}
- }
-
- return d.o.NewEncodedObject()
-}
-
-// DecodeObjectAt reads an object at the given location. Every EncodedObject
-// returned is added into an internal index. This is intended to be able to
-// regenerate objects from deltas (offset deltas or reference deltas) without a
-// package index (.idx file). If Decode wasn't called previously, object offsets
-// should be provided using the SetIndex method. It decodes the object
-// regardless of the Decoder type.
-func (d *Decoder) DecodeObjectAt(offset int64) (plumbing.EncodedObject, error) {
- if !d.s.IsSeekable {
- return nil, ErrNonSeekable
- }
-
- beforeJump, err := d.s.SeekFromStart(offset)
- if err != nil {
- return nil, err
- }
-
- defer func() {
- _, seekErr := d.s.SeekFromStart(beforeJump)
- if err == nil {
- err = seekErr
- }
- }()
-
- return d.doDecodeObject(plumbing.AnyObject)
-}
-
-func (d *Decoder) fillRegularObjectContent(obj plumbing.EncodedObject) (uint32, error) {
- w, err := obj.Writer()
- if err != nil {
- return 0, err
- }
-
- _, crc, err := d.s.NextObject(w)
- return crc, err
-}
-
-func (d *Decoder) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plumbing.Hash) (uint32, error) {
- buf := bufPool.Get().(*bytes.Buffer)
- buf.Reset()
- _, crc, err := d.s.NextObject(buf)
- if err != nil {
- return 0, err
- }
-
- base, ok := d.cacheGet(ref)
- if !ok {
- base, err = d.recallByHash(ref)
- if err != nil {
- return 0, err
- }
- }
-
- obj.SetType(base.Type())
- err = ApplyDelta(obj, base, buf.Bytes())
- d.cachePut(obj)
- bufPool.Put(buf)
-
- return crc, err
-}
-
-func (d *Decoder) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset int64) (uint32, error) {
- buf := bytes.NewBuffer(nil)
- _, crc, err := d.s.NextObject(buf)
- if err != nil {
- return 0, err
- }
-
- e, ok := d.idx.LookupOffset(uint64(offset))
- var base plumbing.EncodedObject
- if ok {
- base, ok = d.cacheGet(e.Hash)
- }
-
- if !ok {
- base, err = d.recallByOffset(offset)
- if err != nil {
- return 0, err
- }
-
- d.cachePut(base)
- }
-
- obj.SetType(base.Type())
- err = ApplyDelta(obj, base, buf.Bytes())
- d.cachePut(obj)
-
- return crc, err
-}
-
-func (d *Decoder) cacheGet(h plumbing.Hash) (plumbing.EncodedObject, bool) {
- if d.deltaBaseCache == nil {
- return nil, false
- }
-
- return d.deltaBaseCache.Get(h)
-}
-
-func (d *Decoder) cachePut(obj plumbing.EncodedObject) {
- if d.deltaBaseCache == nil {
- return
- }
-
- d.deltaBaseCache.Put(obj)
-}
-
-func (d *Decoder) recallByOffset(o int64) (plumbing.EncodedObject, error) {
- if d.s.IsSeekable {
- return d.DecodeObjectAt(o)
- }
-
- if e, ok := d.idx.LookupOffset(uint64(o)); ok {
- return d.recallByHashNonSeekable(e.Hash)
- }
-
- return nil, plumbing.ErrObjectNotFound
-}
-
-func (d *Decoder) recallByHash(h plumbing.Hash) (plumbing.EncodedObject, error) {
- if d.s.IsSeekable {
- if e, ok := d.idx.LookupHash(h); ok {
- return d.DecodeObjectAt(int64(e.Offset))
- }
- }
-
- return d.recallByHashNonSeekable(h)
-}
-
-// recallByHashNonSeekable reads the object from the transaction if one is
-// active; otherwise it is read directly from the ObjectStorer.
-func (d *Decoder) recallByHashNonSeekable(h plumbing.Hash) (obj plumbing.EncodedObject, err error) {
- if d.tx != nil {
- obj, err = d.tx.EncodedObject(plumbing.AnyObject, h)
- } else {
- obj, err = d.o.EncodedObject(plumbing.AnyObject, h)
- }
-
- if err != plumbing.ErrObjectNotFound {
- return obj, err
- }
-
- return nil, plumbing.ErrObjectNotFound
-}
-
-// SetIndex sets an index for the packfile. It is recommended to set this.
-// The index might be read from a file or reused from a previous Decoder usage
-// (see Index function).
-func (d *Decoder) SetIndex(idx *Index) {
- d.hasBuiltIndex = true
- d.idx = idx
-}
-
-// Index returns the index for the packfile. If the index was set with SetIndex,
-// Index will return it. Otherwise, it will return an index that is built while
-// decoding. If neither SetIndex was called with a full index nor Decode was
-// called for the whole packfile, the returned index will be incomplete.
-func (d *Decoder) Index() *Index {
- return d.idx
-}
-
-// Close closes the Scanner. Usually this means that the rest of the reader is
-// read and discarded.
-func (d *Decoder) Close() error {
- return d.s.Close()
-}
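
For reference, the removed Decoder was typically driven as follows. This is a
minimal sketch assembled from the deleted API above (NewScanner, NewDecoder,
Decode, Close) and the memory storage used by the deleted tests below; the
packfile path is a placeholder.

    package main

    import (
    	"fmt"
    	"os"

    	"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
    	"gopkg.in/src-d/go-git.v4/storage/memory"
    )

    func main() {
    	f, err := os.Open("objects.pack") // placeholder path
    	if err != nil {
    		panic(err)
    	}
    	defer f.Close()

    	storage := memory.NewStorage()
    	d, err := packfile.NewDecoder(packfile.NewScanner(f), storage)
    	if err != nil {
    		panic(err)
    	}
    	defer d.Close()

    	// Decode stores every object in storage and returns the pack checksum.
    	checksum, err := d.Decode()
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println("pack checksum:", checksum)
    }
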
diff --git a/plumbing/format/packfile/decoder_test.go b/plumbing/format/packfile/decoder_test.go
deleted file mode 100644
index b5bc7b7..0000000
--- a/plumbing/format/packfile/decoder_test.go
+++ /dev/null
@@ -1,396 +0,0 @@
-package packfile_test
-
-import (
- "io"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/cache"
- "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
- "gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
- "gopkg.in/src-d/go-git.v4/storage/filesystem"
- "gopkg.in/src-d/go-git.v4/storage/memory"
-
- . "gopkg.in/check.v1"
- "gopkg.in/src-d/go-billy.v4/memfs"
- "gopkg.in/src-d/go-git-fixtures.v3"
-)
-
-type ReaderSuite struct {
- fixtures.Suite
-}
-
-var _ = Suite(&ReaderSuite{})
-
-func (s *ReaderSuite) TestNewDecodeNonSeekable(c *C) {
- scanner := packfile.NewScanner(nil)
- d, err := packfile.NewDecoder(scanner, nil)
-
- c.Assert(d, IsNil)
- c.Assert(err, NotNil)
-}
-
-func (s *ReaderSuite) TestDecode(c *C) {
- fixtures.Basic().ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
- scanner := packfile.NewScanner(f.Packfile())
- storage := memory.NewStorage()
-
- d, err := packfile.NewDecoder(scanner, storage)
- c.Assert(err, IsNil)
- defer d.Close()
-
- ch, err := d.Decode()
- c.Assert(err, IsNil)
- c.Assert(ch, Equals, f.PackfileHash)
-
- assertObjects(c, storage, expectedHashes)
- })
-}
-
-func (s *ReaderSuite) TestDecodeByTypeRefDelta(c *C) {
- f := fixtures.Basic().ByTag("ref-delta").One()
-
- storage := memory.NewStorage()
- scanner := packfile.NewScanner(f.Packfile())
- d, err := packfile.NewDecoderForType(scanner, storage, plumbing.CommitObject,
- cache.NewObjectLRUDefault())
- c.Assert(err, IsNil)
-
- // Index required to decode by ref-delta.
- d.SetIndex(getIndexFromIdxFile(f.Idx()))
-
- defer d.Close()
-
- _, count, err := scanner.Header()
- c.Assert(err, IsNil)
-
- var i uint32
- for i = 0; i < count; i++ {
- obj, err := d.DecodeObject()
- c.Assert(err, IsNil)
-
- if obj != nil {
- c.Assert(obj.Type(), Equals, plumbing.CommitObject)
- }
- }
-}
-
-func (s *ReaderSuite) TestDecodeByTypeRefDeltaError(c *C) {
- fixtures.Basic().ByTag("ref-delta").Test(c, func(f *fixtures.Fixture) {
- storage := memory.NewStorage()
- scanner := packfile.NewScanner(f.Packfile())
- d, err := packfile.NewDecoderForType(scanner, storage,
- plumbing.CommitObject, cache.NewObjectLRUDefault())
- c.Assert(err, IsNil)
-
- defer d.Close()
-
- _, count, err := scanner.Header()
- c.Assert(err, IsNil)
-
- isError := false
- var i uint32
- for i = 0; i < count; i++ {
- _, err := d.DecodeObject()
- if err != nil {
- isError = true
- break
- }
- }
- c.Assert(isError, Equals, true)
- })
-}
-
-func (s *ReaderSuite) TestDecodeByType(c *C) {
- ts := []plumbing.ObjectType{
- plumbing.CommitObject,
- plumbing.TagObject,
- plumbing.TreeObject,
- plumbing.BlobObject,
- }
-
- fixtures.Basic().ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
- for _, t := range ts {
- storage := memory.NewStorage()
- scanner := packfile.NewScanner(f.Packfile())
- d, err := packfile.NewDecoderForType(scanner, storage, t,
- cache.NewObjectLRUDefault())
- c.Assert(err, IsNil)
-
- // when the packfile is ref-delta based, the offsets are required
- if f.Is("ref-delta") {
- d.SetIndex(getIndexFromIdxFile(f.Idx()))
- }
-
- defer d.Close()
-
- _, count, err := scanner.Header()
- c.Assert(err, IsNil)
-
- var i uint32
- for i = 0; i < count; i++ {
- obj, err := d.DecodeObject()
- c.Assert(err, IsNil)
-
- if obj != nil {
- c.Assert(obj.Type(), Equals, t)
- }
- }
- }
- })
-}
-
-func (s *ReaderSuite) TestDecodeByTypeConstructor(c *C) {
- f := fixtures.Basic().ByTag("packfile").One()
- storage := memory.NewStorage()
- scanner := packfile.NewScanner(f.Packfile())
-
- _, err := packfile.NewDecoderForType(scanner, storage,
- plumbing.OFSDeltaObject, cache.NewObjectLRUDefault())
- c.Assert(err, Equals, plumbing.ErrInvalidType)
-
- _, err = packfile.NewDecoderForType(scanner, storage,
- plumbing.REFDeltaObject, cache.NewObjectLRUDefault())
-
- c.Assert(err, Equals, plumbing.ErrInvalidType)
-
- _, err = packfile.NewDecoderForType(scanner, storage, plumbing.InvalidObject,
- cache.NewObjectLRUDefault())
- c.Assert(err, Equals, plumbing.ErrInvalidType)
-}
-
-func (s *ReaderSuite) TestDecodeMultipleTimes(c *C) {
- f := fixtures.Basic().ByTag("packfile").One()
- scanner := packfile.NewScanner(f.Packfile())
- storage := memory.NewStorage()
-
- d, err := packfile.NewDecoder(scanner, storage)
- c.Assert(err, IsNil)
- defer d.Close()
-
- ch, err := d.Decode()
- c.Assert(err, IsNil)
- c.Assert(ch, Equals, f.PackfileHash)
-
- ch, err = d.Decode()
- c.Assert(err, Equals, packfile.ErrAlreadyDecoded)
- c.Assert(ch, Equals, plumbing.ZeroHash)
-}
-
-func (s *ReaderSuite) TestDecodeInMemory(c *C) {
- fixtures.Basic().ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
- scanner := packfile.NewScanner(f.Packfile())
- d, err := packfile.NewDecoder(scanner, nil)
- c.Assert(err, IsNil)
-
- ch, err := d.Decode()
- c.Assert(err, IsNil)
- c.Assert(ch, Equals, f.PackfileHash)
- })
-}
-
-type nonSeekableReader struct {
- r io.Reader
-}
-
-func (nsr nonSeekableReader) Read(b []byte) (int, error) {
- return nsr.r.Read(b)
-}
-
-func (s *ReaderSuite) TestDecodeNoSeekableWithTxStorer(c *C) {
- fixtures.Basic().ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
- reader := nonSeekableReader{
- r: f.Packfile(),
- }
-
- scanner := packfile.NewScanner(reader)
-
- var storage storer.EncodedObjectStorer = memory.NewStorage()
- _, isTxStorer := storage.(storer.Transactioner)
- c.Assert(isTxStorer, Equals, true)
-
- d, err := packfile.NewDecoder(scanner, storage)
- c.Assert(err, IsNil)
- defer d.Close()
-
- ch, err := d.Decode()
- c.Assert(err, IsNil)
- c.Assert(ch, Equals, f.PackfileHash)
-
- assertObjects(c, storage, expectedHashes)
- })
-}
-
-func (s *ReaderSuite) TestDecodeNoSeekableWithoutTxStorer(c *C) {
- fixtures.Basic().ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
- reader := nonSeekableReader{
- r: f.Packfile(),
- }
-
- scanner := packfile.NewScanner(reader)
-
- var storage storer.EncodedObjectStorer
- storage, _ = filesystem.NewStorage(memfs.New())
- _, isTxStorer := storage.(storer.Transactioner)
- c.Assert(isTxStorer, Equals, false)
-
- d, err := packfile.NewDecoder(scanner, storage)
- c.Assert(err, IsNil)
- defer d.Close()
-
- ch, err := d.Decode()
- c.Assert(err, IsNil)
- c.Assert(ch, Equals, f.PackfileHash)
-
- assertObjects(c, storage, expectedHashes)
- })
-}
-
-var expectedHashes = []string{
- "918c48b83bd081e863dbe1b80f8998f058cd8294",
- "af2d6a6954d532f8ffb47615169c8fdf9d383a1a",
- "1669dce138d9b841a518c64b10914d88f5e488ea",
- "a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69",
- "b8e471f58bcbca63b07bda20e428190409c2db47",
- "35e85108805c84807bc66a02d91535e1e24b38b9",
- "b029517f6300c2da0f4b651b8642506cd6aaf45d",
- "32858aad3c383ed1ff0a0f9bdf231d54a00c9e88",
- "d3ff53e0564a9f87d8e84b6e28e5060e517008aa",
- "c192bd6a24ea1ab01d78686e417c8bdc7c3d197f",
- "d5c0f4ab811897cadf03aec358ae60d21f91c50d",
- "49c6bb89b17060d7b4deacb7b338fcc6ea2352a9",
- "cf4aa3b38974fb7d81f367c0830f7d78d65ab86b",
- "9dea2395f5403188298c1dabe8bdafe562c491e3",
- "586af567d0bb5e771e49bdd9434f5e0fb76d25fa",
- "9a48f23120e880dfbe41f7c9b7b708e9ee62a492",
- "5a877e6a906a2743ad6e45d99c1793642aaf8eda",
- "c8f1d8c61f9da76f4cb49fd86322b6e685dba956",
- "a8d315b2b1c615d43042c3a62402b8a54288cf5c",
- "a39771a7651f97faf5c72e08224d857fc35133db",
- "880cd14280f4b9b6ed3986d6671f907d7cc2a198",
- "fb72698cab7617ac416264415f13224dfd7a165e",
- "4d081c50e250fa32ea8b1313cf8bb7c2ad7627fd",
- "eba74343e2f15d62adedfd8c883ee0262b5c8021",
- "c2d30fa8ef288618f65f6eed6e168e0d514886f4",
- "8dcef98b1d52143e1e2dbc458ffe38f925786bf2",
- "aa9b383c260e1d05fbbf6b30a02914555e20c725",
- "6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
- "dbd3641b371024f44d0e469a9c8f5457b0660de1",
- "e8d3ffab552895c19b9fcf7aa264d277cde33881",
- "7e59600739c96546163833214c36459e324bad0a",
-}
-
-func (s *ReaderSuite) TestDecodeCRCs(c *C) {
- f := fixtures.Basic().ByTag("ofs-delta").One()
-
- scanner := packfile.NewScanner(f.Packfile())
- storage := memory.NewStorage()
-
- d, err := packfile.NewDecoder(scanner, storage)
- c.Assert(err, IsNil)
- _, err = d.Decode()
- c.Assert(err, IsNil)
-
- var sum uint64
- idx := d.Index().ToIdxFile()
- for _, e := range idx.Entries {
- sum += uint64(e.CRC32)
- }
-
- c.Assert(int(sum), Equals, 78022211966)
-}
-
-func (s *ReaderSuite) TestDecodeObjectAt(c *C) {
- f := fixtures.Basic().One()
- scanner := packfile.NewScanner(f.Packfile())
- d, err := packfile.NewDecoder(scanner, nil)
- c.Assert(err, IsNil)
-
- // when the packfile is ref-delta based, the offsets are required
- if f.Is("ref-delta") {
- d.SetIndex(getIndexFromIdxFile(f.Idx()))
- }
-
- // the object at offset 186 is a delta, so it should be recalled
- // without having been read before.
- obj, err := d.DecodeObjectAt(186)
- c.Assert(err, IsNil)
- c.Assert(obj.Hash().String(), Equals, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
-}
-
-func (s *ReaderSuite) TestDecodeObjectAtForType(c *C) {
- f := fixtures.Basic().One()
- scanner := packfile.NewScanner(f.Packfile())
- d, err := packfile.NewDecoderForType(scanner, nil, plumbing.TreeObject,
- cache.NewObjectLRUDefault())
- c.Assert(err, IsNil)
-
- // when the packfile is ref-delta based, the offsets are required
- if f.Is("ref-delta") {
- d.SetIndex(getIndexFromIdxFile(f.Idx()))
- }
-
- // the object at offset 186 is a delta, so it should be recalled
- // without having been read before.
- obj, err := d.DecodeObjectAt(186)
- c.Assert(err, IsNil)
- c.Assert(obj.Type(), Equals, plumbing.CommitObject)
- c.Assert(obj.Hash().String(), Equals, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
-}
-
-func (s *ReaderSuite) TestIndex(c *C) {
- f := fixtures.Basic().One()
- scanner := packfile.NewScanner(f.Packfile())
- d, err := packfile.NewDecoder(scanner, nil)
- c.Assert(err, IsNil)
-
- c.Assert(d.Index().ToIdxFile().Entries, HasLen, 0)
-
- _, err = d.Decode()
- c.Assert(err, IsNil)
-
- c.Assert(len(d.Index().ToIdxFile().Entries), Equals, 31)
-}
-
-func (s *ReaderSuite) TestSetIndex(c *C) {
- f := fixtures.Basic().One()
- scanner := packfile.NewScanner(f.Packfile())
- d, err := packfile.NewDecoder(scanner, nil)
- c.Assert(err, IsNil)
-
- idx := packfile.NewIndex(1)
- h := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
- idx.Add(h, uint64(42), 0)
- d.SetIndex(idx)
-
- idxf := d.Index().ToIdxFile()
- c.Assert(idxf.Entries, HasLen, 1)
- c.Assert(idxf.Entries[0].Offset, Equals, uint64(42))
-}
-
-func assertObjects(c *C, s storer.EncodedObjectStorer, expects []string) {
-
- i, err := s.IterEncodedObjects(plumbing.AnyObject)
- c.Assert(err, IsNil)
-
- var count int
- err = i.ForEach(func(plumbing.EncodedObject) error { count++; return nil })
- c.Assert(err, IsNil)
- c.Assert(count, Equals, len(expects))
-
- for _, exp := range expects {
- obt, err := s.EncodedObject(plumbing.AnyObject, plumbing.NewHash(exp))
- c.Assert(err, IsNil)
- c.Assert(obt.Hash().String(), Equals, exp)
- }
-}
-
-func getIndexFromIdxFile(r io.Reader) *packfile.Index {
- idxf := idxfile.NewIdxfile()
- d := idxfile.NewDecoder(r)
- if err := d.Decode(idxf); err != nil {
- panic(err)
- }
-
- return packfile.NewIndexFromIdxFile(idxf)
-}
diff --git a/plumbing/format/packfile/delta_selector.go b/plumbing/format/packfile/delta_selector.go
index cd38c16..6710085 100644
--- a/plumbing/format/packfile/delta_selector.go
+++ b/plumbing/format/packfile/delta_selector.go
@@ -103,7 +103,7 @@ func (dw *deltaSelector) objectsToPack(
otp := newObjectToPack(o)
if _, ok := o.(plumbing.DeltaObject); ok {
- otp.Original = nil
+ otp.CleanOriginal()
}
objectsToPack = append(objectsToPack, otp)
@@ -196,7 +196,8 @@ func (dw *deltaSelector) restoreOriginal(otp *ObjectToPack) error {
return err
}
- otp.Original = obj
+ otp.SetOriginal(obj)
+
return nil
}
@@ -230,7 +231,8 @@ func (dw *deltaSelector) walk(
delete(indexMap, obj.Hash())
if obj.IsDelta() {
- obj.Original = nil
+ obj.SaveOriginalMetadata()
+ obj.CleanOriginal()
}
}
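
The accessors introduced by these hunks (SetOriginal, SaveOriginalMetadata,
CleanOriginal) are defined in object_pack.go later in this diff. A minimal
sketch of the contract they establish, inside the packfile package, with obj
standing in for any plumbing.EncodedObject:

    otp := newObjectToPack(obj)
    otp.SetOriginal(obj) // also snapshots type, size and hash
    otp.CleanOriginal()  // drops the reference so the payload can be freed
    _ = otp.Hash()       // still answered from the saved metadata
    _ = otp.Size()       // likewise
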
diff --git a/plumbing/format/packfile/delta_test.go b/plumbing/format/packfile/delta_test.go
index 42b777a..98f53f6 100644
--- a/plumbing/format/packfile/delta_test.go
+++ b/plumbing/format/packfile/delta_test.go
@@ -62,7 +62,7 @@ func (s *DeltaSuite) SetUpSuite(c *C) {
target: []piece{{"1", 30}, {"2", 20}, {"7", 40}, {"4", 400},
{"5", 10}},
}, {
- description: "A copy operation bigger tan 64kb",
+ description: "A copy operation bigger than 64kb",
base: []piece{{bigRandStr, 1}, {"1", 200}},
target: []piece{{bigRandStr, 1}},
}}
@@ -72,12 +72,16 @@ var bigRandStr = randStringBytes(100 * 1024)
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
-func randStringBytes(n int) string {
+func randBytes(n int) []byte {
b := make([]byte, n)
for i := range b {
b[i] = letterBytes[rand.Intn(len(letterBytes))]
}
- return string(b)
+ return b
+}
+
+func randStringBytes(n int) string {
+ return string(randBytes(n))
}
func (s *DeltaSuite) TestAddDelta(c *C) {
@@ -110,3 +114,14 @@ func (s *DeltaSuite) TestIncompleteDelta(c *C) {
c.Assert(err, NotNil)
c.Assert(result, IsNil)
}
+
+func (s *DeltaSuite) TestMaxCopySizeDelta(c *C) {
+ baseBuf := randBytes(maxCopySize)
+ targetBuf := baseBuf[0:]
+ targetBuf = append(targetBuf, byte(1))
+
+ delta := DiffDelta(baseBuf, targetBuf)
+ result, err := PatchDelta(baseBuf, delta)
+ c.Assert(err, IsNil)
+ c.Assert(result, DeepEquals, targetBuf)
+}
diff --git a/plumbing/format/packfile/diff_delta.go b/plumbing/format/packfile/diff_delta.go
index 4d56dc1..d35e78a 100644
--- a/plumbing/format/packfile/diff_delta.go
+++ b/plumbing/format/packfile/diff_delta.go
@@ -111,7 +111,7 @@ func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte {
rl := l
aOffset := offset
- for {
+ for rl > 0 {
if rl < maxCopySize {
buf.Write(encodeCopyOperation(aOffset, rl))
break
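
The new guard matters when the copy length is an exact multiple of maxCopySize
(64KB here): the unconditional for loop would iterate once more and emit a
zero-length copy operation. A self-contained sketch of the chunking logic;
chunkCopy is a hypothetical helper, not part of the package:

    // chunkCopy splits a copy of length l into (offset, size) operations of at
    // most max bytes each. With the `for l > 0` guard, l == n*max produces
    // exactly n operations and never a trailing (offset, 0).
    func chunkCopy(offset, l, max int) [][2]int {
    	var ops [][2]int
    	for l > 0 {
    		n := l
    		if n > max {
    			n = max
    		}
    		ops = append(ops, [2]int{offset, n})
    		offset += n
    		l -= n
    	}
    	return ops
    }
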
diff --git a/plumbing/format/packfile/encoder.go b/plumbing/format/packfile/encoder.go
index 6686dd5..b077918 100644
--- a/plumbing/format/packfile/encoder.go
+++ b/plumbing/format/packfile/encoder.go
@@ -87,6 +87,7 @@ func (e *Encoder) entry(o *ObjectToPack) error {
// (for example due to a concurrent repack) and a different base
// was chosen, forcing a cycle. Select something other than a
// delta, and write this object.
+ e.selector.restoreOriginal(o)
o.BackToOriginal()
}
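
This ordering depends on the guard added to BackToOriginal in object_pack.go
later in this diff: BackToOriginal is now a no-op when Original is nil, so a
delta whose payload was released during selection must be re-read from storage
first. Condensed, with error handling elided as in the hunk itself:

    e.selector.restoreOriginal(o) // re-reads the object the saved metadata points at
    o.BackToOriginal()            // only effective once o.Original != nil again
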
diff --git a/plumbing/format/packfile/encoder_advanced_test.go b/plumbing/format/packfile/encoder_advanced_test.go
index 1075875..e15126e 100644
--- a/plumbing/format/packfile/encoder_advanced_test.go
+++ b/plumbing/format/packfile/encoder_advanced_test.go
@@ -2,13 +2,17 @@ package packfile_test
import (
"bytes"
+ "io"
"math/rand"
+ "testing"
+ "gopkg.in/src-d/go-billy.v4/memfs"
"gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/cache"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
. "gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/storage/filesystem"
- "gopkg.in/src-d/go-git.v4/storage/memory"
. "gopkg.in/check.v1"
"gopkg.in/src-d/go-git-fixtures.v3"
@@ -21,30 +25,38 @@ type EncoderAdvancedSuite struct {
var _ = Suite(&EncoderAdvancedSuite{})
func (s *EncoderAdvancedSuite) TestEncodeDecode(c *C) {
+ if testing.Short() {
+ c.Skip("skipping test in short mode.")
+ }
+
fixs := fixtures.Basic().ByTag("packfile").ByTag(".git")
fixs = append(fixs, fixtures.ByURL("https://github.com/src-d/go-git.git").
ByTag("packfile").ByTag(".git").One())
fixs.Test(c, func(f *fixtures.Fixture) {
- storage, err := filesystem.NewStorage(f.DotGit())
- c.Assert(err, IsNil)
+ storage := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault())
s.testEncodeDecode(c, storage, 10)
})
-
}
func (s *EncoderAdvancedSuite) TestEncodeDecodeNoDeltaCompression(c *C) {
+ if testing.Short() {
+ c.Skip("skipping test in short mode.")
+ }
+
fixs := fixtures.Basic().ByTag("packfile").ByTag(".git")
fixs = append(fixs, fixtures.ByURL("https://github.com/src-d/go-git.git").
ByTag("packfile").ByTag(".git").One())
fixs.Test(c, func(f *fixtures.Fixture) {
- storage, err := filesystem.NewStorage(f.DotGit())
- c.Assert(err, IsNil)
+ storage := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault())
s.testEncodeDecode(c, storage, 0)
})
}
-func (s *EncoderAdvancedSuite) testEncodeDecode(c *C, storage storer.Storer, packWindow uint) {
-
+func (s *EncoderAdvancedSuite) testEncodeDecode(
+ c *C,
+ storage storer.Storer,
+ packWindow uint,
+) {
objIter, err := storage.IterEncodedObjects(plumbing.AnyObject)
c.Assert(err, IsNil)
@@ -71,16 +83,35 @@ func (s *EncoderAdvancedSuite) testEncodeDecode(c *C, storage storer.Storer, pac
encodeHash, err := enc.Encode(hashes, packWindow)
c.Assert(err, IsNil)
- scanner := NewScanner(buf)
- storage = memory.NewStorage()
- d, err := NewDecoder(scanner, storage)
+ fs := memfs.New()
+ f, err := fs.Create("packfile")
+ c.Assert(err, IsNil)
+
+ _, err = f.Write(buf.Bytes())
+ c.Assert(err, IsNil)
+
+ _, err = f.Seek(0, io.SeekStart)
+ c.Assert(err, IsNil)
+
+ w := new(idxfile.Writer)
+ parser, err := NewParser(NewScanner(f), w)
+ c.Assert(err, IsNil)
+
+ _, err = parser.Parse()
c.Assert(err, IsNil)
- decodeHash, err := d.Decode()
+ index, err := w.Index()
c.Assert(err, IsNil)
+ _, err = f.Seek(0, io.SeekStart)
+ c.Assert(err, IsNil)
+
+ p := NewPackfile(index, fs, f)
+
+ decodeHash, err := p.ID()
+ c.Assert(err, IsNil)
c.Assert(encodeHash, Equals, decodeHash)
- objIter, err = storage.IterEncodedObjects(plumbing.AnyObject)
+ objIter, err = p.GetAll()
c.Assert(err, IsNil)
obtainedObjects := map[plumbing.Hash]bool{}
err = objIter.ForEach(func(o plumbing.EncodedObject) error {
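
The decode side of this round trip is now a two-step pipeline: a Parser driven
with an idxfile.Writer validates the pack and produces the index that
NewPackfile needs. A condensed sketch of the sequence used above; openPack is a
hypothetical helper and f is any seekable billy.File containing a packfile:

    func openPack(fs billy.Filesystem, f billy.File) (*packfile.Packfile, error) {
    	w := new(idxfile.Writer)
    	parser, err := packfile.NewParser(packfile.NewScanner(f), w)
    	if err != nil {
    		return nil, err
    	}
    	if _, err := parser.Parse(); err != nil {
    		return nil, err
    	}
    	index, err := w.Index() // the idxfile.Index built during the parse
    	if err != nil {
    		return nil, err
    	}
    	if _, err := f.Seek(0, io.SeekStart); err != nil { // rewind for reading
    		return nil, err
    	}
    	return packfile.NewPackfile(index, fs, f), nil
    }
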
diff --git a/plumbing/format/packfile/encoder_test.go b/plumbing/format/packfile/encoder_test.go
index 320036b..80b916d 100644
--- a/plumbing/format/packfile/encoder_test.go
+++ b/plumbing/format/packfile/encoder_test.go
@@ -2,8 +2,12 @@ package packfile
import (
"bytes"
+ "io"
+ stdioutil "io/ioutil"
+ "gopkg.in/src-d/go-billy.v4/memfs"
"gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
"gopkg.in/src-d/go-git.v4/storage/memory"
. "gopkg.in/check.v1"
@@ -130,24 +134,20 @@ func (s *EncoderSuite) simpleDeltaTest(c *C) {
})
c.Assert(err, IsNil)
- scanner := NewScanner(s.buf)
-
- storage := memory.NewStorage()
- d, err := NewDecoder(scanner, storage)
- c.Assert(err, IsNil)
-
- decHash, err := d.Decode()
+ p, cleanup := packfileFromReader(c, s.buf)
+ defer cleanup()
+ decHash, err := p.ID()
c.Assert(err, IsNil)
c.Assert(encHash, Equals, decHash)
- decSrc, err := storage.EncodedObject(srcObject.Type(), srcObject.Hash())
+ decSrc, err := p.Get(srcObject.Hash())
c.Assert(err, IsNil)
- c.Assert(decSrc, DeepEquals, srcObject)
+ objectsEqual(c, decSrc, srcObject)
- decTarget, err := storage.EncodedObject(targetObject.Type(), targetObject.Hash())
+ decTarget, err := p.Get(targetObject.Hash())
c.Assert(err, IsNil)
- c.Assert(decTarget, DeepEquals, targetObject)
+ objectsEqual(c, decTarget, targetObject)
}
func (s *EncoderSuite) deltaOverDeltaTest(c *C) {
@@ -173,27 +173,24 @@ func (s *EncoderSuite) deltaOverDeltaTest(c *C) {
})
c.Assert(err, IsNil)
- scanner := NewScanner(s.buf)
- storage := memory.NewStorage()
- d, err := NewDecoder(scanner, storage)
- c.Assert(err, IsNil)
-
- decHash, err := d.Decode()
+ p, cleanup := packfileFromReader(c, s.buf)
+ defer cleanup()
+ decHash, err := p.ID()
c.Assert(err, IsNil)
c.Assert(encHash, Equals, decHash)
- decSrc, err := storage.EncodedObject(srcObject.Type(), srcObject.Hash())
+ decSrc, err := p.Get(srcObject.Hash())
c.Assert(err, IsNil)
- c.Assert(decSrc, DeepEquals, srcObject)
+ objectsEqual(c, decSrc, srcObject)
- decTarget, err := storage.EncodedObject(targetObject.Type(), targetObject.Hash())
+ decTarget, err := p.Get(targetObject.Hash())
c.Assert(err, IsNil)
- c.Assert(decTarget, DeepEquals, targetObject)
+ objectsEqual(c, decTarget, targetObject)
- decOtherTarget, err := storage.EncodedObject(otherTargetObject.Type(), otherTargetObject.Hash())
+ decOtherTarget, err := p.Get(otherTargetObject.Hash())
c.Assert(err, IsNil)
- c.Assert(decOtherTarget, DeepEquals, otherTargetObject)
+ objectsEqual(c, decOtherTarget, otherTargetObject)
}
func (s *EncoderSuite) deltaOverDeltaCyclicTest(c *C) {
@@ -202,6 +199,15 @@ func (s *EncoderSuite) deltaOverDeltaCyclicTest(c *C) {
o3 := newObject(plumbing.BlobObject, []byte("011111"))
o4 := newObject(plumbing.BlobObject, []byte("01111100000"))
+ _, err := s.store.SetEncodedObject(o1)
+ c.Assert(err, IsNil)
+ _, err = s.store.SetEncodedObject(o2)
+ c.Assert(err, IsNil)
+ _, err = s.store.SetEncodedObject(o3)
+ c.Assert(err, IsNil)
+ _, err = s.store.SetEncodedObject(o4)
+ c.Assert(err, IsNil)
+
d2, err := GetDelta(o1, o2)
c.Assert(err, IsNil)
@@ -219,6 +225,18 @@ func (s *EncoderSuite) deltaOverDeltaCyclicTest(c *C) {
pd3.SetDelta(pd4, d3)
pd4.SetDelta(pd3, d4)
+ // SetOriginal is used by the delta selector when generating ObjectToPack
+ // instances. It also fills the type, hash and size values that are used
+ // when Original is nil.
+ po1.SetOriginal(po1.Original)
+ pd2.SetOriginal(pd2.Original)
+ pd2.CleanOriginal()
+
+ pd3.SetOriginal(pd3.Original)
+ pd3.CleanOriginal()
+
+ pd4.SetOriginal(pd4.Original)
+
encHash, err := s.enc.encode([]*ObjectToPack{
po1,
pd2,
@@ -227,29 +245,74 @@ func (s *EncoderSuite) deltaOverDeltaCyclicTest(c *C) {
})
c.Assert(err, IsNil)
- scanner := NewScanner(s.buf)
- storage := memory.NewStorage()
- d, err := NewDecoder(scanner, storage)
+ p, cleanup := packfileFromReader(c, s.buf)
+ defer cleanup()
+ decHash, err := p.ID()
c.Assert(err, IsNil)
- decHash, err := d.Decode()
+ c.Assert(encHash, Equals, decHash)
+
+ decSrc, err := p.Get(o1.Hash())
c.Assert(err, IsNil)
+ objectsEqual(c, decSrc, o1)
- c.Assert(encHash, Equals, decHash)
+ decTarget, err := p.Get(o2.Hash())
+ c.Assert(err, IsNil)
+ objectsEqual(c, decTarget, o2)
- decSrc, err := storage.EncodedObject(o1.Type(), o1.Hash())
+ decOtherTarget, err := p.Get(o3.Hash())
c.Assert(err, IsNil)
- c.Assert(decSrc, DeepEquals, o1)
+ objectsEqual(c, decOtherTarget, o3)
- decTarget, err := storage.EncodedObject(o2.Type(), o2.Hash())
+ decAnotherTarget, err := p.Get(o4.Hash())
c.Assert(err, IsNil)
- c.Assert(decTarget, DeepEquals, o2)
+ objectsEqual(c, decAnotherTarget, o4)
+}
- decOtherTarget, err := storage.EncodedObject(o3.Type(), o3.Hash())
+func objectsEqual(c *C, o1, o2 plumbing.EncodedObject) {
+ c.Assert(o1.Type(), Equals, o2.Type())
+ c.Assert(o1.Hash(), Equals, o2.Hash())
+ c.Assert(o1.Size(), Equals, o2.Size())
+
+ r1, err := o1.Reader()
c.Assert(err, IsNil)
- c.Assert(decOtherTarget, DeepEquals, o3)
- decAnotherTarget, err := storage.EncodedObject(o4.Type(), o4.Hash())
+ b1, err := stdioutil.ReadAll(r1)
c.Assert(err, IsNil)
- c.Assert(decAnotherTarget, DeepEquals, o4)
+
+ r2, err := o2.Reader()
+ c.Assert(err, IsNil)
+
+ b2, err := stdioutil.ReadAll(r2)
+ c.Assert(err, IsNil)
+
+ c.Assert(bytes.Compare(b1, b2), Equals, 0)
+}
+
+func packfileFromReader(c *C, buf *bytes.Buffer) (*Packfile, func()) {
+ fs := memfs.New()
+ file, err := fs.Create("packfile")
+ c.Assert(err, IsNil)
+
+ _, err = file.Write(buf.Bytes())
+ c.Assert(err, IsNil)
+
+ _, err = file.Seek(0, io.SeekStart)
+ c.Assert(err, IsNil)
+
+ scanner := NewScanner(file)
+
+ w := new(idxfile.Writer)
+ p, err := NewParser(scanner, w)
+ c.Assert(err, IsNil)
+
+ _, err = p.Parse()
+ c.Assert(err, IsNil)
+
+ index, err := w.Index()
+ c.Assert(err, IsNil)
+
+ return NewPackfile(index, fs, file), func() {
+ c.Assert(file.Close(), IsNil)
+ }
}
diff --git a/plumbing/format/packfile/fsobject.go b/plumbing/format/packfile/fsobject.go
new file mode 100644
index 0000000..330cb73
--- /dev/null
+++ b/plumbing/format/packfile/fsobject.go
@@ -0,0 +1,116 @@
+package packfile
+
+import (
+ "io"
+
+ billy "gopkg.in/src-d/go-billy.v4"
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/cache"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
+)
+
+// FSObject is an object from the packfile on the filesystem.
+type FSObject struct {
+ hash plumbing.Hash
+ h *ObjectHeader
+ offset int64
+ size int64
+ typ plumbing.ObjectType
+ index idxfile.Index
+ fs billy.Filesystem
+ path string
+ cache cache.Object
+}
+
+// NewFSObject creates a new filesystem object.
+func NewFSObject(
+ hash plumbing.Hash,
+ finalType plumbing.ObjectType,
+ offset int64,
+ contentSize int64,
+ index idxfile.Index,
+ fs billy.Filesystem,
+ path string,
+ cache cache.Object,
+) *FSObject {
+ return &FSObject{
+ hash: hash,
+ offset: offset,
+ size: contentSize,
+ typ: finalType,
+ index: index,
+ fs: fs,
+ path: path,
+ cache: cache,
+ }
+}
+
+// Reader implements the plumbing.EncodedObject interface.
+func (o *FSObject) Reader() (io.ReadCloser, error) {
+ obj, ok := o.cache.Get(o.hash)
+ if ok {
+ reader, err := obj.Reader()
+ if err != nil {
+ return nil, err
+ }
+
+ return reader, nil
+ }
+
+ f, err := o.fs.Open(o.path)
+ if err != nil {
+ return nil, err
+ }
+
+ p := NewPackfileWithCache(o.index, nil, f, o.cache)
+ r, err := p.getObjectContent(o.offset)
+ if err != nil {
+ _ = f.Close()
+ return nil, err
+ }
+
+ if err := f.Close(); err != nil {
+ return nil, err
+ }
+
+ return r, nil
+}
+
+// SetSize implements the plumbing.EncodedObject interface. This method
+// is a noop.
+func (o *FSObject) SetSize(int64) {}
+
+// SetType implements the plumbing.EncodedObject interface. This method is
+// a noop.
+func (o *FSObject) SetType(plumbing.ObjectType) {}
+
+// Hash implements the plumbing.EncodedObject interface.
+func (o *FSObject) Hash() plumbing.Hash { return o.hash }
+
+// Size implements the plumbing.EncodedObject interface.
+func (o *FSObject) Size() int64 { return o.size }
+
+// Type implements the plumbing.EncodedObject interface.
+func (o *FSObject) Type() plumbing.ObjectType {
+ return o.typ
+}
+
+// Writer implements the plumbing.EncodedObject interface. This method always
+// returns a nil writer.
+func (o *FSObject) Writer() (io.WriteCloser, error) {
+ return nil, nil
+}
+
+type objectReader struct {
+ io.ReadCloser
+ f billy.File
+}
+
+func (r *objectReader) Close() error {
+ if err := r.ReadCloser.Close(); err != nil {
+ _ = r.f.Close()
+ return err
+ }
+
+ return r.f.Close()
+}
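
FSObject is deliberately lazy: it carries only hash, type, size and offset, and
Reader reopens the pack on demand (checking the shared cache first), so holding
many FSObjects stays cheap. A usage sketch, assuming p is a *packfile.Packfile
created with a filesystem so that its getters return FSObjects:

    func readOne(p *packfile.Packfile) ([]byte, error) {
    	obj, err := p.GetByOffset(186) // offset of a commit in the basic fixture
    	if err != nil {
    		return nil, err
    	}

    	r, err := obj.Reader() // opens the pack and inflates just this object
    	if err != nil {
    		return nil, err
    	}
    	defer r.Close()

    	return ioutil.ReadAll(r) // "io/ioutil"
    }
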
diff --git a/plumbing/format/packfile/index.go b/plumbing/format/packfile/index.go
deleted file mode 100644
index 2c5f98f..0000000
--- a/plumbing/format/packfile/index.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package packfile
-
-import (
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
-)
-
-// Index is an in-memory representation of a packfile index.
-// This uses idxfile.Idxfile under the hood to obtain indexes from .idx files
-// or to store them.
-type Index struct {
- byHash map[plumbing.Hash]*idxfile.Entry
- byOffset map[uint64]*idxfile.Entry
-}
-
-// NewIndex creates a new empty index with the given size. Size is a hint and
-// can be 0. It is recommended to set it to the number of objects to be indexed
-// if it is known beforehand (e.g. reading from a packfile).
-func NewIndex(size int) *Index {
- return &Index{
- byHash: make(map[plumbing.Hash]*idxfile.Entry, size),
- byOffset: make(map[uint64]*idxfile.Entry, size),
- }
-}
-
-// NewIndexFromIdxFile creates a new Index from an idxfile.Idxfile.
-func NewIndexFromIdxFile(idxf *idxfile.Idxfile) *Index {
- idx := &Index{
- byHash: make(map[plumbing.Hash]*idxfile.Entry, idxf.ObjectCount),
- byOffset: make(map[uint64]*idxfile.Entry, idxf.ObjectCount),
- }
- for _, e := range idxf.Entries {
- idx.add(e)
- }
-
- return idx
-}
-
-// Add adds a new Entry with the given values to the index.
-func (idx *Index) Add(h plumbing.Hash, offset uint64, crc32 uint32) {
- e := idxfile.Entry{
- Hash: h,
- Offset: offset,
- CRC32: crc32,
- }
- idx.add(&e)
-}
-
-func (idx *Index) add(e *idxfile.Entry) {
- idx.byHash[e.Hash] = e
- idx.byOffset[e.Offset] = e
-}
-
-// LookupHash looks an entry up by its hash. An idxfile.Entry is returned and
-// a bool, which is true if it was found or false if it wasn't.
-func (idx *Index) LookupHash(h plumbing.Hash) (*idxfile.Entry, bool) {
- e, ok := idx.byHash[h]
- return e, ok
-}
-
-// LookupOffset looks an entry up by its offset in the packfile. An idxfile.Entry
-// is returned and a bool, which is true if it was found or false if it wasn't.
-func (idx *Index) LookupOffset(offset uint64) (*idxfile.Entry, bool) {
- e, ok := idx.byOffset[offset]
- return e, ok
-}
-
-// Size returns the number of entries in the index.
-func (idx *Index) Size() int {
- return len(idx.byHash)
-}
-
-// ToIdxFile converts the index to an idxfile.Idxfile, which can then be used
-// to serialize.
-func (idx *Index) ToIdxFile() *idxfile.Idxfile {
- idxf := idxfile.NewIdxfile()
- for _, e := range idx.byHash {
- idxf.Entries = append(idxf.Entries, e)
- }
-
- return idxf
-}
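
The removed packfile.Index maps onto the idxfile package as follows; a rough
correspondence under this diff's API, where idxfile.MemoryIndex answers lookups
and idxfile.Writer builds an index while parsing:

    func lookups(r io.Reader, h plumbing.Hash) error {
    	idx := idxfile.NewMemoryIndex()
    	if err := idxfile.NewDecoder(r).Decode(idx); err != nil { // was NewIndexFromIdxFile
    		return err
    	}

    	offset, err := idx.FindOffset(h) // was LookupHash
    	if err != nil {
    		return err
    	}

    	if _, err := idx.FindHash(offset); err != nil { // was LookupOffset
    		return err
    	}

    	_, err = idx.Count() // was Size
    	return err
    }
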
diff --git a/plumbing/format/packfile/index_test.go b/plumbing/format/packfile/index_test.go
deleted file mode 100644
index 6714704..0000000
--- a/plumbing/format/packfile/index_test.go
+++ /dev/null
@@ -1,122 +0,0 @@
-package packfile
-
-import (
- "strconv"
- "strings"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
-
- . "gopkg.in/check.v1"
-)
-
-type IndexSuite struct{}
-
-var _ = Suite(&IndexSuite{})
-
-func (s *IndexSuite) TestLookupOffset(c *C) {
- idx := NewIndex(0)
-
- for o1 := 0; o1 < 10000; o1 += 100 {
- for o2 := 0; o2 < 10000; o2 += 100 {
- if o2 >= o1 {
- e, ok := idx.LookupOffset(uint64(o2))
- c.Assert(ok, Equals, false)
- c.Assert(e, IsNil)
- } else {
- e, ok := idx.LookupOffset(uint64(o2))
- c.Assert(ok, Equals, true)
- c.Assert(e, NotNil)
- c.Assert(e.Hash, Equals, s.toHash(o2))
- c.Assert(e.Offset, Equals, uint64(o2))
- }
- }
-
- h1 := s.toHash(o1)
- idx.Add(h1, uint64(o1), 0)
-
- for o2 := 0; o2 < 10000; o2 += 100 {
- if o2 > o1 {
- e, ok := idx.LookupOffset(uint64(o2))
- c.Assert(ok, Equals, false)
- c.Assert(e, IsNil)
- } else {
- e, ok := idx.LookupOffset(uint64(o2))
- c.Assert(ok, Equals, true)
- c.Assert(e, NotNil)
- c.Assert(e.Hash, Equals, s.toHash(o2))
- c.Assert(e.Offset, Equals, uint64(o2))
- }
- }
- }
-}
-
-func (s *IndexSuite) TestLookupHash(c *C) {
- idx := NewIndex(0)
-
- for o1 := 0; o1 < 10000; o1 += 100 {
- for o2 := 0; o2 < 10000; o2 += 100 {
- if o2 >= o1 {
- e, ok := idx.LookupHash(s.toHash(o2))
- c.Assert(ok, Equals, false)
- c.Assert(e, IsNil)
- } else {
- e, ok := idx.LookupHash(s.toHash(o2))
- c.Assert(ok, Equals, true)
- c.Assert(e, NotNil)
- c.Assert(e.Hash, Equals, s.toHash(o2))
- c.Assert(e.Offset, Equals, uint64(o2))
- }
- }
-
- h1 := s.toHash(o1)
- idx.Add(h1, uint64(o1), 0)
-
- for o2 := 0; o2 < 10000; o2 += 100 {
- if o2 > o1 {
- e, ok := idx.LookupHash(s.toHash(o2))
- c.Assert(ok, Equals, false)
- c.Assert(e, IsNil)
- } else {
- e, ok := idx.LookupHash(s.toHash(o2))
- c.Assert(ok, Equals, true)
- c.Assert(e, NotNil)
- c.Assert(e.Hash, Equals, s.toHash(o2))
- c.Assert(e.Offset, Equals, uint64(o2))
- }
- }
- }
-}
-
-func (s *IndexSuite) TestSize(c *C) {
- idx := NewIndex(0)
-
- for o1 := 0; o1 < 1000; o1++ {
- c.Assert(idx.Size(), Equals, o1)
- h1 := s.toHash(o1)
- idx.Add(h1, uint64(o1), 0)
- }
-}
-
-func (s *IndexSuite) TestIdxFileEmpty(c *C) {
- idx := NewIndex(0)
- idxf := idx.ToIdxFile()
- idx2 := NewIndexFromIdxFile(idxf)
- c.Assert(idx, DeepEquals, idx2)
-}
-
-func (s *IndexSuite) TestIdxFile(c *C) {
- idx := NewIndex(0)
- for o1 := 0; o1 < 1000; o1++ {
- h1 := s.toHash(o1)
- idx.Add(h1, uint64(o1), 0)
- }
-
- idx2 := NewIndexFromIdxFile(idx.ToIdxFile())
- c.Assert(idx, DeepEquals, idx2)
-}
-
-func (s *IndexSuite) toHash(i int) plumbing.Hash {
- is := strconv.Itoa(i)
- padding := strings.Repeat("a", 40-len(is))
- return plumbing.NewHash(padding + is)
-}
diff --git a/plumbing/format/packfile/object_pack.go b/plumbing/format/packfile/object_pack.go
index 1563517..dfea571 100644
--- a/plumbing/format/packfile/object_pack.go
+++ b/plumbing/format/packfile/object_pack.go
@@ -23,6 +23,12 @@ type ObjectToPack struct {
// offset in pack when object has been already written, or 0 if it
// has not been written yet
Offset int64
+
+ // Information from the original object
+ resolvedOriginal bool
+ originalType plumbing.ObjectType
+ originalSize int64
+ originalHash plumbing.Hash
}
// newObjectToPack creates a correct ObjectToPack based on a non-delta object
@@ -47,7 +53,7 @@ func newDeltaObjectToPack(base *ObjectToPack, original, delta plumbing.EncodedOb
// BackToOriginal converts that ObjectToPack to a non-deltified object if it was one
func (o *ObjectToPack) BackToOriginal() {
- if o.IsDelta() {
+ if o.IsDelta() && o.Original != nil {
o.Object = o.Original
o.Base = nil
o.Depth = 0
@@ -71,11 +77,37 @@ func (o *ObjectToPack) WantWrite() bool {
return o.Offset == 1
}
+// SetOriginal sets Original and saves its size, type and hash. If the object
+// is nil, Original is set to nil but the previously resolved values are kept.
+func (o *ObjectToPack) SetOriginal(obj plumbing.EncodedObject) {
+ o.Original = obj
+ o.SaveOriginalMetadata()
+}
+
+// SaveOriginalMetadata saves size, type and hash of Original object
+func (o *ObjectToPack) SaveOriginalMetadata() {
+ if o.Original != nil {
+ o.originalSize = o.Original.Size()
+ o.originalType = o.Original.Type()
+ o.originalHash = o.Original.Hash()
+ o.resolvedOriginal = true
+ }
+}
+
+// CleanOriginal sets Original to nil
+func (o *ObjectToPack) CleanOriginal() {
+ o.Original = nil
+}
+
func (o *ObjectToPack) Type() plumbing.ObjectType {
if o.Original != nil {
return o.Original.Type()
}
+ if o.resolvedOriginal {
+ return o.originalType
+ }
+
if o.Base != nil {
return o.Base.Type()
}
@@ -92,6 +124,10 @@ func (o *ObjectToPack) Hash() plumbing.Hash {
return o.Original.Hash()
}
+ if o.resolvedOriginal {
+ return o.originalHash
+ }
+
do, ok := o.Object.(plumbing.DeltaObject)
if ok {
return do.ActualHash()
@@ -105,6 +141,10 @@ func (o *ObjectToPack) Size() int64 {
return o.Original.Size()
}
+ if o.resolvedOriginal {
+ return o.originalSize
+ }
+
do, ok := o.Object.(plumbing.DeltaObject)
if ok {
return do.ActualSize()
diff --git a/plumbing/format/packfile/packfile.go b/plumbing/format/packfile/packfile.go
new file mode 100644
index 0000000..852a834
--- /dev/null
+++ b/plumbing/format/packfile/packfile.go
@@ -0,0 +1,520 @@
+package packfile
+
+import (
+ "bytes"
+ "io"
+ "os"
+
+ billy "gopkg.in/src-d/go-billy.v4"
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/cache"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
+ "gopkg.in/src-d/go-git.v4/plumbing/storer"
+)
+
+var (
+ // ErrInvalidObject is returned by Decode when an invalid object is
+ // found in the packfile.
+ ErrInvalidObject = NewError("invalid git object")
+ // ErrZLib is returned by Decode when there was an error unzipping
+ // the packfile contents.
+ ErrZLib = NewError("zlib reading error")
+)
+
+// Packfile allows retrieving information from inside a packfile.
+type Packfile struct {
+ idxfile.Index
+ fs billy.Filesystem
+ file billy.File
+ s *Scanner
+ deltaBaseCache cache.Object
+ offsetToType map[int64]plumbing.ObjectType
+}
+
+// NewPackfileWithCache creates a new Packfile with the given object cache.
+// If the filesystem is provided, the packfile will return FSObjects, otherwise
+// it will return MemoryObjects.
+func NewPackfileWithCache(
+ index idxfile.Index,
+ fs billy.Filesystem,
+ file billy.File,
+ cache cache.Object,
+) *Packfile {
+ s := NewScanner(file)
+ return &Packfile{
+ index,
+ fs,
+ file,
+ s,
+ cache,
+ make(map[int64]plumbing.ObjectType),
+ }
+}
+
+// NewPackfile returns a packfile representation for the given packfile file
+// and packfile idx.
+// If the filesystem is provided, the packfile will return FSObjects, otherwise
+// it will return MemoryObjects.
+func NewPackfile(index idxfile.Index, fs billy.Filesystem, file billy.File) *Packfile {
+ return NewPackfileWithCache(index, fs, file, cache.NewObjectLRUDefault())
+}
+
+// Get retrieves the encoded object in the packfile with the given hash.
+func (p *Packfile) Get(h plumbing.Hash) (plumbing.EncodedObject, error) {
+ offset, err := p.FindOffset(h)
+ if err != nil {
+ return nil, err
+ }
+
+ return p.GetByOffset(offset)
+}
+
+// GetByOffset retrieves the encoded object from the packfile with the given
+// offset.
+func (p *Packfile) GetByOffset(o int64) (plumbing.EncodedObject, error) {
+ hash, err := p.FindHash(o)
+ if err == nil {
+ if obj, ok := p.deltaBaseCache.Get(hash); ok {
+ return obj, nil
+ }
+ }
+
+ if _, err := p.s.SeekFromStart(o); err != nil {
+ if err == io.EOF || isInvalid(err) {
+ return nil, plumbing.ErrObjectNotFound
+ }
+
+ return nil, err
+ }
+
+ return p.nextObject()
+}
+
+func (p *Packfile) nextObjectHeader() (*ObjectHeader, error) {
+ h, err := p.s.NextObjectHeader()
+ p.s.pendingObject = nil
+ return h, err
+}
+
+func (p *Packfile) getObjectData(
+ h *ObjectHeader,
+) (typ plumbing.ObjectType, size int64, err error) {
+ switch h.Type {
+ case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
+ typ = h.Type
+ size = h.Length
+ case plumbing.REFDeltaObject, plumbing.OFSDeltaObject:
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ defer bufPool.Put(buf)
+
+ _, _, err = p.s.NextObject(buf)
+ if err != nil {
+ return
+ }
+
+ delta := buf.Bytes()
+ _, delta = decodeLEB128(delta) // skip src size
+ sz, _ := decodeLEB128(delta)
+ size = int64(sz)
+
+ var offset int64
+ if h.Type == plumbing.REFDeltaObject {
+ offset, err = p.FindOffset(h.Reference)
+ if err != nil {
+ return
+ }
+ } else {
+ offset = h.OffsetReference
+ }
+
+ if baseType, ok := p.offsetToType[offset]; ok {
+ typ = baseType
+ } else {
+ if _, err = p.s.SeekFromStart(offset); err != nil {
+ return
+ }
+
+ h, err = p.nextObjectHeader()
+ if err != nil {
+ return
+ }
+
+ typ, _, err = p.getObjectData(h)
+ if err != nil {
+ return
+ }
+ }
+ default:
+ err = ErrInvalidObject.AddDetails("type %q", h.Type)
+ }
+
+ return
+}
+
+func (p *Packfile) getObjectSize(h *ObjectHeader) (int64, error) {
+ switch h.Type {
+ case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
+ return h.Length, nil
+ case plumbing.REFDeltaObject, plumbing.OFSDeltaObject:
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ defer bufPool.Put(buf)
+
+ if _, _, err := p.s.NextObject(buf); err != nil {
+ return 0, err
+ }
+
+ delta := buf.Bytes()
+ _, delta = decodeLEB128(delta) // skip src size
+ sz, _ := decodeLEB128(delta)
+ return int64(sz), nil
+ default:
+ return 0, ErrInvalidObject.AddDetails("type %q", h.Type)
+ }
+}
+
+func (p *Packfile) getObjectType(h *ObjectHeader) (typ plumbing.ObjectType, err error) {
+ switch h.Type {
+ case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
+ return h.Type, nil
+ case plumbing.REFDeltaObject, plumbing.OFSDeltaObject:
+ var offset int64
+ if h.Type == plumbing.REFDeltaObject {
+ offset, err = p.FindOffset(h.Reference)
+ if err != nil {
+ return
+ }
+ } else {
+ offset = h.OffsetReference
+ }
+
+ if baseType, ok := p.offsetToType[offset]; ok {
+ typ = baseType
+ } else {
+ if _, err = p.s.SeekFromStart(offset); err != nil {
+ return
+ }
+
+ h, err = p.nextObjectHeader()
+ if err != nil {
+ return
+ }
+
+ typ, err = p.getObjectType(h)
+ if err != nil {
+ return
+ }
+ }
+ default:
+ err = ErrInvalidObject.AddDetails("type %q", h.Type)
+ }
+
+ return
+}
+
+func (p *Packfile) nextObject() (plumbing.EncodedObject, error) {
+ h, err := p.nextObjectHeader()
+ if err != nil {
+ if err == io.EOF || isInvalid(err) {
+ return nil, plumbing.ErrObjectNotFound
+ }
+ return nil, err
+ }
+
+ // If we have no filesystem, we will return a MemoryObject instead
+ // of an FSObject.
+ if p.fs == nil {
+ return p.getNextObject(h)
+ }
+
+ hash, err := p.FindHash(h.Offset)
+ if err != nil {
+ return nil, err
+ }
+
+ size, err := p.getObjectSize(h)
+ if err != nil {
+ return nil, err
+ }
+
+ typ, err := p.getObjectType(h)
+ if err != nil {
+ return nil, err
+ }
+
+ p.offsetToType[h.Offset] = typ
+
+ return NewFSObject(
+ hash,
+ typ,
+ h.Offset,
+ size,
+ p.Index,
+ p.fs,
+ p.file.Name(),
+ p.deltaBaseCache,
+ ), nil
+}
+
+func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) {
+ ref, err := p.FindHash(offset)
+ if err == nil {
+ obj, ok := p.cacheGet(ref)
+ if ok {
+ reader, err := obj.Reader()
+ if err != nil {
+ return nil, err
+ }
+
+ return reader, nil
+ }
+ }
+
+ if _, err := p.s.SeekFromStart(offset); err != nil {
+ return nil, err
+ }
+
+ h, err := p.nextObjectHeader()
+ if err != nil {
+ return nil, err
+ }
+
+ obj, err := p.getNextObject(h)
+ if err != nil {
+ return nil, err
+ }
+
+ return obj.Reader()
+}
+
+func (p *Packfile) getNextObject(h *ObjectHeader) (plumbing.EncodedObject, error) {
+ var obj = new(plumbing.MemoryObject)
+ obj.SetSize(h.Length)
+ obj.SetType(h.Type)
+
+ var err error
+ switch h.Type {
+ case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
+ err = p.fillRegularObjectContent(obj)
+ case plumbing.REFDeltaObject:
+ err = p.fillREFDeltaObjectContent(obj, h.Reference)
+ case plumbing.OFSDeltaObject:
+ err = p.fillOFSDeltaObjectContent(obj, h.OffsetReference)
+ default:
+ err = ErrInvalidObject.AddDetails("type %q", h.Type)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ return obj, nil
+}
+
+func (p *Packfile) fillRegularObjectContent(obj plumbing.EncodedObject) error {
+ w, err := obj.Writer()
+ if err != nil {
+ return err
+ }
+
+ _, _, err = p.s.NextObject(w)
+ p.cachePut(obj)
+
+ return err
+}
+
+func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plumbing.Hash) error {
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ _, _, err := p.s.NextObject(buf)
+ if err != nil {
+ return err
+ }
+
+ base, ok := p.cacheGet(ref)
+ if !ok {
+ base, err = p.Get(ref)
+ if err != nil {
+ return err
+ }
+ }
+
+ obj.SetType(base.Type())
+ err = ApplyDelta(obj, base, buf.Bytes())
+ p.cachePut(obj)
+ bufPool.Put(buf)
+
+ return err
+}
+
+func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset int64) error {
+ buf := bytes.NewBuffer(nil)
+ _, _, err := p.s.NextObject(buf)
+ if err != nil {
+ return err
+ }
+
+ var base plumbing.EncodedObject
+ var ok bool
+ hash, err := p.FindHash(offset)
+ if err == nil {
+ base, ok = p.cacheGet(hash)
+ }
+
+ if !ok {
+ base, err = p.GetByOffset(offset)
+ if err != nil {
+ return err
+ }
+
+ p.cachePut(base)
+ }
+
+ obj.SetType(base.Type())
+ err = ApplyDelta(obj, base, buf.Bytes())
+ p.cachePut(obj)
+
+ return err
+}
+
+func (p *Packfile) cacheGet(h plumbing.Hash) (plumbing.EncodedObject, bool) {
+ if p.deltaBaseCache == nil {
+ return nil, false
+ }
+
+ return p.deltaBaseCache.Get(h)
+}
+
+func (p *Packfile) cachePut(obj plumbing.EncodedObject) {
+ if p.deltaBaseCache == nil {
+ return
+ }
+
+ p.deltaBaseCache.Put(obj)
+}
+
+// GetAll returns an iterator with all encoded objects in the packfile.
+// The returned iterator is not thread-safe; it should be used in the same
+// thread as the Packfile instance.
+func (p *Packfile) GetAll() (storer.EncodedObjectIter, error) {
+ return p.GetByType(plumbing.AnyObject)
+}
+
+// GetByType returns all the objects of the given type.
+func (p *Packfile) GetByType(typ plumbing.ObjectType) (storer.EncodedObjectIter, error) {
+ switch typ {
+ case plumbing.AnyObject,
+ plumbing.BlobObject,
+ plumbing.TreeObject,
+ plumbing.CommitObject,
+ plumbing.TagObject:
+ entries, err := p.EntriesByOffset()
+ if err != nil {
+ return nil, err
+ }
+
+ return &objectIter{
+ // The easiest way to provide an object decoder is to pass the Packfile
+ // instance itself. The iterator shares its scanner, cache and
+ // offset-to-type map, which is why it must be used from the same
+ // thread as the Packfile (see GetAll).
+ p: p,
+ iter: entries,
+ typ: typ,
+ }, nil
+ default:
+ return nil, plumbing.ErrInvalidType
+ }
+}
+
+// ID returns the ID of the packfile, which is the checksum at the end of it.
+func (p *Packfile) ID() (plumbing.Hash, error) {
+ prev, err := p.file.Seek(-20, io.SeekEnd)
+ if err != nil {
+ return plumbing.ZeroHash, err
+ }
+
+ var hash plumbing.Hash
+ if _, err := io.ReadFull(p.file, hash[:]); err != nil {
+ return plumbing.ZeroHash, err
+ }
+
+ if _, err := p.file.Seek(prev, io.SeekStart); err != nil {
+ return plumbing.ZeroHash, err
+ }
+
+ return hash, nil
+}
+
+// Close the packfile and its resources.
+func (p *Packfile) Close() error {
+ closer, ok := p.file.(io.Closer)
+ if !ok {
+ return nil
+ }
+
+ return closer.Close()
+}
+
+type objectIter struct {
+ p *Packfile
+ typ plumbing.ObjectType
+ iter idxfile.EntryIter
+}
+
+func (i *objectIter) Next() (plumbing.EncodedObject, error) {
+ for {
+ e, err := i.iter.Next()
+ if err != nil {
+ return nil, err
+ }
+
+ obj, err := i.p.GetByOffset(int64(e.Offset))
+ if err != nil {
+ return nil, err
+ }
+
+ if i.typ == plumbing.AnyObject || obj.Type() == i.typ {
+ return obj, nil
+ }
+ }
+}
+
+func (i *objectIter) ForEach(f func(plumbing.EncodedObject) error) error {
+ for {
+ o, err := i.Next()
+ if err != nil {
+ if err == io.EOF {
+ return nil
+ }
+ return err
+ }
+
+ if err := f(o); err != nil {
+ return err
+ }
+ }
+}
+
+func (i *objectIter) Close() {
+ i.iter.Close()
+}
+
+// isInvalid checks whether an error is an os.PathError with an os.ErrInvalid
+// error inside. It also checks for the Windows error, which is different from
+// os.ErrInvalid.
+func isInvalid(err error) bool {
+ pe, ok := err.(*os.PathError)
+ if !ok {
+ return false
+ }
+
+ errstr := pe.Err.Error()
+ return errstr == errInvalidUnix || errstr == errInvalidWindows
+}
+
+// errInvalidWindows is the Windows equivalent to os.ErrInvalid
+const errInvalidWindows = "The parameter is incorrect."
+
+var errInvalidUnix = os.ErrInvalid.Error()
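
Putting the new type to work end to end: open an on-disk pack plus its .idx and
read one object, mirroring SetUpTest in the test file below. The directory and
file names are placeholders; the hash is a commit from the basic fixture:

    func openAndRead() error {
    	fs := osfs.New("/repo/.git/objects/pack") // placeholder directory

    	idxf, err := fs.Open("pack-xxx.idx") // placeholder name
    	if err != nil {
    		return err
    	}
    	idx := idxfile.NewMemoryIndex()
    	if err := idxfile.NewDecoder(idxf).Decode(idx); err != nil {
    		return err
    	}

    	pf, err := fs.Open("pack-xxx.pack") // placeholder name
    	if err != nil {
    		return err
    	}
    	p := packfile.NewPackfile(idx, fs, pf)
    	defer p.Close()

    	obj, err := p.Get(plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"))
    	if err != nil {
    		return err
    	}
    	_ = obj
    	return nil
    }
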
diff --git a/plumbing/format/packfile/packfile_test.go b/plumbing/format/packfile/packfile_test.go
new file mode 100644
index 0000000..05dc8a7
--- /dev/null
+++ b/plumbing/format/packfile/packfile_test.go
@@ -0,0 +1,279 @@
+package packfile_test
+
+import (
+ "io"
+ "math"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/src-d/go-billy.v4/osfs"
+ fixtures "gopkg.in/src-d/go-git-fixtures.v3"
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
+ "gopkg.in/src-d/go-git.v4/plumbing/storer"
+)
+
+type PackfileSuite struct {
+ fixtures.Suite
+ p *packfile.Packfile
+ idx *idxfile.MemoryIndex
+ f *fixtures.Fixture
+}
+
+var _ = Suite(&PackfileSuite{})
+
+func (s *PackfileSuite) TestGet(c *C) {
+ for h := range expectedEntries {
+ obj, err := s.p.Get(h)
+ c.Assert(err, IsNil)
+ c.Assert(obj, Not(IsNil))
+ c.Assert(obj.Hash(), Equals, h)
+ }
+
+ _, err := s.p.Get(plumbing.ZeroHash)
+ c.Assert(err, Equals, plumbing.ErrObjectNotFound)
+}
+
+func (s *PackfileSuite) TestGetByOffset(c *C) {
+ for h, o := range expectedEntries {
+ obj, err := s.p.GetByOffset(o)
+ c.Assert(err, IsNil)
+ c.Assert(obj, Not(IsNil))
+ c.Assert(obj.Hash(), Equals, h)
+ }
+
+ _, err := s.p.GetByOffset(math.MaxInt64)
+ c.Assert(err, Equals, plumbing.ErrObjectNotFound)
+}
+
+func (s *PackfileSuite) TestID(c *C) {
+ id, err := s.p.ID()
+ c.Assert(err, IsNil)
+ c.Assert(id, Equals, s.f.PackfileHash)
+}
+
+func (s *PackfileSuite) TestGetAll(c *C) {
+ iter, err := s.p.GetAll()
+ c.Assert(err, IsNil)
+
+ var objects int
+ for {
+ o, err := iter.Next()
+ if err == io.EOF {
+ break
+ }
+ c.Assert(err, IsNil)
+
+ objects++
+ _, ok := expectedEntries[o.Hash()]
+ c.Assert(ok, Equals, true)
+ }
+
+ c.Assert(objects, Equals, len(expectedEntries))
+}
+
+var expectedEntries = map[plumbing.Hash]int64{
+ plumbing.NewHash("1669dce138d9b841a518c64b10914d88f5e488ea"): 615,
+ plumbing.NewHash("32858aad3c383ed1ff0a0f9bdf231d54a00c9e88"): 1524,
+ plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9"): 1063,
+ plumbing.NewHash("49c6bb89b17060d7b4deacb7b338fcc6ea2352a9"): 78882,
+ plumbing.NewHash("4d081c50e250fa32ea8b1313cf8bb7c2ad7627fd"): 84688,
+ plumbing.NewHash("586af567d0bb5e771e49bdd9434f5e0fb76d25fa"): 84559,
+ plumbing.NewHash("5a877e6a906a2743ad6e45d99c1793642aaf8eda"): 84479,
+ plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"): 186,
+ plumbing.NewHash("7e59600739c96546163833214c36459e324bad0a"): 84653,
+ plumbing.NewHash("880cd14280f4b9b6ed3986d6671f907d7cc2a198"): 78050,
+ plumbing.NewHash("8dcef98b1d52143e1e2dbc458ffe38f925786bf2"): 84741,
+ plumbing.NewHash("918c48b83bd081e863dbe1b80f8998f058cd8294"): 286,
+ plumbing.NewHash("9a48f23120e880dfbe41f7c9b7b708e9ee62a492"): 80998,
+ plumbing.NewHash("9dea2395f5403188298c1dabe8bdafe562c491e3"): 84032,
+ plumbing.NewHash("a39771a7651f97faf5c72e08224d857fc35133db"): 84430,
+ plumbing.NewHash("a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69"): 838,
+ plumbing.NewHash("a8d315b2b1c615d43042c3a62402b8a54288cf5c"): 84375,
+ plumbing.NewHash("aa9b383c260e1d05fbbf6b30a02914555e20c725"): 84760,
+ plumbing.NewHash("af2d6a6954d532f8ffb47615169c8fdf9d383a1a"): 449,
+ plumbing.NewHash("b029517f6300c2da0f4b651b8642506cd6aaf45d"): 1392,
+ plumbing.NewHash("b8e471f58bcbca63b07bda20e428190409c2db47"): 1230,
+ plumbing.NewHash("c192bd6a24ea1ab01d78686e417c8bdc7c3d197f"): 1713,
+ plumbing.NewHash("c2d30fa8ef288618f65f6eed6e168e0d514886f4"): 84725,
+ plumbing.NewHash("c8f1d8c61f9da76f4cb49fd86322b6e685dba956"): 80725,
+ plumbing.NewHash("cf4aa3b38974fb7d81f367c0830f7d78d65ab86b"): 84608,
+ plumbing.NewHash("d3ff53e0564a9f87d8e84b6e28e5060e517008aa"): 1685,
+ plumbing.NewHash("d5c0f4ab811897cadf03aec358ae60d21f91c50d"): 2351,
+ plumbing.NewHash("dbd3641b371024f44d0e469a9c8f5457b0660de1"): 84115,
+ plumbing.NewHash("e8d3ffab552895c19b9fcf7aa264d277cde33881"): 12,
+ plumbing.NewHash("eba74343e2f15d62adedfd8c883ee0262b5c8021"): 84708,
+ plumbing.NewHash("fb72698cab7617ac416264415f13224dfd7a165e"): 84671,
+}
+
+func (s *PackfileSuite) SetUpTest(c *C) {
+ s.f = fixtures.Basic().One()
+
+ fs := osfs.New("")
+ f, err := fs.Open(s.f.Packfile().Name())
+ c.Assert(err, IsNil)
+
+ s.idx = idxfile.NewMemoryIndex()
+ c.Assert(idxfile.NewDecoder(s.f.Idx()).Decode(s.idx), IsNil)
+
+ s.p = packfile.NewPackfile(s.idx, fs, f)
+}
+
+func (s *PackfileSuite) TearDownTest(c *C) {
+ c.Assert(s.p.Close(), IsNil)
+}
+
+func (s *PackfileSuite) TestDecode(c *C) {
+ fixtures.Basic().ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
+ index := getIndexFromIdxFile(f.Idx())
+ fs := osfs.New("")
+ pf, err := fs.Open(f.Packfile().Name())
+ c.Assert(err, IsNil)
+
+ p := packfile.NewPackfile(index, fs, pf)
+ defer p.Close()
+
+ for _, h := range expectedHashes {
+ obj, err := p.Get(plumbing.NewHash(h))
+ c.Assert(err, IsNil)
+ c.Assert(obj.Hash().String(), Equals, h)
+ }
+ })
+}
+
+func (s *PackfileSuite) TestDecodeByTypeRefDelta(c *C) {
+ f := fixtures.Basic().ByTag("ref-delta").One()
+
+ index := getIndexFromIdxFile(f.Idx())
+ fs := osfs.New("")
+ pf, err := fs.Open(f.Packfile().Name())
+ c.Assert(err, IsNil)
+
+ packfile := packfile.NewPackfile(index, fs, pf)
+ defer packfile.Close()
+
+ iter, err := packfile.GetByType(plumbing.CommitObject)
+ c.Assert(err, IsNil)
+
+ var count int
+ for {
+ obj, err := iter.Next()
+ if err == io.EOF {
+ break
+ }
+ count++
+ c.Assert(err, IsNil)
+ c.Assert(obj.Type(), Equals, plumbing.CommitObject)
+ }
+
+ c.Assert(count > 0, Equals, true)
+}
+
+func (s *PackfileSuite) TestDecodeByType(c *C) {
+ ts := []plumbing.ObjectType{
+ plumbing.CommitObject,
+ plumbing.TagObject,
+ plumbing.TreeObject,
+ plumbing.BlobObject,
+ }
+
+ fixtures.Basic().ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
+ for _, t := range ts {
+ index := getIndexFromIdxFile(f.Idx())
+ fs := osfs.New("")
+ pf, err := fs.Open(f.Packfile().Name())
+ c.Assert(err, IsNil)
+
+ packfile := packfile.NewPackfile(index, fs, pf)
+ defer packfile.Close()
+
+ iter, err := packfile.GetByType(t)
+ c.Assert(err, IsNil)
+
+ c.Assert(iter.ForEach(func(obj plumbing.EncodedObject) error {
+ c.Assert(obj.Type(), Equals, t)
+ return nil
+ }), IsNil)
+ }
+ })
+}
+
+func (s *PackfileSuite) TestDecodeByTypeConstructor(c *C) {
+ f := fixtures.Basic().ByTag("packfile").One()
+ index := getIndexFromIdxFile(f.Idx())
+ fs := osfs.New("")
+ pf, err := fs.Open(f.Packfile().Name())
+ c.Assert(err, IsNil)
+
+ packfile := packfile.NewPackfile(index, fs, pf)
+ defer packfile.Close()
+
+ _, err = packfile.GetByType(plumbing.OFSDeltaObject)
+ c.Assert(err, Equals, plumbing.ErrInvalidType)
+
+ _, err = packfile.GetByType(plumbing.REFDeltaObject)
+ c.Assert(err, Equals, plumbing.ErrInvalidType)
+
+ _, err = packfile.GetByType(plumbing.InvalidObject)
+ c.Assert(err, Equals, plumbing.ErrInvalidType)
+}
+
+var expectedHashes = []string{
+ "918c48b83bd081e863dbe1b80f8998f058cd8294",
+ "af2d6a6954d532f8ffb47615169c8fdf9d383a1a",
+ "1669dce138d9b841a518c64b10914d88f5e488ea",
+ "a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69",
+ "b8e471f58bcbca63b07bda20e428190409c2db47",
+ "35e85108805c84807bc66a02d91535e1e24b38b9",
+ "b029517f6300c2da0f4b651b8642506cd6aaf45d",
+ "32858aad3c383ed1ff0a0f9bdf231d54a00c9e88",
+ "d3ff53e0564a9f87d8e84b6e28e5060e517008aa",
+ "c192bd6a24ea1ab01d78686e417c8bdc7c3d197f",
+ "d5c0f4ab811897cadf03aec358ae60d21f91c50d",
+ "49c6bb89b17060d7b4deacb7b338fcc6ea2352a9",
+ "cf4aa3b38974fb7d81f367c0830f7d78d65ab86b",
+ "9dea2395f5403188298c1dabe8bdafe562c491e3",
+ "586af567d0bb5e771e49bdd9434f5e0fb76d25fa",
+ "9a48f23120e880dfbe41f7c9b7b708e9ee62a492",
+ "5a877e6a906a2743ad6e45d99c1793642aaf8eda",
+ "c8f1d8c61f9da76f4cb49fd86322b6e685dba956",
+ "a8d315b2b1c615d43042c3a62402b8a54288cf5c",
+ "a39771a7651f97faf5c72e08224d857fc35133db",
+ "880cd14280f4b9b6ed3986d6671f907d7cc2a198",
+ "fb72698cab7617ac416264415f13224dfd7a165e",
+ "4d081c50e250fa32ea8b1313cf8bb7c2ad7627fd",
+ "eba74343e2f15d62adedfd8c883ee0262b5c8021",
+ "c2d30fa8ef288618f65f6eed6e168e0d514886f4",
+ "8dcef98b1d52143e1e2dbc458ffe38f925786bf2",
+ "aa9b383c260e1d05fbbf6b30a02914555e20c725",
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
+ "dbd3641b371024f44d0e469a9c8f5457b0660de1",
+ "e8d3ffab552895c19b9fcf7aa264d277cde33881",
+ "7e59600739c96546163833214c36459e324bad0a",
+}
+
+func assertObjects(c *C, s storer.EncodedObjectStorer, expects []string) {
+ i, err := s.IterEncodedObjects(plumbing.AnyObject)
+ c.Assert(err, IsNil)
+
+ var count int
+ err = i.ForEach(func(plumbing.EncodedObject) error { count++; return nil })
+ c.Assert(err, IsNil)
+ c.Assert(count, Equals, len(expects))
+
+ for _, exp := range expects {
+ obt, err := s.EncodedObject(plumbing.AnyObject, plumbing.NewHash(exp))
+ c.Assert(err, IsNil)
+ c.Assert(obt.Hash().String(), Equals, exp)
+ }
+}
+
+func getIndexFromIdxFile(r io.Reader) idxfile.Index {
+ idxf := idxfile.NewMemoryIndex()
+ d := idxfile.NewDecoder(r)
+ if err := d.Decode(idxf); err != nil {
+ panic(err)
+ }
+
+ return idxf
+}
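
The test setup above is also the typical read path outside of tests: decode the .idx file into an idxfile.MemoryIndex, then hand it to packfile.NewPackfile together with the filesystem and the open pack. A minimal standalone sketch of that flow; the pack directory and file names are hypothetical, and the go-billy import path is assumed from the rest of this tree:

package main

import (
    "fmt"

    "gopkg.in/src-d/go-billy.v4/osfs"
    "gopkg.in/src-d/go-git.v4/plumbing"
    "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
    "gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
)

func main() {
    // Hypothetical pack location and names.
    fs := osfs.New("/tmp/repo/.git/objects/pack")

    idxFile, err := fs.Open("pack-basic.idx")
    if err != nil {
        panic(err)
    }
    idx := idxfile.NewMemoryIndex()
    if err := idxfile.NewDecoder(idxFile).Decode(idx); err != nil {
        panic(err)
    }

    packFile, err := fs.Open("pack-basic.pack")
    if err != nil {
        panic(err)
    }

    p := packfile.NewPackfile(idx, fs, packFile)
    defer p.Close()

    obj, err := p.Get(plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"))
    if err != nil {
        panic(err)
    }
    fmt.Println(obj.Type(), obj.Size())
}
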
diff --git a/plumbing/format/packfile/parser.go b/plumbing/format/packfile/parser.go
new file mode 100644
index 0000000..28582b5
--- /dev/null
+++ b/plumbing/format/packfile/parser.go
@@ -0,0 +1,489 @@
+package packfile
+
+import (
+ "bytes"
+ "errors"
+ "io"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/cache"
+ "gopkg.in/src-d/go-git.v4/plumbing/storer"
+)
+
+var (
+ // ErrReferenceDeltaNotFound is returned when the reference delta is not
+ // found.
+ ErrReferenceDeltaNotFound = errors.New("reference delta not found")
+
+ // ErrNotSeekableSource is returned when the source for the parser is not
+ // seekable and a storage was not provided, so it can't be parsed.
+ ErrNotSeekableSource = errors.New("parser source is not seekable and storage was not provided")
+
+ // ErrDeltaNotCached is returned when the delta could not be found in cache.
+ ErrDeltaNotCached = errors.New("delta could not be found in cache")
+)
+
+// Observer interface is implemented by index encoders.
+type Observer interface {
+ // OnHeader is called when a new packfile is opened.
+ OnHeader(count uint32) error
+ // OnInflatedObjectHeader is called for each object header read.
+ OnInflatedObjectHeader(t plumbing.ObjectType, objSize int64, pos int64) error
+ // OnInflatedObjectContent is called for each decoded object.
+ OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, content []byte) error
+ // OnFooter is called when decoding is done.
+ OnFooter(h plumbing.Hash) error
+}
+
+// Parser decodes a packfile and calls any observer associated with it. It is
+// used to generate indexes.
+type Parser struct {
+ storage storer.EncodedObjectStorer
+ scanner *Scanner
+ count uint32
+ oi []*objectInfo
+ oiByHash map[plumbing.Hash]*objectInfo
+ oiByOffset map[int64]*objectInfo
+ hashOffset map[plumbing.Hash]int64
+ pendingRefDeltas map[plumbing.Hash][]*objectInfo
+ checksum plumbing.Hash
+
+ cache *cache.BufferLRU
+ // delta content by offset, only used if source is not seekable
+ deltas map[int64][]byte
+
+ ob []Observer
+}
+
+// NewParser creates a new Parser. The Scanner source must be seekable.
+// If it's not, NewParserWithStorage should be used instead.
+func NewParser(scanner *Scanner, ob ...Observer) (*Parser, error) {
+ return NewParserWithStorage(scanner, nil, ob...)
+}
+
+// NewParserWithStorage creates a new Parser. The scanner source must either
+// be seekable or a storage must be provided.
+func NewParserWithStorage(
+ scanner *Scanner,
+ storage storer.EncodedObjectStorer,
+ ob ...Observer,
+) (*Parser, error) {
+ if !scanner.IsSeekable && storage == nil {
+ return nil, ErrNotSeekableSource
+ }
+
+ var deltas map[int64][]byte
+ if !scanner.IsSeekable {
+ deltas = make(map[int64][]byte)
+ }
+
+ return &Parser{
+ storage: storage,
+ scanner: scanner,
+ ob: ob,
+ count: 0,
+ cache: cache.NewBufferLRUDefault(),
+ pendingRefDeltas: make(map[plumbing.Hash][]*objectInfo),
+ deltas: deltas,
+ }, nil
+}
+
+func (p *Parser) forEachObserver(f func(o Observer) error) error {
+ for _, o := range p.ob {
+ if err := f(o); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (p *Parser) onHeader(count uint32) error {
+ return p.forEachObserver(func(o Observer) error {
+ return o.OnHeader(count)
+ })
+}
+
+func (p *Parser) onInflatedObjectHeader(
+ t plumbing.ObjectType,
+ objSize int64,
+ pos int64,
+) error {
+ return p.forEachObserver(func(o Observer) error {
+ return o.OnInflatedObjectHeader(t, objSize, pos)
+ })
+}
+
+func (p *Parser) onInflatedObjectContent(
+ h plumbing.Hash,
+ pos int64,
+ crc uint32,
+ content []byte,
+) error {
+ return p.forEachObserver(func(o Observer) error {
+ return o.OnInflatedObjectContent(h, pos, crc, content)
+ })
+}
+
+func (p *Parser) onFooter(h plumbing.Hash) error {
+ return p.forEachObserver(func(o Observer) error {
+ return o.OnFooter(h)
+ })
+}
+
+// Parse starts the decoding phase of the packfile.
+func (p *Parser) Parse() (plumbing.Hash, error) {
+ if err := p.init(); err != nil {
+ return plumbing.ZeroHash, err
+ }
+
+ if err := p.indexObjects(); err != nil {
+ return plumbing.ZeroHash, err
+ }
+
+ var err error
+ p.checksum, err = p.scanner.Checksum()
+ if err != nil && err != io.EOF {
+ return plumbing.ZeroHash, err
+ }
+
+ if err := p.resolveDeltas(); err != nil {
+ return plumbing.ZeroHash, err
+ }
+
+ if len(p.pendingRefDeltas) > 0 {
+ return plumbing.ZeroHash, ErrReferenceDeltaNotFound
+ }
+
+ if err := p.onFooter(p.checksum); err != nil {
+ return plumbing.ZeroHash, err
+ }
+
+ return p.checksum, nil
+}
+
+func (p *Parser) init() error {
+ _, c, err := p.scanner.Header()
+ if err != nil {
+ return err
+ }
+
+ if err := p.onHeader(c); err != nil {
+ return err
+ }
+
+ p.count = c
+ p.oiByHash = make(map[plumbing.Hash]*objectInfo, p.count)
+ p.oiByOffset = make(map[int64]*objectInfo, p.count)
+ p.oi = make([]*objectInfo, p.count)
+
+ return nil
+}
+
+func (p *Parser) indexObjects() error {
+ buf := new(bytes.Buffer)
+
+ for i := uint32(0); i < p.count; i++ {
+ buf.Reset()
+
+ oh, err := p.scanner.NextObjectHeader()
+ if err != nil {
+ return err
+ }
+
+ delta := false
+ var ota *objectInfo
+ switch t := oh.Type; t {
+ case plumbing.OFSDeltaObject:
+ delta = true
+
+ parent, ok := p.oiByOffset[oh.OffsetReference]
+ if !ok {
+ return plumbing.ErrObjectNotFound
+ }
+
+ ota = newDeltaObject(oh.Offset, oh.Length, t, parent)
+ parent.Children = append(parent.Children, ota)
+ case plumbing.REFDeltaObject:
+ delta = true
+
+ parent, ok := p.oiByHash[oh.Reference]
+ if ok {
+ ota = newDeltaObject(oh.Offset, oh.Length, t, parent)
+ parent.Children = append(parent.Children, ota)
+ } else {
+ ota = newBaseObject(oh.Offset, oh.Length, t)
+ p.pendingRefDeltas[oh.Reference] = append(
+ p.pendingRefDeltas[oh.Reference],
+ ota,
+ )
+ }
+ default:
+ ota = newBaseObject(oh.Offset, oh.Length, t)
+ }
+
+ _, crc, err := p.scanner.NextObject(buf)
+ if err != nil {
+ return err
+ }
+
+ ota.Crc32 = crc
+ ota.Length = oh.Length
+
+ data := buf.Bytes()
+ if !delta {
+ sha1, err := getSHA1(ota.Type, data)
+ if err != nil {
+ return err
+ }
+
+ ota.SHA1 = sha1
+ p.oiByHash[ota.SHA1] = ota
+ }
+
+ if p.storage != nil && !delta {
+ obj := new(plumbing.MemoryObject)
+ obj.SetSize(oh.Length)
+ obj.SetType(oh.Type)
+ if _, err := obj.Write(data); err != nil {
+ return err
+ }
+
+ if _, err := p.storage.SetEncodedObject(obj); err != nil {
+ return err
+ }
+ }
+
+ if delta && !p.scanner.IsSeekable {
+ p.deltas[oh.Offset] = make([]byte, len(data))
+ copy(p.deltas[oh.Offset], data)
+ }
+
+ p.oiByOffset[oh.Offset] = ota
+ p.oi[i] = ota
+ }
+
+ return nil
+}
+
+func (p *Parser) resolveDeltas() error {
+ for _, obj := range p.oi {
+ content, err := p.get(obj)
+ if err != nil {
+ return err
+ }
+
+ if err := p.onInflatedObjectHeader(obj.Type, obj.Length, obj.Offset); err != nil {
+ return err
+ }
+
+ if err := p.onInflatedObjectContent(obj.SHA1, obj.Offset, obj.Crc32, content); err != nil {
+ return err
+ }
+
+ if !obj.IsDelta() && len(obj.Children) > 0 {
+ for _, child := range obj.Children {
+ if _, err := p.resolveObject(child, content); err != nil {
+ return err
+ }
+ }
+
+ // Remove the delta content kept for non-seekable sources.
+ if obj.DiskType.IsDelta() && !p.scanner.IsSeekable {
+ delete(p.deltas, obj.Offset)
+ }
+ }
+ }
+
+ return nil
+}
+
+func (p *Parser) get(o *objectInfo) ([]byte, error) {
+ b, ok := p.cache.Get(o.Offset)
+ // If it's not in the cache and is not a delta we can try to find it in
+ // the storage, if there is one.
+ if !ok && p.storage != nil && !o.Type.IsDelta() {
+ var err error
+ e, err := p.storage.EncodedObject(plumbing.AnyObject, o.SHA1)
+ if err != nil {
+ return nil, err
+ }
+
+ r, err := e.Reader()
+ if err != nil {
+ return nil, err
+ }
+
+ b = make([]byte, e.Size())
+ if _, err = io.ReadFull(r, b); err != nil {
+ return nil, err
+ }
+ }
+
+ if b != nil {
+ return b, nil
+ }
+
+ var data []byte
+ if o.DiskType.IsDelta() {
+ base, err := p.get(o.Parent)
+ if err != nil {
+ return nil, err
+ }
+
+ data, err = p.resolveObject(o, base)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ var err error
+ data, err = p.readData(o)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if len(o.Children) > 0 {
+ p.cache.Put(o.Offset, data)
+ }
+
+ return data, nil
+}
+
+func (p *Parser) resolveObject(
+ o *objectInfo,
+ base []byte,
+) ([]byte, error) {
+ if !o.DiskType.IsDelta() {
+ return nil, nil
+ }
+
+ data, err := p.readData(o)
+ if err != nil {
+ return nil, err
+ }
+
+ data, err = applyPatchBase(o, data, base)
+ if err != nil {
+ return nil, err
+ }
+
+ if pending, ok := p.pendingRefDeltas[o.SHA1]; ok {
+ for _, po := range pending {
+ po.Parent = o
+ o.Children = append(o.Children, po)
+ }
+ delete(p.pendingRefDeltas, o.SHA1)
+ }
+
+ if p.storage != nil {
+ obj := new(plumbing.MemoryObject)
+ obj.SetSize(o.Size())
+ obj.SetType(o.Type)
+ if _, err := obj.Write(data); err != nil {
+ return nil, err
+ }
+
+ if _, err := p.storage.SetEncodedObject(obj); err != nil {
+ return nil, err
+ }
+ }
+
+ return data, nil
+}
+
+func (p *Parser) readData(o *objectInfo) ([]byte, error) {
+ if !p.scanner.IsSeekable && o.DiskType.IsDelta() {
+ data, ok := p.deltas[o.Offset]
+ if !ok {
+ return nil, ErrDeltaNotCached
+ }
+
+ return data, nil
+ }
+
+ if _, err := p.scanner.SeekFromStart(o.Offset); err != nil {
+ return nil, err
+ }
+
+ if _, err := p.scanner.NextObjectHeader(); err != nil {
+ return nil, err
+ }
+
+ buf := new(bytes.Buffer)
+ if _, _, err := p.scanner.NextObject(buf); err != nil {
+ return nil, err
+ }
+
+ return buf.Bytes(), nil
+}
+
+func applyPatchBase(ota *objectInfo, data, base []byte) ([]byte, error) {
+ patched, err := PatchDelta(base, data)
+ if err != nil {
+ return nil, err
+ }
+
+ if ota.SHA1 == plumbing.ZeroHash {
+ ota.Type = ota.Parent.Type
+ sha1, err := getSHA1(ota.Type, patched)
+ if err != nil {
+ return nil, err
+ }
+
+ ota.SHA1 = sha1
+ ota.Length = int64(len(patched))
+ }
+
+ return patched, nil
+}
+
+func getSHA1(t plumbing.ObjectType, data []byte) (plumbing.Hash, error) {
+ hasher := plumbing.NewHasher(t, int64(len(data)))
+ if _, err := hasher.Write(data); err != nil {
+ return plumbing.ZeroHash, err
+ }
+
+ return hasher.Sum(), nil
+}
+
+type objectInfo struct {
+ Offset int64
+ Length int64
+ Type plumbing.ObjectType
+ DiskType plumbing.ObjectType
+
+ Crc32 uint32
+
+ Parent *objectInfo
+ Children []*objectInfo
+ SHA1 plumbing.Hash
+}
+
+func newBaseObject(offset, length int64, t plumbing.ObjectType) *objectInfo {
+ return newDeltaObject(offset, length, t, nil)
+}
+
+func newDeltaObject(
+ offset, length int64,
+ t plumbing.ObjectType,
+ parent *objectInfo,
+) *objectInfo {
+ obj := &objectInfo{
+ Offset: offset,
+ Length: length,
+ Type: t,
+ DiskType: t,
+ Crc32: 0,
+ Parent: parent,
+ }
+
+ return obj
+}
+
+func (o *objectInfo) IsDelta() bool {
+ return o.Type.IsDelta()
+}
+
+func (o *objectInfo) Size() int64 {
+ return o.Length
+}
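
Since Observer is the parser's only output channel besides the returned checksum, a bare-bones observer is enough to see the callback order: OnHeader once, then a header/content pair per object, then OnFooter. A hedged sketch; countingObserver and indexPack are made-up names, and packReader is assumed to be a pack stream opened elsewhere:

import (
    "io"

    "gopkg.in/src-d/go-git.v4/plumbing"
    "gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
)

// countingObserver is a hypothetical Observer that only tallies objects.
type countingObserver struct {
    count   uint32
    objects int
}

func (o *countingObserver) OnHeader(count uint32) error {
    o.count = count
    return nil
}

func (o *countingObserver) OnInflatedObjectHeader(t plumbing.ObjectType, objSize, pos int64) error {
    return nil
}

func (o *countingObserver) OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, _ []byte) error {
    o.objects++
    return nil
}

func (o *countingObserver) OnFooter(h plumbing.Hash) error { return nil }

// indexPack runs a parse with no storage attached; in that mode the
// scanner source must be seekable, otherwise NewParser fails with
// ErrNotSeekableSource.
func indexPack(packReader io.Reader) (plumbing.Hash, error) {
    obs := &countingObserver{}
    parser, err := packfile.NewParser(packfile.NewScanner(packReader), obs)
    if err != nil {
        return plumbing.ZeroHash, err
    }
    return parser.Parse()
}
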
diff --git a/plumbing/format/packfile/parser_test.go b/plumbing/format/packfile/parser_test.go
new file mode 100644
index 0000000..012a140
--- /dev/null
+++ b/plumbing/format/packfile/parser_test.go
@@ -0,0 +1,195 @@
+package packfile_test
+
+import (
+ "testing"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/src-d/go-git-fixtures.v3"
+)
+
+type ParserSuite struct {
+ fixtures.Suite
+}
+
+var _ = Suite(&ParserSuite{})
+
+func (s *ParserSuite) TestParserHashes(c *C) {
+ f := fixtures.Basic().One()
+ scanner := packfile.NewScanner(f.Packfile())
+
+ obs := new(testObserver)
+ parser, err := packfile.NewParser(scanner, obs)
+ c.Assert(err, IsNil)
+
+ ch, err := parser.Parse()
+ c.Assert(err, IsNil)
+
+ checksum := "a3fed42da1e8189a077c0e6846c040dcf73fc9dd"
+ c.Assert(ch.String(), Equals, checksum)
+
+ c.Assert(obs.checksum, Equals, checksum)
+ c.Assert(int(obs.count), Equals, int(31))
+
+ commit := plumbing.CommitObject
+ blob := plumbing.BlobObject
+ tree := plumbing.TreeObject
+
+ objs := []observerObject{
+ {"e8d3ffab552895c19b9fcf7aa264d277cde33881", commit, 254, 12, 0xaa07ba4b},
+ {"6ecf0ef2c2dffb796033e5a02219af86ec6584e5", commit, 245, 186, 0xf706df58},
+ {"918c48b83bd081e863dbe1b80f8998f058cd8294", commit, 242, 286, 0x12438846},
+ {"af2d6a6954d532f8ffb47615169c8fdf9d383a1a", commit, 242, 449, 0x2905a38c},
+ {"1669dce138d9b841a518c64b10914d88f5e488ea", commit, 333, 615, 0xd9429436},
+ {"a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69", commit, 332, 838, 0xbecfde4e},
+ {"35e85108805c84807bc66a02d91535e1e24b38b9", commit, 244, 1063, 0x780e4b3e},
+ {"b8e471f58bcbca63b07bda20e428190409c2db47", commit, 243, 1230, 0xdc18344f},
+ {"b029517f6300c2da0f4b651b8642506cd6aaf45d", commit, 187, 1392, 0xcf4e4280},
+ {"32858aad3c383ed1ff0a0f9bdf231d54a00c9e88", blob, 189, 1524, 0x1f08118a},
+ {"d3ff53e0564a9f87d8e84b6e28e5060e517008aa", blob, 18, 1685, 0xafded7b8},
+ {"c192bd6a24ea1ab01d78686e417c8bdc7c3d197f", blob, 1072, 1713, 0xcc1428ed},
+ {"d5c0f4ab811897cadf03aec358ae60d21f91c50d", blob, 76110, 2351, 0x1631d22f},
+ {"880cd14280f4b9b6ed3986d6671f907d7cc2a198", blob, 2780, 78050, 0xbfff5850},
+ {"49c6bb89b17060d7b4deacb7b338fcc6ea2352a9", blob, 217848, 78882, 0xd108e1d8},
+ {"c8f1d8c61f9da76f4cb49fd86322b6e685dba956", blob, 706, 80725, 0x8e97ba25},
+ {"9a48f23120e880dfbe41f7c9b7b708e9ee62a492", blob, 11488, 80998, 0x7316ff70},
+ {"9dea2395f5403188298c1dabe8bdafe562c491e3", blob, 78, 84032, 0xdb4fce56},
+ {"dbd3641b371024f44d0e469a9c8f5457b0660de1", tree, 272, 84115, 0x901cce2c},
+ {"a8d315b2b1c615d43042c3a62402b8a54288cf5c", tree, 271, 84375, 0xec4552b0},
+ {"a39771a7651f97faf5c72e08224d857fc35133db", tree, 38, 84430, 0x847905bf},
+ {"5a877e6a906a2743ad6e45d99c1793642aaf8eda", tree, 75, 84479, 0x3689459a},
+ {"586af567d0bb5e771e49bdd9434f5e0fb76d25fa", tree, 38, 84559, 0xe67af94a},
+ {"cf4aa3b38974fb7d81f367c0830f7d78d65ab86b", tree, 34, 84608, 0xc2314a2e},
+ {"7e59600739c96546163833214c36459e324bad0a", blob, 9, 84653, 0xcd987848},
+ {"fb72698cab7617ac416264415f13224dfd7a165e", tree, 238, 84671, 0x8a853a6d},
+ {"4d081c50e250fa32ea8b1313cf8bb7c2ad7627fd", tree, 179, 84688, 0x70c6518},
+ {"eba74343e2f15d62adedfd8c883ee0262b5c8021", tree, 148, 84708, 0x4f4108e2},
+ {"c2d30fa8ef288618f65f6eed6e168e0d514886f4", tree, 110, 84725, 0xd6fe09e9},
+ {"8dcef98b1d52143e1e2dbc458ffe38f925786bf2", tree, 111, 84741, 0xf07a2804},
+ {"aa9b383c260e1d05fbbf6b30a02914555e20c725", tree, 73, 84760, 0x1d75d6be},
+ }
+
+ c.Assert(obs.objects, DeepEquals, objs)
+}
+
+type observerObject struct {
+ hash string
+ otype plumbing.ObjectType
+ size int64
+ offset int64
+ crc uint32
+}
+
+type testObserver struct {
+ count uint32
+ checksum string
+ objects []observerObject
+ pos map[int64]int
+}
+
+func (t *testObserver) OnHeader(count uint32) error {
+ t.count = count
+ t.pos = make(map[int64]int, count)
+ return nil
+}
+
+func (t *testObserver) OnInflatedObjectHeader(otype plumbing.ObjectType, objSize int64, pos int64) error {
+ o := t.get(pos)
+ o.otype = otype
+ o.size = objSize
+ o.offset = pos
+
+ t.put(pos, o)
+
+ return nil
+}
+
+func (t *testObserver) OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, _ []byte) error {
+ o := t.get(pos)
+ o.hash = h.String()
+ o.crc = crc
+
+ t.put(pos, o)
+
+ return nil
+}
+
+func (t *testObserver) OnFooter(h plumbing.Hash) error {
+ t.checksum = h.String()
+ return nil
+}
+
+func (t *testObserver) get(pos int64) observerObject {
+ i, ok := t.pos[pos]
+ if ok {
+ return t.objects[i]
+ }
+
+ return observerObject{}
+}
+
+func (t *testObserver) put(pos int64, o observerObject) {
+ i, ok := t.pos[pos]
+ if ok {
+ t.objects[i] = o
+ return
+ }
+
+ t.pos[pos] = len(t.objects)
+ t.objects = append(t.objects, o)
+}
+
+func BenchmarkParse(b *testing.B) {
+ if err := fixtures.Init(); err != nil {
+ b.Fatal(err)
+ }
+
+ defer func() {
+ if err := fixtures.Clean(); err != nil {
+ b.Fatal(err)
+ }
+ }()
+
+ for _, f := range fixtures.ByTag("packfile") {
+ b.Run(f.URL, func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ parser, err := packfile.NewParser(packfile.NewScanner(f.Packfile()))
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ _, err = parser.Parse()
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+ }
+}
+
+func BenchmarkParseBasic(b *testing.B) {
+ if err := fixtures.Init(); err != nil {
+ b.Fatal(err)
+ }
+
+ defer func() {
+ if err := fixtures.Clean(); err != nil {
+ b.Fatal(err)
+ }
+ }()
+
+ f := fixtures.Basic().One()
+ for i := 0; i < b.N; i++ {
+ parser, err := packfile.NewParser(packfile.NewScanner(f.Packfile()))
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ _, err = parser.Parse()
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/plumbing/format/packfile/patch_delta.go b/plumbing/format/packfile/patch_delta.go
index c604851..a972f1c 100644
--- a/plumbing/format/packfile/patch_delta.go
+++ b/plumbing/format/packfile/patch_delta.go
@@ -63,8 +63,8 @@ func PatchDelta(src, delta []byte) ([]byte, error) {
targetSz, delta := decodeLEB128(delta)
remainingTargetSz := targetSz
- var dest []byte
var cmd byte
+ dest := make([]byte, 0, targetSz)
for {
if len(delta) == 0 {
return nil, ErrInvalidDelta
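
The change above preallocates dest with the decoded target size, so the destination buffer is allocated once instead of growing repeatedly while the copy and insert instructions append to it. targetSz is available up front because a delta starts with two base-128 varints (source size, then target size): seven payload bits per byte, high bit set while more bytes follow. decodeLEB128 in this file implements that; a functionally equivalent sketch (decodeVarint is a hypothetical stand-in, and like the real decoder it assumes the caller has checked the buffer is non-empty):

func decodeVarint(delta []byte) (uint, []byte) {
    var value, shift uint
    var i int
    for {
        b := delta[i]
        value |= uint(b&0x7f) << shift // low 7 bits carry payload
        i++
        shift += 7
        if b&0x80 == 0 { // high bit clear: last byte of the varint
            break
        }
    }
    return value, delta[i:]
}

With targetSz known, make([]byte, 0, targetSz) reserves the full capacity in one allocation, and the subsequent appends never trigger a copy.
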
diff --git a/plumbing/format/packfile/scanner.go b/plumbing/format/packfile/scanner.go
index d2d776f..6fc183b 100644
--- a/plumbing/format/packfile/scanner.go
+++ b/plumbing/format/packfile/scanner.go
@@ -63,10 +63,7 @@ func NewScanner(r io.Reader) *Scanner {
crc := crc32.NewIEEE()
return &Scanner{
- r: &teeReader{
- newByteReadSeeker(seeker),
- crc,
- },
+ r: newTeeReader(newByteReadSeeker(seeker), crc),
crc: crc,
IsSeekable: ok,
}
@@ -143,6 +140,8 @@ func (s *Scanner) readCount() (uint32, error) {
// NextObjectHeader returns the ObjectHeader for the next object in the reader
func (s *Scanner) NextObjectHeader() (*ObjectHeader, error) {
+ defer s.Flush()
+
if err := s.doPending(); err != nil {
return nil, err
}
@@ -271,6 +270,7 @@ func (s *Scanner) NextObject(w io.Writer) (written int64, crc32 uint32, err erro
s.pendingObject = nil
written, err = s.copyObject(w)
+ s.Flush()
crc32 = s.crc.Sum32()
return
}
@@ -279,14 +279,15 @@ func (s *Scanner) NextObject(w io.Writer) (written int64, crc32 uint32, err erro
// from it zlib stream in an object entry in the packfile.
func (s *Scanner) copyObject(w io.Writer) (n int64, err error) {
if s.zr == nil {
- zr, err := zlib.NewReader(s.r)
+ var zr io.ReadCloser
+ zr, err = zlib.NewReader(s.r)
if err != nil {
return 0, fmt.Errorf("zlib initialization error: %s", err)
}
s.zr = zr.(readerResetter)
} else {
- if err := s.zr.Reset(s.r, nil); err != nil {
+ if err = s.zr.Reset(s.r, nil); err != nil {
return 0, fmt.Errorf("zlib reset error: %s", err)
}
}
@@ -339,6 +340,16 @@ func (s *Scanner) Close() error {
return err
}
+// Flush finishes writing the buffer to the CRC hasher in case we are using
+// a teeReader. Otherwise it is a no-op.
+func (s *Scanner) Flush() error {
+ tee, ok := s.r.(*teeReader)
+ if ok {
+ return tee.Flush()
+ }
+ return nil
+}
+
type trackableReader struct {
count int64
io.Reader
@@ -400,10 +411,21 @@ type reader interface {
type teeReader struct {
reader
- w hash.Hash32
+ w hash.Hash32
+ bufWriter *bufio.Writer
+}
+
+func newTeeReader(r reader, h hash.Hash32) *teeReader {
+ return &teeReader{
+ reader: r,
+ w: h,
+ bufWriter: bufio.NewWriter(h),
+ }
}
func (r *teeReader) Read(p []byte) (n int, err error) {
+ r.Flush()
+
n, err = r.reader.Read(p)
if n > 0 {
if n, err := r.w.Write(p[:n]); err != nil {
@@ -416,11 +438,12 @@ func (r *teeReader) Read(p []byte) (n int, err error) {
func (r *teeReader) ReadByte() (b byte, err error) {
b, err = r.reader.ReadByte()
if err == nil {
- _, err := r.w.Write([]byte{b})
- if err != nil {
- return 0, err
- }
+ return b, r.bufWriter.WriteByte(b)
}
return
}
+
+func (r *teeReader) Flush() (err error) {
+ return r.bufWriter.Flush()
+}
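
The motivation for the bufio.Writer inside teeReader: the scanner parses object headers a byte at a time through ReadByte (as the ReadByte path above suggests), and writing each of those bytes straight into the CRC hasher costs one interface call per byte. Buffering them and flushing before any bulk Read, and before Sum32 is sampled, batches the writes while keeping the checksum identical. The same pattern in isolation, runnable:

package main

import (
    "bufio"
    "fmt"
    "hash/crc32"
    "strings"
)

func main() {
    h := crc32.NewIEEE()
    bw := bufio.NewWriter(h) // buffers per-byte writes to the hasher

    r := strings.NewReader("pack data")
    for {
        b, err := r.ReadByte()
        if err != nil {
            break
        }
        bw.WriteByte(b) // cheap buffered write, no hash call per byte
    }

    bw.Flush() // the analogue of teeReader.Flush: drain before Sum32
    fmt.Printf("crc32: %08x\n", h.Sum32())
}
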
diff --git a/plumbing/format/packfile/scanner_test.go b/plumbing/format/packfile/scanner_test.go
index 000be7f..644d0eb 100644
--- a/plumbing/format/packfile/scanner_test.go
+++ b/plumbing/format/packfile/scanner_test.go
@@ -41,14 +41,16 @@ func (s *ScannerSuite) TestNextObjectHeaderWithoutHeader(c *C) {
}
func (s *ScannerSuite) TestNextObjectHeaderREFDelta(c *C) {
- s.testNextObjectHeader(c, "ref-delta", expectedHeadersREF)
+ s.testNextObjectHeader(c, "ref-delta", expectedHeadersREF, expectedCRCREF)
}
func (s *ScannerSuite) TestNextObjectHeaderOFSDelta(c *C) {
- s.testNextObjectHeader(c, "ofs-delta", expectedHeadersOFS)
+ s.testNextObjectHeader(c, "ofs-delta", expectedHeadersOFS, expectedCRCOFS)
}
-func (s *ScannerSuite) testNextObjectHeader(c *C, tag string, expected []ObjectHeader) {
+func (s *ScannerSuite) testNextObjectHeader(c *C, tag string,
+ expected []ObjectHeader, expectedCRC []uint32) {
+
r := fixtures.Basic().ByTag(tag).One().Packfile()
p := NewScanner(r)
@@ -61,9 +63,10 @@ func (s *ScannerSuite) testNextObjectHeader(c *C, tag string, expected []ObjectH
c.Assert(*h, DeepEquals, expected[i])
buf := bytes.NewBuffer(nil)
- n, _, err := p.NextObject(buf)
+ n, crcFromScanner, err := p.NextObject(buf)
c.Assert(err, IsNil)
c.Assert(n, Equals, h.Length)
+ c.Assert(crcFromScanner, Equals, expectedCRC[i])
}
n, err := p.Checksum()
@@ -149,6 +152,40 @@ var expectedHeadersOFS = []ObjectHeader{
{Type: plumbing.OFSDeltaObject, Offset: 84760, Length: 4, OffsetReference: 84741},
}
+var expectedCRCOFS = []uint32{
+ 0xaa07ba4b,
+ 0xf706df58,
+ 0x12438846,
+ 0x2905a38c,
+ 0xd9429436,
+ 0xbecfde4e,
+ 0x780e4b3e,
+ 0xdc18344f,
+ 0xcf4e4280,
+ 0x1f08118a,
+ 0xafded7b8,
+ 0xcc1428ed,
+ 0x1631d22f,
+ 0xbfff5850,
+ 0xd108e1d8,
+ 0x8e97ba25,
+ 0x7316ff70,
+ 0xdb4fce56,
+ 0x901cce2c,
+ 0xec4552b0,
+ 0x847905bf,
+ 0x3689459a,
+ 0xe67af94a,
+ 0xc2314a2e,
+ 0xcd987848,
+ 0x8a853a6d,
+ 0x70c6518,
+ 0x4f4108e2,
+ 0xd6fe09e9,
+ 0xf07a2804,
+ 0x1d75d6be,
+}
+
var expectedHeadersREF = []ObjectHeader{
{Type: plumbing.CommitObject, Offset: 12, Length: 254},
{Type: plumbing.REFDeltaObject, Offset: 186, Length: 93,
@@ -188,3 +225,37 @@ var expectedHeadersREF = []ObjectHeader{
Reference: plumbing.NewHash("eba74343e2f15d62adedfd8c883ee0262b5c8021")},
{Type: plumbing.TreeObject, Offset: 85485, Length: 73},
}
+
+var expectedCRCREF = []uint32{
+ 0xaa07ba4b,
+ 0xfb4725a4,
+ 0x12438846,
+ 0x2905a38c,
+ 0xd9429436,
+ 0xbecfde4e,
+ 0xdc18344f,
+ 0x780e4b3e,
+ 0xcf4e4280,
+ 0x1f08118a,
+ 0xafded7b8,
+ 0xcc1428ed,
+ 0x1631d22f,
+ 0x847905bf,
+ 0x3e20f31d,
+ 0x3689459a,
+ 0xd108e1d8,
+ 0x71143d4a,
+ 0xe67af94a,
+ 0x739fb89f,
+ 0xc2314a2e,
+ 0x87864926,
+ 0x415d752f,
+ 0xf72fb182,
+ 0x3ffa37d4,
+ 0xcd987848,
+ 0x2f20ac8f,
+ 0xf2f0575,
+ 0x7d8726e1,
+ 0x740bf39,
+ 0x26af4735,
+}
diff --git a/plumbing/format/pktline/encoder.go b/plumbing/format/pktline/encoder.go
index eae85cc..6d40979 100644
--- a/plumbing/format/pktline/encoder.go
+++ b/plumbing/format/pktline/encoder.go
@@ -17,6 +17,9 @@ type Encoder struct {
const (
// MaxPayloadSize is the maximum payload size of a pkt-line in bytes.
MaxPayloadSize = 65516
+
+ // OversizePayloadMax is the maximum length of an oversize pkt-line payload, accepted for compatibility with the canonical Git implementation.
+ OversizePayloadMax = 65520
)
var (
diff --git a/plumbing/format/pktline/scanner.go b/plumbing/format/pktline/scanner.go
index 4af254f..99aab46 100644
--- a/plumbing/format/pktline/scanner.go
+++ b/plumbing/format/pktline/scanner.go
@@ -97,7 +97,7 @@ func (s *Scanner) readPayloadLen() (int, error) {
return 0, nil
case n <= lenSize:
return 0, ErrInvalidPktLen
- case n > MaxPayloadSize+lenSize:
+ case n > OversizePayloadMax+lenSize:
return 0, ErrInvalidPktLen
default:
return n - lenSize, nil
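
A pkt-line's first four bytes are the ASCII hex length of the whole line, including those four bytes, so the largest spec-conforming line is "fff0": 65520 bytes total, 65516 of payload (MaxPayloadSize). The relaxed check keeps the spec limit for writers but lets the scanner accept declared payloads up to OversizePayloadMax, matching the test cases below ("fff1" through "fff4" valid, "fff5" and beyond invalid). A worked check; payloadLen is a hypothetical helper that skips the flush-pkt and short-length cases the real readPayloadLen also handles:

package main

import (
    "fmt"
    "strconv"
)

const lenSize = 4 // the four hex digits count toward the declared length

func payloadLen(hexLen string) (int, bool) {
    n, err := strconv.ParseInt(hexLen, 16, 32)
    if err != nil || int(n) > 65520+lenSize { // OversizePayloadMax + lenSize
        return 0, false
    }
    return int(n) - lenSize, true
}

func main() {
    for _, l := range []string{"fff0", "fff4", "fff5"} {
        p, ok := payloadLen(l)
        fmt.Println(l, p, ok) // fff0: 65516 true, fff4: 65520 true, fff5: 0 false
    }
}
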
diff --git a/plumbing/format/pktline/scanner_test.go b/plumbing/format/pktline/scanner_test.go
index 048ea38..9660c2d 100644
--- a/plumbing/format/pktline/scanner_test.go
+++ b/plumbing/format/pktline/scanner_test.go
@@ -20,7 +20,7 @@ func (s *SuiteScanner) TestInvalid(c *C) {
for _, test := range [...]string{
"0001", "0002", "0003", "0004",
"0001asdfsadf", "0004foo",
- "fff1", "fff2",
+ "fff5", "ffff",
"gorka",
"0", "003",
" 5a", "5 a", "5 \n",
@@ -34,6 +34,20 @@ func (s *SuiteScanner) TestInvalid(c *C) {
}
}
+func (s *SuiteScanner) TestDecodeOversizePktLines(c *C) {
+ for _, test := range [...]string{
+ "fff1" + strings.Repeat("a", 0xfff1),
+ "fff2" + strings.Repeat("a", 0xfff2),
+ "fff3" + strings.Repeat("a", 0xfff3),
+ "fff4" + strings.Repeat("a", 0xfff4),
+ } {
+ r := strings.NewReader(test)
+ sc := pktline.NewScanner(r)
+ _ = sc.Scan()
+ c.Assert(sc.Err(), IsNil)
+ }
+}
+
func (s *SuiteScanner) TestEmptyReader(c *C) {
r := strings.NewReader("")
sc := pktline.NewScanner(r)
diff --git a/plumbing/memory.go b/plumbing/memory.go
index 51cbb54..b8e1e1b 100644
--- a/plumbing/memory.go
+++ b/plumbing/memory.go
@@ -14,10 +14,10 @@ type MemoryObject struct {
sz int64
}
-// Hash return the object Hash, the hash is calculated on-the-fly the first
-// time is called, the subsequent calls the same Hash is returned even if the
-// type or the content has changed. The Hash is only generated if the size of
-// the content is exactly the Object.Size
+// Hash returns the object Hash. The hash is calculated on-the-fly the first
+// time it is called; all subsequent calls return the same Hash even if the
+// type or the content have changed. The Hash is only generated if the size
+// of the content is exactly the object size.
func (o *MemoryObject) Hash() Hash {
if o.h == ZeroHash && int64(len(o.cont)) == o.sz {
o.h = ComputeHash(o.t, o.cont)
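
The reworded doc comment describes a caveat worth spelling out: the hash is memoized, and it is only computed once the declared size matches the buffered content. A hedged sketch of the consequence; hashMemoization is an illustrative name:

func hashMemoization() {
    obj := &plumbing.MemoryObject{}
    obj.SetType(plumbing.BlobObject)
    obj.SetSize(3)
    obj.Write([]byte("foo"))

    h1 := obj.Hash() // computed here: len(content) == declared size

    obj.Write([]byte("bar"))
    h2 := obj.Hash() // memoized: returns h1 even though the content grew

    fmt.Println(h1 == h2) // true
}
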
diff --git a/plumbing/object/blob.go b/plumbing/object/blob.go
index 2608477..f376baa 100644
--- a/plumbing/object/blob.go
+++ b/plumbing/object/blob.go
@@ -67,7 +67,7 @@ func (b *Blob) Decode(o plumbing.EncodedObject) error {
}
// Encode transforms a Blob into a plumbing.EncodedObject.
-func (b *Blob) Encode(o plumbing.EncodedObject) error {
+func (b *Blob) Encode(o plumbing.EncodedObject) (err error) {
o.SetType(plumbing.BlobObject)
w, err := o.Writer()
diff --git a/plumbing/object/blob_test.go b/plumbing/object/blob_test.go
index 5ed9de0..181436d 100644
--- a/plumbing/object/blob_test.go
+++ b/plumbing/object/blob_test.go
@@ -1,6 +1,7 @@
package object
import (
+ "bytes"
"io"
"io/ioutil"
@@ -88,8 +89,26 @@ func (s *BlobsSuite) TestBlobIter(c *C) {
}
c.Assert(err, IsNil)
- c.Assert(b, DeepEquals, blobs[i])
- i += 1
+ c.Assert(b.ID(), Equals, blobs[i].ID())
+ c.Assert(b.Size, Equals, blobs[i].Size)
+ c.Assert(b.Type(), Equals, blobs[i].Type())
+
+ r1, err := b.Reader()
+ c.Assert(err, IsNil)
+
+ b1, err := ioutil.ReadAll(r1)
+ c.Assert(err, IsNil)
+ c.Assert(r1.Close(), IsNil)
+
+ r2, err := blobs[i].Reader()
+ c.Assert(err, IsNil)
+
+ b2, err := ioutil.ReadAll(r2)
+ c.Assert(err, IsNil)
+ c.Assert(r2.Close(), IsNil)
+
+ c.Assert(bytes.Compare(b1, b2), Equals, 0)
+ i++
}
iter.Close()
diff --git a/plumbing/object/change.go b/plumbing/object/change.go
index 729ff5a..a1b4c27 100644
--- a/plumbing/object/change.go
+++ b/plumbing/object/change.go
@@ -2,6 +2,7 @@ package object
import (
"bytes"
+ "context"
"fmt"
"strings"
@@ -81,7 +82,15 @@ func (c *Change) String() string {
// Patch returns a Patch with all the file changes in chunks. This
// representation can be used to create several diff outputs.
func (c *Change) Patch() (*Patch, error) {
- return getPatch("", c)
+ return c.PatchContext(context.Background())
+}
+
+// PatchContext returns a Patch with all the file changes in chunks. This
+// representation can be used to create several diff outputs.
+// If the context expires, a non-nil error will be returned.
+// The provided context must be non-nil.
+func (c *Change) PatchContext(ctx context.Context) (*Patch, error) {
+ return getPatchContext(ctx, "", c)
}
func (c *Change) name() string {
@@ -136,5 +145,13 @@ func (c Changes) String() string {
// Patch returns a Patch with all the changes in chunks. This
// representation can be used to create several diff outputs.
func (c Changes) Patch() (*Patch, error) {
- return getPatch("", c...)
+ return c.PatchContext(context.Background())
+}
+
+// PatchContext returns a Patch with all the changes in chunks. This
+// representation can be used to create several diff outputs.
+// If the context expires, a non-nil error will be returned.
+// The provided context must be non-nil.
+func (c Changes) PatchContext(ctx context.Context) (*Patch, error) {
+ return getPatchContext(ctx, "", c...)
}
diff --git a/plumbing/object/change_adaptor_test.go b/plumbing/object/change_adaptor_test.go
index 803c3b8..c7c003b 100644
--- a/plumbing/object/change_adaptor_test.go
+++ b/plumbing/object/change_adaptor_test.go
@@ -4,6 +4,7 @@ import (
"sort"
"gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/cache"
"gopkg.in/src-d/go-git.v4/plumbing/filemode"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/storage/filesystem"
@@ -23,8 +24,7 @@ type ChangeAdaptorSuite struct {
func (s *ChangeAdaptorSuite) SetUpSuite(c *C) {
s.Suite.SetUpSuite(c)
s.Fixture = fixtures.Basic().One()
- sto, err := filesystem.NewStorage(s.Fixture.DotGit())
- c.Assert(err, IsNil)
+ sto := filesystem.NewStorage(s.Fixture.DotGit(), cache.NewObjectLRUDefault())
s.Storer = sto
}
diff --git a/plumbing/object/change_test.go b/plumbing/object/change_test.go
index 7036fa3..e2f0a23 100644
--- a/plumbing/object/change_test.go
+++ b/plumbing/object/change_test.go
@@ -1,9 +1,11 @@
package object
import (
+ "context"
"sort"
"gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/cache"
"gopkg.in/src-d/go-git.v4/plumbing/filemode"
"gopkg.in/src-d/go-git.v4/plumbing/format/diff"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
@@ -24,8 +26,7 @@ func (s *ChangeSuite) SetUpSuite(c *C) {
s.Suite.SetUpSuite(c)
s.Fixture = fixtures.ByURL("https://github.com/src-d/go-git.git").
ByTag(".git").One()
- sto, err := filesystem.NewStorage(s.Fixture.DotGit())
- c.Assert(err, IsNil)
+ sto := filesystem.NewStorage(s.Fixture.DotGit(), cache.NewObjectLRUDefault())
s.Storer = sto
}
@@ -82,6 +83,12 @@ func (s *ChangeSuite) TestInsert(c *C) {
c.Assert(len(p.FilePatches()[0].Chunks()), Equals, 1)
c.Assert(p.FilePatches()[0].Chunks()[0].Type(), Equals, diff.Add)
+ p, err = change.PatchContext(context.Background())
+ c.Assert(err, IsNil)
+ c.Assert(len(p.FilePatches()), Equals, 1)
+ c.Assert(len(p.FilePatches()[0].Chunks()), Equals, 1)
+ c.Assert(p.FilePatches()[0].Chunks()[0].Type(), Equals, diff.Add)
+
str := change.String()
c.Assert(str, Equals, "<Action: Insert, Path: examples/clone/main.go>")
}
@@ -134,6 +141,12 @@ func (s *ChangeSuite) TestDelete(c *C) {
c.Assert(len(p.FilePatches()[0].Chunks()), Equals, 1)
c.Assert(p.FilePatches()[0].Chunks()[0].Type(), Equals, diff.Delete)
+ p, err = change.PatchContext(context.Background())
+ c.Assert(err, IsNil)
+ c.Assert(len(p.FilePatches()), Equals, 1)
+ c.Assert(len(p.FilePatches()[0].Chunks()), Equals, 1)
+ c.Assert(p.FilePatches()[0].Chunks()[0].Type(), Equals, diff.Delete)
+
str := change.String()
c.Assert(str, Equals, "<Action: Delete, Path: utils/difftree/difftree.go>")
}
@@ -206,6 +219,18 @@ func (s *ChangeSuite) TestModify(c *C) {
c.Assert(p.FilePatches()[0].Chunks()[5].Type(), Equals, diff.Add)
c.Assert(p.FilePatches()[0].Chunks()[6].Type(), Equals, diff.Equal)
+ p, err = change.PatchContext(context.Background())
+ c.Assert(err, IsNil)
+ c.Assert(len(p.FilePatches()), Equals, 1)
+ c.Assert(len(p.FilePatches()[0].Chunks()), Equals, 7)
+ c.Assert(p.FilePatches()[0].Chunks()[0].Type(), Equals, diff.Equal)
+ c.Assert(p.FilePatches()[0].Chunks()[1].Type(), Equals, diff.Delete)
+ c.Assert(p.FilePatches()[0].Chunks()[2].Type(), Equals, diff.Add)
+ c.Assert(p.FilePatches()[0].Chunks()[3].Type(), Equals, diff.Equal)
+ c.Assert(p.FilePatches()[0].Chunks()[4].Type(), Equals, diff.Delete)
+ c.Assert(p.FilePatches()[0].Chunks()[5].Type(), Equals, diff.Add)
+ c.Assert(p.FilePatches()[0].Chunks()[6].Type(), Equals, diff.Equal)
+
str := change.String()
c.Assert(str, Equals, "<Action: Modify, Path: utils/difftree/difftree.go>")
}
@@ -228,8 +253,7 @@ func (s *ChangeSuite) TestNoFileFilemodes(c *C) {
s.Suite.SetUpSuite(c)
f := fixtures.ByURL("https://github.com/git-fixtures/submodule.git").One()
- sto, err := filesystem.NewStorage(f.DotGit())
- c.Assert(err, IsNil)
+ sto := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault())
iter, err := sto.IterEncodedObjects(plumbing.AnyObject)
c.Assert(err, IsNil)
@@ -367,3 +391,39 @@ func (s *ChangeSuite) TestChangesSort(c *C) {
sort.Sort(changes)
c.Assert(changes.String(), Equals, expected)
}
+
+func (s *ChangeSuite) TestCancel(c *C) {
+ // Commit a5078b19f08f63e7948abd0a5e2fb7d319d3a565 of the go-git
+ // fixture inserted "examples/clone/main.go".
+ //
+ // On that commit, the "examples/clone" tree is
+ // 6efca3ff41cab651332f9ebc0c96bb26be809615
+ //
+ // and the "examples/colone/main.go" is
+ // f95dc8f7923add1a8b9f72ecb1e8db1402de601a
+
+ path := "examples/clone/main.go"
+ name := "main.go"
+ mode := filemode.Regular
+ blob := plumbing.NewHash("f95dc8f7923add1a8b9f72ecb1e8db1402de601a")
+ tree := plumbing.NewHash("6efca3ff41cab651332f9ebc0c96bb26be809615")
+
+ change := &Change{
+ From: empty,
+ To: ChangeEntry{
+ Name: path,
+ Tree: s.tree(c, tree),
+ TreeEntry: TreeEntry{
+ Name: name,
+ Mode: mode,
+ Hash: blob,
+ },
+ },
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel()
+ p, err := change.PatchContext(ctx)
+ c.Assert(p, IsNil)
+ c.Assert(err, ErrorMatches, "operation canceled")
+}
diff --git a/plumbing/object/commit.go b/plumbing/object/commit.go
index a317714..e254342 100644
--- a/plumbing/object/commit.go
+++ b/plumbing/object/commit.go
@@ -3,6 +3,7 @@ package object
import (
"bufio"
"bytes"
+ "context"
"errors"
"fmt"
"io"
@@ -16,8 +17,9 @@ import (
)
const (
- beginpgp string = "-----BEGIN PGP SIGNATURE-----"
- endpgp string = "-----END PGP SIGNATURE-----"
+ beginpgp string = "-----BEGIN PGP SIGNATURE-----"
+ endpgp string = "-----END PGP SIGNATURE-----"
+ headerpgp string = "gpgsig"
)
// Hash represents the hash of an object
@@ -75,7 +77,8 @@ func (c *Commit) Tree() (*Tree, error) {
}
// Patch returns the Patch between the actual commit and the provided one.
-func (c *Commit) Patch(to *Commit) (*Patch, error) {
+// An error will be returned if the context expires. The provided context must be non-nil.
+func (c *Commit) PatchContext(ctx context.Context, to *Commit) (*Patch, error) {
fromTree, err := c.Tree()
if err != nil {
return nil, err
@@ -86,7 +89,12 @@ func (c *Commit) Patch(to *Commit) (*Patch, error) {
return nil, err
}
- return fromTree.Patch(toTree)
+ return fromTree.PatchContext(ctx, toTree)
+}
+
+// Patch returns the Patch between the actual commit and the provided one.
+func (c *Commit) Patch(to *Commit) (*Patch, error) {
+ return c.PatchContext(context.Background(), to)
}
// Parents return a CommitIter to the parent Commits.
@@ -174,23 +182,13 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
}
if pgpsig {
- // Check if it's the end of a PGP signature.
- if bytes.Contains(line, []byte(endpgp)) {
- c.PGPSignature += endpgp + "\n"
- pgpsig = false
- } else {
- // Trim the left padding.
+ if len(line) > 0 && line[0] == ' ' {
line = bytes.TrimLeft(line, " ")
c.PGPSignature += string(line)
+ continue
+ } else {
+ pgpsig = false
}
- continue
- }
-
- // Check if it's the beginning of a PGP signature.
- if bytes.Contains(line, []byte(beginpgp)) {
- c.PGPSignature += beginpgp + "\n"
- pgpsig = true
- continue
}
if !message {
@@ -201,15 +199,24 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
}
split := bytes.SplitN(line, []byte{' '}, 2)
+
+ var data []byte
+ if len(split) == 2 {
+ data = split[1]
+ }
+
switch string(split[0]) {
case "tree":
- c.TreeHash = plumbing.NewHash(string(split[1]))
+ c.TreeHash = plumbing.NewHash(string(data))
case "parent":
- c.ParentHashes = append(c.ParentHashes, plumbing.NewHash(string(split[1])))
+ c.ParentHashes = append(c.ParentHashes, plumbing.NewHash(string(data)))
case "author":
- c.Author.Decode(split[1])
+ c.Author.Decode(data)
case "committer":
- c.Committer.Decode(split[1])
+ c.Committer.Decode(data)
+ case headerpgp:
+ c.PGPSignature += string(data) + "\n"
+ pgpsig = true
}
} else {
c.Message += string(line)
@@ -226,7 +233,7 @@ func (b *Commit) Encode(o plumbing.EncodedObject) error {
return b.encode(o, true)
}
-func (b *Commit) encode(o plumbing.EncodedObject, includeSig bool) error {
+func (b *Commit) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
o.SetType(plumbing.CommitObject)
w, err := o.Writer()
if err != nil {
@@ -262,17 +269,18 @@ func (b *Commit) encode(o plumbing.EncodedObject, includeSig bool) error {
}
if b.PGPSignature != "" && includeSig {
- if _, err = fmt.Fprint(w, "pgpsig"); err != nil {
+ if _, err = fmt.Fprint(w, "\n"+headerpgp+" "); err != nil {
return err
}
- // Split all the signature lines and write with a left padding and
- // newline at the end.
- lines := strings.Split(b.PGPSignature, "\n")
- for _, line := range lines {
- if _, err = fmt.Fprintf(w, " %s\n", line); err != nil {
- return err
- }
+ // Split all the signature lines and re-write with a left padding and
+ // newline. Use join for this so it's clear that a newline should not be
+ // added after this section, as it will be added when the message is
+ // printed.
+ signature := strings.TrimSuffix(b.PGPSignature, "\n")
+ lines := strings.Split(signature, "\n")
+ if _, err = fmt.Fprint(w, strings.Join(lines, "\n ")); err != nil {
+ return err
}
}
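
What the new encoder emits, concretely: a gpgsig header whose first line follows the header name and whose continuation lines carry a one-space left padding, which is exactly the shape the reworked decoder consumes (while pgpsig is set, any line starting with a space is trimmed and appended). An illustrative serialized commit; hashes, identities, and the signature body are made up, and the line between BEGIN and the base64 block is a single space, the armor's blank line:

const encodedCommit = `tree 9dea2395f5403188298c1dabe8bdafe562c491e3
parent 918c48b83bd081e863dbe1b80f8998f058cd8294
author Jane Doe <jane@example.com> 1257894000 +0100
committer Jane Doe <jane@example.com> 1257894000 +0100
gpgsig -----BEGIN PGP SIGNATURE-----
 
 wsBcBAABCAAQBQJTZbQlCRBK7hj4Ov3rIwAAdHIIAc9mDIeV5CYEsp9cHUHfzF5f
 -----END PGP SIGNATURE-----

commit message body
`

Because the signature lines are joined rather than printed one by one, no trailing newline follows the last signature line; the blank line that separates headers from the message supplies it, which is what the "index out of range" regression test below exercises.
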
diff --git a/plumbing/object/commit_test.go b/plumbing/object/commit_test.go
index 191b14d..c9acf42 100644
--- a/plumbing/object/commit_test.go
+++ b/plumbing/object/commit_test.go
@@ -2,11 +2,13 @@ package object
import (
"bytes"
+ "context"
"io"
"strings"
"time"
"gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/cache"
. "gopkg.in/check.v1"
"gopkg.in/src-d/go-git-fixtures.v3"
@@ -132,6 +134,59 @@ Binary files /dev/null and b/binary.jpg differ
c.Assert(buf.String(), Equals, patch.String())
}
+func (s *SuiteCommit) TestPatchContext(c *C) {
+ from := s.commit(c, plumbing.NewHash("918c48b83bd081e863dbe1b80f8998f058cd8294"))
+ to := s.commit(c, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"))
+
+ patch, err := from.PatchContext(context.Background(), to)
+ c.Assert(err, IsNil)
+
+ buf := bytes.NewBuffer(nil)
+ err = patch.Encode(buf)
+ c.Assert(err, IsNil)
+
+ c.Assert(buf.String(), Equals, `diff --git a/vendor/foo.go b/vendor/foo.go
+new file mode 100644
+index 0000000000000000000000000000000000000000..9dea2395f5403188298c1dabe8bdafe562c491e3
+--- /dev/null
++++ b/vendor/foo.go
+@@ -0,0 +1,7 @@
++package main
++
++import "fmt"
++
++func main() {
++ fmt.Println("Hello, playground")
++}
+`)
+ c.Assert(buf.String(), Equals, patch.String())
+
+ from = s.commit(c, plumbing.NewHash("b8e471f58bcbca63b07bda20e428190409c2db47"))
+ to = s.commit(c, plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9"))
+
+ patch, err = from.PatchContext(context.Background(), to)
+ c.Assert(err, IsNil)
+
+ buf.Reset()
+ err = patch.Encode(buf)
+ c.Assert(err, IsNil)
+
+ c.Assert(buf.String(), Equals, `diff --git a/CHANGELOG b/CHANGELOG
+deleted file mode 100644
+index d3ff53e0564a9f87d8e84b6e28e5060e517008aa..0000000000000000000000000000000000000000
+--- a/CHANGELOG
++++ /dev/null
+@@ -1 +0,0 @@
+-Initial changelog
+diff --git a/binary.jpg b/binary.jpg
+new file mode 100644
+index 0000000000000000000000000000000000000000..d5c0f4ab811897cadf03aec358ae60d21f91c50d
+Binary files /dev/null and b/binary.jpg differ
+`)
+
+ c.Assert(buf.String(), Equals, patch.String())
+}
+
func (s *SuiteCommit) TestCommitEncodeDecodeIdempotent(c *C) {
ts, err := time.Parse(time.RFC3339, "2006-01-02T15:04:05-07:00")
c.Assert(err, IsNil)
@@ -193,8 +248,7 @@ func (s *SuiteCommit) TestStringMultiLine(c *C) {
hash := plumbing.NewHash("e7d896db87294e33ca3202e536d4d9bb16023db3")
f := fixtures.ByURL("https://github.com/src-d/go-git.git").One()
- sto, err := filesystem.NewStorage(f.DotGit())
- c.Assert(err, IsNil)
+ sto := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault())
o, err := sto.EncodedObject(plumbing.CommitObject, hash)
c.Assert(err, IsNil)
@@ -270,6 +324,54 @@ RUysgqjcpT8+iQM1PblGfHR4XAhuOqN5Fx06PSaFZhqvWFezJ28/CLyX5q+oIVk=
err = decoded.Decode(encoded)
c.Assert(err, IsNil)
c.Assert(decoded.PGPSignature, Equals, pgpsignature)
+
+ // signature with extra empty line, it caused "index out of range" when
+ // parsing it
+
+ pgpsignature2 := "\n" + pgpsignature
+
+ commit.PGPSignature = pgpsignature2
+ encoded = &plumbing.MemoryObject{}
+ decoded = &Commit{}
+
+ err = commit.Encode(encoded)
+ c.Assert(err, IsNil)
+
+ err = decoded.Decode(encoded)
+ c.Assert(err, IsNil)
+ c.Assert(decoded.PGPSignature, Equals, pgpsignature2)
+
+ // signature in author name
+
+ commit.PGPSignature = ""
+ commit.Author.Name = beginpgp
+ encoded = &plumbing.MemoryObject{}
+ decoded = &Commit{}
+
+ err = commit.Encode(encoded)
+ c.Assert(err, IsNil)
+
+ err = decoded.Decode(encoded)
+ c.Assert(err, IsNil)
+ c.Assert(decoded.PGPSignature, Equals, "")
+ c.Assert(decoded.Author.Name, Equals, beginpgp)
+
+ // broken signature
+
+ commit.PGPSignature = beginpgp + "\n" +
+ "some\n" +
+ "trash\n" +
+ endpgp +
+ "text\n"
+ encoded = &plumbing.MemoryObject{}
+ decoded = &Commit{}
+
+ err = commit.Encode(encoded)
+ c.Assert(err, IsNil)
+
+ err = decoded.Decode(encoded)
+ c.Assert(err, IsNil)
+ c.Assert(decoded.PGPSignature, Equals, commit.PGPSignature)
}
func (s *SuiteCommit) TestStat(c *C) {
@@ -363,3 +465,33 @@ sYyf9RfOnw/KUFAQbdtvLx3ikODQC+D3KBtuKI9ISHQfgw==
_, ok := e.Identities["Sunny <me@darkowlzz.space>"]
c.Assert(ok, Equals, true)
}
+
+func (s *SuiteCommit) TestPatchCancel(c *C) {
+ from := s.commit(c, plumbing.NewHash("918c48b83bd081e863dbe1b80f8998f058cd8294"))
+ to := s.commit(c, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"))
+
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel()
+ patch, err := from.PatchContext(ctx, to)
+ c.Assert(patch, IsNil)
+ c.Assert(err, ErrorMatches, "operation canceled")
+
+}
+
+func (s *SuiteCommit) TestMalformedHeader(c *C) {
+ encoded := &plumbing.MemoryObject{}
+ decoded := &Commit{}
+ commit := *s.Commit
+
+ commit.PGPSignature = "\n"
+ commit.Author.Name = "\n"
+ commit.Author.Email = "\n"
+ commit.Committer.Name = "\n"
+ commit.Committer.Email = "\n"
+
+ err := commit.Encode(encoded)
+ c.Assert(err, IsNil)
+
+ err = decoded.Decode(encoded)
+ c.Assert(err, IsNil)
+}
diff --git a/plumbing/object/commit_walker_bfs.go b/plumbing/object/commit_walker_bfs.go
new file mode 100644
index 0000000..aef1cf2
--- /dev/null
+++ b/plumbing/object/commit_walker_bfs.go
@@ -0,0 +1,100 @@
+package object
+
+import (
+ "io"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/storer"
+)
+
+type bfsCommitIterator struct {
+ seenExternal map[plumbing.Hash]bool
+ seen map[plumbing.Hash]bool
+ queue []*Commit
+}
+
+// NewCommitIterBSF returns a CommitIter that walks the commit history,
+// starting at the given commit and visiting its parents in breadth-first
+// order. The given callback will be called for each visited commit. Each
+// commit will be visited only once. If the callback returns an error, walking
+// will stop and the error will be returned. Other errors might be returned if
+// the history cannot be traversed (e.g. missing objects). Ignore allows
+// skipping some commits from being iterated.
+func NewCommitIterBSF(
+ c *Commit,
+ seenExternal map[plumbing.Hash]bool,
+ ignore []plumbing.Hash,
+) CommitIter {
+ seen := make(map[plumbing.Hash]bool)
+ for _, h := range ignore {
+ seen[h] = true
+ }
+
+ return &bfsCommitIterator{
+ seenExternal: seenExternal,
+ seen: seen,
+ queue: []*Commit{c},
+ }
+}
+
+func (w *bfsCommitIterator) appendHash(store storer.EncodedObjectStorer, h plumbing.Hash) error {
+ if w.seen[h] || w.seenExternal[h] {
+ return nil
+ }
+ c, err := GetCommit(store, h)
+ if err != nil {
+ return err
+ }
+ w.queue = append(w.queue, c)
+ return nil
+}
+
+func (w *bfsCommitIterator) Next() (*Commit, error) {
+ var c *Commit
+ for {
+ if len(w.queue) == 0 {
+ return nil, io.EOF
+ }
+ c = w.queue[0]
+ w.queue = w.queue[1:]
+
+ if w.seen[c.Hash] || w.seenExternal[c.Hash] {
+ continue
+ }
+
+ w.seen[c.Hash] = true
+
+ for _, h := range c.ParentHashes {
+ err := w.appendHash(c.s, h)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return c, nil
+ }
+}
+
+func (w *bfsCommitIterator) ForEach(cb func(*Commit) error) error {
+ for {
+ c, err := w.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return err
+ }
+
+ err = cb(c)
+ if err == storer.ErrStop {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (w *bfsCommitIterator) Close() {}
diff --git a/plumbing/object/commit_walker_ctime.go b/plumbing/object/commit_walker_ctime.go
new file mode 100644
index 0000000..0191614
--- /dev/null
+++ b/plumbing/object/commit_walker_ctime.go
@@ -0,0 +1,103 @@
+package object
+
+import (
+ "io"
+
+ "github.com/emirpasic/gods/trees/binaryheap"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/storer"
+)
+
+type commitIteratorByCTime struct {
+ seenExternal map[plumbing.Hash]bool
+ seen map[plumbing.Hash]bool
+ heap *binaryheap.Heap
+}
+
+// NewCommitIterCTime returns a CommitIter that walks the commit history,
+// starting at the given commit and visiting its parents while preserving
+// committer time order; this appears to be the closest order to `git log`.
+// The given callback will be called for each visited commit. Each commit will
+// be visited only once. If the callback returns an error, walking will stop
+// and the error will be returned. Other errors might be returned if the
+// history cannot be traversed (e.g. missing objects). Ignore allows skipping
+// some commits from being iterated.
+func NewCommitIterCTime(
+ c *Commit,
+ seenExternal map[plumbing.Hash]bool,
+ ignore []plumbing.Hash,
+) CommitIter {
+ seen := make(map[plumbing.Hash]bool)
+ for _, h := range ignore {
+ seen[h] = true
+ }
+
+ heap := binaryheap.NewWith(func(a, b interface{}) int {
+ if a.(*Commit).Committer.When.Before(b.(*Commit).Committer.When) {
+ return 1
+ }
+ return -1
+ })
+ heap.Push(c)
+
+ return &commitIteratorByCTime{
+ seenExternal: seenExternal,
+ seen: seen,
+ heap: heap,
+ }
+}
+
+func (w *commitIteratorByCTime) Next() (*Commit, error) {
+ var c *Commit
+ for {
+ cIn, ok := w.heap.Pop()
+ if !ok {
+ return nil, io.EOF
+ }
+ c = cIn.(*Commit)
+
+ if w.seen[c.Hash] || w.seenExternal[c.Hash] {
+ continue
+ }
+
+ w.seen[c.Hash] = true
+
+ for _, h := range c.ParentHashes {
+ if w.seen[h] || w.seenExternal[h] {
+ continue
+ }
+ pc, err := GetCommit(c.s, h)
+ if err != nil {
+ return nil, err
+ }
+ w.heap.Push(pc)
+ }
+
+ return c, nil
+ }
+}
+
+func (w *commitIteratorByCTime) ForEach(cb func(*Commit) error) error {
+ for {
+ c, err := w.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return err
+ }
+
+ err = cb(c)
+ if err == storer.ErrStop {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (w *commitIteratorByCTime) Close() {}
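
The comparator above is deliberately inverted: gods' binaryheap pops the smallest element, so returning 1 when a's committer time is before b's makes newer commits compare as smaller and come out first. The trick in isolation, runnable:

package main

import (
    "fmt"
    "time"

    "github.com/emirpasic/gods/trees/binaryheap"
)

func main() {
    heap := binaryheap.NewWith(func(a, b interface{}) int {
        if a.(time.Time).Before(b.(time.Time)) {
            return 1 // a is older: push it toward the bottom
        }
        return -1
    })

    heap.Push(time.Date(2015, 3, 31, 13, 44, 52, 0, time.UTC))
    heap.Push(time.Date(2015, 4, 5, 23, 30, 47, 0, time.UTC))
    heap.Push(time.Date(2015, 3, 31, 13, 42, 21, 0, time.UTC))

    for v, ok := heap.Pop(); ok; v, ok = heap.Pop() {
        fmt.Println(v) // newest first: 2015-04-05, then the two 2015-03-31
    }
}
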
diff --git a/plumbing/object/commit_walker_file.go b/plumbing/object/commit_walker_file.go
new file mode 100644
index 0000000..84e738a
--- /dev/null
+++ b/plumbing/object/commit_walker_file.go
@@ -0,0 +1,115 @@
+package object
+
+import (
+ "gopkg.in/src-d/go-git.v4/plumbing/storer"
+ "io"
+)
+
+type commitFileIter struct {
+ fileName string
+ sourceIter CommitIter
+ currentCommit *Commit
+}
+
+// NewCommitFileIterFromIter returns a commit iterator that performs diffTree
+// between successive trees returned by the given commit iterator. Its purpose
+// is to find the commits that explain how the files matching the path came to be.
+func NewCommitFileIterFromIter(fileName string, commitIter CommitIter) CommitIter {
+ iterator := new(commitFileIter)
+ iterator.sourceIter = commitIter
+ iterator.fileName = fileName
+ return iterator
+}
+
+func (c *commitFileIter) Next() (*Commit, error) {
+ if c.currentCommit == nil {
+ var err error
+ c.currentCommit, err = c.sourceIter.Next()
+ if err != nil {
+ return nil, err
+ }
+ }
+ commit, commitErr := c.getNextFileCommit()
+
+ // Setting current-commit to nil to prevent unwanted states when errors are raised
+ if commitErr != nil {
+ c.currentCommit = nil
+ }
+ return commit, commitErr
+}
+
+func (c *commitFileIter) getNextFileCommit() (*Commit, error) {
+ for {
+ // Parent-commit can be nil if the current-commit is the initial commit
+ parentCommit, parentCommitErr := c.sourceIter.Next()
+ if parentCommitErr != nil {
+ // If the parent-commit is beyond the initial commit, keep it nil
+ if parentCommitErr != io.EOF {
+ return nil, parentCommitErr
+ }
+ parentCommit = nil
+ }
+
+ // Fetch the trees of the current and parent commits
+ currentTree, currTreeErr := c.currentCommit.Tree()
+ if currTreeErr != nil {
+ return nil, currTreeErr
+ }
+
+ var parentTree *Tree
+ if parentCommit != nil {
+ var parentTreeErr error
+ parentTree, parentTreeErr = parentCommit.Tree()
+ if parentTreeErr != nil {
+ return nil, parentTreeErr
+ }
+ }
+
+ // Find diff between current and parent trees
+ changes, diffErr := DiffTree(currentTree, parentTree)
+ if diffErr != nil {
+ return nil, diffErr
+ }
+
+ foundChangeForFile := false
+ for _, change := range changes {
+ if change.name() == c.fileName {
+ foundChangeForFile = true
+ break
+ }
+ }
+
+ // Store the current commit in case a change is found, and
+ // update the current commit for the next iteration
+ prevCommit := c.currentCommit
+ c.currentCommit = parentCommit
+
+ if foundChangeForFile {
+ return prevCommit, nil
+ }
+
+ // If no change was found and the parent-commit is beyond the initial commit, return with EOF
+ if parentCommit == nil {
+ return nil, io.EOF
+ }
+ }
+}
+
+func (c *commitFileIter) ForEach(cb func(*Commit) error) error {
+ for {
+ commit, nextErr := c.Next()
+ if nextErr != nil {
+ return nextErr
+ }
+ err := cb(commit)
+ if err == storer.ErrStop {
+ return nil
+ } else if err != nil {
+ return err
+ }
+ }
+}
+
+func (c *commitFileIter) Close() {
+ c.sourceIter.Close()
+}
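
Chained onto any of the other iterators, this gives a rough equivalent of `git log -- <path>`. A usage sketch; logFile is a hypothetical helper, head is assumed to be a *Commit fetched elsewhere, and note that this iterator's ForEach surfaces io.EOF at the end of history, so callers should treat it as a normal stop:

func logFile(head *Commit, path string) error {
    iter := NewCommitFileIterFromIter(path, NewCommitIterCTime(head, nil, nil))
    defer iter.Close()

    err := iter.ForEach(func(c *Commit) error {
        fmt.Println(c.Hash, c.Committer.When)
        return nil
    })
    if err == io.EOF { // end of history, not a failure
        return nil
    }
    return err
}
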
diff --git a/plumbing/object/commit_walker_test.go b/plumbing/object/commit_walker_test.go
index a27104e..9b0a260 100644
--- a/plumbing/object/commit_walker_test.go
+++ b/plumbing/object/commit_walker_test.go
@@ -132,3 +132,99 @@ func (s *CommitWalkerSuite) TestCommitPostIteratorWithIgnore(c *C) {
c.Assert(commit.Hash.String(), Equals, expected[i])
}
}
+
+func (s *CommitWalkerSuite) TestCommitCTimeIterator(c *C) {
+ commit := s.commit(c, s.Fixture.Head)
+
+ var commits []*Commit
+ NewCommitIterCTime(commit, nil, nil).ForEach(func(c *Commit) error {
+ commits = append(commits, c)
+ return nil
+ })
+
+ c.Assert(commits, HasLen, 8)
+
+ expected := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", // 2015-04-05T23:30:47+02:00
+ "918c48b83bd081e863dbe1b80f8998f058cd8294", // 2015-03-31T13:56:18+02:00
+ "af2d6a6954d532f8ffb47615169c8fdf9d383a1a", // 2015-03-31T13:51:51+02:00
+ "1669dce138d9b841a518c64b10914d88f5e488ea", // 2015-03-31T13:48:14+02:00
+ "a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69", // 2015-03-31T13:47:14+02:00
+ "35e85108805c84807bc66a02d91535e1e24b38b9", // 2015-03-31T13:46:24+02:00
+ "b8e471f58bcbca63b07bda20e428190409c2db47", // 2015-03-31T13:44:52+02:00
+ "b029517f6300c2da0f4b651b8642506cd6aaf45d", // 2015-03-31T13:42:21+02:00
+ }
+ for i, commit := range commits {
+ c.Assert(commit.Hash.String(), Equals, expected[i])
+ }
+}
+
+func (s *CommitWalkerSuite) TestCommitCTimeIteratorWithIgnore(c *C) {
+ commit := s.commit(c, s.Fixture.Head)
+
+ var commits []*Commit
+ NewCommitIterCTime(commit, nil, []plumbing.Hash{
+ plumbing.NewHash("af2d6a6954d532f8ffb47615169c8fdf9d383a1a"),
+ }).ForEach(func(c *Commit) error {
+ commits = append(commits, c)
+ return nil
+ })
+
+ c.Assert(commits, HasLen, 2)
+
+ expected := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
+ "918c48b83bd081e863dbe1b80f8998f058cd8294",
+ }
+ for i, commit := range commits {
+ c.Assert(commit.Hash.String(), Equals, expected[i])
+ }
+}
+
+func (s *CommitWalkerSuite) TestCommitBSFIterator(c *C) {
+ commit := s.commit(c, s.Fixture.Head)
+
+ var commits []*Commit
+ NewCommitIterBSF(commit, nil, nil).ForEach(func(c *Commit) error {
+ commits = append(commits, c)
+ return nil
+ })
+
+ c.Assert(commits, HasLen, 8)
+
+ expected := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
+ "918c48b83bd081e863dbe1b80f8998f058cd8294",
+ "af2d6a6954d532f8ffb47615169c8fdf9d383a1a",
+ "1669dce138d9b841a518c64b10914d88f5e488ea",
+ "35e85108805c84807bc66a02d91535e1e24b38b9",
+ "a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69",
+ "b029517f6300c2da0f4b651b8642506cd6aaf45d",
+ "b8e471f58bcbca63b07bda20e428190409c2db47",
+ }
+ for i, commit := range commits {
+ c.Assert(commit.Hash.String(), Equals, expected[i])
+ }
+}
+
+func (s *CommitWalkerSuite) TestCommitBSFIteratorWithIgnore(c *C) {
+ commit := s.commit(c, s.Fixture.Head)
+
+ var commits []*Commit
+ NewCommitIterBSF(commit, nil, []plumbing.Hash{
+ plumbing.NewHash("af2d6a6954d532f8ffb47615169c8fdf9d383a1a"),
+ }).ForEach(func(c *Commit) error {
+ commits = append(commits, c)
+ return nil
+ })
+
+ c.Assert(commits, HasLen, 2)
+
+ expected := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
+ "918c48b83bd081e863dbe1b80f8998f058cd8294",
+ }
+ for i, commit := range commits {
+ c.Assert(commit.Hash.String(), Equals, expected[i])
+ }
+}
diff --git a/plumbing/object/difftree.go b/plumbing/object/difftree.go
index ac58c4d..a30a29e 100644
--- a/plumbing/object/difftree.go
+++ b/plumbing/object/difftree.go
@@ -2,6 +2,7 @@ package object
import (
"bytes"
+ "context"
"gopkg.in/src-d/go-git.v4/utils/merkletrie"
"gopkg.in/src-d/go-git.v4/utils/merkletrie/noder"
@@ -10,6 +11,13 @@ import (
// DiffTree compares the content and mode of the blobs found via two
// tree objects.
func DiffTree(a, b *Tree) (Changes, error) {
+ return DiffTreeContext(context.Background(), a, b)
+}
+
+// DiffTreeContext compares the content and mode of the blobs found via two
+// tree objects. The provided context must be non-nil.
+// An error will be returned if the context expires.
+func DiffTreeContext(ctx context.Context, a, b *Tree) (Changes, error) {
from := NewTreeRootNode(a)
to := NewTreeRootNode(b)
@@ -17,8 +25,11 @@ func DiffTree(a, b *Tree) (Changes, error) {
return bytes.Equal(a.Hash(), b.Hash())
}
- merkletrieChanges, err := merkletrie.DiffTree(from, to, hashEqual)
+ merkletrieChanges, err := merkletrie.DiffTreeContext(ctx, from, to, hashEqual)
if err != nil {
+ if err == merkletrie.ErrCanceled {
+ return nil, ErrCanceled
+ }
return nil, err
}
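A short sketch of the new context-aware entry point; fromTree and toTree are assumed to be already-resolved *Tree values:

    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()
    changes, err := object.DiffTreeContext(ctx, fromTree, toTree)
    if err == object.ErrCanceled {
        // the walk was aborted because the context expired
    }
    _ = changes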
diff --git a/plumbing/object/difftree_test.go b/plumbing/object/difftree_test.go
index 40af8f2..4af8684 100644
--- a/plumbing/object/difftree_test.go
+++ b/plumbing/object/difftree_test.go
@@ -4,6 +4,7 @@ import (
"sort"
"gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/cache"
"gopkg.in/src-d/go-git.v4/plumbing/filemode"
"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
@@ -25,8 +26,7 @@ type DiffTreeSuite struct {
func (s *DiffTreeSuite) SetUpSuite(c *C) {
s.Suite.SetUpSuite(c)
s.Fixture = fixtures.Basic().One()
- sto, err := filesystem.NewStorage(s.Fixture.DotGit())
- c.Assert(err, IsNil)
+ sto := filesystem.NewStorage(s.Fixture.DotGit(), cache.NewObjectLRUDefault())
s.Storer = sto
s.cache = make(map[string]storer.EncodedObjectStorer)
}
@@ -45,25 +45,17 @@ func (s *DiffTreeSuite) storageFromPackfile(f *fixtures.Fixture) storer.EncodedO
return sto
}
- sto = memory.NewStorage()
+ storer := memory.NewStorage()
pf := f.Packfile()
-
defer pf.Close()
- n := packfile.NewScanner(pf)
- d, err := packfile.NewDecoder(n, sto)
- if err != nil {
- panic(err)
- }
-
- _, err = d.Decode()
- if err != nil {
+ if err := packfile.UpdateObjectStorage(storer, pf); err != nil {
panic(err)
}
- s.cache[f.URL] = sto
- return sto
+ s.cache[f.URL] = storer
+ return storer
}
var _ = Suite(&DiffTreeSuite{})
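As this test update shows, filesystem.NewStorage no longer returns an error and now requires an object cache:

    // Before: sto, err := filesystem.NewStorage(fs)
    // After: the cache argument is mandatory and the call cannot fail.
    sto := filesystem.NewStorage(fs, cache.NewObjectLRUDefault())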
diff --git a/plumbing/object/file.go b/plumbing/object/file.go
index 40b5206..1c5fdbb 100644
--- a/plumbing/object/file.go
+++ b/plumbing/object/file.go
@@ -44,7 +44,7 @@ func (f *File) Contents() (content string, err error) {
}
// IsBinary returns if the file is binary or not
-func (f *File) IsBinary() (bool, error) {
+func (f *File) IsBinary() (bin bool, err error) {
reader, err := f.Reader()
if err != nil {
return false, err
diff --git a/plumbing/object/file_test.go b/plumbing/object/file_test.go
index edb82d0..4b92749 100644
--- a/plumbing/object/file_test.go
+++ b/plumbing/object/file_test.go
@@ -4,6 +4,7 @@ import (
"io"
"gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/cache"
"gopkg.in/src-d/go-git.v4/plumbing/filemode"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/storage/filesystem"
@@ -44,8 +45,7 @@ var fileIterTests = []struct {
func (s *FileSuite) TestIter(c *C) {
for i, t := range fileIterTests {
f := fixtures.ByURL(t.repo).One()
- sto, err := filesystem.NewStorage(f.DotGit())
- c.Assert(err, IsNil)
+ sto := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault())
h := plumbing.NewHash(t.commit)
commit, err := GetCommit(sto, h)
@@ -106,8 +106,7 @@ hs_err_pid*
func (s *FileSuite) TestContents(c *C) {
for i, t := range contentsTests {
f := fixtures.ByURL(t.repo).One()
- sto, err := filesystem.NewStorage(f.DotGit())
- c.Assert(err, IsNil)
+ sto := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault())
h := plumbing.NewHash(t.commit)
commit, err := GetCommit(sto, h)
@@ -160,8 +159,7 @@ var linesTests = []struct {
func (s *FileSuite) TestLines(c *C) {
for i, t := range linesTests {
f := fixtures.ByURL(t.repo).One()
- sto, err := filesystem.NewStorage(f.DotGit())
- c.Assert(err, IsNil)
+ sto := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault())
h := plumbing.NewHash(t.commit)
commit, err := GetCommit(sto, h)
@@ -195,8 +193,7 @@ var ignoreEmptyDirEntriesTests = []struct {
func (s *FileSuite) TestIgnoreEmptyDirEntries(c *C) {
for i, t := range ignoreEmptyDirEntriesTests {
f := fixtures.ByURL(t.repo).One()
- sto, err := filesystem.NewStorage(f.DotGit())
- c.Assert(err, IsNil)
+ sto := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault())
h := plumbing.NewHash(t.commit)
commit, err := GetCommit(sto, h)
@@ -251,9 +248,7 @@ func (s *FileSuite) TestFileIter(c *C) {
func (s *FileSuite) TestFileIterSubmodule(c *C) {
dotgit := fixtures.ByURL("https://github.com/git-fixtures/submodule.git").One().DotGit()
- st, err := filesystem.NewStorage(dotgit)
-
- c.Assert(err, IsNil)
+ st := filesystem.NewStorage(dotgit, cache.NewObjectLRUDefault())
hash := plumbing.NewHash("b685400c1f9316f350965a5993d350bc746b0bf4")
commit, err := GetCommit(st, hash)
diff --git a/plumbing/object/object.go b/plumbing/object/object.go
index 4b59aba..e960e50 100644
--- a/plumbing/object/object.go
+++ b/plumbing/object/object.go
@@ -152,7 +152,11 @@ func (s *Signature) decodeTimeAndTimeZone(b []byte) {
}
func (s *Signature) encodeTimeAndTimeZone(w io.Writer) error {
- _, err := fmt.Fprintf(w, "%d %s", s.When.Unix(), s.When.Format("-0700"))
+ u := s.When.Unix()
+ if u < 0 {
+ u = 0
+ }
+ _, err := fmt.Fprintf(w, "%d %s", u, s.When.Format("-0700"))
return err
}
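The clamping above only matters for timestamps before the Unix epoch; a quick illustration:

    t := time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)
    fmt.Println(t.Unix()) // negative; previously encoded verbatim, now written as 0 (1970-01-01)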
diff --git a/plumbing/object/object_test.go b/plumbing/object/object_test.go
index 4f0fcb3..8f0eede 100644
--- a/plumbing/object/object_test.go
+++ b/plumbing/object/object_test.go
@@ -7,6 +7,7 @@ import (
"time"
"gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/cache"
"gopkg.in/src-d/go-git.v4/plumbing/filemode"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/storage/filesystem"
@@ -26,8 +27,7 @@ type BaseObjectsSuite struct {
func (s *BaseObjectsSuite) SetUpSuite(c *C) {
s.Suite.SetUpSuite(c)
s.Fixture = fixtures.Basic().One()
- storer, err := filesystem.NewStorage(s.Fixture.DotGit())
- c.Assert(err, IsNil)
+ storer := filesystem.NewStorage(s.Fixture.DotGit(), cache.NewObjectLRUDefault())
s.Storer = storer
}
@@ -197,8 +197,9 @@ func (s *ObjectsSuite) TestObjectIter(c *C) {
}
c.Assert(err, IsNil)
- c.Assert(o, DeepEquals, objects[i])
- i += 1
+ c.Assert(o.ID(), Equals, objects[i].ID())
+ c.Assert(o.Type(), Equals, objects[i].Type())
+ i++
}
iter.Close()
diff --git a/plumbing/object/patch.go b/plumbing/object/patch.go
index aa96a96..adeaccb 100644
--- a/plumbing/object/patch.go
+++ b/plumbing/object/patch.go
@@ -2,6 +2,8 @@ package object
import (
"bytes"
+ "context"
+ "errors"
"fmt"
"io"
"math"
@@ -15,10 +17,25 @@ import (
dmp "github.com/sergi/go-diff/diffmatchpatch"
)
+var (
+ ErrCanceled = errors.New("operation canceled")
+)
+
func getPatch(message string, changes ...*Change) (*Patch, error) {
+ ctx := context.Background()
+ return getPatchContext(ctx, message, changes...)
+}
+
+func getPatchContext(ctx context.Context, message string, changes ...*Change) (*Patch, error) {
var filePatches []fdiff.FilePatch
for _, c := range changes {
- fp, err := filePatch(c)
+ select {
+ case <-ctx.Done():
+ return nil, ErrCanceled
+ default:
+ }
+
+ fp, err := filePatchWithContext(ctx, c)
if err != nil {
return nil, err
}
@@ -29,7 +46,7 @@ func getPatch(message string, changes ...*Change) (*Patch, error) {
return &Patch{message, filePatches}, nil
}
-func filePatch(c *Change) (fdiff.FilePatch, error) {
+func filePatchWithContext(ctx context.Context, c *Change) (fdiff.FilePatch, error) {
from, to, err := c.Files()
if err != nil {
return nil, err
@@ -52,6 +69,12 @@ func filePatch(c *Change) (fdiff.FilePatch, error) {
var chunks []fdiff.Chunk
for _, d := range diffs {
+ select {
+ case <-ctx.Done():
+ return nil, ErrCanceled
+ default:
+ }
+
var op fdiff.Operation
switch d.Type {
case dmp.DiffEqual:
@@ -70,6 +93,11 @@ func filePatch(c *Change) (fdiff.FilePatch, error) {
from: c.From,
to: c.To,
}, nil
+
+}
+
+func filePatch(c *Change) (fdiff.FilePatch, error) {
+ return filePatchWithContext(context.Background(), c)
}
func fileContent(f *File) (content string, isBinary bool, err error) {
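The non-blocking select above is the usual cooperative-cancellation idiom; in isolation it looks like this (the job type and process function are hypothetical):

    func processAll(ctx context.Context, jobs []job) error {
        for _, j := range jobs {
            // Check for cancellation between units of work without blocking.
            select {
            case <-ctx.Done():
                return ErrCanceled
            default:
            }
            process(j)
        }
        return nil
    }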
diff --git a/plumbing/object/patch_test.go b/plumbing/object/patch_test.go
index 8eb65ec..47057fb 100644
--- a/plumbing/object/patch_test.go
+++ b/plumbing/object/patch_test.go
@@ -4,6 +4,7 @@ import (
. "gopkg.in/check.v1"
fixtures "gopkg.in/src-d/go-git-fixtures.v3"
"gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/cache"
"gopkg.in/src-d/go-git.v4/storage/filesystem"
)
@@ -14,8 +15,8 @@ type PatchSuite struct {
var _ = Suite(&PatchSuite{})
func (s *PatchSuite) TestStatsWithSubmodules(c *C) {
- storer, err := filesystem.NewStorage(
- fixtures.ByURL("https://github.com/git-fixtures/submodule.git").One().DotGit())
+ storer := filesystem.NewStorage(
+ fixtures.ByURL("https://github.com/git-fixtures/submodule.git").One().DotGit(), cache.NewObjectLRUDefault())
commit, err := GetCommit(storer, plumbing.NewHash("b685400c1f9316f350965a5993d350bc746b0bf4"))
diff --git a/plumbing/object/tag.go b/plumbing/object/tag.go
index 19e55cf..03749f9 100644
--- a/plumbing/object/tag.go
+++ b/plumbing/object/tag.go
@@ -95,7 +95,8 @@ func (t *Tag) Decode(o plumbing.EncodedObject) (err error) {
r := bufio.NewReader(reader)
for {
- line, err := r.ReadBytes('\n')
+ var line []byte
+ line, err = r.ReadBytes('\n')
if err != nil && err != io.EOF {
return err
}
@@ -168,7 +169,7 @@ func (t *Tag) Encode(o plumbing.EncodedObject) error {
return t.encode(o, true)
}
-func (t *Tag) encode(o plumbing.EncodedObject, includeSig bool) error {
+func (t *Tag) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
o.SetType(plumbing.TagObject)
w, err := o.Writer()
if err != nil {
@@ -194,13 +195,14 @@ func (t *Tag) encode(o plumbing.EncodedObject, includeSig bool) error {
return err
}
- if t.PGPSignature != "" && includeSig {
- // Split all the signature lines and write with a newline at the end.
- lines := strings.Split(t.PGPSignature, "\n")
- for _, line := range lines {
- if _, err = fmt.Fprintf(w, "%s\n", line); err != nil {
- return err
- }
+ // Note that this is highly sensitive to what is sent along in the message.
+ // Message *always* needs to end with a newline, or else the message and the
+ // signature will be concatenated into a corrupt object. Since this is a
+ // lower-level method, we assume you know what you are doing and have already
+ // prepared the message accordingly in the caller.
+ if includeSig {
+ if _, err = fmt.Fprint(w, t.PGPSignature); err != nil {
+ return err
}
}
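Per the comment above, callers must newline-terminate the message themselves before attaching a signature; a sketch (armoredSig is an assumed ASCII-armored signature string):

    tag.Message = "release v1.0.0\n" // trailing \n keeps message and signature apart
    tag.PGPSignature = armoredSig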
diff --git a/plumbing/object/tag_test.go b/plumbing/object/tag_test.go
index 9900093..59c28b0 100644
--- a/plumbing/object/tag_test.go
+++ b/plumbing/object/tag_test.go
@@ -7,6 +7,7 @@ import (
"time"
"gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/cache"
"gopkg.in/src-d/go-git.v4/storage/filesystem"
"gopkg.in/src-d/go-git.v4/storage/memory"
@@ -22,9 +23,7 @@ var _ = Suite(&TagSuite{})
func (s *TagSuite) SetUpSuite(c *C) {
s.BaseObjectsSuite.SetUpSuite(c)
- storer, err := filesystem.NewStorage(
- fixtures.ByURL("https://github.com/git-fixtures/tags.git").One().DotGit())
- c.Assert(err, IsNil)
+ storer := filesystem.NewStorage(fixtures.ByURL("https://github.com/git-fixtures/tags.git").One().DotGit(), cache.NewObjectLRUDefault())
s.Storer = storer
}
@@ -265,7 +264,7 @@ func (s *TagSuite) TestStringNonCommit(c *C) {
c.Assert(tag.String(), Equals,
"tag TAG TWO\n"+
"Tagger: <>\n"+
- "Date: Mon Jan 01 00:00:00 0001 +0000\n"+
+ "Date: Thu Jan 01 00:00:00 1970 +0000\n"+
"\n"+
"tag two\n")
}
diff --git a/plumbing/object/tree.go b/plumbing/object/tree.go
index 2fcd979..c36a137 100644
--- a/plumbing/object/tree.go
+++ b/plumbing/object/tree.go
@@ -2,10 +2,12 @@ package object
import (
"bufio"
+ "context"
"errors"
"fmt"
"io"
"path"
+ "path/filepath"
"strings"
"gopkg.in/src-d/go-git.v4/plumbing"
@@ -24,6 +26,7 @@ var (
ErrMaxTreeDepth = errors.New("maximum tree depth exceeded")
ErrFileNotFound = errors.New("file not found")
ErrDirectoryNotFound = errors.New("directory not found")
+ ErrEntryNotFound = errors.New("entry not found")
)
// Tree is basically like a directory - it references a bunch of other trees
@@ -34,6 +37,7 @@ type Tree struct {
s storer.EncodedObjectStorer
m map[string]*TreeEntry
+ t map[string]*Tree // tree path cache
}
// GetTree gets a tree from an object storer and decodes it.
@@ -111,14 +115,37 @@ func (t *Tree) TreeEntryFile(e *TreeEntry) (*File, error) {
// FindEntry search a TreeEntry in this tree or any subtree.
func (t *Tree) FindEntry(path string) (*TreeEntry, error) {
+ if t.t == nil {
+ t.t = make(map[string]*Tree)
+ }
+
pathParts := strings.Split(path, "/")
+ startingTree := t
+ pathCurrent := ""
+
+ // search for the longest path in the tree path cache
+ for i := len(pathParts); i > 1; i-- {
+ path := filepath.Join(pathParts[:i]...)
+
+ tree, ok := t.t[path]
+ if ok {
+ startingTree = tree
+ pathParts = pathParts[i:]
+ pathCurrent = path
+
+ break
+ }
+ }
var tree *Tree
var err error
- for tree = t; len(pathParts) > 1; pathParts = pathParts[1:] {
+ for tree = startingTree; len(pathParts) > 1; pathParts = pathParts[1:] {
if tree, err = tree.dir(pathParts[0]); err != nil {
return nil, err
}
+
+ pathCurrent = filepath.Join(pathCurrent, pathParts[0])
+ t.t[pathCurrent] = tree
}
return tree.entry(pathParts[0])
@@ -141,8 +168,6 @@ func (t *Tree) dir(baseName string) (*Tree, error) {
return tree, err
}
-var errEntryNotFound = errors.New("entry not found")
-
func (t *Tree) entry(baseName string) (*TreeEntry, error) {
if t.m == nil {
t.buildMap()
@@ -150,7 +175,7 @@ func (t *Tree) entry(baseName string) (*TreeEntry, error) {
entry, ok := t.m[baseName]
if !ok {
- return nil, errEntryNotFound
+ return nil, ErrEntryNotFound
}
return entry, nil
@@ -233,7 +258,7 @@ func (t *Tree) Decode(o plumbing.EncodedObject) (err error) {
}
// Encode transforms a Tree into a plumbing.EncodedObject.
-func (t *Tree) Encode(o plumbing.EncodedObject) error {
+func (t *Tree) Encode(o plumbing.EncodedObject) (err error) {
o.SetType(plumbing.TreeObject)
w, err := o.Writer()
if err != nil {
@@ -242,7 +267,7 @@ func (t *Tree) Encode(o plumbing.EncodedObject) error {
defer ioutil.CheckClose(w, &err)
for _, entry := range t.Entries {
- if _, err := fmt.Fprintf(w, "%o %s", entry.Mode, entry.Name); err != nil {
+ if _, err = fmt.Fprintf(w, "%o %s", entry.Mode, entry.Name); err != nil {
return err
}
@@ -270,15 +295,30 @@ func (from *Tree) Diff(to *Tree) (Changes, error) {
return DiffTree(from, to)
}
+// DiffContext returns a list of changes between this tree and the provided one.
+// An error will be returned if the context expires.
+// The provided context must be non-nil.
+func (from *Tree) DiffContext(ctx context.Context, to *Tree) (Changes, error) {
+ return DiffTreeContext(ctx, from, to)
+}
+
// Patch returns a slice of Patch objects with all the changes between trees
// in chunks. This representation can be used to create several diff outputs.
func (from *Tree) Patch(to *Tree) (*Patch, error) {
- changes, err := DiffTree(from, to)
+ return from.PatchContext(context.Background(), to)
+}
+
+// PatchContext returns a slice of Patch objects with all the changes between
+// trees in chunks. This representation can be used to create several diff
+// outputs. If the context expires, an error will be returned.
+// The provided context must be non-nil.
+func (from *Tree) PatchContext(ctx context.Context, to *Tree) (*Patch, error) {
+ changes, err := DiffTreeContext(ctx, from, to)
if err != nil {
return nil, err
}
- return changes.Patch()
+ return changes.PatchContext(ctx)
}
// treeEntryIter facilitates iterating through the TreeEntry objects in a Tree.
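A sketch of how the new path cache and exported sentinel behave; tree is an assumed *Tree:

    // First lookup walks "dir" and "dir/sub" and caches both subtrees.
    if _, err := tree.FindEntry("dir/sub/a.go"); err == object.ErrEntryNotFound {
        // exported sentinel replaces the old private errEntryNotFound
    }
    // Second lookup resolves "dir/sub" straight from the cache.
    _, _ = tree.FindEntry("dir/sub/b.go")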
diff --git a/plumbing/object/tree_test.go b/plumbing/object/tree_test.go
index 3a687dd..7366421 100644
--- a/plumbing/object/tree_test.go
+++ b/plumbing/object/tree_test.go
@@ -5,6 +5,7 @@ import (
"io"
"gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/cache"
"gopkg.in/src-d/go-git.v4/plumbing/filemode"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/storage/filesystem"
@@ -114,6 +115,12 @@ func (s *TreeSuite) TestFindEntry(c *C) {
c.Assert(e.Name, Equals, "foo.go")
}
+func (s *TreeSuite) TestFindEntryNotFound(c *C) {
+ e, err := s.Tree.FindEntry("not-found")
+ c.Assert(e, IsNil)
+ c.Assert(err, Equals, ErrEntryNotFound)
+}
+
// Overrides returned plumbing.EncodedObject for given hash.
// Otherwise, delegates to actual storer to get real object
type fakeStorer struct {
@@ -335,8 +342,7 @@ func (s *TreeSuite) TestTreeWalkerNextNonRecursive(c *C) {
func (s *TreeSuite) TestTreeWalkerNextSubmodule(c *C) {
dotgit := fixtures.ByURL("https://github.com/git-fixtures/submodule.git").One().DotGit()
- st, err := filesystem.NewStorage(dotgit)
- c.Assert(err, IsNil)
+ st := filesystem.NewStorage(dotgit, cache.NewObjectLRUDefault())
hash := plumbing.NewHash("b685400c1f9316f350965a5993d350bc746b0bf4")
commit, err := GetCommit(st, hash)
diff --git a/plumbing/protocol/packp/advrefs.go b/plumbing/protocol/packp/advrefs.go
index 7d644bc..684e76a 100644
--- a/plumbing/protocol/packp/advrefs.go
+++ b/plumbing/protocol/packp/advrefs.go
@@ -2,6 +2,7 @@ package packp
import (
"fmt"
+ "sort"
"strings"
"gopkg.in/src-d/go-git.v4/plumbing"
@@ -68,30 +69,119 @@ func (a *AdvRefs) AddReference(r *plumbing.Reference) error {
func (a *AdvRefs) AllReferences() (memory.ReferenceStorage, error) {
s := memory.ReferenceStorage{}
- if err := addRefs(s, a); err != nil {
+ if err := a.addRefs(s); err != nil {
return s, plumbing.NewUnexpectedError(err)
}
return s, nil
}
-func addRefs(s storer.ReferenceStorer, ar *AdvRefs) error {
- for name, hash := range ar.References {
+func (a *AdvRefs) addRefs(s storer.ReferenceStorer) error {
+ for name, hash := range a.References {
ref := plumbing.NewReferenceFromStrings(name, hash.String())
if err := s.SetReference(ref); err != nil {
return err
}
}
- return addSymbolicRefs(s, ar)
+ if a.supportSymrefs() {
+ return a.addSymbolicRefs(s)
+ }
+
+ return a.resolveHead(s)
}
-func addSymbolicRefs(s storer.ReferenceStorer, ar *AdvRefs) error {
- if !hasSymrefs(ar) {
+// If the server does not support the symref capability,
+// we need to guess the reference HEAD is pointing to.
+//
+// Git versions prior to 1.8.4.3 have a special procedure to find
+// the reference HEAD points to:
+// - Check if a reference called master exists. If it exists and
+// has the same hash as HEAD, we can say that HEAD points to master.
+// - If master does not exist or does not have the same hash as HEAD,
+// sort the references and check them in order; the first one whose
+// hash matches HEAD becomes the target of HEAD.
+// - If no such reference is found, return an error.
+func (a *AdvRefs) resolveHead(s storer.ReferenceStorer) error {
+ if a.Head == nil {
+ return nil
+ }
+
+ ref, err := s.Reference(plumbing.ReferenceName(plumbing.Master))
+
+ // check first if HEAD is pointing to master
+ if err == nil {
+ ok, err := a.createHeadIfCorrectReference(ref, s)
+ if err != nil {
+ return err
+ }
+
+ if ok {
+ return nil
+ }
+ }
+
+ if err != nil && err != plumbing.ErrReferenceNotFound {
+ return err
+ }
+
+ // From here on we try to guess the branch that HEAD is pointing to
+ refIter, err := s.IterReferences()
+ if err != nil {
+ return err
+ }
+
+ var refNames []string
+ err = refIter.ForEach(func(r *plumbing.Reference) error {
+ refNames = append(refNames, string(r.Name()))
return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ sort.Strings(refNames)
+
+ var headSet bool
+ for _, refName := range refNames {
+ ref, err := s.Reference(plumbing.ReferenceName(refName))
+ if err != nil {
+ return err
+ }
+ ok, err := a.createHeadIfCorrectReference(ref, s)
+ if err != nil {
+ return err
+ }
+ if ok {
+ headSet = true
+ break
+ }
+ }
+
+ if !headSet {
+ return plumbing.ErrReferenceNotFound
}
- for _, symref := range ar.Capabilities.Get(capability.SymRef) {
+ return nil
+}
+
+func (a *AdvRefs) createHeadIfCorrectReference(
+ reference *plumbing.Reference,
+ s storer.ReferenceStorer) (bool, error) {
+ if reference.Hash() == *a.Head {
+ headRef := plumbing.NewSymbolicReference(plumbing.HEAD, reference.Name())
+ if err := s.SetReference(headRef); err != nil {
+ return false, err
+ }
+
+ return true, nil
+ }
+
+ return false, nil
+}
+
+func (a *AdvRefs) addSymbolicRefs(s storer.ReferenceStorer) error {
+ for _, symref := range a.Capabilities.Get(capability.SymRef) {
chunks := strings.Split(symref, ":")
if len(chunks) != 2 {
err := fmt.Errorf("bad number of `:` in symref value (%q)", symref)
@@ -108,6 +198,6 @@ func addSymbolicRefs(s storer.ReferenceStorer, ar *AdvRefs) error {
return nil
}
-func hasSymrefs(ar *AdvRefs) bool {
- return ar.Capabilities.Supports(capability.SymRef)
+func (a *AdvRefs) supportSymrefs() bool {
+ return a.Capabilities.Supports(capability.SymRef)
}
diff --git a/plumbing/protocol/packp/advrefs_test.go b/plumbing/protocol/packp/advrefs_test.go
index 0180fd3..bb8d032 100644
--- a/plumbing/protocol/packp/advrefs_test.go
+++ b/plumbing/protocol/packp/advrefs_test.go
@@ -79,6 +79,79 @@ func (s *AdvRefSuite) TestAllReferencesBadSymref(c *C) {
c.Assert(err, NotNil)
}
+func (s *AdvRefSuite) TestNoSymRefCapabilityHeadToMaster(c *C) {
+ a := NewAdvRefs()
+ headHash := plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c")
+ a.Head = &headHash
+ ref := plumbing.NewHashReference(plumbing.Master, plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c"))
+
+ err := a.AddReference(ref)
+ c.Assert(err, IsNil)
+
+ storage, err := a.AllReferences()
+ c.Assert(err, IsNil)
+
+ head, err := storage.Reference(plumbing.HEAD)
+ c.Assert(err, IsNil)
+ c.Assert(head.Target(), Equals, ref.Name())
+}
+
+func (s *AdvRefSuite) TestNoSymRefCapabilityHeadToOtherThanMaster(c *C) {
+ a := NewAdvRefs()
+ headHash := plumbing.NewHash("0000000000000000000000000000000000000000")
+ a.Head = &headHash
+ ref1 := plumbing.NewHashReference(plumbing.Master, plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c"))
+ ref2 := plumbing.NewHashReference("other/ref", plumbing.NewHash("0000000000000000000000000000000000000000"))
+
+ err := a.AddReference(ref1)
+ c.Assert(err, IsNil)
+ err = a.AddReference(ref2)
+ c.Assert(err, IsNil)
+
+ storage, err := a.AllReferences()
+ c.Assert(err, IsNil)
+
+ head, err := storage.Reference(plumbing.HEAD)
+ c.Assert(err, IsNil)
+ c.Assert(head.Hash(), Equals, ref2.Hash())
+}
+
+func (s *AdvRefSuite) TestNoSymRefCapabilityHeadToNoRef(c *C) {
+ a := NewAdvRefs()
+ headHash := plumbing.NewHash("0000000000000000000000000000000000000000")
+ a.Head = &headHash
+ ref := plumbing.NewHashReference(plumbing.Master, plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c"))
+
+ err := a.AddReference(ref)
+ c.Assert(err, IsNil)
+
+ _, err = a.AllReferences()
+ c.Assert(err, NotNil)
+}
+
+func (s *AdvRefSuite) TestNoSymRefCapabilityHeadToNoMasterAlphabeticallyOrdered(c *C) {
+ a := NewAdvRefs()
+ headHash := plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c")
+ a.Head = &headHash
+ ref1 := plumbing.NewHashReference(plumbing.Master, plumbing.NewHash("0000000000000000000000000000000000000000"))
+ ref2 := plumbing.NewHashReference("aaaaaaaaaaaaaaa", plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c"))
+ ref3 := plumbing.NewHashReference("bbbbbbbbbbbbbbb", plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c"))
+
+ err := a.AddReference(ref1)
+ c.Assert(err, IsNil)
+ err = a.AddReference(ref3)
+ c.Assert(err, IsNil)
+ err = a.AddReference(ref2)
+ c.Assert(err, IsNil)
+
+ storage, err := a.AllReferences()
+ c.Assert(err, IsNil)
+
+ head, err := storage.Reference(plumbing.HEAD)
+ c.Assert(err, IsNil)
+ c.Assert(head.Target(), Equals, ref2.Name())
+}
+
type AdvRefsDecodeEncodeSuite struct{}
var _ = Suite(&AdvRefsDecodeEncodeSuite{})
diff --git a/plumbing/revlist/revlist_test.go b/plumbing/revlist/revlist_test.go
index 55d9bca..dea1c73 100644
--- a/plumbing/revlist/revlist_test.go
+++ b/plumbing/revlist/revlist_test.go
@@ -4,6 +4,7 @@ import (
"testing"
"gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/cache"
"gopkg.in/src-d/go-git.v4/plumbing/object"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/storage/filesystem"
@@ -51,8 +52,7 @@ const (
func (s *RevListSuite) SetUpTest(c *C) {
s.Suite.SetUpSuite(c)
- sto, err := filesystem.NewStorage(fixtures.Basic().One().DotGit())
- c.Assert(err, IsNil)
+ sto := filesystem.NewStorage(fixtures.Basic().One().DotGit(), cache.NewObjectLRUDefault())
s.Storer = sto
}
@@ -67,8 +67,7 @@ func (s *RevListSuite) TestRevListObjects_Submodules(c *C) {
"6ecf0ef2c2dffb796033e5a02219af86ec6584e5": true,
}
- sto, err := filesystem.NewStorage(fixtures.ByTag("submodule").One().DotGit())
- c.Assert(err, IsNil)
+ sto := filesystem.NewStorage(fixtures.ByTag("submodule").One().DotGit(), cache.NewObjectLRUDefault())
ref, err := storer.ResolveReference(sto, plumbing.HEAD)
c.Assert(err, IsNil)
@@ -109,10 +108,9 @@ func (s *RevListSuite) TestRevListObjects(c *C) {
}
func (s *RevListSuite) TestRevListObjectsTagObject(c *C) {
- sto, err := filesystem.NewStorage(
+ sto := filesystem.NewStorage(
fixtures.ByTag("tags").
- ByURL("https://github.com/git-fixtures/tags.git").One().DotGit())
- c.Assert(err, IsNil)
+ ByURL("https://github.com/git-fixtures/tags.git").One().DotGit(), cache.NewObjectLRUDefault())
expected := map[string]bool{
"70846e9a10ef7b41064b40f07713d5b8b9a8fc73": true,
diff --git a/plumbing/storer/object.go b/plumbing/storer/object.go
index f1d19ef..92aa629 100644
--- a/plumbing/storer/object.go
+++ b/plumbing/storer/object.go
@@ -174,7 +174,6 @@ func (iter *EncodedObjectLookupIter) Close() {
// no longer needed.
type EncodedObjectSliceIter struct {
series []plumbing.EncodedObject
- pos int
}
// NewEncodedObjectSliceIter returns an object iterator for the given slice of
@@ -218,7 +217,6 @@ func (iter *EncodedObjectSliceIter) Close() {
// longer needed.
type MultiEncodedObjectIter struct {
iters []EncodedObjectIter
- pos int
}
// NewMultiEncodedObjectIter returns an object iterator for the given slice of
diff --git a/plumbing/transport/common.go b/plumbing/transport/common.go
index cc9682f..f7b882b 100644
--- a/plumbing/transport/common.go
+++ b/plumbing/transport/common.go
@@ -128,10 +128,10 @@ func (u *Endpoint) String() string {
buf.WriteString("//")
if u.User != "" || u.Password != "" {
- buf.WriteString(u.User)
+ buf.WriteString(url.PathEscape(u.User))
if u.Password != "" {
buf.WriteByte(':')
- buf.WriteString(u.Password)
+ buf.WriteString(url.PathEscape(u.Password))
}
buf.WriteByte('@')
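Escaping only the user-info components keeps the rendered URL parseable, e.g.:

    fmt.Println(url.PathEscape("pass word")) // "pass%20word"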
diff --git a/plumbing/transport/common_test.go b/plumbing/transport/common_test.go
index 4203ce9..65ed5b9 100644
--- a/plumbing/transport/common_test.go
+++ b/plumbing/transport/common_test.go
@@ -1,6 +1,8 @@
package transport
import (
+ "fmt"
+ "net/url"
"testing"
"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
@@ -153,6 +155,24 @@ func (s *SuiteCommon) TestNewEndpointFileURL(c *C) {
c.Assert(e.String(), Equals, "file:///foo.git")
}
+func (s *SuiteCommon) TestValidEndpoint(c *C) {
+ user := "person@mail.com"
+ pass := " !\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~"
+ e, err := NewEndpoint(fmt.Sprintf(
+ "http://%s:%s@github.com/user/repository.git",
+ url.PathEscape(user),
+ url.PathEscape(pass),
+ ))
+ c.Assert(err, IsNil)
+ c.Assert(e, NotNil)
+ c.Assert(e.User, Equals, user)
+ c.Assert(e.Password, Equals, pass)
+ c.Assert(e.Host, Equals, "github.com")
+ c.Assert(e.Path, Equals, "/user/repository.git")
+
+ c.Assert(e.String(), Equals, "http://person@mail.com:%20%21%22%23$%25&%27%28%29%2A+%2C-.%2F:%3B%3C=%3E%3F@%5B%5C%5D%5E_%60%7B%7C%7D~@github.com/user/repository.git")
+}
+
func (s *SuiteCommon) TestNewEndpointInvalidURL(c *C) {
e, err := NewEndpoint("http://\\")
c.Assert(err, NotNil)
diff --git a/plumbing/transport/http/common.go b/plumbing/transport/http/common.go
index edf1c6c..c034846 100644
--- a/plumbing/transport/http/common.go
+++ b/plumbing/transport/http/common.go
@@ -6,6 +6,7 @@ import (
"fmt"
"net/http"
"strconv"
+ "strings"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
@@ -28,10 +29,12 @@ func applyHeadersToRequest(req *http.Request, content *bytes.Buffer, host string
req.Header.Add("Content-Length", strconv.Itoa(content.Len()))
}
-func advertisedReferences(s *session, serviceName string) (*packp.AdvRefs, error) {
+const infoRefsPath = "/info/refs"
+
+func advertisedReferences(s *session, serviceName string) (ref *packp.AdvRefs, err error) {
url := fmt.Sprintf(
- "%s/info/refs?service=%s",
- s.endpoint.String(), serviceName,
+ "%s%s?service=%s",
+ s.endpoint.String(), infoRefsPath, serviceName,
)
req, err := http.NewRequest(http.MethodGet, url, nil)
@@ -39,21 +42,22 @@ func advertisedReferences(s *session, serviceName string) (*packp.AdvRefs, error
return nil, err
}
- s.applyAuthToRequest(req)
+ s.ApplyAuthToRequest(req)
applyHeadersToRequest(req, nil, s.endpoint.Host, serviceName)
res, err := s.client.Do(req)
if err != nil {
return nil, err
}
+ s.ModifyEndpointIfRedirect(res)
defer ioutil.CheckClose(res.Body, &err)
- if err := NewErr(res); err != nil {
+ if err = NewErr(res); err != nil {
return nil, err
}
ar := packp.NewAdvRefs()
- if err := ar.Decode(res.Body); err != nil {
+ if err = ar.Decode(res.Body); err != nil {
if err == packp.ErrEmptyAdvRefs {
err = transport.ErrEmptyRemoteRepository
}
@@ -129,11 +133,7 @@ func newSession(c *http.Client, ep *transport.Endpoint, auth transport.AuthMetho
return s, nil
}
-func (*session) Close() error {
- return nil
-}
-
-func (s *session) applyAuthToRequest(req *http.Request) {
+func (s *session) ApplyAuthToRequest(req *http.Request) {
if s.auth == nil {
return
}
@@ -141,6 +141,24 @@ func (s *session) applyAuthToRequest(req *http.Request) {
s.auth.setAuth(req)
}
+func (s *session) ModifyEndpointIfRedirect(res *http.Response) {
+ if res.Request == nil {
+ return
+ }
+
+ r := res.Request
+ if !strings.HasSuffix(r.URL.Path, infoRefsPath) {
+ return
+ }
+
+ s.endpoint.Protocol = r.URL.Scheme
+ s.endpoint.Path = r.URL.Path[:len(r.URL.Path)-len(infoRefsPath)]
+}
+
+func (*session) Close() error {
+ return nil
+}
+
// AuthMethod is concrete implementation of common.AuthMethod for HTTP services
type AuthMethod interface {
transport.AuthMethod
@@ -183,6 +201,31 @@ func (a *BasicAuth) String() string {
return fmt.Sprintf("%s - %s:%s", a.Name(), a.Username, masked)
}
+// TokenAuth implements the go-git http.AuthMethod and transport.AuthMethod interfaces
+type TokenAuth struct {
+ Token string
+}
+
+func (a *TokenAuth) setAuth(r *http.Request) {
+ if a == nil {
+ return
+ }
+ r.Header.Add("Authorization", fmt.Sprintf("Bearer %s", a.Token))
+}
+
+// Name is name of the auth
+func (a *TokenAuth) Name() string {
+ return "http-token-auth"
+}
+
+func (a *TokenAuth) String() string {
+ masked := "*******"
+ if a.Token == "" {
+ masked = "<empty>"
+ }
+ return fmt.Sprintf("%s - %s", a.Name(), masked)
+}
+
// Err is a dedicated error to return errors based on status code
type Err struct {
Response *http.Response
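A usage sketch for the new TokenAuth method; the clone wiring below is assumed and not part of this patch:

    auth := &githttp.TokenAuth{Token: os.Getenv("GIT_TOKEN")}
    _, err := git.Clone(memory.NewStorage(), nil, &git.CloneOptions{
        URL:  "https://github.com/git-fixtures/basic.git",
        Auth: auth,
    })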
diff --git a/plumbing/transport/http/common_test.go b/plumbing/transport/http/common_test.go
index 8d57996..71eede4 100644
--- a/plumbing/transport/http/common_test.go
+++ b/plumbing/transport/http/common_test.go
@@ -54,6 +54,19 @@ func (s *ClientSuite) TestNewBasicAuth(c *C) {
c.Assert(a.String(), Equals, "http-basic-auth - foo:*******")
}
+func (s *ClientSuite) TestNewTokenAuth(c *C) {
+ a := &TokenAuth{"OAUTH-TOKEN-TEXT"}
+
+ c.Assert(a.Name(), Equals, "http-token-auth")
+ c.Assert(a.String(), Equals, "http-token-auth - *******")
+
+ // Check header is set correctly
+ req, err := http.NewRequest("GET", "https://github.com/git-fixtures/basic", nil)
+ c.Assert(err, Equals, nil)
+ a.setAuth(req)
+ c.Assert(req.Header.Get("Authorization"), Equals, "Bearer OAUTH-TOKEN-TEXT")
+}
+
func (s *ClientSuite) TestNewErrOK(c *C) {
res := &http.Response{StatusCode: http.StatusOK}
err := NewErr(res)
diff --git a/plumbing/transport/http/receive_pack.go b/plumbing/transport/http/receive_pack.go
index e5cae28..72ba0ec 100644
--- a/plumbing/transport/http/receive_pack.go
+++ b/plumbing/transport/http/receive_pack.go
@@ -90,7 +90,7 @@ func (s *rpSession) doRequest(
}
applyHeadersToRequest(req, content, s.endpoint.Host, transport.ReceivePackServiceName)
- s.applyAuthToRequest(req)
+ s.ApplyAuthToRequest(req)
res, err := s.client.Do(req.WithContext(ctx))
if err != nil {
diff --git a/plumbing/transport/http/upload_pack.go b/plumbing/transport/http/upload_pack.go
index 85a57a5..fb5ac36 100644
--- a/plumbing/transport/http/upload_pack.go
+++ b/plumbing/transport/http/upload_pack.go
@@ -88,7 +88,7 @@ func (s *upSession) doRequest(
}
applyHeadersToRequest(req, content, s.endpoint.Host, transport.UploadPackServiceName)
- s.applyAuthToRequest(req)
+ s.ApplyAuthToRequest(req)
res, err := s.client.Do(req.WithContext(ctx))
if err != nil {
diff --git a/plumbing/transport/http/upload_pack_test.go b/plumbing/transport/http/upload_pack_test.go
index fbd28c7..3b85af5 100644
--- a/plumbing/transport/http/upload_pack_test.go
+++ b/plumbing/transport/http/upload_pack_test.go
@@ -75,3 +75,31 @@ func (s *UploadPackSuite) newEndpoint(c *C, name string) *transport.Endpoint {
return ep
}
+
+func (s *UploadPackSuite) TestAdvertisedReferencesRedirectPath(c *C) {
+ endpoint, _ := transport.NewEndpoint("https://gitlab.com/gitlab-org/gitter/webapp")
+
+ session, err := s.Client.NewUploadPackSession(endpoint, s.EmptyAuth)
+ c.Assert(err, IsNil)
+
+ info, err := session.AdvertisedReferences()
+ c.Assert(err, IsNil)
+ c.Assert(info, NotNil)
+
+ url := session.(*upSession).endpoint.String()
+ c.Assert(url, Equals, "https://gitlab.com/gitlab-org/gitter/webapp.git")
+}
+
+func (s *UploadPackSuite) TestAdvertisedReferencesRedirectSchema(c *C) {
+ endpoint, _ := transport.NewEndpoint("http://github.com/git-fixtures/basic")
+
+ session, err := s.Client.NewUploadPackSession(endpoint, s.EmptyAuth)
+ c.Assert(err, IsNil)
+
+ info, err := session.AdvertisedReferences()
+ c.Assert(err, IsNil)
+ c.Assert(info, NotNil)
+
+ url := session.(*upSession).endpoint.String()
+ c.Assert(url, Equals, "https://github.com/git-fixtures/basic")
+}
diff --git a/plumbing/transport/internal/common/common.go b/plumbing/transport/internal/common/common.go
index 8ec1ea5..00497f3 100644
--- a/plumbing/transport/internal/common/common.go
+++ b/plumbing/transport/internal/common/common.go
@@ -382,6 +382,7 @@ var (
gitProtocolNotFoundErr = "ERR \n Repository not found."
gitProtocolNoSuchErr = "ERR no such repository"
gitProtocolAccessDeniedErr = "ERR access denied"
+ gogsAccessDeniedErr = "Gogs: Repository does not exist or you do not have access"
)
func isRepoNotFoundError(s string) bool {
@@ -409,6 +410,10 @@ func isRepoNotFoundError(s string) bool {
return true
}
+ if strings.HasPrefix(s, gogsAccessDeniedErr) {
+ return true
+ }
+
return false
}
diff --git a/plumbing/transport/internal/common/common_test.go b/plumbing/transport/internal/common/common_test.go
new file mode 100644
index 0000000..b2f035d
--- /dev/null
+++ b/plumbing/transport/internal/common/common_test.go
@@ -0,0 +1,78 @@
+package common
+
+import (
+ "fmt"
+ "testing"
+
+ . "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type CommonSuite struct{}
+
+var _ = Suite(&CommonSuite{})
+
+func (s *CommonSuite) TestIsRepoNotFoundErrorForUnknownSource(c *C) {
+ msg := "unknown system is complaining of something very sad :("
+
+ isRepoNotFound := isRepoNotFoundError(msg)
+
+ c.Assert(isRepoNotFound, Equals, false)
+}
+
+func (s *CommonSuite) TestIsRepoNotFoundErrorForGithub(c *C) {
+ msg := fmt.Sprintf("%s : some error stuf", githubRepoNotFoundErr)
+
+ isRepoNotFound := isRepoNotFoundError(msg)
+
+ c.Assert(isRepoNotFound, Equals, true)
+}
+
+func (s *CommonSuite) TestIsRepoNotFoundErrorForBitBucket(c *C) {
+ msg := fmt.Sprintf("%s : some error stuf", bitbucketRepoNotFoundErr)
+
+ isRepoNotFound := isRepoNotFoundError(msg)
+
+ c.Assert(isRepoNotFound, Equals, true)
+}
+
+func (s *CommonSuite) TestIsRepoNotFoundErrorForLocal(c *C) {
+ msg := fmt.Sprintf("some error stuf : %s", localRepoNotFoundErr)
+
+ isRepoNotFound := isRepoNotFoundError(msg)
+
+ c.Assert(isRepoNotFound, Equals, true)
+}
+
+func (s *CommonSuite) TestIsRepoNotFoundErrorForGitProtocolNotFound(c *C) {
+ msg := fmt.Sprintf("%s : some error stuf", gitProtocolNotFoundErr)
+
+ isRepoNotFound := isRepoNotFoundError(msg)
+
+ c.Assert(isRepoNotFound, Equals, true)
+}
+
+func (s *CommonSuite) TestIsRepoNotFoundErrorForGitProtocolNoSuch(c *C) {
+ msg := fmt.Sprintf("%s : some error stuf", gitProtocolNoSuchErr)
+
+ isRepoNotFound := isRepoNotFoundError(msg)
+
+ c.Assert(isRepoNotFound, Equals, true)
+}
+
+func (s *CommonSuite) TestIsRepoNotFoundErrorForGitProtocolAccessDenied(c *C) {
+ msg := fmt.Sprintf("%s : some error stuf", gitProtocolAccessDeniedErr)
+
+ isRepoNotFound := isRepoNotFoundError(msg)
+
+ c.Assert(isRepoNotFound, Equals, true)
+}
+
+func (s *CommonSuite) TestIsRepoNotFoundErrorForGogsAccessDenied(c *C) {
+ msg := fmt.Sprintf("%s : some error stuf", gogsAccessDeniedErr)
+
+ isRepoNotFound := isRepoNotFoundError(msg)
+
+ c.Assert(isRepoNotFound, Equals, true)
+}
diff --git a/plumbing/transport/server/loader.go b/plumbing/transport/server/loader.go
index c83752c..13b3526 100644
--- a/plumbing/transport/server/loader.go
+++ b/plumbing/transport/server/loader.go
@@ -1,6 +1,7 @@
package server
import (
+ "gopkg.in/src-d/go-git.v4/plumbing/cache"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/plumbing/transport"
"gopkg.in/src-d/go-git.v4/storage/filesystem"
@@ -43,7 +44,7 @@ func (l *fsLoader) Load(ep *transport.Endpoint) (storer.Storer, error) {
return nil, transport.ErrRepositoryNotFound
}
- return filesystem.NewStorage(fs)
+ return filesystem.NewStorage(fs, cache.NewObjectLRUDefault()), nil
}
// MapLoader is a Loader that uses a lookup map of storer.Storer by
diff --git a/plumbing/transport/server/server.go b/plumbing/transport/server/server.go
index 2357bd6..20bd12e 100644
--- a/plumbing/transport/server/server.go
+++ b/plumbing/transport/server/server.go
@@ -298,17 +298,6 @@ func (s *rpSession) updateReferences(req *packp.ReferenceUpdateRequest) {
}
}
-func (s *rpSession) failAtomicUpdate() (*packp.ReportStatus, error) {
- rs := s.reportStatus()
- for _, cs := range rs.CommandStatuses {
- if cs.Error() == nil {
- cs.Status = "atomic updated"
- }
- }
-
- return rs, s.firstErr
-}
-
func (s *rpSession) writePackfile(r io.ReadCloser) error {
if r == nil {
return nil
diff --git a/plumbing/transport/server/server_test.go b/plumbing/transport/server/server_test.go
index 33d74d1..302ff48 100644
--- a/plumbing/transport/server/server_test.go
+++ b/plumbing/transport/server/server_test.go
@@ -3,6 +3,7 @@ package server_test
import (
"testing"
+ "gopkg.in/src-d/go-git.v4/plumbing/cache"
"gopkg.in/src-d/go-git.v4/plumbing/transport"
"gopkg.in/src-d/go-git.v4/plumbing/transport/client"
"gopkg.in/src-d/go-git.v4/plumbing/transport/server"
@@ -53,8 +54,7 @@ func (s *BaseSuite) prepareRepositories(c *C) {
fs := fixtures.Basic().One().DotGit()
s.Endpoint, err = transport.NewEndpoint(fs.Root())
c.Assert(err, IsNil)
- s.loader[s.Endpoint.String()], err = filesystem.NewStorage(fs)
- c.Assert(err, IsNil)
+ s.loader[s.Endpoint.String()] = filesystem.NewStorage(fs, cache.NewObjectLRUDefault())
s.EmptyEndpoint, err = transport.NewEndpoint("/empty.git")
c.Assert(err, IsNil)
diff --git a/plumbing/transport/ssh/auth_method.go b/plumbing/transport/ssh/auth_method.go
index a092b29..dbb47c5 100644
--- a/plumbing/transport/ssh/auth_method.go
+++ b/plumbing/transport/ssh/auth_method.go
@@ -124,6 +124,9 @@ type PublicKeys struct {
// (PKCS#1), DSA (OpenSSL), and ECDSA private keys.
func NewPublicKeys(user string, pemBytes []byte, password string) (*PublicKeys, error) {
block, _ := pem.Decode(pemBytes)
+ if block == nil {
+ return nil, errors.New("invalid PEM data")
+ }
if x509.IsEncryptedPEMBlock(block) {
key, err := x509.DecryptPEMBlock(block, []byte(password))
if err != nil {
@@ -231,9 +234,9 @@ func (a *PublicKeysCallback) ClientConfig() (*ssh.ClientConfig, error) {
}
// NewKnownHostsCallback returns ssh.HostKeyCallback based on a file based on a
-// know_hosts file. http://man.openbsd.org/sshd#SSH_KNOWN_HOSTS_FILE_FORMAT
+// known_hosts file. http://man.openbsd.org/sshd#SSH_KNOWN_HOSTS_FILE_FORMAT
//
-// If files is empty, the list of files will be read from the SSH_KNOWN_HOSTS
+// If the list of files is empty, it will be read from the SSH_KNOWN_HOSTS
// environment variable, example:
// /home/foo/custom_known_hosts_file:/etc/custom_known/hosts_file
//
@@ -241,13 +244,15 @@ func (a *PublicKeysCallback) ClientConfig() (*ssh.ClientConfig, error) {
// ~/.ssh/known_hosts
// /etc/ssh/ssh_known_hosts
func NewKnownHostsCallback(files ...string) (ssh.HostKeyCallback, error) {
- files, err := getDefaultKnownHostsFiles()
- if err != nil {
- return nil, err
+ var err error
+
+ if len(files) == 0 {
+ if files, err = getDefaultKnownHostsFiles(); err != nil {
+ return nil, err
+ }
}
- files, err = filterKnownHostsFiles(files...)
- if err != nil {
+ if files, err = filterKnownHostsFiles(files...); err != nil {
return nil, err
}
@@ -286,7 +291,7 @@ func filterKnownHostsFiles(files ...string) ([]string, error) {
}
if len(out) == 0 {
- return nil, fmt.Errorf("unable to find any valid know_hosts file, set SSH_KNOWN_HOSTS env variable")
+ return nil, fmt.Errorf("unable to find any valid known_hosts file, set SSH_KNOWN_HOSTS env variable")
}
return out, nil
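With the fix above, explicitly passed files are honored instead of being overwritten by the defaults; a sketch (the HostKeyCallback field on the auth method is assumed):

    cb, err := ssh.NewKnownHostsCallback("/home/foo/.ssh/known_hosts")
    if err == nil {
        auth.HostKeyCallback = cb
    }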
diff --git a/plumbing/transport/ssh/auth_method_test.go b/plumbing/transport/ssh/auth_method_test.go
index 1e77ca0..0cde61e 100644
--- a/plumbing/transport/ssh/auth_method_test.go
+++ b/plumbing/transport/ssh/auth_method_test.go
@@ -1,16 +1,30 @@
package ssh
import (
+ "bufio"
"fmt"
"io/ioutil"
"os"
+ "strings"
+ "golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/testdata"
. "gopkg.in/check.v1"
)
-type SuiteCommon struct{}
+type (
+ SuiteCommon struct{}
+
+ mockKnownHosts struct{}
+)
+
+func (mockKnownHosts) host() string { return "github.com" }
+func (mockKnownHosts) knownHosts() []byte {
+ return []byte(`github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==`)
+}
+func (mockKnownHosts) Network() string { return "tcp" }
+func (mockKnownHosts) String() string { return "github.com:22" }
var _ = Suite(&SuiteCommon{})
@@ -143,3 +157,55 @@ func (*SuiteCommon) TestNewPublicKeysFromFile(c *C) {
c.Assert(err, IsNil)
c.Assert(auth, NotNil)
}
+
+func (*SuiteCommon) TestNewPublicKeysWithInvalidPEM(c *C) {
+ auth, err := NewPublicKeys("foo", []byte("bar"), "")
+ c.Assert(err, NotNil)
+ c.Assert(auth, IsNil)
+}
+
+func (*SuiteCommon) TestNewKnownHostsCallback(c *C) {
+ var mock = mockKnownHosts{}
+
+ f, err := ioutil.TempFile("", "known-hosts")
+ c.Assert(err, IsNil)
+
+ _, err = f.Write(mock.knownHosts())
+ c.Assert(err, IsNil)
+
+ err = f.Close()
+ c.Assert(err, IsNil)
+
+ defer os.RemoveAll(f.Name())
+
+ f, err = os.Open(f.Name())
+ c.Assert(err, IsNil)
+
+ defer f.Close()
+
+ var hostKey ssh.PublicKey
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ fields := strings.Split(scanner.Text(), " ")
+ if len(fields) != 3 {
+ continue
+ }
+ if strings.Contains(fields[0], mock.host()) {
+ var err error
+ hostKey, _, _, _, err = ssh.ParseAuthorizedKey(scanner.Bytes())
+ if err != nil {
+ c.Fatalf("error parsing %q: %v", fields[2], err)
+ }
+ break
+ }
+ }
+ if hostKey == nil {
+ c.Fatalf("no hostkey for %s", mock.host())
+ }
+
+ clb, err := NewKnownHostsCallback(f.Name())
+ c.Assert(err, IsNil)
+
+ err = clb(mock.String(), mock, hostKey)
+ c.Assert(err, IsNil)
+}
diff --git a/plumbing/transport/ssh/common_test.go b/plumbing/transport/ssh/common_test.go
index faa0503..6e76096 100644
--- a/plumbing/transport/ssh/common_test.go
+++ b/plumbing/transport/ssh/common_test.go
@@ -49,7 +49,7 @@ func (s *SuiteCommon) TestDefaultSSHConfig(c *C) {
}()
DefaultSSHConfig = &mockSSHConfig{map[string]map[string]string{
- "github.com": map[string]string{
+ "github.com": {
"Hostname": "foo.local",
"Port": "42",
},
@@ -82,7 +82,7 @@ func (s *SuiteCommon) TestDefaultSSHConfigWildcard(c *C) {
}()
DefaultSSHConfig = &mockSSHConfig{Values: map[string]map[string]string{
- "*": map[string]string{
+ "*": {
"Port": "42",
},
}}
diff --git a/plumbing/transport/test/receive_pack.go b/plumbing/transport/test/receive_pack.go
index 0f3352c..5aea1c0 100644
--- a/plumbing/transport/test/receive_pack.go
+++ b/plumbing/transport/test/receive_pack.go
@@ -8,6 +8,8 @@ import (
"context"
"io"
"io/ioutil"
+ "os"
+ "path/filepath"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
@@ -50,7 +52,7 @@ func (s *ReceivePackSuite) TestAdvertisedReferencesNotExists(c *C) {
c.Assert(err, IsNil)
req := packp.NewReferenceUpdateRequest()
req.Commands = []*packp.Command{
- {"master", plumbing.ZeroHash, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")},
+ {Name: "master", Old: plumbing.ZeroHash, New: plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")},
}
writer, err := r.ReceivePack(context.Background(), req)
@@ -99,7 +101,7 @@ func (s *ReceivePackSuite) TestFullSendPackOnEmpty(c *C) {
fixture := fixtures.Basic().ByTag("packfile").One()
req := packp.NewReferenceUpdateRequest()
req.Commands = []*packp.Command{
- {"refs/heads/master", plumbing.ZeroHash, fixture.Head},
+ {Name: "refs/heads/master", Old: plumbing.ZeroHash, New: fixture.Head},
}
s.receivePack(c, endpoint, req, fixture, full)
s.checkRemoteHead(c, endpoint, fixture.Head)
@@ -110,7 +112,7 @@ func (s *ReceivePackSuite) TestSendPackWithContext(c *C) {
req := packp.NewReferenceUpdateRequest()
req.Packfile = fixture.Packfile()
req.Commands = []*packp.Command{
- {"refs/heads/master", plumbing.ZeroHash, fixture.Head},
+ {Name: "refs/heads/master", Old: plumbing.ZeroHash, New: fixture.Head},
}
r, err := s.Client.NewReceivePackSession(s.EmptyEndpoint, s.EmptyAuth)
@@ -135,7 +137,7 @@ func (s *ReceivePackSuite) TestSendPackOnEmpty(c *C) {
fixture := fixtures.Basic().ByTag("packfile").One()
req := packp.NewReferenceUpdateRequest()
req.Commands = []*packp.Command{
- {"refs/heads/master", plumbing.ZeroHash, fixture.Head},
+ {Name: "refs/heads/master", Old: plumbing.ZeroHash, New: fixture.Head},
}
s.receivePack(c, endpoint, req, fixture, full)
s.checkRemoteHead(c, endpoint, fixture.Head)
@@ -147,7 +149,7 @@ func (s *ReceivePackSuite) TestSendPackOnEmptyWithReportStatus(c *C) {
fixture := fixtures.Basic().ByTag("packfile").One()
req := packp.NewReferenceUpdateRequest()
req.Commands = []*packp.Command{
- {"refs/heads/master", plumbing.ZeroHash, fixture.Head},
+ {Name: "refs/heads/master", Old: plumbing.ZeroHash, New: fixture.Head},
}
req.Capabilities.Set(capability.ReportStatus)
s.receivePack(c, endpoint, req, fixture, full)
@@ -160,7 +162,7 @@ func (s *ReceivePackSuite) TestFullSendPackOnNonEmpty(c *C) {
fixture := fixtures.Basic().ByTag("packfile").One()
req := packp.NewReferenceUpdateRequest()
req.Commands = []*packp.Command{
- {"refs/heads/master", fixture.Head, fixture.Head},
+ {Name: "refs/heads/master", Old: fixture.Head, New: fixture.Head},
}
s.receivePack(c, endpoint, req, fixture, full)
s.checkRemoteHead(c, endpoint, fixture.Head)
@@ -172,7 +174,7 @@ func (s *ReceivePackSuite) TestSendPackOnNonEmpty(c *C) {
fixture := fixtures.Basic().ByTag("packfile").One()
req := packp.NewReferenceUpdateRequest()
req.Commands = []*packp.Command{
- {"refs/heads/master", fixture.Head, fixture.Head},
+ {Name: "refs/heads/master", Old: fixture.Head, New: fixture.Head},
}
s.receivePack(c, endpoint, req, fixture, full)
s.checkRemoteHead(c, endpoint, fixture.Head)
@@ -184,7 +186,7 @@ func (s *ReceivePackSuite) TestSendPackOnNonEmptyWithReportStatus(c *C) {
fixture := fixtures.Basic().ByTag("packfile").One()
req := packp.NewReferenceUpdateRequest()
req.Commands = []*packp.Command{
- {"refs/heads/master", fixture.Head, fixture.Head},
+ {Name: "refs/heads/master", Old: fixture.Head, New: fixture.Head},
}
req.Capabilities.Set(capability.ReportStatus)
@@ -198,7 +200,7 @@ func (s *ReceivePackSuite) TestSendPackOnNonEmptyWithReportStatusWithError(c *C)
fixture := fixtures.Basic().ByTag("packfile").One()
req := packp.NewReferenceUpdateRequest()
req.Commands = []*packp.Command{
- {"refs/heads/master", plumbing.ZeroHash, fixture.Head},
+ {Name: "refs/heads/master", Old: plumbing.ZeroHash, New: fixture.Head},
}
req.Capabilities.Set(capability.ReportStatus)
@@ -225,6 +227,24 @@ func (s *ReceivePackSuite) receivePackNoCheck(c *C, ep *transport.Endpoint,
ep.String(), url, callAdvertisedReferences,
)
+ // Set write permissions on the endpoint directory files. By default
+ // fixtures are generated with read-only permissions, which causes
+ // errors when deleting or modifying files.
+ rootPath := ep.Path
+ stat, err := os.Stat(ep.Path)
+
+ if rootPath != "" && err == nil && stat.IsDir() {
+ objectPath := filepath.Join(rootPath, "objects/pack")
+ files, err := ioutil.ReadDir(objectPath)
+ c.Assert(err, IsNil)
+
+ for _, file := range files {
+ path := filepath.Join(objectPath, file.Name())
+ err = os.Chmod(path, 0644)
+ c.Assert(err, IsNil)
+ }
+ }
+
r, err := s.Client.NewReceivePackSession(ep, s.EmptyAuth)
c.Assert(err, IsNil, comment)
defer func() { c.Assert(r.Close(), IsNil, comment) }()
@@ -242,13 +262,16 @@ func (s *ReceivePackSuite) receivePackNoCheck(c *C, ep *transport.Endpoint,
req.Packfile = s.emptyPackfile()
}
- return r.ReceivePack(context.Background(), req)
+ if s, err := r.ReceivePack(context.Background(), req); err != nil {
+ return s, err
+ } else {
+ return s, err
+ }
}
func (s *ReceivePackSuite) receivePack(c *C, ep *transport.Endpoint,
req *packp.ReferenceUpdateRequest, fixture *fixtures.Fixture,
callAdvertisedReferences bool) {
-
url := ""
if fixture != nil {
url = fixture.URL
@@ -259,7 +282,6 @@ func (s *ReceivePackSuite) receivePack(c *C, ep *transport.Endpoint,
ep.String(), url, callAdvertisedReferences,
)
report, err := s.receivePackNoCheck(c, ep, req, fixture, callAdvertisedReferences)
-
c.Assert(err, IsNil, comment)
if req.Capabilities.Supports(capability.ReportStatus) {
c.Assert(report, NotNil, comment)
@@ -306,7 +328,7 @@ func (s *ReceivePackSuite) testSendPackAddReference(c *C) {
req := packp.NewReferenceUpdateRequest()
req.Commands = []*packp.Command{
- {"refs/heads/newbranch", plumbing.ZeroHash, fixture.Head},
+ {Name: "refs/heads/newbranch", Old: plumbing.ZeroHash, New: fixture.Head},
}
if ar.Capabilities.Supports(capability.ReportStatus) {
req.Capabilities.Set(capability.ReportStatus)
@@ -329,7 +351,7 @@ func (s *ReceivePackSuite) testSendPackDeleteReference(c *C) {
req := packp.NewReferenceUpdateRequest()
req.Commands = []*packp.Command{
- {"refs/heads/newbranch", fixture.Head, plumbing.ZeroHash},
+ {Name: "refs/heads/newbranch", Old: fixture.Head, New: plumbing.ZeroHash},
}
if ar.Capabilities.Supports(capability.ReportStatus) {
req.Capabilities.Set(capability.ReportStatus)
diff --git a/plumbing/transport/test/upload_pack.go b/plumbing/transport/test/upload_pack.go
index 70e4e56..8709ac2 100644
--- a/plumbing/transport/test/upload_pack.go
+++ b/plumbing/transport/test/upload_pack.go
@@ -258,11 +258,8 @@ func (s *UploadPackSuite) checkObjectNumber(c *C, r io.Reader, n int) {
b, err := ioutil.ReadAll(r)
c.Assert(err, IsNil)
buf := bytes.NewBuffer(b)
- scanner := packfile.NewScanner(buf)
storage := memory.NewStorage()
- d, err := packfile.NewDecoder(scanner, storage)
- c.Assert(err, IsNil)
- _, err = d.Decode()
+ err = packfile.UpdateObjectStorage(storage, buf)
c.Assert(err, IsNil)
c.Assert(len(storage.Objects), Equals, n)
}
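packfile.UpdateObjectStorage is the one-call replacement for the removed scanner/decoder pair; packReader stands in for any io.Reader over a packfile:

    storage := memory.NewStorage()
    if err := packfile.UpdateObjectStorage(storage, packReader); err != nil {
        panic(err)
    }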
diff --git a/prune.go b/prune.go
index 04913d6..c840325 100644
--- a/prune.go
+++ b/prune.go
@@ -49,7 +49,7 @@ func (r *Repository) Prune(opt PruneOptions) error {
}
// Otherwise it is a candidate for pruning.
// Check out for too new objects next.
- if opt.OnlyObjectsOlderThan != (time.Time{}) {
+ if !opt.OnlyObjectsOlderThan.IsZero() {
// Errors here are non-fatal. The object may be e.g. packed.
// Or concurrently deleted. Skip such objects.
t, err := los.LooseObjectTime(hash)
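time.Time.IsZero expresses the same check more idiomatically than comparing against a composite literal:

    var t time.Time
    fmt.Println(t.IsZero())         // true
    fmt.Println(t == (time.Time{})) // equivalent, but noisier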
diff --git a/prune_test.go b/prune_test.go
index 60652ec..670cd07 100644
--- a/prune_test.go
+++ b/prune_test.go
@@ -4,6 +4,7 @@ import (
"time"
"gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/cache"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/storage"
"gopkg.in/src-d/go-git.v4/storage/filesystem"
@@ -22,8 +23,7 @@ func (s *PruneSuite) testPrune(c *C, deleteTime time.Time) {
srcFs := fixtures.ByTag("unpacked").One().DotGit()
var sto storage.Storer
var err error
- sto, err = filesystem.NewStorage(srcFs)
- c.Assert(err, IsNil)
+ sto = filesystem.NewStorage(srcFs, cache.NewObjectLRUDefault())
los := sto.(storer.LooseObjectStorer)
c.Assert(los, NotNil)
diff --git a/remote.go b/remote.go
index 8828e3f..0556b98 100644
--- a/remote.go
+++ b/remote.go
@@ -73,7 +73,7 @@ func (r *Remote) Push(o *PushOptions) error {
// The provided Context must be non-nil. If the context expires before the
// operation is complete, an error is returned. The context only affects to the
// transport operations.
-func (r *Remote) PushContext(ctx context.Context, o *PushOptions) error {
+func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) {
if err := o.Validate(); err != nil {
return err
}
@@ -130,10 +130,7 @@ func (r *Remote) PushContext(ctx context.Context, o *PushOptions) error {
return NoErrAlreadyUpToDate
}
- objects, err := objectsToPush(req.Commands)
- if err != nil {
- return err
- }
+ objects := objectsToPush(req.Commands)
haves, err := referencesToHashes(remoteRefs)
if err != nil {
@@ -246,12 +243,12 @@ func (r *Remote) Fetch(o *FetchOptions) error {
return r.FetchContext(context.Background(), o)
}
-func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (storer.ReferenceStorer, error) {
+func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.ReferenceStorer, err error) {
if o.RemoteName == "" {
o.RemoteName = r.c.Name
}
- if err := o.Validate(); err != nil {
+ if err = o.Validate(); err != nil {
return nil, err
}
@@ -298,7 +295,7 @@ func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (storer.ReferenceSt
return nil, err
}
- if err := r.fetchPack(ctx, o, s, req); err != nil {
+ if err = r.fetchPack(ctx, o, s, req); err != nil {
return nil, err
}
}
@@ -357,7 +354,7 @@ func (r *Remote) fetchPack(ctx context.Context, o *FetchOptions, s transport.Upl
defer ioutil.CheckClose(reader, &err)
- if err := r.updateShallow(o, reader); err != nil {
+ if err = r.updateShallow(o, reader); err != nil {
return err
}
@@ -374,14 +371,22 @@ func (r *Remote) addReferencesToUpdate(
refspecs []config.RefSpec,
localRefs []*plumbing.Reference,
remoteRefs storer.ReferenceStorer,
- req *packp.ReferenceUpdateRequest) error {
+ req *packp.ReferenceUpdateRequest,
+) error {
+	// This dictionary will be used to look up references by name.
+ refsDict := make(map[string]*plumbing.Reference)
+ for _, ref := range localRefs {
+ refsDict[ref.Name().String()] = ref
+ }
+
for _, rs := range refspecs {
if rs.IsDelete() {
if err := r.deleteReferences(rs, remoteRefs, req); err != nil {
return err
}
} else {
- if err := r.addOrUpdateReferences(rs, localRefs, remoteRefs, req); err != nil {
+ err := r.addOrUpdateReferences(rs, localRefs, refsDict, remoteRefs, req)
+ if err != nil {
return err
}
}
@@ -393,9 +398,21 @@ func (r *Remote) addReferencesToUpdate(
func (r *Remote) addOrUpdateReferences(
rs config.RefSpec,
localRefs []*plumbing.Reference,
+ refsDict map[string]*plumbing.Reference,
remoteRefs storer.ReferenceStorer,
req *packp.ReferenceUpdateRequest,
) error {
+	// If it is not a wildcard refspec we can directly search for the reference
+ // in the references dictionary.
+ if !rs.IsWildcard() {
+ ref, ok := refsDict[rs.Src()]
+ if !ok {
+ return nil
+ }
+
+ return r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req)
+ }
+
for _, ref := range localRefs {
err := r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req)
if err != nil {
@@ -602,7 +619,7 @@ func getHaves(
return result, nil
}
-const refspecTag = "+refs/tags/*:refs/tags/*"
+const refspecAllTags = "+refs/tags/*:refs/tags/*"
func calculateRefs(
spec []config.RefSpec,
@@ -610,17 +627,32 @@ func calculateRefs(
tagMode TagMode,
) (memory.ReferenceStorage, error) {
if tagMode == AllTags {
- spec = append(spec, refspecTag)
+ spec = append(spec, refspecAllTags)
+ }
+
+ refs := make(memory.ReferenceStorage)
+ for _, s := range spec {
+ if err := doCalculateRefs(s, remoteRefs, refs); err != nil {
+ return nil, err
+ }
}
+ return refs, nil
+}
+
+func doCalculateRefs(
+ s config.RefSpec,
+ remoteRefs storer.ReferenceStorer,
+ refs memory.ReferenceStorage,
+) error {
iter, err := remoteRefs.IterReferences()
if err != nil {
- return nil, err
+ return err
}
- refs := make(memory.ReferenceStorage)
- return refs, iter.ForEach(func(ref *plumbing.Reference) error {
- if !config.MatchAny(spec, ref.Name()) {
+ var matched bool
+ err = iter.ForEach(func(ref *plumbing.Reference) error {
+ if !s.Match(ref.Name()) {
return nil
}
@@ -637,8 +669,23 @@ func calculateRefs(
return nil
}
- return refs.SetReference(ref)
+ matched = true
+ if err := refs.SetReference(ref); err != nil {
+ return err
+ }
+
+ if !s.IsWildcard() {
+ return storer.ErrStop
+ }
+
+ return nil
})
+
+ if !matched && !s.IsWildcard() {
+ return fmt.Errorf("couldn't find remote ref %q", s.Src())
+ }
+
+ return err
}
func getWants(localStorer storage.Storer, refs memory.ReferenceStorage) ([]plumbing.Hash, error) {
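
Splitting calculateRefs into a per-refspec doCalculateRefs changes the caller-visible contract: a non-wildcard refspec whose source is missing on the remote now fails fast with "couldn't find remote ref" instead of being silently skipped (a wildcard may still legitimately match nothing, and storer.ErrStop cuts the iteration short once a non-wildcard spec has matched). A sketch of the new failure mode, assuming r is an already configured *git.Repository:

    package example

    import (
        git "gopkg.in/src-d/go-git.v4"
        "gopkg.in/src-d/go-git.v4/config"
    )

    // fetchMissingBranch returns an error like:
    //   couldn't find remote ref "refs/heads/foo"
    // when refs/heads/foo does not exist on the remote.
    func fetchMissingBranch(r *git.Repository) error {
        return r.Fetch(&git.FetchOptions{
            RefSpecs: []config.RefSpec{
                "+refs/heads/foo:refs/remotes/origin/foo",
            },
        })
    }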
@@ -875,7 +922,7 @@ func (r *Remote) buildFetchedTags(refs memory.ReferenceStorage) (updated bool, e
}
// List the references on the remote repository.
-func (r *Remote) List(o *ListOptions) ([]*plumbing.Reference, error) {
+func (r *Remote) List(o *ListOptions) (rfs []*plumbing.Reference, err error) {
s, err := newUploadPackSession(r.c.URLs[0], o.Auth)
if err != nil {
return nil, err
@@ -907,7 +954,7 @@ func (r *Remote) List(o *ListOptions) ([]*plumbing.Reference, error) {
return resultRefs, nil
}
-func objectsToPush(commands []*packp.Command) ([]plumbing.Hash, error) {
+func objectsToPush(commands []*packp.Command) []plumbing.Hash {
var objects []plumbing.Hash
for _, cmd := range commands {
if cmd.New == plumbing.ZeroHash {
@@ -916,8 +963,7 @@ func objectsToPush(commands []*packp.Command) ([]plumbing.Hash, error) {
objects = append(objects, cmd.New)
}
-
- return objects, nil
+ return objects
}
func referencesToHashes(refs storer.ReferenceStorer) ([]plumbing.Hash, error) {
@@ -980,9 +1026,24 @@ func pushHashes(
}
func (r *Remote) updateShallow(o *FetchOptions, resp *packp.UploadPackResponse) error {
- if o.Depth == 0 {
+ if o.Depth == 0 || len(resp.Shallows) == 0 {
return nil
}
- return r.s.SetShallow(resp.Shallows)
+ shallows, err := r.s.Shallow()
+ if err != nil {
+ return err
+ }
+
+outer:
+ for _, s := range resp.Shallows {
+ for _, oldS := range shallows {
+ if s == oldS {
+ continue outer
+ }
+ }
+ shallows = append(shallows, s)
+ }
+
+ return r.s.SetShallow(shallows)
}
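
updateShallow previously overwrote the stored shallow list with whatever the server sent; it now unions the response's shallow roots with the ones already recorded, so successive shallow fetches accumulate instead of clobbering each other. The labeled continue skips hashes that are already present. The same merge in isolation (the helper name is illustrative):

    package example

    import "gopkg.in/src-d/go-git.v4/plumbing"

    // mergeShallows appends each fetched hash that is not already stored,
    // preserving the order in which hashes were first seen.
    func mergeShallows(stored, fetched []plumbing.Hash) []plumbing.Hash {
    outer:
        for _, f := range fetched {
            for _, s := range stored {
                if f == s {
                    continue outer // already recorded
                }
            }
            stored = append(stored, f)
        }
        return stored
    }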
diff --git a/remote_test.go b/remote_test.go
index e586e7a..175faed 100644
--- a/remote_test.go
+++ b/remote_test.go
@@ -9,6 +9,8 @@ import (
"gopkg.in/src-d/go-git.v4/config"
"gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/cache"
+ "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/storage"
"gopkg.in/src-d/go-git.v4/storage/filesystem"
@@ -99,6 +101,20 @@ func (s *RemoteSuite) TestFetch(c *C) {
})
}
+func (s *RemoteSuite) TestFetchNonExistentReference(c *C) {
+ r := newRemote(memory.NewStorage(), &config.RemoteConfig{
+ URLs: []string{s.GetLocalRepositoryURL(fixtures.ByTag("tags").One())},
+ })
+
+ err := r.Fetch(&FetchOptions{
+ RefSpecs: []config.RefSpec{
+ config.RefSpec("+refs/heads/foo:refs/remotes/origin/foo"),
+ },
+ })
+
+ c.Assert(err, ErrorMatches, "couldn't find remote ref.*")
+}
+
func (s *RemoteSuite) TestFetchContext(c *C) {
r := newRemote(memory.NewStorage(), &config.RemoteConfig{
URLs: []string{s.GetLocalRepositoryURL(fixtures.ByTag("tags").One())},
@@ -223,7 +239,7 @@ func (s *RemoteSuite) TestFetchWithPackfileWriter(c *C) {
defer os.RemoveAll(dir) // clean up
-	fss, err := filesystem.NewStorage(osfs.New(dir))
-	c.Assert(err, IsNil)
+	fss := filesystem.NewStorage(osfs.New(dir), cache.NewObjectLRUDefault())
mock := &mockPackfileWriter{Storer: fss}
@@ -360,8 +376,7 @@ func (s *RemoteSuite) TestFetchFastForwardFS(c *C) {
defer os.RemoveAll(dir) // clean up
- fss, err := filesystem.NewStorage(osfs.New(dir))
- c.Assert(err, IsNil)
+ fss := filesystem.NewStorage(osfs.New(dir), cache.NewObjectLRUDefault())
// This exercises `storage.filesystem.Storage.CheckAndSetReference()`.
s.testFetchFastForward(c, fss)
@@ -385,8 +400,7 @@ func (s *RemoteSuite) TestPushToEmptyRepository(c *C) {
c.Assert(err, IsNil)
srcFs := fixtures.Basic().One().DotGit()
- sto, err := filesystem.NewStorage(srcFs)
- c.Assert(err, IsNil)
+ sto := filesystem.NewStorage(srcFs, cache.NewObjectLRUDefault())
r := newRemote(sto, &config.RemoteConfig{
Name: DefaultRemoteName,
@@ -423,8 +437,7 @@ func (s *RemoteSuite) TestPushContext(c *C) {
c.Assert(err, IsNil)
fs := fixtures.ByURL("https://github.com/git-fixtures/tags.git").One().DotGit()
- sto, err := filesystem.NewStorage(fs)
- c.Assert(err, IsNil)
+ sto := filesystem.NewStorage(fs, cache.NewObjectLRUDefault())
r := newRemote(sto, &config.RemoteConfig{
Name: DefaultRemoteName,
@@ -446,8 +459,7 @@ func (s *RemoteSuite) TestPushTags(c *C) {
c.Assert(err, IsNil)
fs := fixtures.ByURL("https://github.com/git-fixtures/tags.git").One().DotGit()
- sto, err := filesystem.NewStorage(fs)
- c.Assert(err, IsNil)
+ sto := filesystem.NewStorage(fs, cache.NewObjectLRUDefault())
r := newRemote(sto, &config.RemoteConfig{
Name: DefaultRemoteName,
@@ -470,15 +482,14 @@ func (s *RemoteSuite) TestPushTags(c *C) {
func (s *RemoteSuite) TestPushNoErrAlreadyUpToDate(c *C) {
fs := fixtures.Basic().One().DotGit()
- sto, err := filesystem.NewStorage(fs)
- c.Assert(err, IsNil)
+ sto := filesystem.NewStorage(fs, cache.NewObjectLRUDefault())
r := newRemote(sto, &config.RemoteConfig{
Name: DefaultRemoteName,
URLs: []string{fs.Root()},
})
- err = r.Push(&PushOptions{
+ err := r.Push(&PushOptions{
RefSpecs: []config.RefSpec{"refs/heads/*:refs/heads/*"},
})
c.Assert(err, Equals, NoErrAlreadyUpToDate)
@@ -486,8 +497,7 @@ func (s *RemoteSuite) TestPushNoErrAlreadyUpToDate(c *C) {
func (s *RemoteSuite) TestPushDeleteReference(c *C) {
fs := fixtures.Basic().One().DotGit()
- sto, err := filesystem.NewStorage(fs)
- c.Assert(err, IsNil)
+ sto := filesystem.NewStorage(fs, cache.NewObjectLRUDefault())
r, err := PlainClone(c.MkDir(), true, &CloneOptions{
URL: fs.Root(),
@@ -511,8 +521,7 @@ func (s *RemoteSuite) TestPushDeleteReference(c *C) {
func (s *RemoteSuite) TestPushRejectNonFastForward(c *C) {
fs := fixtures.Basic().One().DotGit()
- server, err := filesystem.NewStorage(fs)
- c.Assert(err, IsNil)
+ server := filesystem.NewStorage(fs, cache.NewObjectLRUDefault())
r, err := PlainClone(c.MkDir(), true, &CloneOptions{
URL: fs.Root(),
@@ -539,12 +548,10 @@ func (s *RemoteSuite) TestPushRejectNonFastForward(c *C) {
func (s *RemoteSuite) TestPushForce(c *C) {
f := fixtures.Basic().One()
- sto, err := filesystem.NewStorage(f.DotGit())
- c.Assert(err, IsNil)
+ sto := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault())
dstFs := f.DotGit()
- dstSto, err := filesystem.NewStorage(dstFs)
- c.Assert(err, IsNil)
+ dstSto := filesystem.NewStorage(dstFs, cache.NewObjectLRUDefault())
url := dstFs.Root()
r := newRemote(sto, &config.RemoteConfig{
@@ -688,8 +695,7 @@ func (s *RemoteSuite) TestPushWrongRemoteName(c *C) {
func (s *RemoteSuite) TestGetHaves(c *C) {
f := fixtures.Basic().One()
- sto, err := filesystem.NewStorage(f.DotGit())
- c.Assert(err, IsNil)
+ sto := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault())
var localRefs = []*plumbing.Reference{
plumbing.NewReferenceFromStrings(
@@ -741,3 +747,54 @@ func (s *RemoteSuite) TestList(c *C) {
c.Assert(found, Equals, true)
}
}
+
+func (s *RemoteSuite) TestUpdateShallows(c *C) {
+ hashes := []plumbing.Hash{
+ plumbing.NewHash("0000000000000000000000000000000000000001"),
+ plumbing.NewHash("0000000000000000000000000000000000000002"),
+ plumbing.NewHash("0000000000000000000000000000000000000003"),
+ plumbing.NewHash("0000000000000000000000000000000000000004"),
+ plumbing.NewHash("0000000000000000000000000000000000000005"),
+ plumbing.NewHash("0000000000000000000000000000000000000006"),
+ }
+
+ tests := []struct {
+ hashes []plumbing.Hash
+ result []plumbing.Hash
+ }{
+ // add to empty shallows
+ {hashes[0:2], hashes[0:2]},
+ // add new hashes
+ {hashes[2:4], hashes[0:4]},
+ // add some hashes already in shallow list
+ {hashes[2:6], hashes[0:6]},
+ // add all hashes
+ {hashes[0:6], hashes[0:6]},
+ // add empty list
+ {nil, hashes[0:6]},
+ }
+
+ remote := newRemote(memory.NewStorage(), &config.RemoteConfig{
+ Name: DefaultRemoteName,
+ })
+
+ shallows, err := remote.s.Shallow()
+ c.Assert(err, IsNil)
+ c.Assert(len(shallows), Equals, 0)
+
+ resp := new(packp.UploadPackResponse)
+ o := &FetchOptions{
+ Depth: 1,
+ }
+
+ for _, t := range tests {
+ resp.Shallows = t.hashes
+ err = remote.updateShallow(o, resp)
+ c.Assert(err, IsNil)
+
+ shallow, err := remote.s.Shallow()
+ c.Assert(err, IsNil)
+ c.Assert(len(shallow), Equals, len(t.result))
+ c.Assert(shallow, DeepEquals, t.result)
+ }
+}
diff --git a/repository.go b/repository.go
index 24d025d..ddf6727 100644
--- a/repository.go
+++ b/repository.go
@@ -1,18 +1,22 @@
package git
import (
+ "bytes"
"context"
"errors"
"fmt"
stdioutil "io/ioutil"
"os"
+ "path"
"path/filepath"
"strings"
"time"
+ "golang.org/x/crypto/openpgp"
"gopkg.in/src-d/go-git.v4/config"
"gopkg.in/src-d/go-git.v4/internal/revision"
"gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/cache"
"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
"gopkg.in/src-d/go-git.v4/plumbing/object"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
@@ -24,12 +28,24 @@ import (
"gopkg.in/src-d/go-billy.v4/osfs"
)
+// GitDirName is the name of the special directory where all the git internals live.
+const GitDirName = ".git"
+
var (
+ // ErrBranchExists an error stating the specified branch already exists
+ ErrBranchExists = errors.New("branch already exists")
+ // ErrBranchNotFound an error stating the specified branch does not exist
+ ErrBranchNotFound = errors.New("branch not found")
+ // ErrTagExists an error stating the specified tag already exists
+ ErrTagExists = errors.New("tag already exists")
+ // ErrTagNotFound an error stating the specified tag does not exist
+ ErrTagNotFound = errors.New("tag not found")
+
ErrInvalidReference = errors.New("invalid reference, should be a tag or a branch")
ErrRepositoryNotExists = errors.New("repository does not exist")
ErrRepositoryAlreadyExists = errors.New("repository already exists")
ErrRemoteNotFound = errors.New("remote not found")
- ErrRemoteExists = errors.New("remote already exists ")
+ ErrRemoteExists = errors.New("remote already exists")
ErrWorktreeNotProvided = errors.New("worktree should be provided")
ErrIsBareRepository = errors.New("worktree not available in a bare repository")
ErrUnableToResolveCommit = errors.New("unable to resolve commit")
@@ -109,12 +125,12 @@ func createDotGitFile(worktree, storage billy.Filesystem) error {
path = storage.Root()
}
- if path == ".git" {
+ if path == GitDirName {
// not needed, since the folder is the default place
return nil
}
- f, err := worktree.Create(".git")
+ f, err := worktree.Create(GitDirName)
if err != nil {
return err
}
@@ -210,13 +226,10 @@ func PlainInit(path string, isBare bool) (*Repository, error) {
dot = osfs.New(path)
} else {
wt = osfs.New(path)
- dot, _ = wt.Chroot(".git")
+ dot, _ = wt.Chroot(GitDirName)
}
- s, err := filesystem.NewStorage(dot)
- if err != nil {
- return nil, err
- }
+ s := filesystem.NewStorage(dot, cache.NewObjectLRUDefault())
return Init(s, wt)
}
@@ -225,7 +238,13 @@ func PlainInit(path string, isBare bool) (*Repository, error) {
// repository is bare or a normal one. If the path doesn't contain a valid
// repository ErrRepositoryNotExists is returned
func PlainOpen(path string) (*Repository, error) {
- dot, wt, err := dotGitToOSFilesystems(path)
+ return PlainOpenWithOptions(path, &PlainOpenOptions{})
+}
+
+// PlainOpenWithOptions opens a git repository from the given path with specific
+// options. See PlainOpen for more info.
+func PlainOpenWithOptions(path string, o *PlainOpenOptions) (*Repository, error) {
+ dot, wt, err := dotGitToOSFilesystems(path, o.DetectDotGit)
if err != nil {
return nil, err
}
@@ -238,27 +257,43 @@ func PlainOpen(path string) (*Repository, error) {
return nil, err
}
- s, err := filesystem.NewStorage(dot)
- if err != nil {
- return nil, err
- }
+ s := filesystem.NewStorage(dot, cache.NewObjectLRUDefault())
return Open(s, wt)
}
-func dotGitToOSFilesystems(path string) (dot, wt billy.Filesystem, err error) {
- fs := osfs.New(path)
- fi, err := fs.Stat(".git")
- if err != nil {
+func dotGitToOSFilesystems(path string, detect bool) (dot, wt billy.Filesystem, err error) {
+ if path, err = filepath.Abs(path); err != nil {
+ return nil, nil, err
+ }
+ var fs billy.Filesystem
+ var fi os.FileInfo
+ for {
+ fs = osfs.New(path)
+ fi, err = fs.Stat(GitDirName)
+ if err == nil {
+ // no error; stop
+ break
+ }
if !os.IsNotExist(err) {
+ // unknown error; stop
return nil, nil, err
}
-
+ if detect {
+ // try its parent as long as we haven't reached
+ // the root dir
+ if dir := filepath.Dir(path); dir != path {
+ path = dir
+ continue
+ }
+ }
+ // not detecting via parent dirs and the dir does not exist;
+ // stop
return fs, nil, nil
}
if fi.IsDir() {
- dot, err = fs.Chroot(".git")
+ dot, err = fs.Chroot(GitDirName)
return dot, fs, err
}
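
With DetectDotGit set, dotGitToOSFilesystems climbs parent directories until it finds a .git entry or reaches the filesystem root, mirroring how the git CLI discovers its repository from a subdirectory. Usage through the new public entry point (the directory name is illustrative):

    package example

    import git "gopkg.in/src-d/go-git.v4"

    // openFromSubdir opens the repository containing dir, searching upward
    // for the .git directory the way `git status` would.
    func openFromSubdir(dir string) (*git.Repository, error) {
        return git.PlainOpenWithOptions(dir, &git.PlainOpenOptions{
            DetectDotGit: true,
        })
    }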
@@ -270,10 +305,8 @@ func dotGitToOSFilesystems(path string) (dot, wt billy.Filesystem, err error) {
return dot, fs, nil
}
-func dotGitFileToOSFilesystem(path string, fs billy.Filesystem) (billy.Filesystem, error) {
- var err error
-
- f, err := fs.Open(".git")
+func dotGitFileToOSFilesystem(path string, fs billy.Filesystem) (bfs billy.Filesystem, err error) {
+ f, err := fs.Open(GitDirName)
if err != nil {
return nil, err
}
@@ -404,6 +437,188 @@ func (r *Repository) DeleteRemote(name string) error {
return r.Storer.SetConfig(cfg)
}
+// Branch returns the branch with the given name, if it exists.
+func (r *Repository) Branch(name string) (*config.Branch, error) {
+ cfg, err := r.Storer.Config()
+ if err != nil {
+ return nil, err
+ }
+
+ b, ok := cfg.Branches[name]
+ if !ok {
+ return nil, ErrBranchNotFound
+ }
+
+ return b, nil
+}
+
+// CreateBranch creates a new branch in the repository configuration.
+func (r *Repository) CreateBranch(c *config.Branch) error {
+ if err := c.Validate(); err != nil {
+ return err
+ }
+
+ cfg, err := r.Storer.Config()
+ if err != nil {
+ return err
+ }
+
+ if _, ok := cfg.Branches[c.Name]; ok {
+ return ErrBranchExists
+ }
+
+ cfg.Branches[c.Name] = c
+ return r.Storer.SetConfig(cfg)
+}
+
+// DeleteBranch removes the branch entry with the given name from the repository configuration.
+func (r *Repository) DeleteBranch(name string) error {
+ cfg, err := r.Storer.Config()
+ if err != nil {
+ return err
+ }
+
+ if _, ok := cfg.Branches[name]; !ok {
+ return ErrBranchNotFound
+ }
+
+ delete(cfg.Branches, name)
+ return r.Storer.SetConfig(cfg)
+}
+
+// CreateTag creates a tag. If opts is included, the tag is an annotated tag,
+// otherwise a lightweight tag is created.
+func (r *Repository) CreateTag(name string, hash plumbing.Hash, opts *CreateTagOptions) (*plumbing.Reference, error) {
+ rname := plumbing.ReferenceName(path.Join("refs", "tags", name))
+
+ _, err := r.Storer.Reference(rname)
+ switch err {
+ case nil:
+ // Tag exists, this is an error
+ return nil, ErrTagExists
+ case plumbing.ErrReferenceNotFound:
+		// Tag missing, so it can be created; fall through
+ default:
+ // Some other error
+ return nil, err
+ }
+
+ var target plumbing.Hash
+ if opts != nil {
+ target, err = r.createTagObject(name, hash, opts)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ target = hash
+ }
+
+ ref := plumbing.NewHashReference(rname, target)
+ if err = r.Storer.SetReference(ref); err != nil {
+ return nil, err
+ }
+
+ return ref, nil
+}
+
+func (r *Repository) createTagObject(name string, hash plumbing.Hash, opts *CreateTagOptions) (plumbing.Hash, error) {
+ if err := opts.Validate(r, hash); err != nil {
+ return plumbing.ZeroHash, err
+ }
+
+ rawobj, err := object.GetObject(r.Storer, hash)
+ if err != nil {
+ return plumbing.ZeroHash, err
+ }
+
+ tag := &object.Tag{
+ Name: name,
+ Tagger: *opts.Tagger,
+ Message: opts.Message,
+ TargetType: rawobj.Type(),
+ Target: hash,
+ }
+
+ if opts.SignKey != nil {
+ sig, err := r.buildTagSignature(tag, opts.SignKey)
+ if err != nil {
+ return plumbing.ZeroHash, err
+ }
+
+ tag.PGPSignature = sig
+ }
+
+ obj := r.Storer.NewEncodedObject()
+ if err := tag.Encode(obj); err != nil {
+ return plumbing.ZeroHash, err
+ }
+
+ return r.Storer.SetEncodedObject(obj)
+}
+
+func (r *Repository) buildTagSignature(tag *object.Tag, signKey *openpgp.Entity) (string, error) {
+ encoded := &plumbing.MemoryObject{}
+ if err := tag.Encode(encoded); err != nil {
+ return "", err
+ }
+
+ rdr, err := encoded.Reader()
+ if err != nil {
+ return "", err
+ }
+
+ var b bytes.Buffer
+ if err := openpgp.ArmoredDetachSign(&b, signKey, rdr, nil); err != nil {
+ return "", err
+ }
+
+ return b.String(), nil
+}
+
+// Tag returns a tag from the repository.
+//
+// If you want to check whether the tag is an annotated tag, you can call
+// TagObject on the hash of the reference in ForEach:
+//
+// ref, err := r.Tag("v0.1.0")
+// if err != nil {
+// // Handle error
+// }
+//
+// obj, err := r.TagObject(ref.Hash())
+// switch err {
+// case nil:
+// // Tag object present
+// case plumbing.ErrObjectNotFound:
+// // Not a tag object
+// default:
+// // Some other error
+// }
+//
+func (r *Repository) Tag(name string) (*plumbing.Reference, error) {
+ ref, err := r.Reference(plumbing.ReferenceName(path.Join("refs", "tags", name)), false)
+ if err != nil {
+ if err == plumbing.ErrReferenceNotFound {
+ // Return a friendly error for this one, versus just ReferenceNotFound.
+ return nil, ErrTagNotFound
+ }
+
+ return nil, err
+ }
+
+ return ref, nil
+}
+
+// DeleteTag deletes a tag from the repository.
+func (r *Repository) DeleteTag(name string) error {
+ _, err := r.Tag(name)
+ if err != nil {
+ return err
+ }
+
+ return r.Storer.RemoveReference(plumbing.ReferenceName(path.Join("refs", "tags", name)))
+}
+
func (r *Repository) resolveToCommitHash(h plumbing.Hash) (plumbing.Hash, error) {
obj, err := r.Storer.EncodedObject(plumbing.AnyObject, h)
if err != nil {
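
CreateTag branches on opts: with nil options the new ref points straight at the given hash (a lightweight tag); with a Tagger and Message a tag object is encoded and stored first, and a SignKey additionally attaches an armored PGP signature over the encoded tag. A sketch of both shapes (the tag names, signature fields, and messages are illustrative):

    package example

    import (
        "time"

        git "gopkg.in/src-d/go-git.v4"
        "gopkg.in/src-d/go-git.v4/plumbing/object"
    )

    func tagHead(r *git.Repository) error {
        head, err := r.Head()
        if err != nil {
            return err
        }

        // Lightweight: nil options, the ref points directly at the commit.
        if _, err := r.CreateTag("v1.0.0-rc1", head.Hash(), nil); err != nil {
            return err
        }

        // Annotated: a tag object is created and the ref points at it.
        _, err = r.CreateTag("v1.0.0", head.Hash(), &git.CreateTagOptions{
            Tagger: &object.Signature{
                Name:  "Example Tagger",
                Email: "tagger@example.com",
                When:  time.Now(),
            },
            Message: "release v1.0.0",
        })
        return err
    }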
@@ -439,11 +654,12 @@ func (r *Repository) clone(ctx context.Context, o *CloneOptions) error {
}
ref, err := r.fetchAndUpdateReferences(ctx, &FetchOptions{
- RefSpecs: r.cloneRefSpec(o, c),
- Depth: o.Depth,
- Auth: o.Auth,
- Progress: o.Progress,
- Tags: o.Tags,
+ RefSpecs: r.cloneRefSpec(o, c),
+ Depth: o.Depth,
+ Auth: o.Auth,
+ Progress: o.Progress,
+ Tags: o.Tags,
+ RemoteName: o.RemoteName,
}, o.ReferenceName)
if err != nil {
return err
@@ -477,11 +693,33 @@ func (r *Repository) clone(ctx context.Context, o *CloneOptions) error {
}
}
- return r.updateRemoteConfigIfNeeded(o, c, ref)
+ if err := r.updateRemoteConfigIfNeeded(o, c, ref); err != nil {
+ return err
+ }
+
+ if ref.Name().IsBranch() {
+ branchRef := ref.Name()
+ branchName := strings.Split(string(branchRef), "refs/heads/")[1]
+
+ b := &config.Branch{
+ Name: branchName,
+ Merge: branchRef,
+ }
+ if o.RemoteName == "" {
+ b.Remote = "origin"
+ } else {
+ b.Remote = o.RemoteName
+ }
+ if err := r.CreateBranch(b); err != nil {
+ return err
+ }
+ }
+
+ return nil
}
const (
- refspecTagWithDepth = "+refs/tags/%s:refs/tags/%[1]s"
+ refspecTag = "+refs/tags/%s:refs/tags/%[1]s"
refspecSingleBranch = "+refs/heads/%s:refs/remotes/%s/%[1]s"
refspecSingleBranchHEAD = "+HEAD:refs/remotes/%s/HEAD"
)
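
Renaming refspecTagWithDepth to refspecTag reflects that tag clones now use this refspec at any depth, not only when Depth > 0. The format string relies on an explicit argument index: %[1]s re-reads the first operand, so a single Sprintf argument fills both sides of the refspec:

    package example

    import "fmt"

    // tagRefSpec("v1.0.0") yields "+refs/tags/v1.0.0:refs/tags/v1.0.0".
    func tagRefSpec(short string) string {
        return fmt.Sprintf("+refs/tags/%s:refs/tags/%[1]s", short)
    }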
@@ -490,8 +728,8 @@ func (r *Repository) cloneRefSpec(o *CloneOptions, c *config.RemoteConfig) []con
var rs string
switch {
- case o.ReferenceName.IsTag() && o.Depth > 0:
- rs = fmt.Sprintf(refspecTagWithDepth, o.ReferenceName.Short())
+ case o.ReferenceName.IsTag():
+ rs = fmt.Sprintf(refspecTag, o.ReferenceName.Short())
case o.SingleBranch && o.ReferenceName == plumbing.HEAD:
rs = fmt.Sprintf(refspecSingleBranchHEAD, c.Name)
case o.SingleBranch:
@@ -728,11 +966,53 @@ func (r *Repository) Log(o *LogOptions) (object.CommitIter, error) {
return nil, err
}
- return object.NewCommitPreorderIter(commit, nil, nil), nil
+ var commitIter object.CommitIter
+ switch o.Order {
+ case LogOrderDefault:
+ commitIter = object.NewCommitPreorderIter(commit, nil, nil)
+ case LogOrderDFS:
+ commitIter = object.NewCommitPreorderIter(commit, nil, nil)
+ case LogOrderDFSPost:
+ commitIter = object.NewCommitPostorderIter(commit, nil)
+ case LogOrderBSF:
+ commitIter = object.NewCommitIterBSF(commit, nil, nil)
+ case LogOrderCommitterTime:
+ commitIter = object.NewCommitIterCTime(commit, nil, nil)
+ default:
+ return nil, fmt.Errorf("invalid Order=%v", o.Order)
+ }
+
+ if o.FileName == nil {
+ return commitIter, nil
+ }
+ return object.NewCommitFileIterFromIter(*o.FileName, commitIter), nil
}
-// Tags returns all the References from Tags. This method returns all the tag
-// types, lightweight, and annotated ones.
+// Tags returns all the tag References in a repository.
+//
+// If you want to check whether the tag is an annotated tag, you can call
+// TagObject on the hash of the Reference passed in through ForEach:
+//
+// iter, err := r.Tags()
+// if err != nil {
+// // Handle error
+// }
+//
+// if err := iter.ForEach(func (ref *plumbing.Reference) error {
+// obj, err := r.TagObject(ref.Hash())
+// switch err {
+// case nil:
+// // Tag object present
+// case plumbing.ErrObjectNotFound:
+// // Not a tag object
+// default:
+// // Some other error
+// return err
+// }
+// }); err != nil {
+// // Handle outer iterator error
+// }
+//
func (r *Repository) Tags() (storer.ReferenceIter, error) {
refIter, err := r.Storer.IterReferences()
if err != nil {
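
Log now selects its commit iterator from LogOptions.Order (preorder DFS by default, postorder, breadth-first, or committer time) and, when FileName is set, wraps the result so only commits touching that path are yielded. A usage sketch matching the tests further down (the path value is illustrative):

    package example

    import (
        git "gopkg.in/src-d/go-git.v4"
        "gopkg.in/src-d/go-git.v4/plumbing/object"
    )

    // logFile walks history in committer-time order, restricted to one file.
    func logFile(r *git.Repository, path string) error {
        iter, err := r.Log(&git.LogOptions{
            Order:    git.LogOrderCommitterTime,
            FileName: &path,
        })
        if err != nil {
            return err
        }
        return iter.ForEach(func(c *object.Commit) error {
            // inspect c.Hash, c.Message, etc. here
            return nil
        })
    }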
@@ -758,7 +1038,8 @@ func (r *Repository) Branches() (storer.ReferenceIter, error) {
}, refIter), nil
}
-// Notes returns all the References that are Branches.
+// Notes returns all the References that are notes. For more information:
+// https://git-scm.com/docs/git-notes
func (r *Repository) Notes() (storer.ReferenceIter, error) {
refIter, err := r.Storer.IterReferences()
if err != nil {
@@ -910,6 +1191,8 @@ func (r *Repository) ResolveRevision(rev plumbing.Revision) (*plumbing.Hash, err
case revision.Ref:
revisionRef := item.(revision.Ref)
var ref *plumbing.Reference
+ var hashCommit, refCommit *object.Commit
+ var rErr, hErr error
for _, rule := range append([]string{"%s"}, plumbing.RefRevParseRules...) {
ref, err = storer.ResolveReference(r.Storer, plumbing.ReferenceName(fmt.Sprintf(rule, revisionRef)))
@@ -919,14 +1202,27 @@ func (r *Repository) ResolveRevision(rev plumbing.Revision) (*plumbing.Hash, err
}
}
- if ref == nil {
- return &plumbing.ZeroHash, plumbing.ErrReferenceNotFound
+ if ref != nil {
+ refCommit, rErr = r.CommitObject(ref.Hash())
+ } else {
+ rErr = plumbing.ErrReferenceNotFound
}
- commit, err = r.CommitObject(ref.Hash())
+ isHash := plumbing.NewHash(string(revisionRef)).String() == string(revisionRef)
- if err != nil {
- return &plumbing.ZeroHash, err
+ if isHash {
+ hashCommit, hErr = r.CommitObject(plumbing.NewHash(string(revisionRef)))
+ }
+
+ switch {
+ case rErr == nil && !isHash:
+ commit = refCommit
+ case rErr != nil && isHash && hErr == nil:
+ commit = hashCommit
+ case rErr == nil && isHash && hErr == nil:
+ return &plumbing.ZeroHash, fmt.Errorf(`refname "%s" is ambiguous`, revisionRef)
+ default:
+ return &plumbing.ZeroHash, plumbing.ErrReferenceNotFound
}
case revision.CaretPath:
depth := item.(revision.CaretPath).Depth
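
ResolveRevision now tries a revision both as a reference name and, when it is 40 hex characters, as a commit hash. If both resolve it refuses with an ambiguity error, matching git's own "refname ... is ambiguous" warning; if only one resolves, that one wins. A sketch of the outcomes (the hash is the one used by the tests below):

    package example

    import (
        git "gopkg.in/src-d/go-git.v4"
        "gopkg.in/src-d/go-git.v4/plumbing"
    )

    func resolve(r *git.Repository) (*plumbing.Hash, error) {
        // Resolves as a commit hash -- unless a ref with this exact name
        // also exists, in which case the error is:
        //   refname "918c48b8..." is ambiguous
        return r.ResolveRevision(
            plumbing.Revision("918c48b83bd081e863dbe1b80f8998f058cd8294"))
    }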
@@ -1080,7 +1376,7 @@ func (r *Repository) createNewObjectPack(cfg *RepackConfig) (h plumbing.Hash, er
if los, ok := r.Storer.(storer.LooseObjectStorer); ok {
err = los.ForEachObjectHash(func(hash plumbing.Hash) error {
if ow.isSeen(hash) {
- err := los.DeleteLooseObject(hash)
+ err = los.DeleteLooseObject(hash)
if err != nil {
return err
}
diff --git a/repository_test.go b/repository_test.go
index ef37e37..6d34d42 100644
--- a/repository_test.go
+++ b/repository_test.go
@@ -10,10 +10,15 @@ import (
"os/exec"
"path/filepath"
"strings"
+ "testing"
"time"
+ "golang.org/x/crypto/openpgp"
+ "golang.org/x/crypto/openpgp/armor"
+ openpgperr "golang.org/x/crypto/openpgp/errors"
"gopkg.in/src-d/go-git.v4/config"
"gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/cache"
"gopkg.in/src-d/go-git.v4/plumbing/object"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/storage"
@@ -50,8 +55,7 @@ func (s *RepositorySuite) TestInitNonStandardDotGit(c *C) {
fs := osfs.New(dir)
dot, _ := fs.Chroot("storage")
- storage, err := filesystem.NewStorage(dot)
- c.Assert(err, IsNil)
+ storage := filesystem.NewStorage(dot, cache.NewObjectLRUDefault())
wt, _ := fs.Chroot("worktree")
r, err := Init(storage, wt)
@@ -77,8 +81,7 @@ func (s *RepositorySuite) TestInitStandardDotGit(c *C) {
fs := osfs.New(dir)
dot, _ := fs.Chroot(".git")
- storage, err := filesystem.NewStorage(dot)
- c.Assert(err, IsNil)
+ storage := filesystem.NewStorage(dot, cache.NewObjectLRUDefault())
r, err := Init(storage, fs)
c.Assert(err, IsNil)
@@ -243,6 +246,119 @@ func (s *RepositorySuite) TestDeleteRemote(c *C) {
c.Assert(alt, IsNil)
}
+func (s *RepositorySuite) TestCreateBranchAndBranch(c *C) {
+ r, _ := Init(memory.NewStorage(), nil)
+ testBranch := &config.Branch{
+ Name: "foo",
+ Remote: "origin",
+ Merge: "refs/heads/foo",
+ }
+ err := r.CreateBranch(testBranch)
+
+ c.Assert(err, IsNil)
+ cfg, err := r.Config()
+ c.Assert(err, IsNil)
+ c.Assert(len(cfg.Branches), Equals, 1)
+ branch := cfg.Branches["foo"]
+ c.Assert(branch.Name, Equals, testBranch.Name)
+ c.Assert(branch.Remote, Equals, testBranch.Remote)
+ c.Assert(branch.Merge, Equals, testBranch.Merge)
+
+ branch, err = r.Branch("foo")
+ c.Assert(err, IsNil)
+ c.Assert(branch.Name, Equals, testBranch.Name)
+ c.Assert(branch.Remote, Equals, testBranch.Remote)
+ c.Assert(branch.Merge, Equals, testBranch.Merge)
+}
+
+func (s *RepositorySuite) TestCreateBranchUnmarshal(c *C) {
+ r, _ := Init(memory.NewStorage(), nil)
+
+ expected := []byte(`[core]
+ bare = true
+[remote "foo"]
+ url = http://foo/foo.git
+ fetch = +refs/heads/*:refs/remotes/foo/*
+[branch "foo"]
+ remote = origin
+ merge = refs/heads/foo
+[branch "master"]
+ remote = origin
+ merge = refs/heads/master
+`)
+
+ _, err := r.CreateRemote(&config.RemoteConfig{
+ Name: "foo",
+ URLs: []string{"http://foo/foo.git"},
+ })
+ c.Assert(err, IsNil)
+ testBranch1 := &config.Branch{
+ Name: "master",
+ Remote: "origin",
+ Merge: "refs/heads/master",
+ }
+ testBranch2 := &config.Branch{
+ Name: "foo",
+ Remote: "origin",
+ Merge: "refs/heads/foo",
+ }
+	err = r.CreateBranch(testBranch1)
+	c.Assert(err, IsNil)
+	err = r.CreateBranch(testBranch2)
+	c.Assert(err, IsNil)
+ cfg, err := r.Config()
+ c.Assert(err, IsNil)
+	marshaled, err := cfg.Marshal()
+	c.Assert(err, IsNil)
+	c.Assert(string(marshaled), Equals, string(expected))
+}
+
+func (s *RepositorySuite) TestBranchInvalid(c *C) {
+ r, _ := Init(memory.NewStorage(), nil)
+ branch, err := r.Branch("foo")
+
+ c.Assert(err, NotNil)
+ c.Assert(branch, IsNil)
+}
+
+func (s *RepositorySuite) TestCreateBranchInvalid(c *C) {
+ r, _ := Init(memory.NewStorage(), nil)
+ err := r.CreateBranch(&config.Branch{})
+
+ c.Assert(err, NotNil)
+
+ testBranch := &config.Branch{
+ Name: "foo",
+ Remote: "origin",
+ Merge: "refs/heads/foo",
+ }
+ err = r.CreateBranch(testBranch)
+ c.Assert(err, IsNil)
+ err = r.CreateBranch(testBranch)
+ c.Assert(err, NotNil)
+}
+
+func (s *RepositorySuite) TestDeleteBranch(c *C) {
+ r, _ := Init(memory.NewStorage(), nil)
+ testBranch := &config.Branch{
+ Name: "foo",
+ Remote: "origin",
+ Merge: "refs/heads/foo",
+ }
+ err := r.CreateBranch(testBranch)
+
+ c.Assert(err, IsNil)
+
+ err = r.DeleteBranch("foo")
+ c.Assert(err, IsNil)
+
+ b, err := r.Branch("foo")
+ c.Assert(err, Equals, ErrBranchNotFound)
+ c.Assert(b, IsNil)
+
+ err = r.DeleteBranch("foo")
+ c.Assert(err, Equals, ErrBranchNotFound)
+}
+
func (s *RepositorySuite) TestPlainInit(c *C) {
dir, err := ioutil.TempDir("", "plain-init")
c.Assert(err, IsNil)
@@ -406,6 +522,36 @@ func (s *RepositorySuite) TestPlainOpenNotExists(c *C) {
c.Assert(r, IsNil)
}
+func (s *RepositorySuite) TestPlainOpenDetectDotGit(c *C) {
+ dir, err := ioutil.TempDir("", "plain-open")
+ c.Assert(err, IsNil)
+ defer os.RemoveAll(dir)
+
+ subdir := filepath.Join(dir, "a", "b")
+ err = os.MkdirAll(subdir, 0755)
+ c.Assert(err, IsNil)
+
+ r, err := PlainInit(dir, false)
+ c.Assert(err, IsNil)
+ c.Assert(r, NotNil)
+
+ opt := &PlainOpenOptions{DetectDotGit: true}
+ r, err = PlainOpenWithOptions(subdir, opt)
+ c.Assert(err, IsNil)
+ c.Assert(r, NotNil)
+}
+
+func (s *RepositorySuite) TestPlainOpenNotExistsDetectDotGit(c *C) {
+ dir, err := ioutil.TempDir("", "plain-open")
+ c.Assert(err, IsNil)
+ defer os.RemoveAll(dir)
+
+ opt := &PlainOpenOptions{DetectDotGit: true}
+ r, err := PlainOpenWithOptions(dir, opt)
+ c.Assert(err, Equals, ErrRepositoryNotExists)
+ c.Assert(r, IsNil)
+}
+
func (s *RepositorySuite) TestPlainClone(c *C) {
r, err := PlainClone(c.MkDir(), false, &CloneOptions{
URL: s.GetBasicLocalRepositoryURL(),
@@ -416,6 +562,23 @@ func (s *RepositorySuite) TestPlainClone(c *C) {
remotes, err := r.Remotes()
c.Assert(err, IsNil)
c.Assert(remotes, HasLen, 1)
+ cfg, err := r.Config()
+ c.Assert(err, IsNil)
+ c.Assert(cfg.Branches, HasLen, 1)
+ c.Assert(cfg.Branches["master"].Name, Equals, "master")
+}
+
+func (s *RepositorySuite) TestPlainCloneWithRemoteName(c *C) {
+ r, err := PlainClone(c.MkDir(), false, &CloneOptions{
+ URL: s.GetBasicLocalRepositoryURL(),
+ RemoteName: "test",
+ })
+
+ c.Assert(err, IsNil)
+
+ remote, err := r.Remote("test")
+ c.Assert(err, IsNil)
+ c.Assert(remote, NotNil)
}
func (s *RepositorySuite) TestPlainCloneContext(c *C) {
@@ -430,6 +593,10 @@ func (s *RepositorySuite) TestPlainCloneContext(c *C) {
}
func (s *RepositorySuite) TestPlainCloneWithRecurseSubmodules(c *C) {
+ if testing.Short() {
+ c.Skip("skipping test in short mode.")
+ }
+
dir, err := ioutil.TempDir("", "plain-clone-submodule")
c.Assert(err, IsNil)
defer os.RemoveAll(dir)
@@ -445,6 +612,7 @@ func (s *RepositorySuite) TestPlainCloneWithRecurseSubmodules(c *C) {
cfg, err := r.Config()
c.Assert(err, IsNil)
c.Assert(cfg.Remotes, HasLen, 1)
+ c.Assert(cfg.Branches, HasLen, 1)
c.Assert(cfg.Submodules, HasLen, 2)
}
@@ -580,6 +748,8 @@ func (s *RepositorySuite) TestCloneConfig(c *C) {
c.Assert(cfg.Remotes, HasLen, 1)
c.Assert(cfg.Remotes["origin"].Name, Equals, "origin")
c.Assert(cfg.Remotes["origin"].URLs, HasLen, 1)
+ c.Assert(cfg.Branches, HasLen, 1)
+ c.Assert(cfg.Branches["master"].Name, Equals, "master")
}
func (s *RepositorySuite) TestCloneSingleBranchAndNonHEAD(c *C) {
@@ -601,6 +771,13 @@ func (s *RepositorySuite) TestCloneSingleBranchAndNonHEAD(c *C) {
c.Assert(err, IsNil)
c.Assert(remotes, HasLen, 1)
+ cfg, err := r.Config()
+ c.Assert(err, IsNil)
+ c.Assert(cfg.Branches, HasLen, 1)
+ c.Assert(cfg.Branches["branch"].Name, Equals, "branch")
+ c.Assert(cfg.Branches["branch"].Remote, Equals, "origin")
+ c.Assert(cfg.Branches["branch"].Merge, Equals, plumbing.ReferenceName("refs/heads/branch"))
+
head, err = r.Reference(plumbing.HEAD, false)
c.Assert(err, IsNil)
c.Assert(head, NotNil)
@@ -637,6 +814,13 @@ func (s *RepositorySuite) TestCloneSingleBranch(c *C) {
c.Assert(err, IsNil)
c.Assert(remotes, HasLen, 1)
+ cfg, err := r.Config()
+ c.Assert(err, IsNil)
+ c.Assert(cfg.Branches, HasLen, 1)
+ c.Assert(cfg.Branches["master"].Name, Equals, "master")
+ c.Assert(cfg.Branches["master"].Remote, Equals, "origin")
+ c.Assert(cfg.Branches["master"].Merge, Equals, plumbing.ReferenceName("refs/heads/master"))
+
head, err = r.Reference(plumbing.HEAD, false)
c.Assert(err, IsNil)
c.Assert(head, NotNil)
@@ -663,6 +847,10 @@ func (s *RepositorySuite) TestCloneDetachedHEAD(c *C) {
})
c.Assert(err, IsNil)
+ cfg, err := r.Config()
+ c.Assert(err, IsNil)
+ c.Assert(cfg.Branches, HasLen, 0)
+
head, err := r.Reference(plumbing.HEAD, false)
c.Assert(err, IsNil)
c.Assert(head, NotNil)
@@ -673,7 +861,33 @@ func (s *RepositorySuite) TestCloneDetachedHEAD(c *C) {
objects, err := r.Objects()
c.Assert(err, IsNil)
objects.ForEach(func(object.Object) error { count++; return nil })
- c.Assert(count, Equals, 31)
+ c.Assert(count, Equals, 28)
+}
+
+func (s *RepositorySuite) TestCloneDetachedHEADAndSingle(c *C) {
+ r, _ := Init(memory.NewStorage(), nil)
+ err := r.clone(context.Background(), &CloneOptions{
+ URL: s.GetBasicLocalRepositoryURL(),
+ ReferenceName: plumbing.ReferenceName("refs/tags/v1.0.0"),
+ SingleBranch: true,
+ })
+ c.Assert(err, IsNil)
+
+ cfg, err := r.Config()
+ c.Assert(err, IsNil)
+ c.Assert(cfg.Branches, HasLen, 0)
+
+ head, err := r.Reference(plumbing.HEAD, false)
+ c.Assert(err, IsNil)
+ c.Assert(head, NotNil)
+ c.Assert(head.Type(), Equals, plumbing.HashReference)
+ c.Assert(head.Hash().String(), Equals, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
+
+ count := 0
+ objects, err := r.Objects()
+ c.Assert(err, IsNil)
+ objects.ForEach(func(object.Object) error { count++; return nil })
+ c.Assert(count, Equals, 28)
}
func (s *RepositorySuite) TestCloneDetachedHEADAndShallow(c *C) {
@@ -686,6 +900,10 @@ func (s *RepositorySuite) TestCloneDetachedHEADAndShallow(c *C) {
c.Assert(err, IsNil)
+ cfg, err := r.Config()
+ c.Assert(err, IsNil)
+ c.Assert(cfg.Branches, HasLen, 0)
+
head, err := r.Reference(plumbing.HEAD, false)
c.Assert(err, IsNil)
c.Assert(head, NotNil)
@@ -707,6 +925,10 @@ func (s *RepositorySuite) TestCloneDetachedHEADAnnotatedTag(c *C) {
})
c.Assert(err, IsNil)
+ cfg, err := r.Config()
+ c.Assert(err, IsNil)
+ c.Assert(cfg.Branches, HasLen, 0)
+
head, err := r.Reference(plumbing.HEAD, false)
c.Assert(err, IsNil)
c.Assert(head, NotNil)
@@ -851,8 +1073,7 @@ func (s *RepositorySuite) TestPushDepth(c *C) {
func (s *RepositorySuite) TestPushNonExistentRemote(c *C) {
srcFs := fixtures.Basic().One().DotGit()
- sto, err := filesystem.NewStorage(srcFs)
- c.Assert(err, IsNil)
+ sto := filesystem.NewStorage(srcFs, cache.NewObjectLRUDefault())
r, err := Open(sto, srcFs)
c.Assert(err, IsNil)
@@ -870,7 +1091,7 @@ func (s *RepositorySuite) TestLog(c *C) {
c.Assert(err, IsNil)
cIter, err := r.Log(&LogOptions{
- plumbing.NewHash("b8e471f58bcbca63b07bda20e428190409c2db47"),
+ From: plumbing.NewHash("b8e471f58bcbca63b07bda20e428190409c2db47"),
})
c.Assert(err, IsNil)
@@ -930,11 +1151,150 @@ func (s *RepositorySuite) TestLogError(c *C) {
c.Assert(err, IsNil)
_, err = r.Log(&LogOptions{
- plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
+ From: plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
})
c.Assert(err, NotNil)
}
+func (s *RepositorySuite) TestLogFileNext(c *C) {
+ r, _ := Init(memory.NewStorage(), nil)
+ err := r.clone(context.Background(), &CloneOptions{
+ URL: s.GetBasicLocalRepositoryURL(),
+ })
+
+ c.Assert(err, IsNil)
+
+ fileName := "vendor/foo.go"
+ cIter, err := r.Log(&LogOptions{FileName: &fileName})
+
+ c.Assert(err, IsNil)
+
+ commitOrder := []plumbing.Hash{
+ plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"),
+ }
+
+ for _, o := range commitOrder {
+ commit, err := cIter.Next()
+ c.Assert(err, IsNil)
+ c.Assert(commit.Hash, Equals, o)
+ }
+ _, err = cIter.Next()
+ c.Assert(err, Equals, io.EOF)
+}
+
+func (s *RepositorySuite) TestLogFileForEach(c *C) {
+ r, _ := Init(memory.NewStorage(), nil)
+ err := r.clone(context.Background(), &CloneOptions{
+ URL: s.GetBasicLocalRepositoryURL(),
+ })
+
+ c.Assert(err, IsNil)
+
+ fileName := "php/crappy.php"
+ cIter, err := r.Log(&LogOptions{FileName: &fileName})
+
+ c.Assert(err, IsNil)
+
+ commitOrder := []plumbing.Hash{
+ plumbing.NewHash("918c48b83bd081e863dbe1b80f8998f058cd8294"),
+ }
+
+ expectedIndex := 0
+ cIter.ForEach(func(commit *object.Commit) error {
+ expectedCommitHash := commitOrder[expectedIndex]
+ c.Assert(commit.Hash.String(), Equals, expectedCommitHash.String())
+		expectedIndex++
+ return nil
+ })
+ c.Assert(expectedIndex, Equals, 1)
+}
+
+func (s *RepositorySuite) TestLogInvalidFile(c *C) {
+ r, _ := Init(memory.NewStorage(), nil)
+ err := r.clone(context.Background(), &CloneOptions{
+ URL: s.GetBasicLocalRepositoryURL(),
+ })
+ c.Assert(err, IsNil)
+
+ // Throwing in a file that does not exist
+ fileName := "vendor/foo12.go"
+ cIter, err := r.Log(&LogOptions{FileName: &fileName})
+ // Not raising an error since `git log -- vendor/foo12.go` responds silently
+ c.Assert(err, IsNil)
+
+ _, err = cIter.Next()
+ c.Assert(err, Equals, io.EOF)
+}
+
+func (s *RepositorySuite) TestLogFileInitialCommit(c *C) {
+ r, _ := Init(memory.NewStorage(), nil)
+ err := r.clone(context.Background(), &CloneOptions{
+ URL: s.GetBasicLocalRepositoryURL(),
+ })
+ c.Assert(err, IsNil)
+
+ fileName := "LICENSE"
+ cIter, err := r.Log(&LogOptions{
+ Order: LogOrderCommitterTime,
+ FileName: &fileName,
+ })
+
+ c.Assert(err, IsNil)
+
+ commitOrder := []plumbing.Hash{
+ plumbing.NewHash("b029517f6300c2da0f4b651b8642506cd6aaf45d"),
+ }
+
+ expectedIndex := 0
+ cIter.ForEach(func(commit *object.Commit) error {
+ expectedCommitHash := commitOrder[expectedIndex]
+ c.Assert(commit.Hash.String(), Equals, expectedCommitHash.String())
+		expectedIndex++
+ return nil
+ })
+ c.Assert(expectedIndex, Equals, 1)
+}
+
+func (s *RepositorySuite) TestLogFileWithOtherParamsFail(c *C) {
+ r, _ := Init(memory.NewStorage(), nil)
+ err := r.clone(context.Background(), &CloneOptions{
+ URL: s.GetBasicLocalRepositoryURL(),
+ })
+ c.Assert(err, IsNil)
+
+ fileName := "vendor/foo.go"
+ cIter, err := r.Log(&LogOptions{
+ Order: LogOrderCommitterTime,
+ FileName: &fileName,
+ From: plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9"),
+ })
+ c.Assert(err, IsNil)
+ _, iterErr := cIter.Next()
+ c.Assert(iterErr, Equals, io.EOF)
+}
+
+func (s *RepositorySuite) TestLogFileWithOtherParamsPass(c *C) {
+ r, _ := Init(memory.NewStorage(), nil)
+ err := r.clone(context.Background(), &CloneOptions{
+ URL: s.GetBasicLocalRepositoryURL(),
+ })
+ c.Assert(err, IsNil)
+
+ fileName := "LICENSE"
+ cIter, err := r.Log(&LogOptions{
+ Order: LogOrderCommitterTime,
+ FileName: &fileName,
+ From: plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9"),
+ })
+ c.Assert(err, IsNil)
+ commitVal, iterErr := cIter.Next()
+ c.Assert(iterErr, Equals, nil)
+ c.Assert(commitVal.Hash.String(), Equals, "b029517f6300c2da0f4b651b8642506cd6aaf45d")
+
+ _, iterErr = cIter.Next()
+ c.Assert(iterErr, Equals, io.EOF)
+}
+
func (s *RepositorySuite) TestCommit(c *C) {
r, _ := Init(memory.NewStorage(), nil)
err := r.clone(context.Background(), &CloneOptions{
@@ -1068,10 +1428,396 @@ func (s *RepositorySuite) TestTags(c *C) {
c.Assert(count, Equals, 5)
}
+func (s *RepositorySuite) TestCreateTagLightweight(c *C) {
+ url := s.GetLocalRepositoryURL(
+ fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(),
+ )
+
+ r, _ := Init(memory.NewStorage(), nil)
+ err := r.clone(context.Background(), &CloneOptions{URL: url})
+ c.Assert(err, IsNil)
+
+ expected, err := r.Head()
+ c.Assert(err, IsNil)
+
+ ref, err := r.CreateTag("foobar", expected.Hash(), nil)
+ c.Assert(err, IsNil)
+ c.Assert(ref, NotNil)
+
+ actual, err := r.Tag("foobar")
+ c.Assert(err, IsNil)
+
+ c.Assert(expected.Hash(), Equals, actual.Hash())
+}
+
+func (s *RepositorySuite) TestCreateTagLightweightExists(c *C) {
+ url := s.GetLocalRepositoryURL(
+ fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(),
+ )
+
+ r, _ := Init(memory.NewStorage(), nil)
+ err := r.clone(context.Background(), &CloneOptions{URL: url})
+ c.Assert(err, IsNil)
+
+ expected, err := r.Head()
+ c.Assert(err, IsNil)
+
+ ref, err := r.CreateTag("lightweight-tag", expected.Hash(), nil)
+ c.Assert(ref, IsNil)
+ c.Assert(err, Equals, ErrTagExists)
+}
+
+func (s *RepositorySuite) TestCreateTagAnnotated(c *C) {
+ url := s.GetLocalRepositoryURL(
+ fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(),
+ )
+
+ r, _ := Init(memory.NewStorage(), nil)
+ err := r.clone(context.Background(), &CloneOptions{URL: url})
+ c.Assert(err, IsNil)
+
+ h, err := r.Head()
+ c.Assert(err, IsNil)
+
+ expectedHash := h.Hash()
+
+ ref, err := r.CreateTag("foobar", expectedHash, &CreateTagOptions{
+ Tagger: defaultSignature(),
+ Message: "foo bar baz qux",
+ })
+ c.Assert(err, IsNil)
+
+ tag, err := r.Tag("foobar")
+ c.Assert(err, IsNil)
+
+ obj, err := r.TagObject(tag.Hash())
+ c.Assert(err, IsNil)
+
+ c.Assert(ref, DeepEquals, tag)
+ c.Assert(obj.Hash, Equals, ref.Hash())
+ c.Assert(obj.Type(), Equals, plumbing.TagObject)
+ c.Assert(obj.Target, Equals, expectedHash)
+}
+
+func (s *RepositorySuite) TestCreateTagAnnotatedBadOpts(c *C) {
+ url := s.GetLocalRepositoryURL(
+ fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(),
+ )
+
+ r, _ := Init(memory.NewStorage(), nil)
+ err := r.clone(context.Background(), &CloneOptions{URL: url})
+ c.Assert(err, IsNil)
+
+ h, err := r.Head()
+ c.Assert(err, IsNil)
+
+ expectedHash := h.Hash()
+
+ ref, err := r.CreateTag("foobar", expectedHash, &CreateTagOptions{
+ Message: "foo bar baz qux",
+ })
+ c.Assert(ref, IsNil)
+ c.Assert(err, Equals, ErrMissingTagger)
+
+ ref, err = r.CreateTag("foobar", expectedHash, &CreateTagOptions{
+ Tagger: defaultSignature(),
+ })
+ c.Assert(ref, IsNil)
+ c.Assert(err, Equals, ErrMissingMessage)
+}
+
+func (s *RepositorySuite) TestCreateTagAnnotatedBadHash(c *C) {
+ url := s.GetLocalRepositoryURL(
+ fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(),
+ )
+
+ r, _ := Init(memory.NewStorage(), nil)
+ err := r.clone(context.Background(), &CloneOptions{URL: url})
+ c.Assert(err, IsNil)
+
+ ref, err := r.CreateTag("foobar", plumbing.ZeroHash, &CreateTagOptions{
+ Tagger: defaultSignature(),
+ Message: "foo bar baz qux",
+ })
+ c.Assert(ref, IsNil)
+ c.Assert(err, Equals, plumbing.ErrObjectNotFound)
+}
+
+func (s *RepositorySuite) TestCreateTagSigned(c *C) {
+ url := s.GetLocalRepositoryURL(
+ fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(),
+ )
+
+ r, _ := Init(memory.NewStorage(), nil)
+ err := r.clone(context.Background(), &CloneOptions{URL: url})
+ c.Assert(err, IsNil)
+
+ h, err := r.Head()
+ c.Assert(err, IsNil)
+
+ key := commitSignKey(c, true)
+ _, err = r.CreateTag("foobar", h.Hash(), &CreateTagOptions{
+ Tagger: defaultSignature(),
+ Message: "foo bar baz qux",
+ SignKey: key,
+ })
+ c.Assert(err, IsNil)
+
+ tag, err := r.Tag("foobar")
+ c.Assert(err, IsNil)
+
+ obj, err := r.TagObject(tag.Hash())
+ c.Assert(err, IsNil)
+
+ // Verify the tag.
+ pks := new(bytes.Buffer)
+ pkw, err := armor.Encode(pks, openpgp.PublicKeyType, nil)
+ c.Assert(err, IsNil)
+
+ err = key.Serialize(pkw)
+ c.Assert(err, IsNil)
+ err = pkw.Close()
+ c.Assert(err, IsNil)
+
+ actual, err := obj.Verify(pks.String())
+ c.Assert(err, IsNil)
+ c.Assert(actual.PrimaryKey, DeepEquals, key.PrimaryKey)
+}
+
+func (s *RepositorySuite) TestCreateTagSignedBadKey(c *C) {
+ url := s.GetLocalRepositoryURL(
+ fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(),
+ )
+
+ r, _ := Init(memory.NewStorage(), nil)
+ err := r.clone(context.Background(), &CloneOptions{URL: url})
+ c.Assert(err, IsNil)
+
+ h, err := r.Head()
+ c.Assert(err, IsNil)
+
+ key := commitSignKey(c, false)
+ _, err = r.CreateTag("foobar", h.Hash(), &CreateTagOptions{
+ Tagger: defaultSignature(),
+ Message: "foo bar baz qux",
+ SignKey: key,
+ })
+ c.Assert(err, Equals, openpgperr.InvalidArgumentError("signing key is encrypted"))
+}
+
+func (s *RepositorySuite) TestCreateTagCanonicalize(c *C) {
+ url := s.GetLocalRepositoryURL(
+ fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(),
+ )
+
+ r, _ := Init(memory.NewStorage(), nil)
+ err := r.clone(context.Background(), &CloneOptions{URL: url})
+ c.Assert(err, IsNil)
+
+ h, err := r.Head()
+ c.Assert(err, IsNil)
+
+ key := commitSignKey(c, true)
+ _, err = r.CreateTag("foobar", h.Hash(), &CreateTagOptions{
+ Tagger: defaultSignature(),
+ Message: "\n\nfoo bar baz qux\n\nsome message here",
+ SignKey: key,
+ })
+ c.Assert(err, IsNil)
+
+ tag, err := r.Tag("foobar")
+ c.Assert(err, IsNil)
+
+ obj, err := r.TagObject(tag.Hash())
+ c.Assert(err, IsNil)
+
+ // Assert the new canonicalized message.
+ c.Assert(obj.Message, Equals, "foo bar baz qux\n\nsome message here\n")
+
+ // Verify the tag.
+ pks := new(bytes.Buffer)
+ pkw, err := armor.Encode(pks, openpgp.PublicKeyType, nil)
+ c.Assert(err, IsNil)
+
+ err = key.Serialize(pkw)
+ c.Assert(err, IsNil)
+ err = pkw.Close()
+ c.Assert(err, IsNil)
+
+ actual, err := obj.Verify(pks.String())
+ c.Assert(err, IsNil)
+ c.Assert(actual.PrimaryKey, DeepEquals, key.PrimaryKey)
+}
+
+func (s *RepositorySuite) TestTagLightweight(c *C) {
+ url := s.GetLocalRepositoryURL(
+ fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(),
+ )
+
+ r, _ := Init(memory.NewStorage(), nil)
+ err := r.clone(context.Background(), &CloneOptions{URL: url})
+ c.Assert(err, IsNil)
+
+ expected := plumbing.NewHash("f7b877701fbf855b44c0a9e86f3fdce2c298b07f")
+
+ tag, err := r.Tag("lightweight-tag")
+ c.Assert(err, IsNil)
+
+ actual := tag.Hash()
+ c.Assert(expected, Equals, actual)
+}
+
+func (s *RepositorySuite) TestTagLightweightMissingTag(c *C) {
+ url := s.GetLocalRepositoryURL(
+ fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(),
+ )
+
+ r, _ := Init(memory.NewStorage(), nil)
+ err := r.clone(context.Background(), &CloneOptions{URL: url})
+ c.Assert(err, IsNil)
+
+ tag, err := r.Tag("lightweight-tag-tag")
+ c.Assert(tag, IsNil)
+ c.Assert(err, Equals, ErrTagNotFound)
+}
+
+func (s *RepositorySuite) TestDeleteTag(c *C) {
+ url := s.GetLocalRepositoryURL(
+ fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(),
+ )
+
+ r, _ := Init(memory.NewStorage(), nil)
+ err := r.clone(context.Background(), &CloneOptions{URL: url})
+ c.Assert(err, IsNil)
+
+ err = r.DeleteTag("lightweight-tag")
+ c.Assert(err, IsNil)
+
+ _, err = r.Tag("lightweight-tag")
+ c.Assert(err, Equals, ErrTagNotFound)
+}
+
+func (s *RepositorySuite) TestDeleteTagMissingTag(c *C) {
+ url := s.GetLocalRepositoryURL(
+ fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(),
+ )
+
+ r, _ := Init(memory.NewStorage(), nil)
+ err := r.clone(context.Background(), &CloneOptions{URL: url})
+ c.Assert(err, IsNil)
+
+ err = r.DeleteTag("lightweight-tag-tag")
+ c.Assert(err, Equals, ErrTagNotFound)
+}
+
+func (s *RepositorySuite) TestDeleteTagAnnotated(c *C) {
+ url := s.GetLocalRepositoryURL(
+ fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(),
+ )
+
+ dir, err := ioutil.TempDir("", "go-git-test-deletetag-annotated")
+ c.Assert(err, IsNil)
+
+ defer os.RemoveAll(dir) // clean up
+
+ fss := filesystem.NewStorage(osfs.New(dir), cache.NewObjectLRUDefault())
+
+ r, _ := Init(fss, nil)
+ err = r.clone(context.Background(), &CloneOptions{URL: url})
+ c.Assert(err, IsNil)
+
+ ref, err := r.Tag("annotated-tag")
+ c.Assert(ref, NotNil)
+ c.Assert(err, IsNil)
+
+ obj, err := r.TagObject(ref.Hash())
+ c.Assert(obj, NotNil)
+ c.Assert(err, IsNil)
+
+ err = r.DeleteTag("annotated-tag")
+ c.Assert(err, IsNil)
+
+ _, err = r.Tag("annotated-tag")
+ c.Assert(err, Equals, ErrTagNotFound)
+
+ // Run a prune (and repack, to ensure that we are GCing everything regardless
+ // of the fixture in use) and try to get the tag object again.
+ //
+ // The repo needs to be re-opened after the repack.
+ err = r.Prune(PruneOptions{Handler: r.DeleteObject})
+ c.Assert(err, IsNil)
+
+ err = r.RepackObjects(&RepackConfig{})
+ c.Assert(err, IsNil)
+
+ r, err = PlainOpen(dir)
+ c.Assert(r, NotNil)
+ c.Assert(err, IsNil)
+
+ // Now check to see if the GC was effective in removing the tag object.
+ obj, err = r.TagObject(ref.Hash())
+ c.Assert(obj, IsNil)
+ c.Assert(err, Equals, plumbing.ErrObjectNotFound)
+}
+
+func (s *RepositorySuite) TestDeleteTagAnnotatedUnpacked(c *C) {
+ url := s.GetLocalRepositoryURL(
+ fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(),
+ )
+
+ dir, err := ioutil.TempDir("", "go-git-test-deletetag-annotated-unpacked")
+ c.Assert(err, IsNil)
+
+ defer os.RemoveAll(dir) // clean up
+
+ fss := filesystem.NewStorage(osfs.New(dir), cache.NewObjectLRUDefault())
+
+ r, _ := Init(fss, nil)
+ err = r.clone(context.Background(), &CloneOptions{URL: url})
+ c.Assert(err, IsNil)
+
+	// Create a tag for the deletion test. This ensures that the resulting loose
+	// object will remain unpacked (as we aren't doing anything that should pack it),
+ // so that we can effectively test that a prune deletes it, without having to
+ // resort to a repack.
+ h, err := r.Head()
+ c.Assert(err, IsNil)
+
+ expectedHash := h.Hash()
+
+ ref, err := r.CreateTag("foobar", expectedHash, &CreateTagOptions{
+ Tagger: defaultSignature(),
+ Message: "foo bar baz qux",
+ })
+ c.Assert(err, IsNil)
+
+ tag, err := r.Tag("foobar")
+ c.Assert(err, IsNil)
+
+ obj, err := r.TagObject(tag.Hash())
+ c.Assert(obj, NotNil)
+ c.Assert(err, IsNil)
+
+ err = r.DeleteTag("foobar")
+ c.Assert(err, IsNil)
+
+ _, err = r.Tag("foobar")
+ c.Assert(err, Equals, ErrTagNotFound)
+
+ // As mentioned, only run a prune. We are not testing for packed objects
+ // here.
+ err = r.Prune(PruneOptions{Handler: r.DeleteObject})
+ c.Assert(err, IsNil)
+
+ // Now check to see if the GC was effective in removing the tag object.
+ obj, err = r.TagObject(ref.Hash())
+ c.Assert(obj, IsNil)
+ c.Assert(err, Equals, plumbing.ErrObjectNotFound)
+}
+
func (s *RepositorySuite) TestBranches(c *C) {
f := fixtures.ByURL("https://github.com/git-fixtures/root-references.git").One()
- sto, err := filesystem.NewStorage(f.DotGit())
- c.Assert(err, IsNil)
+ sto := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault())
r, err := Open(sto, f.DotGit())
c.Assert(err, IsNil)
@@ -1288,8 +2034,7 @@ func (s *RepositorySuite) TestWorktreeBare(c *C) {
func (s *RepositorySuite) TestResolveRevision(c *C) {
f := fixtures.ByURL("https://github.com/git-fixtures/basic.git").One()
- sto, err := filesystem.NewStorage(f.DotGit())
- c.Assert(err, IsNil)
+ sto := filesystem.NewStorage(f.DotGit(), cache.NewObjectLRUDefault())
r, err := Open(sto, f.DotGit())
c.Assert(err, IsNil)
@@ -1313,6 +2058,7 @@ func (s *RepositorySuite) TestResolveRevision(c *C) {
"branch~1": "918c48b83bd081e863dbe1b80f8998f058cd8294",
"v1.0.0~1": "918c48b83bd081e863dbe1b80f8998f058cd8294",
"master~1": "918c48b83bd081e863dbe1b80f8998f058cd8294",
+ "918c48b83bd081e863dbe1b80f8998f058cd8294": "918c48b83bd081e863dbe1b80f8998f058cd8294",
}
for rev, hash := range datas {
@@ -1332,10 +2078,19 @@ func (s *RepositorySuite) TestResolveRevisionWithErrors(c *C) {
err := r.clone(context.Background(), &CloneOptions{URL: url})
c.Assert(err, IsNil)
+ headRef, err := r.Head()
+ c.Assert(err, IsNil)
+
+ ref := plumbing.NewHashReference("refs/heads/918c48b83bd081e863dbe1b80f8998f058cd8294", headRef.Hash())
+ err = r.Storer.SetReference(ref)
+ c.Assert(err, IsNil)
+
datas := map[string]string{
- "efs/heads/master~": "reference not found",
- "HEAD^3": `Revision invalid : "3" found must be 0, 1 or 2 after "^"`,
- "HEAD^{/whatever}": `No commit message match regexp : "whatever"`,
+ "efs/heads/master~": "reference not found",
+ "HEAD^3": `Revision invalid : "3" found must be 0, 1 or 2 after "^"`,
+ "HEAD^{/whatever}": `No commit message match regexp : "whatever"`,
+ "4e1243bd22c66e76c2ba9eddc1f91394e57f9f83": "reference not found",
+ "918c48b83bd081e863dbe1b80f8998f058cd8294": `refname "918c48b83bd081e863dbe1b80f8998f058cd8294" is ambiguous`,
}
for rev, rerr := range datas {
@@ -1350,8 +2105,7 @@ func (s *RepositorySuite) testRepackObjects(
srcFs := fixtures.ByTag("unpacked").One().DotGit()
var sto storage.Storer
var err error
- sto, err = filesystem.NewStorage(srcFs)
- c.Assert(err, IsNil)
+ sto = filesystem.NewStorage(srcFs, cache.NewObjectLRUDefault())
los := sto.(storer.LooseObjectStorer)
c.Assert(los, NotNil)
@@ -1396,10 +2150,18 @@ func (s *RepositorySuite) testRepackObjects(
}
func (s *RepositorySuite) TestRepackObjects(c *C) {
+ if testing.Short() {
+ c.Skip("skipping test in short mode.")
+ }
+
s.testRepackObjects(c, time.Time{}, 1)
}
func (s *RepositorySuite) TestRepackObjectsWithNoDelete(c *C) {
+ if testing.Short() {
+ c.Skip("skipping test in short mode.")
+ }
+
s.testRepackObjects(c, time.Unix(0, 1), 3)
}
@@ -1426,3 +2188,119 @@ func executeOnPath(path, cmd string) error {
return c.Run()
}
+
+func (s *RepositorySuite) TestBrokenMultipleShallowFetch(c *C) {
+ r, _ := Init(memory.NewStorage(), nil)
+ _, err := r.CreateRemote(&config.RemoteConfig{
+ Name: DefaultRemoteName,
+ URLs: []string{s.GetBasicLocalRepositoryURL()},
+ })
+ c.Assert(err, IsNil)
+
+ c.Assert(r.Fetch(&FetchOptions{
+ Depth: 2,
+ RefSpecs: []config.RefSpec{config.RefSpec("refs/heads/master:refs/heads/master")},
+ }), IsNil)
+
+ shallows, err := r.Storer.Shallow()
+ c.Assert(err, IsNil)
+ c.Assert(len(shallows), Equals, 1)
+
+ ref, err := r.Reference("refs/heads/master", true)
+ c.Assert(err, IsNil)
+ cobj, err := r.CommitObject(ref.Hash())
+ c.Assert(err, IsNil)
+ c.Assert(cobj, NotNil)
+ err = object.NewCommitPreorderIter(cobj, nil, nil).ForEach(func(c *object.Commit) error {
+ for _, ph := range c.ParentHashes {
+ for _, h := range shallows {
+ if ph == h {
+ return storer.ErrStop
+ }
+ }
+ }
+
+ return nil
+ })
+ c.Assert(err, IsNil)
+
+ c.Assert(r.Fetch(&FetchOptions{
+ Depth: 5,
+ RefSpecs: []config.RefSpec{config.RefSpec("refs/heads/*:refs/heads/*")},
+ }), IsNil)
+
+ shallows, err = r.Storer.Shallow()
+ c.Assert(err, IsNil)
+ c.Assert(len(shallows), Equals, 3)
+
+ ref, err = r.Reference("refs/heads/master", true)
+ c.Assert(err, IsNil)
+ cobj, err = r.CommitObject(ref.Hash())
+ c.Assert(err, IsNil)
+ c.Assert(cobj, NotNil)
+ err = object.NewCommitPreorderIter(cobj, nil, nil).ForEach(func(c *object.Commit) error {
+ for _, ph := range c.ParentHashes {
+ for _, h := range shallows {
+ if ph == h {
+ return storer.ErrStop
+ }
+ }
+ }
+
+ return nil
+ })
+ c.Assert(err, IsNil)
+}
+
+func BenchmarkObjects(b *testing.B) {
+ if err := fixtures.Init(); err != nil {
+ b.Fatal(err)
+ }
+
+ defer func() {
+ if err := fixtures.Clean(); err != nil {
+ b.Fatal(err)
+ }
+ }()
+
+ for _, f := range fixtures.ByTag("packfile") {
+ if f.DotGitHash == plumbing.ZeroHash {
+ continue
+ }
+
+ b.Run(f.URL, func(b *testing.B) {
+ fs := f.DotGit()
+ storer := filesystem.NewStorage(fs, cache.NewObjectLRUDefault())
+
+ worktree, err := fs.Chroot(filepath.Dir(fs.Root()))
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ repo, err := Open(storer, worktree)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ for i := 0; i < b.N; i++ {
+ iter, err := repo.Objects()
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ for {
+ _, err := iter.Next()
+ if err == io.EOF {
+ break
+ }
+
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+
+ iter.Close()
+ }
+ })
+ }
+}
diff --git a/status.go b/status.go
index ef8a500..7f18e02 100644
--- a/status.go
+++ b/status.go
@@ -1,7 +1,10 @@
package git
-import "fmt"
-import "bytes"
+import (
+ "bytes"
+ "fmt"
+ "path/filepath"
+)
// Status represents the current status of a Worktree.
// The key of the map is the path of the file.
@@ -17,7 +20,13 @@ func (s Status) File(path string) *FileStatus {
return s[path]
}
-// IsClean returns true if all the files aren't in Unmodified status.
+// IsUntracked checks whether the file at the given path is 'Untracked'
+func (s Status) IsUntracked(path string) bool {
+ stat, ok := (s)[filepath.ToSlash(path)]
+ return ok && stat.Worktree == Untracked
+}
+
+// IsClean returns true if all the files are in Unmodified status.
func (s Status) IsClean() bool {
for _, status := range s {
if status.Worktree != Unmodified || status.Staging != Unmodified {
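
A minimal sketch of the new IsUntracked helper in use, assuming an existing repository at a placeholder path; because of the filepath.ToSlash normalization, OS-native paths also work on Windows:

    package main

    import (
        "fmt"
        "log"

        git "gopkg.in/src-d/go-git.v4"
    )

    func main() {
        // Open an existing repository; the path is a placeholder.
        repo, err := git.PlainOpen("/tmp/repo")
        if err != nil {
            log.Fatal(err)
        }

        w, err := repo.Worktree()
        if err != nil {
            log.Fatal(err)
        }

        status, err := w.Status()
        if err != nil {
            log.Fatal(err)
        }

        // IsUntracked normalizes the path separator before the map lookup.
        fmt.Println("untracked:", status.IsUntracked("docs/notes.txt"))
        fmt.Println("clean:", status.IsClean())
    }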
diff --git a/storage/filesystem/config.go b/storage/filesystem/config.go
index a2cc173..be812e4 100644
--- a/storage/filesystem/config.go
+++ b/storage/filesystem/config.go
@@ -5,7 +5,7 @@ import (
"os"
"gopkg.in/src-d/go-git.v4/config"
- "gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit"
+ "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
"gopkg.in/src-d/go-git.v4/utils/ioutil"
)
@@ -13,7 +13,7 @@ type ConfigStorage struct {
dir *dotgit.DotGit
}
-func (c *ConfigStorage) Config() (*config.Config, error) {
+func (c *ConfigStorage) Config() (conf *config.Config, err error) {
cfg := config.NewConfig()
f, err := c.dir.Config()
@@ -32,15 +32,15 @@ func (c *ConfigStorage) Config() (*config.Config, error) {
return nil, err
}
- if err := cfg.Unmarshal(b); err != nil {
+ if err = cfg.Unmarshal(b); err != nil {
return nil, err
}
return cfg, err
}
-func (c *ConfigStorage) SetConfig(cfg *config.Config) error {
- if err := cfg.Validate(); err != nil {
+func (c *ConfigStorage) SetConfig(cfg *config.Config) (err error) {
+ if err = cfg.Validate(); err != nil {
return err
}
diff --git a/storage/filesystem/config_test.go b/storage/filesystem/config_test.go
index cc03119..71c947d 100644
--- a/storage/filesystem/config_test.go
+++ b/storage/filesystem/config_test.go
@@ -5,7 +5,7 @@ import (
"os"
"gopkg.in/src-d/go-git.v4/config"
- "gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit"
+ "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
. "gopkg.in/check.v1"
"gopkg.in/src-d/go-billy.v4/osfs"
diff --git a/storage/filesystem/internal/dotgit/dotgit.go b/storage/filesystem/dotgit/dotgit.go
index fac7aec..a58c248 100644
--- a/storage/filesystem/internal/dotgit/dotgit.go
+++ b/storage/filesystem/dotgit/dotgit.go
@@ -57,17 +57,48 @@ var (
ErrSymRefTargetNotFound = errors.New("symbolic reference target not found")
)
+// Options holds configuration for the storage.
+type Options struct {
+ // ExclusiveAccess means that the filesystem is not modified externally
+ // while the repo is open.
+ ExclusiveAccess bool
+ // KeepDescriptors keeps file descriptors open so they can be reused, but
+ // they must then be closed manually by calling Close().
+ KeepDescriptors bool
+}
+
// The DotGit type represents a local git repository on disk. This
// type is not zero-value-safe, use the New function to initialize it.
type DotGit struct {
- fs billy.Filesystem
+ options Options
+ fs billy.Filesystem
+
+ // incoming object directory information
+ incomingChecked bool
+ incomingDirName string
+
+ objectList []plumbing.Hash
+ objectMap map[plumbing.Hash]struct{}
+ packList []plumbing.Hash
+ packMap map[plumbing.Hash]struct{}
+
+ files map[string]billy.File
}
// New returns a DotGit value ready to be used. The path argument must
// be the absolute path of a git repository directory (e.g.
// "/foo/bar/.git").
func New(fs billy.Filesystem) *DotGit {
- return &DotGit{fs: fs}
+ return NewWithOptions(fs, Options{})
+}
+
+// NewWithOptions returns a DotGit value configured with the given
+// non-default options. See New for complete help.
+func NewWithOptions(fs billy.Filesystem, o Options) *DotGit {
+ return &DotGit{
+ options: o,
+ fs: fs,
+ }
}
// Initialize creates all the folder scaffolding.
@@ -97,6 +128,28 @@ func (d *DotGit) Initialize() error {
return nil
}
+// Close closes all opened files.
+func (d *DotGit) Close() error {
+ var firstError error
+ if d.files != nil {
+ for _, f := range d.files {
+ err := f.Close()
+ if err != nil && firstError == nil {
+ firstError = err
+ continue
+ }
+ }
+
+ d.files = nil
+ }
+
+ if firstError != nil {
+ return firstError
+ }
+
+ return nil
+}
+
// ConfigWriter returns a file pointer for writing to the config file
func (d *DotGit) ConfigWriter() (billy.File, error) {
return d.fs.Create(configPath)
@@ -139,11 +192,25 @@ func (d *DotGit) Shallow() (billy.File, error) {
// NewObjectPack returns a writer for a new packfile. It saves the packfile to
// disk and also generates and saves the index for the given packfile.
func (d *DotGit) NewObjectPack() (*PackWriter, error) {
+ d.cleanPackList()
return newPackWrite(d.fs)
}
// ObjectPacks returns the list of available packfiles
func (d *DotGit) ObjectPacks() ([]plumbing.Hash, error) {
+ if !d.options.ExclusiveAccess {
+ return d.objectPacks()
+ }
+
+ err := d.genPackList()
+ if err != nil {
+ return nil, err
+ }
+
+ return d.packList, nil
+}
+
+func (d *DotGit) objectPacks() ([]plumbing.Hash, error) {
packDir := d.fs.Join(objectsPath, packPath)
files, err := d.fs.ReadDir(packDir)
if err != nil {
@@ -162,8 +229,11 @@ func (d *DotGit) ObjectPacks() ([]plumbing.Hash, error) {
n := f.Name()
h := plumbing.NewHash(n[5 : len(n)-5]) //pack-(hash).pack
+ if h.IsZero() {
+ // Ignore files with badly-formatted names.
+ continue
+ }
packs = append(packs, h)
-
}
return packs, nil
@@ -174,7 +244,22 @@ func (d *DotGit) objectPackPath(hash plumbing.Hash, extension string) string {
}
func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.File, error) {
- pack, err := d.fs.Open(d.objectPackPath(hash, extension))
+ if d.files == nil {
+ d.files = make(map[string]billy.File)
+ }
+
+ err := d.hasPack(hash)
+ if err != nil {
+ return nil, err
+ }
+
+ path := d.objectPackPath(hash, extension)
+ f, ok := d.files[path]
+ if ok {
+ return f, nil
+ }
+
+ pack, err := d.fs.Open(path)
if err != nil {
if os.IsNotExist(err) {
return nil, ErrPackfileNotFound
@@ -183,20 +268,36 @@ func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.Fil
return nil, err
}
+ if d.options.KeepDescriptors && extension == "pack" {
+ d.files[path] = pack
+ }
+
return pack, nil
}
// ObjectPack returns a fs.File of the given packfile
func (d *DotGit) ObjectPack(hash plumbing.Hash) (billy.File, error) {
+ err := d.hasPack(hash)
+ if err != nil {
+ return nil, err
+ }
+
return d.objectPackOpen(hash, `pack`)
}
// ObjectPackIdx returns a fs.File of the index file for a given packfile
func (d *DotGit) ObjectPackIdx(hash plumbing.Hash) (billy.File, error) {
+ err := d.hasPack(hash)
+ if err != nil {
+ return nil, err
+ }
+
return d.objectPackOpen(hash, `idx`)
}
func (d *DotGit) DeleteOldObjectPackAndIndex(hash plumbing.Hash, t time.Time) error {
+ d.cleanPackList()
+
path := d.objectPackPath(hash, `pack`)
if !t.IsZero() {
fi, err := d.fs.Stat(path)
@@ -217,12 +318,23 @@ func (d *DotGit) DeleteOldObjectPackAndIndex(hash plumbing.Hash, t time.Time) er
// NewObject returns a writer for a new object file.
func (d *DotGit) NewObject() (*ObjectWriter, error) {
+ d.cleanObjectList()
+
return newObjectWriter(d.fs)
}
// Objects returns a slice with the hashes of objects found under the
// .git/objects/ directory.
func (d *DotGit) Objects() ([]plumbing.Hash, error) {
+ if d.options.ExclusiveAccess {
+ err := d.genObjectList()
+ if err != nil {
+ return nil, err
+ }
+
+ return d.objectList, nil
+ }
+
var objects []plumbing.Hash
err := d.ForEachObjectHash(func(hash plumbing.Hash) error {
objects = append(objects, hash)
@@ -234,9 +346,29 @@ func (d *DotGit) Objects() ([]plumbing.Hash, error) {
return objects, nil
}
-// Objects returns a slice with the hashes of objects found under the
-// .git/objects/ directory.
+// ForEachObjectHash iterates over the hashes of objects found under the
+// .git/objects/ directory and executes the provided function.
func (d *DotGit) ForEachObjectHash(fun func(plumbing.Hash) error) error {
+ if !d.options.ExclusiveAccess {
+ return d.forEachObjectHash(fun)
+ }
+
+ err := d.genObjectList()
+ if err != nil {
+ return err
+ }
+
+ for _, h := range d.objectList {
+ err := fun(h)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *DotGit) forEachObjectHash(fun func(plumbing.Hash) error) error {
files, err := d.fs.ReadDir(objectsPath)
if err != nil {
if os.IsNotExist(err) {
@@ -255,7 +387,12 @@ func (d *DotGit) ForEachObjectHash(fun func(plumbing.Hash) error) error {
}
for _, o := range d {
- err = fun(plumbing.NewHash(base + o.Name()))
+ h := plumbing.NewHash(base + o.Name())
+ if h.IsZero() {
+ // Ignore files with badly-formatted names.
+ continue
+ }
+ err = fun(h)
if err != nil {
return err
}
@@ -266,24 +403,178 @@ func (d *DotGit) ForEachObjectHash(fun func(plumbing.Hash) error) error {
return nil
}
+func (d *DotGit) cleanObjectList() {
+ d.objectMap = nil
+ d.objectList = nil
+}
+
+func (d *DotGit) genObjectList() error {
+ if d.objectMap != nil {
+ return nil
+ }
+
+ d.objectMap = make(map[plumbing.Hash]struct{})
+ return d.forEachObjectHash(func(h plumbing.Hash) error {
+ d.objectList = append(d.objectList, h)
+ d.objectMap[h] = struct{}{}
+
+ return nil
+ })
+}
+
+func (d *DotGit) hasObject(h plumbing.Hash) error {
+ if !d.options.ExclusiveAccess {
+ return nil
+ }
+
+ err := d.genObjectList()
+ if err != nil {
+ return err
+ }
+
+ _, ok := d.objectMap[h]
+ if !ok {
+ return plumbing.ErrObjectNotFound
+ }
+
+ return nil
+}
+
+func (d *DotGit) cleanPackList() {
+ d.packMap = nil
+ d.packList = nil
+}
+
+func (d *DotGit) genPackList() error {
+ if d.packMap != nil {
+ return nil
+ }
+
+ op, err := d.objectPacks()
+ if err != nil {
+ return err
+ }
+
+ d.packMap = make(map[plumbing.Hash]struct{})
+ d.packList = nil
+
+ for _, h := range op {
+ d.packList = append(d.packList, h)
+ d.packMap[h] = struct{}{}
+ }
+
+ return nil
+}
+
+func (d *DotGit) hasPack(h plumbing.Hash) error {
+ if !d.options.ExclusiveAccess {
+ return nil
+ }
+
+ err := d.genPackList()
+ if err != nil {
+ return err
+ }
+
+ _, ok := d.packMap[h]
+ if !ok {
+ return ErrPackfileNotFound
+ }
+
+ return nil
+}
+
func (d *DotGit) objectPath(h plumbing.Hash) string {
hash := h.String()
return d.fs.Join(objectsPath, hash[0:2], hash[2:40])
}
+// incomingObjectPath adds support for finding objects in an "incoming"
+// directory, so that the library can be used to write a git pre-receive
+// hook that deals with the quarantined incoming objects.
+//
+// More on git hooks here: https://git-scm.com/docs/githooks
+// More on 'quarantine'/incoming directory here:
+// https://git-scm.com/docs/git-receive-pack
+func (d *DotGit) incomingObjectPath(h plumbing.Hash) string {
+ hString := h.String()
+
+ if d.incomingDirName == "" {
+ return d.fs.Join(objectsPath, hString[0:2], hString[2:40])
+ }
+
+ return d.fs.Join(objectsPath, d.incomingDirName, hString[0:2], hString[2:40])
+}
+
+// hasIncomingObjects searches for an incoming directory and keeps its name
+// so it doesn't have to be found each time an object is accessed.
+func (d *DotGit) hasIncomingObjects() bool {
+ if !d.incomingChecked {
+ directoryContents, err := d.fs.ReadDir(objectsPath)
+ if err == nil {
+ for _, file := range directoryContents {
+ if strings.HasPrefix(file.Name(), "incoming-") && file.IsDir() {
+ d.incomingDirName = file.Name()
+ }
+ }
+ }
+
+ d.incomingChecked = true
+ }
+
+ return d.incomingDirName != ""
+}
+
// Object returns a fs.File pointing to the object file, if it exists
func (d *DotGit) Object(h plumbing.Hash) (billy.File, error) {
- return d.fs.Open(d.objectPath(h))
+ err := d.hasObject(h)
+ if err != nil {
+ return nil, err
+ }
+
+ obj1, err1 := d.fs.Open(d.objectPath(h))
+ if os.IsNotExist(err1) && d.hasIncomingObjects() {
+ obj2, err2 := d.fs.Open(d.incomingObjectPath(h))
+ if err2 != nil {
+ return obj1, err1
+ }
+ return obj2, err2
+ }
+ return obj1, err1
}
// ObjectStat returns an os.FileInfo pointing to the object file, if it exists
func (d *DotGit) ObjectStat(h plumbing.Hash) (os.FileInfo, error) {
- return d.fs.Stat(d.objectPath(h))
+ err := d.hasObject(h)
+ if err != nil {
+ return nil, err
+ }
+
+ obj1, err1 := d.fs.Stat(d.objectPath(h))
+ if os.IsNotExist(err1) && d.hasIncomingObjects() {
+ obj2, err2 := d.fs.Stat(d.incomingObjectPath(h))
+ if err2 != nil {
+ return obj1, err1
+ }
+ return obj2, err2
+ }
+ return obj1, err1
}
// ObjectDelete removes the object file, if it exists
func (d *DotGit) ObjectDelete(h plumbing.Hash) error {
- return d.fs.Remove(d.objectPath(h))
+ d.cleanObjectList()
+
+ err1 := d.fs.Remove(d.objectPath(h))
+ if os.IsNotExist(err1) && d.hasIncomingObjects() {
+ err2 := d.fs.Remove(d.incomingObjectPath(h))
+ if err2 != nil {
+ return err1
+ }
+ return err2
+ }
+ return err1
}
func (d *DotGit) readReferenceFrom(rd io.Reader, name string) (ref *plumbing.Reference, err error) {
@@ -375,7 +666,7 @@ func (d *DotGit) findPackedRefsInFile(f billy.File) ([]*plumbing.Reference, erro
return refs, s.Err()
}
-func (d *DotGit) findPackedRefs() ([]*plumbing.Reference, error) {
+func (d *DotGit) findPackedRefs() (r []*plumbing.Reference, err error) {
f, err := d.fs.Open(packedRefsPath)
if err != nil {
if os.IsNotExist(err) {
@@ -461,7 +752,7 @@ func (d *DotGit) openAndLockPackedRefs(doCreate bool) (
// File mode is retrieved from a constant defined in the target specific
// files (dotgit_rewrite_packed_refs_*). Some modes are not available
// in all filesystems.
- openFlags := openAndLockPackedRefsMode
+ openFlags := d.openAndLockPackedRefsMode()
if doCreate {
openFlags |= os.O_CREATE
}
@@ -676,7 +967,7 @@ func (d *DotGit) PackRefs() (err error) {
// Gather all refs using addRefsFromRefDir and addRefsFromPackedRefs.
var refs []*plumbing.Reference
seen := make(map[plumbing.ReferenceName]bool)
- if err := d.addRefsFromRefDir(&refs, seen); err != nil {
+ if err = d.addRefsFromRefDir(&refs, seen); err != nil {
return err
}
if len(refs) == 0 {
@@ -684,7 +975,7 @@ func (d *DotGit) PackRefs() (err error) {
return nil
}
numLooseRefs := len(refs)
- if err := d.addRefsFromPackedRefsFile(&refs, f, seen); err != nil {
+ if err = d.addRefsFromPackedRefsFile(&refs, f, seen); err != nil {
return err
}
@@ -701,7 +992,7 @@ func (d *DotGit) PackRefs() (err error) {
w := bufio.NewWriter(tmp)
for _, ref := range refs {
- _, err := w.WriteString(ref.String() + "\n")
+ _, err = w.WriteString(ref.String() + "\n")
if err != nil {
return err
}
@@ -776,6 +1067,11 @@ func (d *DotGit) Alternates() ([]*DotGit, error) {
return alternates, nil
}
+// Fs returns the underlying filesystem of the DotGit folder.
+func (d *DotGit) Fs() billy.Filesystem {
+ return d.fs
+}
+
func isHex(s string) bool {
for _, b := range []byte(s) {
if isNum(b) {
@@ -798,5 +1094,3 @@ func isNum(b byte) bool {
func isHexAlpha(b byte) bool {
return b >= 'a' && b <= 'f' || b >= 'A' && b <= 'F'
}
-
-type refCache map[plumbing.ReferenceName]*plumbing.Reference
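
A minimal sketch of the new dotgit options in use, assuming a .git directory at a placeholder path. With KeepDescriptors the pack descriptors stay open between reads and must be released explicitly:

    package main

    import (
        "log"

        "gopkg.in/src-d/go-billy.v4/osfs"
        "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
    )

    func main() {
        fs := osfs.New("/tmp/repo/.git") // placeholder .git directory
        dir := dotgit.NewWithOptions(fs, dotgit.Options{
            ExclusiveAccess: true, // safe only if nothing else modifies the repo
            KeepDescriptors: true, // reuse pack file descriptors
        })

        packs, err := dir.ObjectPacks()
        if err != nil {
            log.Fatal(err)
        }

        for _, h := range packs {
            // The returned descriptor is kept open and cached internally.
            if _, err := dir.ObjectPack(h); err != nil {
                log.Fatal(err)
            }
        }

        // With KeepDescriptors, cached descriptors are only released here.
        if err := dir.Close(); err != nil {
            log.Fatal(err)
        }
    }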
diff --git a/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go b/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go
new file mode 100644
index 0000000..7f1c02c
--- /dev/null
+++ b/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go
@@ -0,0 +1,81 @@
+package dotgit
+
+import (
+ "io"
+ "os"
+ "runtime"
+
+ "gopkg.in/src-d/go-billy.v4"
+ "gopkg.in/src-d/go-git.v4/utils/ioutil"
+)
+
+func (d *DotGit) openAndLockPackedRefsMode() int {
+ if billy.CapabilityCheck(d.fs, billy.ReadAndWriteCapability) {
+ return os.O_RDWR
+ }
+
+ return os.O_RDONLY
+}
+
+func (d *DotGit) rewritePackedRefsWhileLocked(
+ tmp billy.File, pr billy.File) error {
+ // Try plain rename. If we aren't using the bare Windows filesystem as the
+ // storage layer, we might be able to get away with a rename over a locked
+ // file.
+ err := d.fs.Rename(tmp.Name(), pr.Name())
+ if err == nil {
+ return nil
+ }
+
+ // If the filesystem does not support rename (e.g. sivafs),
+ // a full copy is done.
+ if err == billy.ErrNotSupported {
+ return d.copyNewFile(tmp, pr)
+ }
+
+ if runtime.GOOS != "windows" {
+ return err
+ }
+
+ // Otherwise, Windows doesn't let us rename over a locked file, so
+ // we have to do a straight copy. Unfortunately this could result
+ // in a partially-written file if the process fails before the
+ // copy completes.
+ return d.copyToExistingFile(tmp, pr)
+}
+
+func (d *DotGit) copyToExistingFile(tmp, pr billy.File) error {
+ _, err := pr.Seek(0, io.SeekStart)
+ if err != nil {
+ return err
+ }
+ err = pr.Truncate(0)
+ if err != nil {
+ return err
+ }
+ _, err = tmp.Seek(0, io.SeekStart)
+ if err != nil {
+ return err
+ }
+ _, err = io.Copy(pr, tmp)
+
+ return err
+}
+
+func (d *DotGit) copyNewFile(tmp billy.File, pr billy.File) (err error) {
+ prWrite, err := d.fs.Create(pr.Name())
+ if err != nil {
+ return err
+ }
+
+ defer ioutil.CheckClose(prWrite, &err)
+
+ _, err = tmp.Seek(0, io.SeekStart)
+ if err != nil {
+ return err
+ }
+
+ _, err = io.Copy(prWrite, tmp)
+
+ return err
+}
diff --git a/storage/filesystem/internal/dotgit/dotgit_setref.go b/storage/filesystem/dotgit/dotgit_setref.go
index c732c9f..d27c1a3 100644
--- a/storage/filesystem/internal/dotgit/dotgit_setref.go
+++ b/storage/filesystem/dotgit/dotgit_setref.go
@@ -9,7 +9,7 @@ import (
"gopkg.in/src-d/go-git.v4/utils/ioutil"
)
-func (d *DotGit) setRef(fileName, content string, old *plumbing.Reference) error {
+func (d *DotGit) setRef(fileName, content string, old *plumbing.Reference) (err error) {
// If we are not checking an old ref, just truncate the file.
mode := os.O_RDWR | os.O_CREATE
if old == nil {
diff --git a/storage/filesystem/internal/dotgit/dotgit_setref_norwfs.go b/storage/filesystem/dotgit/dotgit_setref_norwfs.go
index 5695bd3..5695bd3 100644
--- a/storage/filesystem/internal/dotgit/dotgit_setref_norwfs.go
+++ b/storage/filesystem/dotgit/dotgit_setref_norwfs.go
diff --git a/storage/filesystem/internal/dotgit/dotgit_test.go b/storage/filesystem/dotgit/dotgit_test.go
index 2c43295..308c6b7 100644
--- a/storage/filesystem/internal/dotgit/dotgit_test.go
+++ b/storage/filesystem/dotgit/dotgit_test.go
@@ -9,6 +9,7 @@ import (
"strings"
"testing"
+ "gopkg.in/src-d/go-billy.v4"
"gopkg.in/src-d/go-git.v4/plumbing"
. "gopkg.in/check.v1"
@@ -151,6 +152,7 @@ func (s *SuiteDotGit) TestRefsFromReferenceFile(c *C) {
}
func BenchmarkRefMultipleTimes(b *testing.B) {
+ fixtures.Init()
fs := fixtures.Basic().ByTag(".git").One().DotGit()
refname := plumbing.ReferenceName("refs/remotes/origin/branch")
@@ -418,15 +420,39 @@ func findReference(refs []*plumbing.Reference, name string) *plumbing.Reference
return nil
}
-func (s *SuiteDotGit) TestObjectsPack(c *C) {
+func (s *SuiteDotGit) TestObjectPacks(c *C) {
f := fixtures.Basic().ByTag(".git").One()
fs := f.DotGit()
dir := New(fs)
+ testObjectPacks(c, fs, dir, f)
+}
+
+func (s *SuiteDotGit) TestObjectPacksExclusive(c *C) {
+ f := fixtures.Basic().ByTag(".git").One()
+ fs := f.DotGit()
+ dir := NewWithOptions(fs, Options{ExclusiveAccess: true})
+
+ testObjectPacks(c, fs, dir, f)
+}
+
+func testObjectPacks(c *C, fs billy.Filesystem, dir *DotGit, f *fixtures.Fixture) {
hashes, err := dir.ObjectPacks()
c.Assert(err, IsNil)
c.Assert(hashes, HasLen, 1)
c.Assert(hashes[0], Equals, f.PackfileHash)
+
+ // Make sure that a random file in the pack directory doesn't
+ // break everything.
+ badFile, err := fs.Create("objects/pack/OOPS_THIS_IS_NOT_RIGHT.pack")
+ c.Assert(err, IsNil)
+ err = badFile.Close()
+ c.Assert(err, IsNil)
+
+ hashes2, err := dir.ObjectPacks()
+ c.Assert(err, IsNil)
+ c.Assert(hashes2, HasLen, 1)
+ c.Assert(hashes[0], Equals, hashes2[0])
}
func (s *SuiteDotGit) TestObjectPack(c *C) {
@@ -439,6 +465,45 @@ func (s *SuiteDotGit) TestObjectPack(c *C) {
c.Assert(filepath.Ext(pack.Name()), Equals, ".pack")
}
+func (s *SuiteDotGit) TestObjectPackWithKeepDescriptors(c *C) {
+ f := fixtures.Basic().ByTag(".git").One()
+ fs := f.DotGit()
+ dir := NewWithOptions(fs, Options{KeepDescriptors: true})
+
+ pack, err := dir.ObjectPack(f.PackfileHash)
+ c.Assert(err, IsNil)
+ c.Assert(filepath.Ext(pack.Name()), Equals, ".pack")
+
+ // Move to a specific offset
+ pack.Seek(42, os.SEEK_SET)
+
+ pack2, err := dir.ObjectPack(f.PackfileHash)
+ c.Assert(err, IsNil)
+
+ // If the file is the same, the offset should be the same
+ offset, err := pack2.Seek(0, os.SEEK_CUR)
+ c.Assert(err, IsNil)
+ c.Assert(offset, Equals, int64(42))
+
+ err = dir.Close()
+ c.Assert(err, IsNil)
+
+ pack2, err = dir.ObjectPack(f.PackfileHash)
+ c.Assert(err, IsNil)
+
+ // If the file is opened again, its offset should be 0
+ offset, err = pack2.Seek(0, os.SEEK_CUR)
+ c.Assert(err, IsNil)
+ c.Assert(offset, Equals, int64(0))
+
+ err = pack2.Close()
+ c.Assert(err, IsNil)
+
+ err = dir.Close()
+ c.Assert(err, NotNil)
+}
+
func (s *SuiteDotGit) TestObjectPackIdx(c *C) {
f := fixtures.Basic().ByTag(".git").One()
fs := f.DotGit()
@@ -493,6 +558,17 @@ func (s *SuiteDotGit) TestObjects(c *C) {
fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit()
dir := New(fs)
+ testObjects(c, fs, dir)
+}
+
+func (s *SuiteDotGit) TestObjectsExclusive(c *C) {
+ fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit()
+ dir := NewWithOptions(fs, Options{ExclusiveAccess: true})
+
+ testObjects(c, fs, dir)
+}
+
+func testObjects(c *C, fs billy.Filesystem, dir *DotGit) {
hashes, err := dir.Objects()
c.Assert(err, IsNil)
c.Assert(hashes, HasLen, 187)
@@ -524,6 +600,57 @@ func (s *SuiteDotGit) TestObject(c *C) {
file.Name(), fs.Join("objects", "03", "db8e1fbe133a480f2867aac478fd866686d69e")),
Equals, true,
)
+ incomingHash := "9d25e0f9bde9f82882b49fe29117b9411cb157b7" // made-up hash
+ incomingDirPath := fs.Join("objects", "incoming-123456")
+ incomingFilePath := fs.Join(incomingDirPath, incomingHash[0:2], incomingHash[2:40])
+ fs.MkdirAll(incomingDirPath, os.FileMode(0755))
+ fs.Create(incomingFilePath)
+
+ file, err = dir.Object(plumbing.NewHash(incomingHash))
+ c.Assert(err, IsNil)
+}
+
+func (s *SuiteDotGit) TestObjectStat(c *C) {
+ fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit()
+ dir := New(fs)
+
+ hash := plumbing.NewHash("03db8e1fbe133a480f2867aac478fd866686d69e")
+ _, err := dir.ObjectStat(hash)
+ c.Assert(err, IsNil)
+ incomingHash := "9d25e0f9bde9f82882b49fe29117b9411cb157b7" // made-up hash
+ incomingDirPath := fs.Join("objects", "incoming-123456")
+ incomingFilePath := fs.Join(incomingDirPath, incomingHash[0:2], incomingHash[2:40])
+ fs.MkdirAll(incomingDirPath, os.FileMode(0755))
+ fs.Create(incomingFilePath)
+
+ _, err = dir.ObjectStat(plumbing.NewHash(incomingHash))
+ c.Assert(err, IsNil)
+}
+
+func (s *SuiteDotGit) TestObjectDelete(c *C) {
+ fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit()
+ dir := New(fs)
+
+ hash := plumbing.NewHash("03db8e1fbe133a480f2867aac478fd866686d69e")
+ err := dir.ObjectDelete(hash)
+ c.Assert(err, IsNil)
+
+ incomingHash := "9d25e0f9bde9f82882b49fe29117b9411cb157b7" // made-up hash
+ incomingDirPath := fs.Join("objects", "incoming-123456")
+ incomingSubDirPath := fs.Join(incomingDirPath, incomingHash[0:2])
+ incomingFilePath := fs.Join(incomingSubDirPath, incomingHash[2:40])
+
+ err = fs.MkdirAll(incomingSubDirPath, os.FileMode(0755))
+ c.Assert(err, IsNil)
+
+ f, err := fs.Create(incomingFilePath)
+ c.Assert(err, IsNil)
+
+ err = f.Close()
+ c.Assert(err, IsNil)
+
+ err = dir.ObjectDelete(plumbing.NewHash(incomingHash))
+ c.Assert(err, IsNil)
}
func (s *SuiteDotGit) TestObjectNotFound(c *C) {
diff --git a/storage/filesystem/internal/dotgit/writers.go b/storage/filesystem/dotgit/writers.go
index c2b420f..93d2d8c 100644
--- a/storage/filesystem/internal/dotgit/writers.go
+++ b/storage/filesystem/dotgit/writers.go
@@ -20,13 +20,14 @@ import (
// is renamed/moved (depends on the Filesystem implementation) to the final
// location. If the PackWriter is not used, nothing is written.
type PackWriter struct {
- Notify func(plumbing.Hash, *packfile.Index)
+ Notify func(plumbing.Hash, *idxfile.Writer)
fs billy.Filesystem
fr, fw billy.File
synced *syncedReader
checksum plumbing.Hash
- index *packfile.Index
+ parser *packfile.Parser
+ writer *idxfile.Writer
result chan error
}
@@ -55,20 +56,21 @@ func newPackWrite(fs billy.Filesystem) (*PackWriter, error) {
func (w *PackWriter) buildIndex() {
s := packfile.NewScanner(w.synced)
- d, err := packfile.NewDecoder(s, nil)
+ w.writer = new(idxfile.Writer)
+ var err error
+ w.parser, err = packfile.NewParser(s, w.writer)
if err != nil {
w.result <- err
return
}
- checksum, err := d.Decode()
+ checksum, err := w.parser.Parse()
if err != nil {
w.result <- err
return
}
w.checksum = checksum
- w.index = d.Index()
w.result <- err
}
@@ -92,8 +94,8 @@ func (w *PackWriter) Write(p []byte) (int, error) {
// was written, the tempfiles are deleted without writing a packfile.
func (w *PackWriter) Close() error {
defer func() {
- if w.Notify != nil && w.index != nil && w.index.Size() > 0 {
- w.Notify(w.checksum, w.index)
+ if w.Notify != nil && w.writer != nil && w.writer.Finished() {
+ w.Notify(w.checksum, w.writer)
}
close(w.result)
@@ -115,7 +117,7 @@ func (w *PackWriter) Close() error {
return err
}
- if w.index == nil || w.index.Size() == 0 {
+ if w.writer == nil || !w.writer.Finished() {
return w.clean()
}
@@ -145,11 +147,13 @@ func (w *PackWriter) save() error {
}
func (w *PackWriter) encodeIdx(writer io.Writer) error {
- idx := w.index.ToIdxFile()
- idx.PackfileChecksum = w.checksum
- idx.Version = idxfile.VersionSupported
+ idx, err := w.writer.Index()
+ if err != nil {
+ return err
+ }
+
e := idxfile.NewEncoder(writer)
- _, err := e.Encode(idx)
+ _, err = e.Encode(idx)
return err
}
@@ -209,7 +213,6 @@ func (s *syncedReader) isBlocked() bool {
func (s *syncedReader) wake() {
if s.isBlocked() {
- // fmt.Println("wake")
atomic.StoreUint32(&s.blocked, 0)
s.news <- true
}
@@ -220,7 +223,6 @@ func (s *syncedReader) sleep() {
written := atomic.LoadUint64(&s.written)
if read >= written {
atomic.StoreUint32(&s.blocked, 1)
- // fmt.Println("sleep", read, written)
<-s.news
}
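
With this change, Notify hands back the idxfile.Writer rather than a ready-made index, and the callback derives the index itself. A sketch of the new callback shape, under the assumption of an illustrative indexes map (ObjectStorage.PackfileWriter later in this patch does essentially this):

    package main

    import (
        "gopkg.in/src-d/go-git.v4/plumbing"
        "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
        "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
    )

    // registerIndex wires a Notify callback that turns the idxfile.Writer
    // into an in-memory index and stores it under the packfile hash.
    func registerIndex(w *dotgit.PackWriter, indexes map[plumbing.Hash]idxfile.Index) {
        w.Notify = func(h plumbing.Hash, iw *idxfile.Writer) {
            idx, err := iw.Index()
            if err == nil {
                indexes[h] = idx
            }
        }
    }

    func main() {}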
diff --git a/storage/filesystem/internal/dotgit/writers_test.go b/storage/filesystem/dotgit/writers_test.go
index bf00762..5a5f7b4 100644
--- a/storage/filesystem/internal/dotgit/writers_test.go
+++ b/storage/filesystem/dotgit/writers_test.go
@@ -9,6 +9,7 @@ import (
"strconv"
"gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
. "gopkg.in/check.v1"
@@ -148,7 +149,7 @@ func (s *SuiteDotGit) TestPackWriterUnusedNotify(c *C) {
w, err := newPackWrite(fs)
c.Assert(err, IsNil)
- w.Notify = func(h plumbing.Hash, idx *packfile.Index) {
+ w.Notify = func(h plumbing.Hash, idx *idxfile.Writer) {
c.Fatal("unexpected call to PackWriter.Notify")
}
diff --git a/storage/filesystem/index.go b/storage/filesystem/index.go
index 14ab09a..2ebf57e 100644
--- a/storage/filesystem/index.go
+++ b/storage/filesystem/index.go
@@ -4,7 +4,7 @@ import (
"os"
"gopkg.in/src-d/go-git.v4/plumbing/format/index"
- "gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit"
+ "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
"gopkg.in/src-d/go-git.v4/utils/ioutil"
)
@@ -12,7 +12,7 @@ type IndexStorage struct {
dir *dotgit.DotGit
}
-func (s *IndexStorage) SetIndex(idx *index.Index) error {
+func (s *IndexStorage) SetIndex(idx *index.Index) (err error) {
f, err := s.dir.IndexWriter()
if err != nil {
return err
@@ -25,7 +25,7 @@ func (s *IndexStorage) SetIndex(idx *index.Index) error {
return err
}
-func (s *IndexStorage) Index() (*index.Index, error) {
+func (s *IndexStorage) Index() (i *index.Index, err error) {
idx := &index.Index{
Version: 2,
}
diff --git a/storage/filesystem/internal/dotgit/dotgit_rewrite_packed_refs_nix.go b/storage/filesystem/internal/dotgit/dotgit_rewrite_packed_refs_nix.go
deleted file mode 100644
index c760793..0000000
--- a/storage/filesystem/internal/dotgit/dotgit_rewrite_packed_refs_nix.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// +build !windows,!norwfs
-
-package dotgit
-
-import (
- "os"
-
- "gopkg.in/src-d/go-billy.v4"
-)
-
-const openAndLockPackedRefsMode = os.O_RDWR
-
-func (d *DotGit) rewritePackedRefsWhileLocked(
- tmp billy.File, pr billy.File) error {
- // On non-Windows platforms, we can have atomic rename.
- return d.fs.Rename(tmp.Name(), pr.Name())
-}
diff --git a/storage/filesystem/internal/dotgit/dotgit_rewrite_packed_refs_norwfs.go b/storage/filesystem/internal/dotgit/dotgit_rewrite_packed_refs_norwfs.go
deleted file mode 100644
index 6e43b42..0000000
--- a/storage/filesystem/internal/dotgit/dotgit_rewrite_packed_refs_norwfs.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// +build norwfs
-
-package dotgit
-
-import (
- "io"
- "os"
-
- "gopkg.in/src-d/go-billy.v4"
-)
-
-const openAndLockPackedRefsMode = os.O_RDONLY
-
-// Instead of renaming that can not be supported in simpler filesystems
-// a full copy is done.
-func (d *DotGit) rewritePackedRefsWhileLocked(
- tmp billy.File, pr billy.File) error {
-
- prWrite, err := d.fs.Create(pr.Name())
- if err != nil {
- return err
- }
-
- defer prWrite.Close()
-
- _, err = tmp.Seek(0, io.SeekStart)
- if err != nil {
- return err
- }
-
- _, err = io.Copy(prWrite, tmp)
-
- return err
-}
diff --git a/storage/filesystem/internal/dotgit/dotgit_rewrite_packed_refs_windows.go b/storage/filesystem/internal/dotgit/dotgit_rewrite_packed_refs_windows.go
deleted file mode 100644
index 897d2c9..0000000
--- a/storage/filesystem/internal/dotgit/dotgit_rewrite_packed_refs_windows.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// +build windows,!norwfs
-
-package dotgit
-
-import (
- "io"
- "os"
-
- "gopkg.in/src-d/go-billy.v4"
-)
-
-const openAndLockPackedRefsMode = os.O_RDWR
-
-func (d *DotGit) rewritePackedRefsWhileLocked(
- tmp billy.File, pr billy.File) error {
- // If we aren't using the bare Windows filesystem as the storage
- // layer, we might be able to get away with a rename over a locked
- // file.
- err := d.fs.Rename(tmp.Name(), pr.Name())
- if err == nil {
- return nil
- }
-
- // Otherwise, Windows doesn't let us rename over a locked file, so
- // we have to do a straight copy. Unfortunately this could result
- // in a partially-written file if the process fails before the
- // copy completes.
- _, err = pr.Seek(0, io.SeekStart)
- if err != nil {
- return err
- }
- err = pr.Truncate(0)
- if err != nil {
- return err
- }
- _, err = tmp.Seek(0, io.SeekStart)
- if err != nil {
- return err
- }
- _, err = io.Copy(pr, tmp)
- return err
-}
diff --git a/storage/filesystem/module.go b/storage/filesystem/module.go
index 6f3de3f..9272206 100644
--- a/storage/filesystem/module.go
+++ b/storage/filesystem/module.go
@@ -1,8 +1,9 @@
package filesystem
import (
+ "gopkg.in/src-d/go-git.v4/plumbing/cache"
"gopkg.in/src-d/go-git.v4/storage"
- "gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit"
+ "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
)
type ModuleStorage struct {
@@ -15,5 +16,5 @@ func (s *ModuleStorage) Module(name string) (storage.Storer, error) {
return nil, err
}
- return NewStorage(fs)
+ return NewStorage(fs, cache.NewObjectLRUDefault()), nil
}
diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go
index 3ec7304..68bd140 100644
--- a/storage/filesystem/object.go
+++ b/storage/filesystem/object.go
@@ -11,28 +11,34 @@ import (
"gopkg.in/src-d/go-git.v4/plumbing/format/objfile"
"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
- "gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit"
- "gopkg.in/src-d/go-git.v4/storage/memory"
+ "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
"gopkg.in/src-d/go-git.v4/utils/ioutil"
"gopkg.in/src-d/go-billy.v4"
)
type ObjectStorage struct {
+ options Options
+
// deltaBaseCache is an object cache used to cache delta bases when needed.
deltaBaseCache cache.Object
dir *dotgit.DotGit
- index map[plumbing.Hash]*packfile.Index
+ index map[plumbing.Hash]idxfile.Index
+}
+
+// NewObjectStorage creates a new ObjectStorage with the given .git directory and cache.
+func NewObjectStorage(dir *dotgit.DotGit, cache cache.Object) *ObjectStorage {
+ return NewObjectStorageWithOptions(dir, cache, Options{})
}
-func newObjectStorage(dir *dotgit.DotGit) (ObjectStorage, error) {
- s := ObjectStorage{
- deltaBaseCache: cache.NewObjectLRUDefault(),
+// NewObjectStorageWithOptions creates a new ObjectStorage with the given .git directory, cache and extra options.
+func NewObjectStorageWithOptions(dir *dotgit.DotGit, cache cache.Object, ops Options) *ObjectStorage {
+ return &ObjectStorage{
+ options: ops,
+ deltaBaseCache: cache,
dir: dir,
}
-
- return s, nil
}
func (s *ObjectStorage) requireIndex() error {
@@ -40,7 +46,7 @@ func (s *ObjectStorage) requireIndex() error {
return nil
}
- s.index = make(map[plumbing.Hash]*packfile.Index)
+ s.index = make(map[plumbing.Hash]idxfile.Index)
packs, err := s.dir.ObjectPacks()
if err != nil {
return err
@@ -55,20 +61,21 @@ func (s *ObjectStorage) requireIndex() error {
return nil
}
-func (s *ObjectStorage) loadIdxFile(h plumbing.Hash) error {
+func (s *ObjectStorage) loadIdxFile(h plumbing.Hash) (err error) {
f, err := s.dir.ObjectPackIdx(h)
if err != nil {
return err
}
defer ioutil.CheckClose(f, &err)
- idxf := idxfile.NewIdxfile()
+
+ idxf := idxfile.NewMemoryIndex()
d := idxfile.NewDecoder(f)
if err = d.Decode(idxf); err != nil {
return err
}
- s.index[h] = packfile.NewIndexFromIdxFile(idxf)
+ s.index[h] = idxf
return err
}
@@ -86,15 +93,18 @@ func (s *ObjectStorage) PackfileWriter() (io.WriteCloser, error) {
return nil, err
}
- w.Notify = func(h plumbing.Hash, idx *packfile.Index) {
- s.index[h] = idx
+ w.Notify = func(h plumbing.Hash, writer *idxfile.Writer) {
+ index, err := writer.Index()
+ if err == nil {
+ s.index[h] = index
+ }
}
return w, nil
}
// SetEncodedObject adds a new object to the storage.
-func (s *ObjectStorage) SetEncodedObject(o plumbing.EncodedObject) (plumbing.Hash, error) {
+func (s *ObjectStorage) SetEncodedObject(o plumbing.EncodedObject) (h plumbing.Hash, err error) {
if o.Type() == plumbing.OFSDeltaObject || o.Type() == plumbing.REFDeltaObject {
return plumbing.ZeroHash, plumbing.ErrInvalidType
}
@@ -113,11 +123,11 @@ func (s *ObjectStorage) SetEncodedObject(o plumbing.EncodedObject) (plumbing.Has
defer ioutil.CheckClose(or, &err)
- if err := ow.WriteHeader(o.Type(), o.Size()); err != nil {
+ if err = ow.WriteHeader(o.Type(), o.Size()); err != nil {
return plumbing.ZeroHash, err
}
- if _, err := io.Copy(ow, or); err != nil {
+ if _, err = io.Copy(ow, or); err != nil {
return plumbing.ZeroHash, err
}
@@ -166,10 +176,7 @@ func (s *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (p
// Create a new object storage with the DotGit(s) and check for the
// required hash object. Skip when not found.
for _, dg := range dotgits {
- o, oe := newObjectStorage(dg)
- if oe != nil {
- continue
- }
+ o := NewObjectStorage(dg, s.deltaBaseCache)
enobj, enerr := o.EncodedObject(t, h)
if enerr != nil {
continue
@@ -265,7 +272,9 @@ func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) (
return nil, err
}
- defer ioutil.CheckClose(f, &err)
+ if !s.options.KeepDescriptors {
+ defer ioutil.CheckClose(f, &err)
+ }
idx := s.index[pack]
if canBeDelta {
@@ -277,30 +286,37 @@ func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) (
func (s *ObjectStorage) decodeObjectAt(
f billy.File,
- idx *packfile.Index,
- offset int64) (plumbing.EncodedObject, error) {
- if _, err := f.Seek(0, io.SeekStart); err != nil {
- return nil, err
+ idx idxfile.Index,
+ offset int64,
+) (plumbing.EncodedObject, error) {
+ hash, err := idx.FindHash(offset)
+ if err == nil {
+ obj, ok := s.deltaBaseCache.Get(hash)
+ if ok {
+ return obj, nil
+ }
}
- p := packfile.NewScanner(f)
-
- d, err := packfile.NewDecoderWithCache(p, memory.NewStorage(),
- s.deltaBaseCache)
- if err != nil {
+ if err != nil && err != plumbing.ErrObjectNotFound {
return nil, err
}
- d.SetIndex(idx)
- obj, err := d.DecodeObjectAt(offset)
- return obj, err
+ var p *packfile.Packfile
+ if s.deltaBaseCache != nil {
+ p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.deltaBaseCache)
+ } else {
+ p = packfile.NewPackfile(idx, s.dir.Fs(), f)
+ }
+
+ return p.GetByOffset(offset)
}
func (s *ObjectStorage) decodeDeltaObjectAt(
f billy.File,
- idx *packfile.Index,
+ idx idxfile.Index,
offset int64,
- hash plumbing.Hash) (plumbing.EncodedObject, error) {
+ hash plumbing.Hash,
+) (plumbing.EncodedObject, error) {
if _, err := f.Seek(0, io.SeekStart); err != nil {
return nil, err
}
@@ -323,12 +339,10 @@ func (s *ObjectStorage) decodeDeltaObjectAt(
case plumbing.REFDeltaObject:
base = header.Reference
case plumbing.OFSDeltaObject:
- e, ok := idx.LookupOffset(uint64(header.OffsetReference))
- if !ok {
- return nil, plumbing.ErrObjectNotFound
+ base, err = idx.FindHash(header.OffsetReference)
+ if err != nil {
+ return nil, err
}
-
- base = e.Hash
default:
return s.decodeObjectAt(f, idx, offset)
}
@@ -349,8 +363,9 @@ func (s *ObjectStorage) decodeDeltaObjectAt(
func (s *ObjectStorage) findObjectInPackfile(h plumbing.Hash) (plumbing.Hash, plumbing.Hash, int64) {
for packfile, index := range s.index {
- if e, ok := index.LookupHash(h); ok {
- return packfile, e.Hash, int64(e.Offset)
+ offset, err := index.FindOffset(h)
+ if err == nil {
+ return packfile, h, offset
}
}
@@ -365,7 +380,7 @@ func (s *ObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (storer.Encode
return nil, err
}
- seen := make(map[plumbing.Hash]bool)
+ seen := make(map[plumbing.Hash]struct{})
var iters []storer.EncodedObjectIter
if len(objects) != 0 {
iters = append(iters, &objectsIter{s: s, t: t, h: objects})
@@ -377,11 +392,14 @@ func (s *ObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (storer.Encode
return nil, err
}
- iters = append(iters, packi...)
+ iters = append(iters, packi)
return storer.NewMultiEncodedObjectIter(iters), nil
}
-func (s *ObjectStorage) buildPackfileIters(t plumbing.ObjectType, seen map[plumbing.Hash]bool) ([]storer.EncodedObjectIter, error) {
+func (s *ObjectStorage) buildPackfileIters(
+ t plumbing.ObjectType,
+ seen map[plumbing.Hash]struct{},
+) (storer.EncodedObjectIter, error) {
if err := s.requireIndex(); err != nil {
return nil, err
}
@@ -390,96 +408,172 @@ func (s *ObjectStorage) buildPackfileIters(t plumbing.ObjectType, seen map[plumb
if err != nil {
return nil, err
}
+ return &lazyPackfilesIter{
+ hashes: packs,
+ open: func(h plumbing.Hash) (storer.EncodedObjectIter, error) {
+ pack, err := s.dir.ObjectPack(h)
+ if err != nil {
+ return nil, err
+ }
+ return newPackfileIter(
+ s.dir.Fs(), pack, t, seen, s.index[h],
+ s.deltaBaseCache, s.options.KeepDescriptors,
+ )
+ },
+ }, nil
+}
- var iters []storer.EncodedObjectIter
- for _, h := range packs {
- pack, err := s.dir.ObjectPack(h)
- if err != nil {
- return nil, err
- }
+// Close closes all opened files.
+func (s *ObjectStorage) Close() error {
+ return s.dir.Close()
+}
- iter, err := newPackfileIter(pack, t, seen, s.index[h], s.deltaBaseCache)
- if err != nil {
+type lazyPackfilesIter struct {
+ hashes []plumbing.Hash
+ open func(h plumbing.Hash) (storer.EncodedObjectIter, error)
+ cur storer.EncodedObjectIter
+}
+
+func (it *lazyPackfilesIter) Next() (plumbing.EncodedObject, error) {
+ for {
+ if it.cur == nil {
+ if len(it.hashes) == 0 {
+ return nil, io.EOF
+ }
+ h := it.hashes[0]
+ it.hashes = it.hashes[1:]
+
+ sub, err := it.open(h)
+ if err == io.EOF {
+ continue
+ } else if err != nil {
+ return nil, err
+ }
+ it.cur = sub
+ }
+ ob, err := it.cur.Next()
+ if err == io.EOF {
+ it.cur.Close()
+ it.cur = nil
+ continue
+ } else if err != nil {
return nil, err
}
-
- iters = append(iters, iter)
+ return ob, nil
}
+}
+
+func (it *lazyPackfilesIter) ForEach(cb func(plumbing.EncodedObject) error) error {
+ return storer.ForEachIterator(it, cb)
+}
- return iters, nil
+func (it *lazyPackfilesIter) Close() {
+ if it.cur != nil {
+ it.cur.Close()
+ it.cur = nil
+ }
+ it.hashes = nil
}
type packfileIter struct {
- f billy.File
- d *packfile.Decoder
- t plumbing.ObjectType
+ pack billy.File
+ iter storer.EncodedObjectIter
+ seen map[plumbing.Hash]struct{}
- seen map[plumbing.Hash]bool
- position uint32
- total uint32
+ // tells whether the pack file should be left open after iteration or not
+ keepPack bool
}
-func NewPackfileIter(f billy.File, t plumbing.ObjectType) (storer.EncodedObjectIter, error) {
- return newPackfileIter(f, t, make(map[plumbing.Hash]bool), nil, nil)
-}
+// NewPackfileIter returns a new EncodedObjectIter for the provided packfile
+// and object type. Packfile and index file will be closed after they're
+// used. If keepPack is true, the packfile won't be closed after the iteration
+// finishes.
+func NewPackfileIter(
+ fs billy.Filesystem,
+ f billy.File,
+ idxFile billy.File,
+ t plumbing.ObjectType,
+ keepPack bool,
+) (storer.EncodedObjectIter, error) {
+ idx := idxfile.NewMemoryIndex()
+ if err := idxfile.NewDecoder(idxFile).Decode(idx); err != nil {
+ return nil, err
+ }
-func newPackfileIter(f billy.File, t plumbing.ObjectType, seen map[plumbing.Hash]bool,
- index *packfile.Index, cache cache.Object) (storer.EncodedObjectIter, error) {
- s := packfile.NewScanner(f)
- _, total, err := s.Header()
- if err != nil {
+ if err := idxFile.Close(); err != nil {
return nil, err
}
- d, err := packfile.NewDecoderForType(s, memory.NewStorage(), t, cache)
+ seen := make(map[plumbing.Hash]struct{})
+ return newPackfileIter(fs, f, t, seen, idx, nil, keepPack)
+}
+
+func newPackfileIter(
+ fs billy.Filesystem,
+ f billy.File,
+ t plumbing.ObjectType,
+ seen map[plumbing.Hash]struct{},
+ index idxfile.Index,
+ cache cache.Object,
+ keepPack bool,
+) (storer.EncodedObjectIter, error) {
+ var p *packfile.Packfile
+ if cache != nil {
+ p = packfile.NewPackfileWithCache(index, fs, f, cache)
+ } else {
+ p = packfile.NewPackfile(index, fs, f)
+ }
+
+ iter, err := p.GetByType(t)
if err != nil {
return nil, err
}
- d.SetIndex(index)
-
return &packfileIter{
- f: f,
- d: d,
- t: t,
-
- total: total,
- seen: seen,
+ pack: f,
+ iter: iter,
+ seen: seen,
+ keepPack: keepPack,
}, nil
}
func (iter *packfileIter) Next() (plumbing.EncodedObject, error) {
for {
- if iter.position >= iter.total {
- return nil, io.EOF
- }
-
- obj, err := iter.d.DecodeObject()
+ obj, err := iter.iter.Next()
if err != nil {
return nil, err
}
- iter.position++
- if obj == nil {
+ if _, ok := iter.seen[obj.Hash()]; ok {
continue
}
- if iter.seen[obj.Hash()] {
- return iter.Next()
- }
-
return obj, nil
}
}
-// ForEach is never called since is used inside of a MultiObjectIterator
func (iter *packfileIter) ForEach(cb func(plumbing.EncodedObject) error) error {
- return nil
+ for {
+ o, err := iter.Next()
+ if err != nil {
+ if err == io.EOF {
+ iter.Close()
+ return nil
+ }
+ return err
+ }
+
+ if err := cb(o); err != nil {
+ return err
+ }
+ }
}
func (iter *packfileIter) Close() {
- iter.f.Close()
- iter.d.Close()
+ iter.iter.Close()
+ if !iter.keepPack {
+ _ = iter.pack.Close()
+ }
}
type objectsIter struct {
@@ -507,21 +601,31 @@ func (iter *objectsIter) Next() (plumbing.EncodedObject, error) {
return obj, err
}
-// ForEach is never called since is used inside of a MultiObjectIterator
func (iter *objectsIter) ForEach(cb func(plumbing.EncodedObject) error) error {
- return nil
+ for {
+ o, err := iter.Next()
+ if err != nil {
+ if err == io.EOF {
+ return nil
+ }
+ return err
+ }
+
+ if err := cb(o); err != nil {
+ return err
+ }
+ }
}
func (iter *objectsIter) Close() {
iter.h = []plumbing.Hash{}
}
-func hashListAsMap(l []plumbing.Hash) map[plumbing.Hash]bool {
- m := make(map[plumbing.Hash]bool, len(l))
+func hashListAsMap(l []plumbing.Hash) map[plumbing.Hash]struct{} {
+ m := make(map[plumbing.Hash]struct{}, len(l))
for _, h := range l {
- m[h] = true
+ m[h] = struct{}{}
}
-
return m
}
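
A sketch of the reworked NewPackfileIter, which now takes the filesystem, the idx file and a keepPack flag. The paths are placeholders, and the pack/idx handles come from the dotgit APIs shown earlier:

    package main

    import (
        "fmt"
        "log"

        "gopkg.in/src-d/go-billy.v4/osfs"
        "gopkg.in/src-d/go-git.v4/plumbing"
        "gopkg.in/src-d/go-git.v4/storage/filesystem"
        "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
    )

    func main() {
        fs := osfs.New("/tmp/repo/.git") // placeholder .git directory
        dir := dotgit.New(fs)

        packs, err := dir.ObjectPacks()
        if err != nil {
            log.Fatal(err)
        }

        for _, h := range packs {
            pack, err := dir.ObjectPack(h)
            if err != nil {
                log.Fatal(err)
            }

            idx, err := dir.ObjectPackIdx(h)
            if err != nil {
                log.Fatal(err)
            }

            // keepPack=false: the packfile is closed when iteration finishes.
            iter, err := filesystem.NewPackfileIter(fs, pack, idx, plumbing.CommitObject, false)
            if err != nil {
                log.Fatal(err)
            }

            err = iter.ForEach(func(o plumbing.EncodedObject) error {
                fmt.Println(o.Hash())
                return nil
            })
            if err != nil {
                log.Fatal(err)
            }
        }
    }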
diff --git a/storage/filesystem/object_test.go b/storage/filesystem/object_test.go
index de8f2b2..407abf2 100644
--- a/storage/filesystem/object_test.go
+++ b/storage/filesystem/object_test.go
@@ -1,8 +1,13 @@
package filesystem
import (
+ "io/ioutil"
+ "os"
+ "testing"
+
"gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit"
+ "gopkg.in/src-d/go-git.v4/plumbing/cache"
+ "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
. "gopkg.in/check.v1"
"gopkg.in/src-d/go-git-fixtures.v3"
@@ -10,22 +15,20 @@ import (
type FsSuite struct {
fixtures.Suite
- Types []plumbing.ObjectType
}
-var _ = Suite(&FsSuite{
- Types: []plumbing.ObjectType{
- plumbing.CommitObject,
- plumbing.TagObject,
- plumbing.TreeObject,
- plumbing.BlobObject,
- },
-})
+var objectTypes = []plumbing.ObjectType{
+ plumbing.CommitObject,
+ plumbing.TagObject,
+ plumbing.TreeObject,
+ plumbing.BlobObject,
+}
+
+var _ = Suite(&FsSuite{})
func (s *FsSuite) TestGetFromObjectFile(c *C) {
fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit()
- o, err := newObjectStorage(dotgit.New(fs))
- c.Assert(err, IsNil)
+ o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault())
expected := plumbing.NewHash("f3dfe29d268303fc6e1bbce268605fc99573406e")
obj, err := o.EncodedObject(plumbing.AnyObject, expected)
@@ -36,20 +39,53 @@ func (s *FsSuite) TestGetFromObjectFile(c *C) {
func (s *FsSuite) TestGetFromPackfile(c *C) {
fixtures.Basic().ByTag(".git").Test(c, func(f *fixtures.Fixture) {
fs := f.DotGit()
- o, err := newObjectStorage(dotgit.New(fs))
+ o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault())
+
+ expected := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
+ obj, err := o.EncodedObject(plumbing.AnyObject, expected)
c.Assert(err, IsNil)
+ c.Assert(obj.Hash(), Equals, expected)
+ })
+}
+
+func (s *FsSuite) TestGetFromPackfileKeepDescriptors(c *C) {
+ fixtures.Basic().ByTag(".git").Test(c, func(f *fixtures.Fixture) {
+ fs := f.DotGit()
+ dg := dotgit.NewWithOptions(fs, dotgit.Options{KeepDescriptors: true})
+ o := NewObjectStorageWithOptions(dg, cache.NewObjectLRUDefault(), Options{KeepDescriptors: true})
expected := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
obj, err := o.EncodedObject(plumbing.AnyObject, expected)
c.Assert(err, IsNil)
c.Assert(obj.Hash(), Equals, expected)
+
+ packfiles, err := dg.ObjectPacks()
+ c.Assert(err, IsNil)
+
+ pack1, err := dg.ObjectPack(packfiles[0])
+ c.Assert(err, IsNil)
+
+ pack1.Seek(42, os.SEEK_SET)
+
+ err = o.Close()
+ c.Assert(err, IsNil)
+
+ pack2, err := dg.ObjectPack(packfiles[0])
+ c.Assert(err, IsNil)
+
+ offset, err := pack2.Seek(0, os.SEEK_CUR)
+ c.Assert(err, IsNil)
+ c.Assert(offset, Equals, int64(0))
+
+ err = o.Close()
+ c.Assert(err, IsNil)
+
})
}
func (s *FsSuite) TestGetFromPackfileMultiplePackfiles(c *C) {
fs := fixtures.ByTag(".git").ByTag("multi-packfile").One().DotGit()
- o, err := newObjectStorage(dotgit.New(fs))
- c.Assert(err, IsNil)
+ o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault())
expected := plumbing.NewHash("8d45a34641d73851e01d3754320b33bb5be3c4d3")
obj, err := o.getFromPackfile(expected, false)
@@ -65,8 +101,7 @@ func (s *FsSuite) TestGetFromPackfileMultiplePackfiles(c *C) {
func (s *FsSuite) TestIter(c *C) {
fixtures.ByTag(".git").ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
fs := f.DotGit()
- o, err := newObjectStorage(dotgit.New(fs))
- c.Assert(err, IsNil)
+ o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault())
iter, err := o.IterEncodedObjects(plumbing.AnyObject)
c.Assert(err, IsNil)
@@ -84,10 +119,9 @@ func (s *FsSuite) TestIter(c *C) {
func (s *FsSuite) TestIterWithType(c *C) {
fixtures.ByTag(".git").Test(c, func(f *fixtures.Fixture) {
- for _, t := range s.Types {
+ for _, t := range objectTypes {
fs := f.DotGit()
- o, err := newObjectStorage(dotgit.New(fs))
- c.Assert(err, IsNil)
+ o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault())
iter, err := o.IterEncodedObjects(t)
c.Assert(err, IsNil)
@@ -108,23 +142,215 @@ func (s *FsSuite) TestPackfileIter(c *C) {
fs := f.DotGit()
dg := dotgit.New(fs)
- for _, t := range s.Types {
+ for _, t := range objectTypes {
+ ph, err := dg.ObjectPacks()
+ c.Assert(err, IsNil)
+
+ for _, h := range ph {
+ f, err := dg.ObjectPack(h)
+ c.Assert(err, IsNil)
+
+ idxf, err := dg.ObjectPackIdx(h)
+ c.Assert(err, IsNil)
+
+ iter, err := NewPackfileIter(fs, f, idxf, t, false)
+ c.Assert(err, IsNil)
+
+ err = iter.ForEach(func(o plumbing.EncodedObject) error {
+ c.Assert(o.Type(), Equals, t)
+ return nil
+ })
+ c.Assert(err, IsNil)
+ }
+ }
+ })
+}
+
+func (s *FsSuite) TestPackfileIterKeepDescriptors(c *C) {
+ fixtures.ByTag(".git").Test(c, func(f *fixtures.Fixture) {
+ fs := f.DotGit()
+ ops := dotgit.Options{KeepDescriptors: true}
+ dg := dotgit.NewWithOptions(fs, ops)
+
+ for _, t := range objectTypes {
ph, err := dg.ObjectPacks()
c.Assert(err, IsNil)
for _, h := range ph {
f, err := dg.ObjectPack(h)
c.Assert(err, IsNil)
- iter, err := NewPackfileIter(f, t)
+
+ idxf, err := dg.ObjectPackIdx(h)
c.Assert(err, IsNil)
+
+ iter, err := NewPackfileIter(fs, f, idxf, t, true)
+ c.Assert(err, IsNil)
+
err = iter.ForEach(func(o plumbing.EncodedObject) error {
c.Assert(o.Type(), Equals, t)
return nil
})
+ c.Assert(err, IsNil)
+ // test twice to check that packfiles are not closed
+ err = iter.ForEach(func(o plumbing.EncodedObject) error {
+ c.Assert(o.Type(), Equals, t)
+ return nil
+ })
c.Assert(err, IsNil)
}
}
})
+}
+func BenchmarkPackfileIter(b *testing.B) {
+ if err := fixtures.Init(); err != nil {
+ b.Fatal(err)
+ }
+
+ defer func() {
+ if err := fixtures.Clean(); err != nil {
+ b.Fatal(err)
+ }
+ }()
+
+ for _, f := range fixtures.ByTag(".git") {
+ b.Run(f.URL, func(b *testing.B) {
+ fs := f.DotGit()
+ dg := dotgit.New(fs)
+
+ for i := 0; i < b.N; i++ {
+ for _, t := range objectTypes {
+ ph, err := dg.ObjectPacks()
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ for _, h := range ph {
+ f, err := dg.ObjectPack(h)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ idxf, err := dg.ObjectPackIdx(h)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ iter, err := NewPackfileIter(fs, f, idxf, t, false)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ err = iter.ForEach(func(o plumbing.EncodedObject) error {
+ if o.Type() != t {
+ b.Errorf("expecting %s, got %s", t, o.Type())
+ }
+ return nil
+ })
+
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ }
+ }
+ })
+ }
+}
+
+func BenchmarkPackfileIterReadContent(b *testing.B) {
+ if err := fixtures.Init(); err != nil {
+ b.Fatal(err)
+ }
+
+ defer func() {
+ if err := fixtures.Clean(); err != nil {
+ b.Fatal(err)
+ }
+ }()
+
+ for _, f := range fixtures.ByTag(".git") {
+ b.Run(f.URL, func(b *testing.B) {
+ fs := f.DotGit()
+ dg := dotgit.New(fs)
+
+ for i := 0; i < b.N; i++ {
+ for _, t := range objectTypes {
+ ph, err := dg.ObjectPacks()
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ for _, h := range ph {
+ f, err := dg.ObjectPack(h)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ idxf, err := dg.ObjectPackIdx(h)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ iter, err := NewPackfileIter(fs, f, idxf, t, false)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ err = iter.ForEach(func(o plumbing.EncodedObject) error {
+ if o.Type() != t {
+ b.Errorf("expecting %s, got %s", t, o.Type())
+ }
+
+ r, err := o.Reader()
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ if _, err := ioutil.ReadAll(r); err != nil {
+ b.Fatal(err)
+ }
+
+ return r.Close()
+ })
+
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ }
+ }
+ })
+ }
+}
+
+func BenchmarkGetObjectFromPackfile(b *testing.B) {
+ if err := fixtures.Init(); err != nil {
+ b.Fatal(err)
+ }
+
+ defer func() {
+ if err := fixtures.Clean(); err != nil {
+ b.Fatal(err)
+ }
+ }()
+
+ for _, f := range fixtures.Basic() {
+ b.Run(f.URL, func(b *testing.B) {
+ fs := f.DotGit()
+ o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault())
+ for i := 0; i < b.N; i++ {
+ expected := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
+ obj, err := o.EncodedObject(plumbing.AnyObject, expected)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ if obj.Hash() != expected {
+ b.Errorf("expecting %s, got %s", expected, obj.Hash())
+ }
+ }
+ })
+ }
}
diff --git a/storage/filesystem/reference.go b/storage/filesystem/reference.go
index 7313f05..a891b83 100644
--- a/storage/filesystem/reference.go
+++ b/storage/filesystem/reference.go
@@ -3,7 +3,7 @@ package filesystem
import (
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
- "gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit"
+ "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
)
type ReferenceStorage struct {
diff --git a/storage/filesystem/shallow.go b/storage/filesystem/shallow.go
index 394e6ed..502d406 100644
--- a/storage/filesystem/shallow.go
+++ b/storage/filesystem/shallow.go
@@ -5,7 +5,7 @@ import (
"fmt"
"gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit"
+ "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
"gopkg.in/src-d/go-git.v4/utils/ioutil"
)
@@ -26,7 +26,7 @@ func (s *ShallowStorage) SetShallow(commits []plumbing.Hash) error {
defer ioutil.CheckClose(f, &err)
for _, h := range commits {
- if _, err := fmt.Fprintf(f, "%s\n", h); err != err {
+ if _, err := fmt.Fprintf(f, "%s\n", h); err != nil {
return err
}
}
@@ -41,6 +41,8 @@ func (s *ShallowStorage) Shallow() ([]plumbing.Hash, error) {
return nil, err
}
+ defer ioutil.CheckClose(f, &err)
+
var hash []plumbing.Hash
scn := bufio.NewScanner(f)
diff --git a/storage/filesystem/storage.go b/storage/filesystem/storage.go
index 82b137c..14a772a 100644
--- a/storage/filesystem/storage.go
+++ b/storage/filesystem/storage.go
@@ -2,7 +2,8 @@
package filesystem
import (
- "gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit"
+ "gopkg.in/src-d/go-git.v4/plumbing/cache"
+ "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
"gopkg.in/src-d/go-billy.v4"
)
@@ -22,25 +23,45 @@ type Storage struct {
ModuleStorage
}
-// NewStorage returns a new Storage backed by a given `fs.Filesystem`
-func NewStorage(fs billy.Filesystem) (*Storage, error) {
- dir := dotgit.New(fs)
- o, err := newObjectStorage(dir)
- if err != nil {
- return nil, err
+// Options holds configuration for the storage.
+type Options struct {
+ // ExclusiveAccess means that the filesystem is not modified externally
+ // while the repo is open.
+ ExclusiveAccess bool
+ // KeepDescriptors keeps file descriptors open so they can be reused, but
+ // they must then be closed manually by calling Close().
+ KeepDescriptors bool
+}
+
+// NewStorage returns a new Storage backed by a given `fs.Filesystem` and cache.
+func NewStorage(fs billy.Filesystem, cache cache.Object) *Storage {
+ return NewStorageWithOptions(fs, cache, Options{})
+}
+
+// NewStorageWithOptions returns a new Storage with extra options,
+// backed by a given `fs.Filesystem` and cache.
+func NewStorageWithOptions(fs billy.Filesystem, cache cache.Object, ops Options) *Storage {
+ dirOps := dotgit.Options{
+ ExclusiveAccess: ops.ExclusiveAccess,
+ KeepDescriptors: ops.KeepDescriptors,
}
+ dir := dotgit.NewWithOptions(fs, dirOps)
return &Storage{
fs: fs,
dir: dir,
- ObjectStorage: o,
+ ObjectStorage: ObjectStorage{
+ options: ops,
+ deltaBaseCache: cache,
+ dir: dir,
+ },
ReferenceStorage: ReferenceStorage{dir: dir},
IndexStorage: IndexStorage{dir: dir},
ShallowStorage: ShallowStorage{dir: dir},
ConfigStorage: ConfigStorage{dir: dir},
ModuleStorage: ModuleStorage{dir: dir},
- }, nil
+ }
}
// Filesystem returns the underlying filesystem
@@ -48,6 +69,7 @@ func (s *Storage) Filesystem() billy.Filesystem {
return s.fs
}
+// Init initializes the .git directory.
func (s *Storage) Init() error {
return s.dir.Initialize()
}
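
A minimal sketch of the new storage constructors, which take the object cache explicitly and no longer return an error; the repository paths are placeholders:

    package main

    import (
        "fmt"
        "log"

        "gopkg.in/src-d/go-billy.v4/osfs"
        git "gopkg.in/src-d/go-git.v4"
        "gopkg.in/src-d/go-git.v4/plumbing/cache"
        "gopkg.in/src-d/go-git.v4/storage/filesystem"
    )

    func main() {
        st := filesystem.NewStorage(osfs.New("/tmp/repo/.git"), cache.NewObjectLRUDefault())

        // NewStorageWithOptions would enable ExclusiveAccess/KeepDescriptors instead.
        repo, err := git.Open(st, osfs.New("/tmp/repo"))
        if err != nil {
            log.Fatal(err)
        }

        head, err := repo.Head()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(head.Hash())
    }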
diff --git a/storage/filesystem/storage_test.go b/storage/filesystem/storage_test.go
index 4d9ba6f..6fa0d90 100644
--- a/storage/filesystem/storage_test.go
+++ b/storage/filesystem/storage_test.go
@@ -4,6 +4,7 @@ import (
"io/ioutil"
"testing"
+ "gopkg.in/src-d/go-git.v4/plumbing/cache"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/storage/test"
@@ -23,9 +24,12 @@ var _ = Suite(&StorageSuite{})
func (s *StorageSuite) SetUpTest(c *C) {
s.dir = c.MkDir()
- storage, err := NewStorage(osfs.New(s.dir))
- c.Assert(err, IsNil)
+ storage := NewStorage(osfs.New(s.dir), cache.NewObjectLRUDefault())
+
+ setUpTest(s, c, storage)
+}
+func setUpTest(s *StorageSuite, c *C, storage *Storage) {
// ensure that right interfaces are implemented
var _ storer.EncodedObjectStorer = storage
var _ storer.IndexStorer = storage
@@ -40,8 +44,7 @@ func (s *StorageSuite) SetUpTest(c *C) {
func (s *StorageSuite) TestFilesystem(c *C) {
fs := memfs.New()
- storage, err := NewStorage(fs)
- c.Assert(err, IsNil)
+ storage := NewStorage(fs, cache.NewObjectLRUDefault())
c.Assert(storage.Filesystem(), Equals, fs)
}
@@ -51,3 +54,19 @@ func (s *StorageSuite) TestNewStorageShouldNotAddAnyContentsToDir(c *C) {
c.Assert(err, IsNil)
c.Assert(fis, HasLen, 0)
}
+
+type StorageExclusiveSuite struct {
+ StorageSuite
+}
+
+var _ = Suite(&StorageExclusiveSuite{})
+
+func (s *StorageExclusiveSuite) SetUpTest(c *C) {
+ s.dir = c.MkDir()
+ storage := NewStorageWithOptions(
+ osfs.New(s.dir),
+ cache.NewObjectLRUDefault(),
+ Options{ExclusiveAccess: true})
+
+ setUpTest(&s.StorageSuite, c, storage)
+}
diff --git a/storage/memory/storage.go b/storage/memory/storage.go
index 0f66f1e..2e32509 100644
--- a/storage/memory/storage.go
+++ b/storage/memory/storage.go
@@ -165,7 +165,7 @@ func (o *ObjectStorage) Begin() storer.Transaction {
}
func (o *ObjectStorage) ForEachObjectHash(fun func(plumbing.Hash) error) error {
- for h, _ := range o.Objects {
+ for h := range o.Objects {
err := fun(h)
if err != nil {
if err == storer.ErrStop {
diff --git a/submodule_test.go b/submodule_test.go
index bea5a0f..2c0a2ed 100644
--- a/submodule_test.go
+++ b/submodule_test.go
@@ -5,6 +5,7 @@ import (
"io/ioutil"
"os"
"path/filepath"
+ "testing"
"gopkg.in/src-d/go-git.v4/plumbing"
@@ -66,6 +67,10 @@ func (s *SubmoduleSuite) TestInit(c *C) {
}
func (s *SubmoduleSuite) TestUpdate(c *C) {
+ if testing.Short() {
+ c.Skip("skipping test in short mode.")
+ }
+
sm, err := s.Worktree.Submodule("basic")
c.Assert(err, IsNil)
@@ -118,6 +123,10 @@ func (s *SubmoduleSuite) TestUpdateWithNotFetch(c *C) {
}
func (s *SubmoduleSuite) TestUpdateWithRecursion(c *C) {
+ if testing.Short() {
+ c.Skip("skipping test in short mode.")
+ }
+
sm, err := s.Worktree.Submodule("itself")
c.Assert(err, IsNil)
@@ -134,6 +143,10 @@ func (s *SubmoduleSuite) TestUpdateWithRecursion(c *C) {
}
func (s *SubmoduleSuite) TestUpdateWithInitAndUpdate(c *C) {
+ if testing.Short() {
+ c.Skip("skipping test in short mode.")
+ }
+
sm, err := s.Worktree.Submodule("basic")
c.Assert(err, IsNil)
@@ -183,6 +196,21 @@ func (s *SubmoduleSuite) TestSubmodulesInit(c *C) {
}
}
+func (s *SubmoduleSuite) TestGitSubmodulesSymlink(c *C) {
+ f, err := s.Worktree.Filesystem.Create("badfile")
+ c.Assert(err, IsNil)
+ defer f.Close()
+
+ err = s.Worktree.Filesystem.Remove(gitmodulesFile)
+ c.Assert(err, IsNil)
+
+ err = s.Worktree.Filesystem.Symlink("badfile", gitmodulesFile)
+ c.Assert(err, IsNil)
+
+ _, err = s.Worktree.Submodules()
+ c.Assert(err, Equals, ErrGitModulesSymlink)
+}
+
func (s *SubmoduleSuite) TestSubmodulesStatus(c *C) {
sm, err := s.Worktree.Submodules()
c.Assert(err, IsNil)
@@ -193,6 +221,10 @@ func (s *SubmoduleSuite) TestSubmodulesStatus(c *C) {
}
func (s *SubmoduleSuite) TestSubmodulesUpdateContext(c *C) {
+ if testing.Short() {
+ c.Skip("skipping test in short mode.")
+ }
+
sm, err := s.Worktree.Submodules()
c.Assert(err, IsNil)
diff --git a/utils/diff/diff.go b/utils/diff/diff.go
index b840ad6..f49ae55 100644
--- a/utils/diff/diff.go
+++ b/utils/diff/diff.go
@@ -16,8 +16,8 @@ import (
// string into the dst string.
func Do(src, dst string) (diffs []diffmatchpatch.Diff) {
dmp := diffmatchpatch.New()
- wSrc, wDst, warray := dmp.DiffLinesToChars(src, dst)
- diffs = dmp.DiffMain(wSrc, wDst, false)
+ wSrc, wDst, warray := dmp.DiffLinesToRunes(src, dst)
+ diffs = dmp.DiffMainRunes(wSrc, wDst, false)
diffs = dmp.DiffCharsToLines(diffs, warray)
return diffs
}
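
A note on the change above: the char-based line encoding in diffmatchpatch indexes each distinct line with a single code unit, which can misbehave once an input has a very large number of unique lines; the rune-based variant raises that ceiling. The exported helper is unchanged, as this sketch (with made-up inputs) shows:

    package main

    import (
            "fmt"

            "gopkg.in/src-d/go-git.v4/utils/diff"
    )

    func main() {
            src := "a\nb\nc\n"
            dst := "a\nB\nc\n"
            // Do still yields line-level diffmatchpatch.Diff chunks; only the
            // internal line encoding switched from chars to runes.
            for _, d := range diff.Do(src, dst) {
                    fmt.Printf("%v %q\n", d.Type, d.Text)
            }
    }
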
diff --git a/utils/merkletrie/difftree.go b/utils/merkletrie/difftree.go
index 2294096..d57ed13 100644
--- a/utils/merkletrie/difftree.go
+++ b/utils/merkletrie/difftree.go
@@ -248,15 +248,30 @@ package merkletrie
// h: else of i
import (
+ "context"
+ "errors"
"fmt"
"gopkg.in/src-d/go-git.v4/utils/merkletrie/noder"
)
+var (
+ // ErrCanceled is returned when the operation is canceled via its context.
+ ErrCanceled = errors.New("operation canceled")
+)
+
// DiffTree calculates the list of changes between two merkletries. It
// uses the provided hashEqual callback to compare noders.
func DiffTree(fromTree, toTree noder.Noder,
hashEqual noder.Equal) (Changes, error) {
+ return DiffTreeContext(context.Background(), fromTree, toTree, hashEqual)
+}
+
+// DiffTreeContext calculates the list of changes between two merkletries. It
+// uses the provided hashEqual callback to compare noders.
+// An error is returned if the context expires.
+// The provided context must be non-nil.
+func DiffTreeContext(ctx context.Context, fromTree, toTree noder.Noder,
+ hashEqual noder.Equal) (Changes, error) {
ret := NewChanges()
ii, err := newDoubleIter(fromTree, toTree, hashEqual)
@@ -265,6 +280,12 @@ func DiffTree(fromTree, toTree noder.Noder,
}
for {
+ select {
+ case <-ctx.Done():
+ return nil, ErrCanceled
+ default:
+ }
+
from := ii.from.current
to := ii.to.current
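
A sketch of how a caller might bound a long-running diff with the new context-aware entry point; the helper name diffWithTimeout and the 5-second budget are illustrative assumptions:

    package example

    import (
            "context"
            "time"

            "gopkg.in/src-d/go-git.v4/utils/merkletrie"
            "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder"
    )

    // diffWithTimeout cancels the tree diff if it runs past the deadline;
    // fromTree and toTree can be any noder.Noder implementations.
    func diffWithTimeout(fromTree, toTree noder.Noder, hashEqual noder.Equal) (merkletrie.Changes, error) {
            ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
            defer cancel()

            // On expiry DiffTreeContext returns merkletrie.ErrCanceled.
            return merkletrie.DiffTreeContext(ctx, fromTree, toTree, hashEqual)
    }
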
diff --git a/utils/merkletrie/difftree_test.go b/utils/merkletrie/difftree_test.go
index 9f033b1..ab0eb57 100644
--- a/utils/merkletrie/difftree_test.go
+++ b/utils/merkletrie/difftree_test.go
@@ -2,6 +2,7 @@ package merkletrie_test
import (
"bytes"
+ ctx "context"
"fmt"
"reflect"
"sort"
@@ -61,9 +62,45 @@ func (t diffTreeTest) innerRun(c *C, context string, reverse bool) {
c.Assert(obtained, changesEquals, expected, comment)
}
+func (t diffTreeTest) innerRunCtx(c *C, context string, reverse bool) {
+ comment := Commentf("\n%s", context)
+ if reverse {
+ comment = Commentf("%s [REVERSED]", comment.CheckCommentString())
+ }
+
+ a, err := fsnoder.New(t.from)
+ c.Assert(err, IsNil, comment)
+ comment = Commentf("%s\n\t from = %s", comment.CheckCommentString(), a)
+
+ b, err := fsnoder.New(t.to)
+ c.Assert(err, IsNil, comment)
+ comment = Commentf("%s\n\t to = %s", comment.CheckCommentString(), b)
+
+ expected, err := newChangesFromString(t.expected)
+ c.Assert(err, IsNil, comment)
+
+ if reverse {
+ a, b = b, a
+ expected = expected.reverse()
+ }
+ comment = Commentf("%s\n\texpected = %s", comment.CheckCommentString(), expected)
+
+ results, err := merkletrie.DiffTreeContext(ctx.Background(), a, b, fsnoder.HashEqual)
+ c.Assert(err, IsNil, comment)
+
+ obtained, err := newChanges(results)
+ c.Assert(err, IsNil, comment)
+
+ comment = Commentf("%s\n\tobtained = %s", comment.CheckCommentString(), obtained)
+
+ c.Assert(obtained, changesEquals, expected, comment)
+}
+
func (t diffTreeTest) run(c *C, context string) {
t.innerRun(c, context, false)
t.innerRun(c, context, true)
+ t.innerRunCtx(c, context, false)
+ t.innerRunCtx(c, context, true)
}
type change struct {
@@ -437,3 +474,27 @@ func (s *DiffTreeSuite) TestIssue275(c *C) {
},
})
}
+
+func (s *DiffTreeSuite) TestCancel(c *C) {
+ t := diffTreeTest{"()", "(a<> b<1> c() d<> e<2> f())", "+a +b +d +e"}
+ comment := Commentf("\n%s", "test cancel:")
+
+ a, err := fsnoder.New(t.from)
+ c.Assert(err, IsNil, comment)
+ comment = Commentf("%s\n\t from = %s", comment.CheckCommentString(), a)
+
+ b, err := fsnoder.New(t.to)
+ c.Assert(err, IsNil, comment)
+ comment = Commentf("%s\n\t to = %s", comment.CheckCommentString(), b)
+
+ expected, err := newChangesFromString(t.expected)
+ c.Assert(err, IsNil, comment)
+
+ comment = Commentf("%s\n\texpected = %s", comment.CheckCommentString(), expected)
+ context, cancel := ctx.WithCancel(ctx.Background())
+ cancel()
+ results, err := merkletrie.DiffTreeContext(context, a, b, fsnoder.HashEqual)
+ c.Assert(results, IsNil, comment)
+ c.Assert(err, ErrorMatches, "operation canceled")
+}
diff --git a/worktree.go b/worktree.go
index a23397e..e45d815 100644
--- a/worktree.go
+++ b/worktree.go
@@ -13,6 +13,7 @@ import (
"gopkg.in/src-d/go-git.v4/config"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/filemode"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/gitignore"
"gopkg.in/src-d/go-git.v4/plumbing/format/index"
"gopkg.in/src-d/go-git.v4/plumbing/object"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
@@ -27,12 +28,15 @@ var (
ErrWorktreeNotClean = errors.New("worktree is not clean")
ErrSubmoduleNotFound = errors.New("submodule not found")
ErrUnstagedChanges = errors.New("worktree contains unstaged changes")
+ ErrGitModulesSymlink = errors.New(gitmodulesFile + " is a symlink")
)
// Worktree represents a git worktree.
type Worktree struct {
// Filesystem underlying filesystem.
Filesystem billy.Filesystem
+ // Excludes holds external exclude patterns that are not stored in the repository's .gitignore files.
+ Excludes []gitignore.Pattern
r *Repository
}
@@ -554,6 +558,22 @@ func (w *Worktree) checkoutFileSymlink(f *object.File) (err error) {
}
err = w.Filesystem.Symlink(string(bytes), f.Name)
+
+ // On Windows, creating a symlink may fail for non-admin users.
+ // Follow Git for Windows behavior and write the link target as a regular file.
+ if err != nil && isSymlinkWindowsNonAdmin(err) {
+ mode, _ := f.Mode.ToOSFileMode()
+
+ to, err := w.Filesystem.OpenFile(f.Name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode.Perm())
+ if err != nil {
+ return err
+ }
+
+ defer ioutil.CheckClose(to, &err)
+
+ _, err = to.Write(bytes)
+ return err
+ }
return
}
@@ -607,10 +627,6 @@ func (w *Worktree) getTreeFromCommitHash(commit plumbing.Hash) (*object.Tree, er
return c.Tree()
}
-func (w *Worktree) initializeIndex() error {
- return w.r.Storer.SetIndex(&index.Index{Version: 2})
-}
-
var fillSystemInfo func(e *index.Entry, sys interface{})
const gitmodulesFile = ".gitmodules"
@@ -665,7 +681,18 @@ func (w *Worktree) newSubmodule(fromModules, fromConfig *config.Submodule) *Subm
return m
}
+func (w *Worktree) isSymlink(path string) bool {
+ if s, err := w.Filesystem.Lstat(path); err == nil {
+ return s.Mode()&os.ModeSymlink != 0
+ }
+ return false
+}
+
func (w *Worktree) readGitmodulesFile() (*config.Modules, error) {
+ if w.isSymlink(gitmodulesFile) {
+ return nil, ErrGitModulesSymlink
+ }
+
f, err := w.Filesystem.Open(gitmodulesFile)
if err != nil {
if os.IsNotExist(err) {
@@ -686,29 +713,54 @@ func (w *Worktree) readGitmodulesFile() (*config.Modules, error) {
}
// Clean the worktree by removing untracked files.
+// Empty directories are removed when the Dir option is set, as `git clean -f -d .` does.
func (w *Worktree) Clean(opts *CleanOptions) error {
s, err := w.Status()
if err != nil {
return err
}
- // Check Worktree status to be Untracked, obtain absolute path and delete.
- for relativePath, status := range s {
- // Check if the path contains a directory and if Dir options is false,
- // skip the path.
- if relativePath != filepath.Base(relativePath) && !opts.Dir {
+ root := ""
+ files, err := w.Filesystem.ReadDir(root)
+ if err != nil {
+ return err
+ }
+ return w.doClean(s, opts, root, files)
+}
+
+func (w *Worktree) doClean(status Status, opts *CleanOptions, dir string, files []os.FileInfo) error {
+ for _, fi := range files {
+ if fi.Name() == ".git" {
continue
}
- // Remove the file only if it's an untracked file.
- if status.Worktree == Untracked {
- absPath := filepath.Join(w.Filesystem.Root(), relativePath)
- if err := os.Remove(absPath); err != nil {
+ // relative path under the root
+ path := filepath.Join(dir, fi.Name())
+ if fi.IsDir() {
+ if !opts.Dir {
+ continue
+ }
+
+ subfiles, err := w.Filesystem.ReadDir(path)
+ if err != nil {
return err
}
+ err = w.doClean(status, opts, path, subfiles)
+ if err != nil {
+ return err
+ }
+ } else {
+ if status.IsUntracked(path) {
+ if err := w.Filesystem.Remove(path); err != nil {
+ return err
+ }
+ }
}
}
+ if opts.Dir {
+ return doCleanDirectories(w.Filesystem, dir)
+ }
return nil
}
@@ -854,15 +906,18 @@ func rmFileAndDirIfEmpty(fs billy.Filesystem, name string) error {
return err
}
- path := filepath.Dir(name)
- files, err := fs.ReadDir(path)
+ dir := filepath.Dir(name)
+ return doCleanDirectories(fs, dir)
+}
+
+// doCleanDirectories removes the given directory when it contains no files.
+func doCleanDirectories(fs billy.Filesystem, dir string) error {
+ files, err := fs.ReadDir(dir)
if err != nil {
return err
}
-
if len(files) == 0 {
- fs.Remove(path)
+ return fs.Remove(dir)
}
-
return nil
}
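
From the caller's perspective the recursive clean looks like this; a minimal sketch, assuming a repository at a hypothetical path:

    package main

    import (
            "log"

            git "gopkg.in/src-d/go-git.v4"
    )

    func main() {
            repo, err := git.PlainOpen("/tmp/repo") // hypothetical path
            if err != nil {
                    log.Fatal(err)
            }
            w, err := repo.Worktree()
            if err != nil {
                    log.Fatal(err)
            }
            // With Dir set, untracked files are removed recursively and any
            // directories left empty are deleted too, as `git clean -f -d .` does.
            if err := w.Clean(&git.CleanOptions{Dir: true}); err != nil {
                    log.Fatal(err)
            }
    }
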
diff --git a/worktree_darwin.go b/worktree_bsd.go
index 8eaffde..3b374c7 100644
--- a/worktree_darwin.go
+++ b/worktree_bsd.go
@@ -20,3 +20,7 @@ func init() {
}
}
}
+
+func isSymlinkWindowsNonAdmin(err error) bool {
+ return false
+}
diff --git a/worktree_commit.go b/worktree_commit.go
index 3145c8a..673eb16 100644
--- a/worktree_commit.go
+++ b/worktree_commit.go
@@ -1,9 +1,12 @@
package git
import (
+ "bytes"
"path"
+ "sort"
"strings"
+ "golang.org/x/crypto/openpgp"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/filemode"
"gopkg.in/src-d/go-git.v4/plumbing/format/index"
@@ -63,7 +66,6 @@ func (w *Worktree) autoAddModifiedAndDeleted() error {
if _, err := w.Add(path); err != nil {
return err
}
-
}
return nil
@@ -93,6 +95,14 @@ func (w *Worktree) buildCommitObject(msg string, opts *CommitOptions, tree plumb
ParentHashes: opts.Parents,
}
+ if opts.SignKey != nil {
+ sig, err := w.buildCommitSignature(commit, opts.SignKey)
+ if err != nil {
+ return plumbing.ZeroHash, err
+ }
+ commit.PGPSignature = sig
+ }
+
obj := w.r.Storer.NewEncodedObject()
if err := commit.Encode(obj); err != nil {
return plumbing.ZeroHash, err
@@ -100,6 +110,22 @@ func (w *Worktree) buildCommitObject(msg string, opts *CommitOptions, tree plumb
return w.r.Storer.SetEncodedObject(obj)
}
+func (w *Worktree) buildCommitSignature(commit *object.Commit, signKey *openpgp.Entity) (string, error) {
+ encoded := &plumbing.MemoryObject{}
+ if err := commit.Encode(encoded); err != nil {
+ return "", err
+ }
+ r, err := encoded.Reader()
+ if err != nil {
+ return "", err
+ }
+ var b bytes.Buffer
+ if err := openpgp.ArmoredDetachSign(&b, signKey, r, nil); err != nil {
+ return "", err
+ }
+ return b.String(), nil
+}
+
// buildTreeHelper converts a given index.Index file into multiple git objects
// reading the blobs from the given filesystem and creating the trees from the
// index structure. The created objects are pushed to a given Storer.
@@ -163,7 +189,20 @@ func (h *buildTreeHelper) doBuildTree(e *index.Entry, parent, fullpath string) {
h.trees[parent].Entries = append(h.trees[parent].Entries, te)
}
+type sortableEntries []object.TreeEntry
+
+func (sortableEntries) sortName(te object.TreeEntry) string {
+ if te.Mode == filemode.Dir {
+ return te.Name + "/"
+ }
+ return te.Name
+}
+func (se sortableEntries) Len() int { return len(se) }
+func (se sortableEntries) Less(i int, j int) bool { return se.sortName(se[i]) < se.sortName(se[j]) }
+func (se sortableEntries) Swap(i int, j int) { se[i], se[j] = se[j], se[i] }
+
func (h *buildTreeHelper) copyTreeToStorageRecursive(parent string, t *object.Tree) (plumbing.Hash, error) {
+ sort.Sort(sortableEntries(t.Entries))
for i, e := range t.Entries {
if e.Mode != filemode.Dir && !e.Hash.IsZero() {
continue
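
A sketch of commit signing from the caller's side. The key file path is a hypothetical placeholder, and the entity must already be decrypted; signing with an encrypted key fails, as TestCommitSignBadKey below exercises:

    package main

    import (
            "log"
            "os"
            "time"

            "golang.org/x/crypto/openpgp"
            git "gopkg.in/src-d/go-git.v4"
            "gopkg.in/src-d/go-git.v4/plumbing/object"
    )

    func main() {
            repo, err := git.PlainOpen("/tmp/repo") // hypothetical path
            if err != nil {
                    log.Fatal(err)
            }
            w, err := repo.Worktree()
            if err != nil {
                    log.Fatal(err)
            }

            f, err := os.Open("key.asc") // hypothetical armored private key
            if err != nil {
                    log.Fatal(err)
            }
            defer f.Close()
            es, err := openpgp.ReadArmoredKeyRing(f)
            if err != nil {
                    log.Fatal(err)
            }

            // SignKey makes buildCommitObject attach an armored detached signature.
            _, err = w.Commit("signed commit\n", &git.CommitOptions{
                    Author:  &object.Signature{Name: "foo", Email: "foo@foo.foo", When: time.Now()},
                    SignKey: es[0],
            })
            if err != nil {
                    log.Fatal(err)
            }
    }
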
diff --git a/worktree_commit_test.go b/worktree_commit_test.go
index 5575bca..da377c6 100644
--- a/worktree_commit_test.go
+++ b/worktree_commit_test.go
@@ -1,15 +1,26 @@
package git
import (
+ "bytes"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "strings"
"time"
"gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/cache"
"gopkg.in/src-d/go-git.v4/plumbing/object"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
+ "gopkg.in/src-d/go-git.v4/storage/filesystem"
"gopkg.in/src-d/go-git.v4/storage/memory"
+ "golang.org/x/crypto/openpgp"
+ "golang.org/x/crypto/openpgp/armor"
+ "golang.org/x/crypto/openpgp/errors"
. "gopkg.in/check.v1"
"gopkg.in/src-d/go-billy.v4/memfs"
+ "gopkg.in/src-d/go-billy.v4/osfs"
"gopkg.in/src-d/go-billy.v4/util"
)
@@ -135,6 +146,109 @@ func (s *WorktreeSuite) TestRemoveAndCommitAll(c *C) {
assertStorageStatus(c, s.Repository, 13, 11, 11, expected)
}
+func (s *WorktreeSuite) TestCommitSign(c *C) {
+ fs := memfs.New()
+ storage := memory.NewStorage()
+
+ r, err := Init(storage, fs)
+ c.Assert(err, IsNil)
+
+ w, err := r.Worktree()
+ c.Assert(err, IsNil)
+
+ util.WriteFile(fs, "foo", []byte("foo"), 0644)
+
+ _, err = w.Add("foo")
+ c.Assert(err, IsNil)
+
+ key := commitSignKey(c, true)
+ hash, err := w.Commit("foo\n", &CommitOptions{Author: defaultSignature(), SignKey: key})
+ c.Assert(err, IsNil)
+
+ // Verify the commit.
+ pks := new(bytes.Buffer)
+ pkw, err := armor.Encode(pks, openpgp.PublicKeyType, nil)
+ c.Assert(err, IsNil)
+
+ err = key.Serialize(pkw)
+ c.Assert(err, IsNil)
+ err = pkw.Close()
+ c.Assert(err, IsNil)
+
+ expectedCommit, err := r.CommitObject(hash)
+ c.Assert(err, IsNil)
+ actual, err := expectedCommit.Verify(pks.String())
+ c.Assert(err, IsNil)
+ c.Assert(actual.PrimaryKey, DeepEquals, key.PrimaryKey)
+}
+
+func (s *WorktreeSuite) TestCommitSignBadKey(c *C) {
+ fs := memfs.New()
+ storage := memory.NewStorage()
+
+ r, err := Init(storage, fs)
+ c.Assert(err, IsNil)
+
+ w, err := r.Worktree()
+ c.Assert(err, IsNil)
+
+ util.WriteFile(fs, "foo", []byte("foo"), 0644)
+
+ _, err = w.Add("foo")
+ c.Assert(err, IsNil)
+
+ key := commitSignKey(c, false)
+ _, err = w.Commit("foo\n", &CommitOptions{Author: defaultSignature(), SignKey: key})
+ c.Assert(err, Equals, errors.InvalidArgumentError("signing key is encrypted"))
+}
+
+func (s *WorktreeSuite) TestCommitTreeSort(c *C) {
+ path, err := ioutil.TempDir(os.TempDir(), "test-commit-tree-sort")
+ c.Assert(err, IsNil)
+ fs := osfs.New(path)
+ st := filesystem.NewStorage(fs, cache.NewObjectLRUDefault())
+ r, err := Init(st, nil)
+ c.Assert(err, IsNil)
+
+ r, err = Clone(memory.NewStorage(), memfs.New(), &CloneOptions{
+ URL: path,
+ })
+ c.Assert(err, IsNil)
+
+ w, err := r.Worktree()
+ c.Assert(err, IsNil)
+
+ mfs := w.Filesystem
+
+ err = mfs.MkdirAll("delta", 0755)
+ c.Assert(err, IsNil)
+
+ for _, p := range []string{"delta_last", "Gamma", "delta/middle", "Beta", "delta-first", "alpha"} {
+ util.WriteFile(mfs, p, []byte("foo"), 0644)
+ _, err = w.Add(p)
+ c.Assert(err, IsNil)
+ }
+
+ _, err = w.Commit("foo\n", &CommitOptions{
+ All: true,
+ Author: defaultSignature(),
+ })
+ c.Assert(err, IsNil)
+
+ err = r.Push(&PushOptions{})
+ c.Assert(err, IsNil)
+
+ cmd := exec.Command("git", "fsck")
+ cmd.Dir = path
+ cmd.Env = os.Environ()
+ buf := &bytes.Buffer{}
+ cmd.Stderr = buf
+ cmd.Stdout = buf
+
+ err = cmd.Run()
+
+ c.Assert(err, IsNil, Commentf("%s", buf.Bytes()))
+}
+
func assertStorageStatus(
c *C, r *Repository,
treesCount, blobCount, commitCount int, head plumbing.Hash,
@@ -173,3 +287,83 @@ func defaultSignature() *object.Signature {
When: when,
}
}
+
+func commitSignKey(c *C, decrypt bool) *openpgp.Entity {
+ s := strings.NewReader(armoredKeyRing)
+ es, err := openpgp.ReadArmoredKeyRing(s)
+ c.Assert(err, IsNil)
+
+ c.Assert(es, HasLen, 1)
+ c.Assert(es[0].Identities, HasLen, 1)
+ _, ok := es[0].Identities["foo bar <foo@foo.foo>"]
+ c.Assert(ok, Equals, true)
+
+ key := es[0]
+ if decrypt {
+ err = key.PrivateKey.Decrypt([]byte(keyPassphrase))
+ c.Assert(err, IsNil)
+ }
+
+ return key
+}
+
+const armoredKeyRing = `
+-----BEGIN PGP PRIVATE KEY BLOCK-----
+
+lQdGBFt89QIBEAC8du0Purt9yeFuLlBYHcexnZvcbaci2pY+Ejn1VnxM7caFxRX/
+b2weZi9E6+I0F+K/hKIaidPdcbK92UCL0Vp6F3izjqategZ7o44vlK/HfWFME4wv
+sou6lnig9ovA73HRyzngi3CmqWxSdg8lL0kIJLNzlvCFEd4Z34BnEkagklQJRymo
+0WnmLJjSnZFT5Nk7q5jrcR7ApbD98cakvgivDlUBPJCk2JFPWheCkouWPHMvLXQz
+bZXW5RFz4lJsMUWa/S3ofvIOnjG5Etnil3IA4uksS8fSDkGus998mBvUwzqX7xBh
+dK17ZEbxDdO4PuVJDkjvq618rMu8FVk5yVd59rUketSnGrehd/+vdh6qtgQC4tu1
+RldbUVAuKZGg79H61nWnvrDZmbw4eoqCEuv1+aZsM9ElSC5Ps2J0rtpHRyBndKn+
+8Jlc/KTH04/O+FAhEv0IgMTFEm3iAq8udBhRBgu6Y4gJyn4tqy6+6ZjPUNos8GOG
++ZJPdrgHHHfQged1ygeceN6W2AwQRet/B3/rieHf2V93uHJy/DjYUEuBhPm9nxqi
+R6ILUr97Sj2EsvLyfQO9pFpIctoNKEJmDx/C9tkFMNNlQhpsBitSdR2/wancw9ND
+iWV/J9roUdC0qns7eNSbiFe3Len8Xir7srnjAFgbGvOu9jDBUuiKGT5F3wARAQAB
+/gcDAl+0SktmjrUW8uwpvru6GeIeo5kc4rXuD7iIxH6nDl3nmjZMX7qWvp+pRTHH
+0hEDH44899PDvzclBN3ouehfFUbJ+DBy8umBiLqF8Mu2PrKjdmyv3BvnbTkqPM3m
+2Su7WmUDBhG00X07lfl8fTpZJG80onEGzGynryP/xVm4ymzoHyYGksntXLYr2HJ5
+aV6L7sL2/STsaaOVHoa/oEmVBo1+NRsTxRRUcFVLs3g0OIi6ZCeSevBdavMwf9Iv
+b5Bs/e0+GLpP71XzFpdrGcL6oGjZH/dgdeypzbGA+FHtQJqynN3qEE9eCc9cfTGL
+2zN2OtnMA28NtPVN4SnSxQIDvycWx68NZjfwLOK+gswfKpimp+6xMWSnNIRDyU9M
+w0hdNPMK9JAxm/MlnkR7x6ysX/8vrVVFl9gWOmxzJ5L4kvfMsHcV5ZFRP8OnVA6a
+NFBWIBGXF1uQC4qrXup/xKyWJOoH++cMo2cjPT3+3oifZgdBydVfHXjS9aQ/S3Sa
+A6henWyx/qeBGPVRuXWdXIOKDboOPK8JwQaGd6yazKkH9c5tDohmQHzZ6ho0gyAt
+dh+g9ZyiZVpjc6excfK/DP/RdUOYKw3Ur9652hKephvYZzHvPjTbqVkhS7JjZkVY
+rukQ64d5T0pE1B4y+If4hLFXMNQtfo0TIsATNA69jop+KFnJpLzAB+Ee33EA/HUl
+YC5EJCJaXt6kdtYFac0HvVWiz5ZuMhdtzpJfvOe+Olp/xR9nIPW3XZojQoHIZKwu
+gXeZeVMvfeoq+ymKAKNH5Np4WaUDF7Wh9VLl045jGyF5viyy61ivC0eyAzp5W1uy
+gJBZwafVma5MhmZUS2dFs0hBwBrKRzZZhN65VvfSYw6CnXp83ryUjReDvrLmqZDM
+FNpSMDKRk1+k9Wwi3m+fzLAvlxoHscJ5Any7ApsvBRbyehP8MAAG7UV3jImugTLi
+yN6FKVwziQXiC4/97oKbA1YYNjTT7Qw9gWTXvLRspn4f9997brcA9dm0M0seTjLa
+lc5hTJwJQdvPPI2klf+YgPvsD6nrP1moeWBb8irICqG1/BoE0JHPS+bqJ1J+m1iV
+kRV/+4pV2bLlXKqg1LEvqANW+1P1eM2nbbVB7EQn8ZOPIKMoCLoC1QWUPNfnemsW
+U5ynAbhsbm16PDJql0ApEgUCEDfsXTu1ui6SIO3bs/gWyD9HEmnfaYMYDKF+j+0r
+jXd4GnCxb+Yu3wV5WyewOHouzC+++h/3WcDLkOYZ9pcIbA86qT+v6b9MuTAU0D3c
+wlDv8r5J59zOcXl4HpMb2BY5F9dZn8hjgeVJRhJdij9x1TQ8qlVasSi4Eq8SiPmZ
+PZz33Pk6yn2caQ6wd47A79LXCbFQqJqA5aA6oS4DOpENGS5fh7WUZq/MTcmm9GsG
+w2gHxocASK9RCUYgZFWVYgLDuviMMWvc/2TJcTMxdF0Amu3erYAD90smFs0g/6fZ
+4pRLnKFuifwAMGMOx7jbW5tmOaSPx6XkuYvkDJeLMHoN3z/8bZEG5VpayypwFGyV
+bk/YIUWg/KM/43juDPdTvab9tZzYIjxC6on7dtYIAGjZis97XZou3KYKTaMe1VY6
+IhrnVzJ0JAHpd1prf9NUz96e1vjGdn3I61JgjNp5sWklIJEZzvaD28Eovf/LH1BO
+gYFFCvsWXaRoPHNQ5a9m7CROkLeHUFgRu5uriqHxxQHgogDznc8/3fnvDAHNpNb6
+Jnk4zaeVR3tTyIjiNM+wxUFPDNFpJWmQbSDCcPVYTbpznzVRnhqrw7q0FWZvbyBi
+YXIgPGZvb0Bmb28uZm9vPokCVAQTAQgAPgIbAwULCQgHAgYVCAkKCwIEFgIDAQIe
+AQIXgBYhBJOhf/AeVDKFRgh8jgKTlUAu/M1TBQJbfPU4BQkSzAM2AAoJEAKTlUAu
+/M1TVTIQALA6ocNc2fXz1loLykMxlfnX/XxiyNDOUPDZkrZtscqqWPYaWvJK3OiD
+32bdVEbftnAiFvJYkinrCXLEmwwf5wyOxKFmCHwwKhH0UYt60yF4WwlOVNstGSAy
+RkPMEEmVfMXS9K1nzKv/9A5YsqMQob7sN5CMN66Vrm0RKSvOF/NhhM9v8fC0QSU2
+GZNO0tnRfaS4wMnFr5L4FuDST+14F5sJT7ZEJz7HfbxXKLvvWbvqLlCYHJOdz56s
+X/eKde8eT9/LSzcmgsd7rGS2np5901kubww5jllUl1CFnk3Mdg9FTJl5u9Epuhnn
+823Jpdy1ZNbyLqZ266Z/q2HepDA7P/GqIXgWdHjwG2y1YAC4JIkA4RBbesQwqAXs
+6cX5gqRFRl5iDGEP5zclS0y5mWi/J8bLYxMYfqxs9EZtHd9DumWISi87804TEzYa
+WDijMlW7PR8QRW0vdmtYOhJZOlTnomLQx2v27iqpVXRh12J1aYVBFC+IvG1vhCf9
+FL3LzAHHEGlIoDaKJMd+Wg/Lm/f1PqqQx3lWIh9hhKh5Qx6hcuJH669JOWuEdxfo
+1so50aItG+tdDKqXflmOi7grrUURchYYKteaW2fC2SQgzDClprALI7aj9s/lDrEN
+CgLH6twOqdSFWqB/4ASDMsNeLeKX3WOYKYYMlE01cj3T1m6dpRUO
+=gIM9
+-----END PGP PRIVATE KEY BLOCK-----
+`
+
+const keyPassphrase = "abcdef0123456789"
diff --git a/worktree_linux.go b/worktree_linux.go
index a33cd2f..891cb1c 100644
--- a/worktree_linux.go
+++ b/worktree_linux.go
@@ -20,3 +20,7 @@ func init() {
}
}
}
+
+func isSymlinkWindowsNonAdmin(err error) bool {
+ return false
+}
diff --git a/worktree_status.go b/worktree_status.go
index 36f48eb..0e113d0 100644
--- a/worktree_status.go
+++ b/worktree_status.go
@@ -5,7 +5,10 @@ import (
"errors"
"io"
"os"
+ "path"
+ "path/filepath"
+ "gopkg.in/src-d/go-billy.v4/util"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/filemode"
"gopkg.in/src-d/go-git.v4/plumbing/format/gitignore"
@@ -18,9 +21,14 @@ import (
"gopkg.in/src-d/go-git.v4/utils/merkletrie/noder"
)
-// ErrDestinationExists in an Move operation means that the target exists on
-// the worktree.
-var ErrDestinationExists = errors.New("destination exists")
+var (
+ // ErrDestinationExists in a Move operation means that the target already
+ // exists in the worktree.
+ ErrDestinationExists = errors.New("destination exists")
+ // ErrGlobNoMatches is returned by AddGlob when the glob pattern does not
+ // match any files in the worktree.
+ ErrGlobNoMatches = errors.New("glob pattern did not match any files")
+)
// Status returns the working tree status.
func (w *Worktree) Status() (Status, error) {
@@ -137,6 +145,9 @@ func (w *Worktree) excludeIgnoredChanges(changes merkletrie.Changes) merkletrie.
if err != nil || len(patterns) == 0 {
return changes
}
+
+ patterns = append(patterns, w.Excludes...)
+
m := gitignore.NewMatcher(patterns)
var res merkletrie.Changes
@@ -243,30 +254,151 @@ func diffTreeIsEquals(a, b noder.Hasher) bool {
}
// Add adds the file contents of a file in the worktree to the index. if the
-// file is already staged in the index no error is returned.
+// file is already staged in the index no error is returned. If a file deleted
+// from the worktree is given, it is removed from the index. If a directory is
+// given, its files and all of its subdirectories are added to the index
+// recursively. If any of the files is already staged in the index no error is
+// returned. When path is a file, the blob.Hash is returned.
func (w *Worktree) Add(path string) (plumbing.Hash, error) {
+ // TODO(mcuadros): remove plumbing.Hash from signature at v5.
s, err := w.Status()
if err != nil {
return plumbing.ZeroHash, err
}
- h, err := w.copyFileToStorage(path)
+ idx, err := w.r.Storer.Index()
+ if err != nil {
+ return plumbing.ZeroHash, err
+ }
+
+ var h plumbing.Hash
+ var added bool
+
+ fi, err := w.Filesystem.Lstat(path)
+ if err != nil || !fi.IsDir() {
+ added, h, err = w.doAddFile(idx, s, path)
+ } else {
+ added, err = w.doAddDirectory(idx, s, path)
+ }
+
if err != nil {
- if os.IsNotExist(err) {
- h, err = w.deleteFromIndex(path)
- }
return h, err
}
- if s.File(path).Worktree == Unmodified {
+ if !added {
return h, nil
}
- if err := w.addOrUpdateFileToIndex(path, h); err != nil {
- return h, err
+ return h, w.r.Storer.SetIndex(idx)
+}
+
+func (w *Worktree) doAddDirectory(idx *index.Index, s Status, directory string) (added bool, err error) {
+ files, err := w.Filesystem.ReadDir(directory)
+ if err != nil {
+ return false, err
+ }
+
+ for _, file := range files {
+ name := path.Join(directory, file.Name())
+
+ var a bool
+ if file.IsDir() {
+ if file.Name() == GitDirName {
+ // ignore special git directory
+ continue
+ }
+ a, err = w.doAddDirectory(idx, s, name)
+ } else {
+ a, _, err = w.doAddFile(idx, s, name)
+ }
+
+ if err != nil {
+ return
+ }
+
+ if !added && a {
+ added = true
+ }
+ }
+
+ return
+}
+
+// AddGlob adds all paths matching the pattern to the index. If the pattern
+// matches a directory path, all directory contents are added to the index
+// recursively. No error is returned if all matching paths are already staged
+// in the index.
+func (w *Worktree) AddGlob(pattern string) error {
+ files, err := util.Glob(w.Filesystem, pattern)
+ if err != nil {
+ return err
+ }
+
+ if len(files) == 0 {
+ return ErrGlobNoMatches
+ }
+
+ s, err := w.Status()
+ if err != nil {
+ return err
+ }
+
+ idx, err := w.r.Storer.Index()
+ if err != nil {
+ return err
+ }
+
+ var saveIndex bool
+ for _, file := range files {
+ fi, err := w.Filesystem.Lstat(file)
+ if err != nil {
+ return err
+ }
+
+ var added bool
+ if fi.IsDir() {
+ added, err = w.doAddDirectory(idx, s, file)
+ } else {
+ added, _, err = w.doAddFile(idx, s, file)
+ }
+
+ if err != nil {
+ return err
+ }
+
+ if !saveIndex && added {
+ saveIndex = true
+ }
+ }
+
+ if saveIndex {
+ return w.r.Storer.SetIndex(idx)
+ }
+
+ return nil
+}
+
+// doAddFile creates a new blob from path and updates the index; added is true
+// when the staged file differs from the version already in the index.
+func (w *Worktree) doAddFile(idx *index.Index, s Status, path string) (added bool, h plumbing.Hash, err error) {
+ if s.File(path).Worktree == Unmodified {
+ return false, h, nil
}
- return h, err
+ h, err = w.copyFileToStorage(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ added = true
+ h, err = w.deleteFromIndex(idx, path)
+ }
+
+ return
+ }
+
+ if err := w.addOrUpdateFileToIndex(idx, path, h); err != nil {
+ return false, h, err
+ }
+
+ return true, h, err
}
func (w *Worktree) copyFileToStorage(path string) (hash plumbing.Hash, err error) {
@@ -324,35 +456,21 @@ func (w *Worktree) fillEncodedObjectFromSymlink(dst io.Writer, path string, fi o
return err
}
-func (w *Worktree) addOrUpdateFileToIndex(filename string, h plumbing.Hash) error {
- idx, err := w.r.Storer.Index()
- if err != nil {
- return err
- }
-
+func (w *Worktree) addOrUpdateFileToIndex(idx *index.Index, filename string, h plumbing.Hash) error {
e, err := idx.Entry(filename)
if err != nil && err != index.ErrEntryNotFound {
return err
}
if err == index.ErrEntryNotFound {
- if err := w.doAddFileToIndex(idx, filename, h); err != nil {
- return err
- }
- } else {
- if err := w.doUpdateFileToIndex(e, filename, h); err != nil {
- return err
- }
+ return w.doAddFileToIndex(idx, filename, h)
}
- return w.r.Storer.SetIndex(idx)
+ return w.doUpdateFileToIndex(e, filename, h)
}
func (w *Worktree) doAddFileToIndex(idx *index.Index, filename string, h plumbing.Hash) error {
- e := &index.Entry{Name: filename}
- idx.Entries = append(idx.Entries, e)
-
- return w.doUpdateFileToIndex(e, filename, h)
+ return w.doUpdateFileToIndex(idx.Add(filename), filename, h)
}
func (w *Worktree) doUpdateFileToIndex(e *index.Entry, filename string, h plumbing.Hash) error {
@@ -378,26 +496,88 @@ func (w *Worktree) doUpdateFileToIndex(e *index.Entry, filename string, h plumbi
// Remove removes files from the working tree and from the index.
func (w *Worktree) Remove(path string) (plumbing.Hash, error) {
- hash, err := w.deleteFromIndex(path)
+ // TODO(mcuadros): remove plumbing.Hash from signature at v5.
+ idx, err := w.r.Storer.Index()
if err != nil {
return plumbing.ZeroHash, err
}
- return hash, w.deleteFromFilesystem(path)
+ var h plumbing.Hash
+
+ fi, err := w.Filesystem.Lstat(path)
+ if err != nil || !fi.IsDir() {
+ h, err = w.doRemoveFile(idx, path)
+ } else {
+ _, err = w.doRemoveDirectory(idx, path)
+ }
+ if err != nil {
+ return h, err
+ }
+
+ return h, w.r.Storer.SetIndex(idx)
}
-func (w *Worktree) deleteFromIndex(path string) (plumbing.Hash, error) {
- idx, err := w.r.Storer.Index()
+func (w *Worktree) doRemoveDirectory(idx *index.Index, directory string) (removed bool, err error) {
+ files, err := w.Filesystem.ReadDir(directory)
+ if err != nil {
+ return false, err
+ }
+
+ for _, file := range files {
+ name := path.Join(directory, file.Name())
+
+ var r bool
+ if file.IsDir() {
+ r, err = w.doRemoveDirectory(idx, name)
+ } else {
+ _, err = w.doRemoveFile(idx, name)
+ if err == index.ErrEntryNotFound {
+ err = nil
+ }
+ }
+
+ if err != nil {
+ return
+ }
+
+ if !removed && r {
+ removed = true
+ }
+ }
+
+ err = w.removeEmptyDirectory(directory)
+ return
+}
+
+func (w *Worktree) removeEmptyDirectory(path string) error {
+ files, err := w.Filesystem.ReadDir(path)
+ if err != nil {
+ return err
+ }
+
+ if len(files) != 0 {
+ return nil
+ }
+
+ return w.Filesystem.Remove(path)
+}
+
+func (w *Worktree) doRemoveFile(idx *index.Index, path string) (plumbing.Hash, error) {
+ hash, err := w.deleteFromIndex(idx, path)
if err != nil {
return plumbing.ZeroHash, err
}
+ return hash, w.deleteFromFilesystem(path)
+}
+
+func (w *Worktree) deleteFromIndex(idx *index.Index, path string) (plumbing.Hash, error) {
e, err := idx.Remove(path)
if err != nil {
return plumbing.ZeroHash, err
}
- return e.Hash, w.r.Storer.SetIndex(idx)
+ return e.Hash, nil
}
func (w *Worktree) deleteFromFilesystem(path string) error {
@@ -409,9 +589,43 @@ func (w *Worktree) deleteFromFilesystem(path string) error {
return err
}
+// RemoveGlob removes all paths matching the pattern from the index. If the
+// pattern matches a directory path, all directory contents are removed from
+// the index recursively.
+func (w *Worktree) RemoveGlob(pattern string) error {
+ idx, err := w.r.Storer.Index()
+ if err != nil {
+ return err
+ }
+
+ entries, err := idx.Glob(pattern)
+ if err != nil {
+ return err
+ }
+
+ for _, e := range entries {
+ file := filepath.FromSlash(e.Name)
+ if _, err := w.Filesystem.Lstat(file); err != nil && !os.IsNotExist(err) {
+ return err
+ }
+
+ if _, err := w.doRemoveFile(idx, file); err != nil {
+ return err
+ }
+
+ dir, _ := filepath.Split(file)
+ if err := w.removeEmptyDirectory(dir); err != nil {
+ return err
+ }
+ }
+
+ return w.r.Storer.SetIndex(idx)
+}
+
// Move moves or renames a file in the worktree and the index; directories are
// not supported.
func (w *Worktree) Move(from, to string) (plumbing.Hash, error) {
+ // TODO(mcuadros): support directories and/or implement support for glob
if _, err := w.Filesystem.Lstat(from); err != nil {
return plumbing.ZeroHash, err
}
@@ -420,7 +634,12 @@ func (w *Worktree) Move(from, to string) (plumbing.Hash, error) {
return plumbing.ZeroHash, ErrDestinationExists
}
- hash, err := w.deleteFromIndex(from)
+ idx, err := w.r.Storer.Index()
+ if err != nil {
+ return plumbing.ZeroHash, err
+ }
+
+ hash, err := w.deleteFromIndex(idx, from)
if err != nil {
return plumbing.ZeroHash, err
}
@@ -429,5 +648,9 @@ func (w *Worktree) Move(from, to string) (plumbing.Hash, error) {
return hash, err
}
- return hash, w.addOrUpdateFileToIndex(to, hash)
+ if err := w.addOrUpdateFileToIndex(idx, to, hash); err != nil {
+ return hash, err
+ }
+
+ return hash, w.r.Storer.SetIndex(idx)
}
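
A sketch of the expanded staging API; the repository path and file names are illustrative assumptions:

    package main

    import (
            "log"

            git "gopkg.in/src-d/go-git.v4"
    )

    func main() {
            repo, err := git.PlainOpen("/tmp/repo") // hypothetical path
            if err != nil {
                    log.Fatal(err)
            }
            w, err := repo.Worktree()
            if err != nil {
                    log.Fatal(err)
            }

            // Adding a directory now stages its contents recursively; the index
            // is written back once at the end.
            if _, err := w.Add("vendor"); err != nil { // hypothetical directory
                    log.Fatal(err)
            }

            // AddGlob fails with ErrGlobNoMatches when nothing matches.
            if err := w.AddGlob("docs/*"); err != nil && err != git.ErrGlobNoMatches {
                    log.Fatal(err)
            }

            // RemoveGlob unstages and deletes matches, pruning emptied directories.
            if err := w.RemoveGlob("tmp/*"); err != nil {
                    log.Fatal(err)
            }
    }
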
diff --git a/worktree_test.go b/worktree_test.go
index e51e89a..c714011 100644
--- a/worktree_test.go
+++ b/worktree_test.go
@@ -3,15 +3,19 @@ package git
import (
"bytes"
"context"
+ "errors"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"runtime"
+ "testing"
+ "time"
"gopkg.in/src-d/go-git.v4/config"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/filemode"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/gitignore"
"gopkg.in/src-d/go-git.v4/plumbing/format/index"
"gopkg.in/src-d/go-git.v4/plumbing/object"
"gopkg.in/src-d/go-git.v4/storage/memory"
@@ -196,6 +200,10 @@ func (s *WorktreeSuite) TestPullProgress(c *C) {
}
func (s *WorktreeSuite) TestPullProgressWithRecursion(c *C) {
+ if testing.Short() {
+ c.Skip("skipping test in short mode.")
+ }
+
path := fixtures.ByTag("submodule").One().Worktree().Root()
dir, err := ioutil.TempDir("", "plain-clone-submodule")
@@ -613,6 +621,10 @@ func (s *WorktreeSuite) TestCheckoutTag(c *C) {
}
func (s *WorktreeSuite) TestCheckoutBisect(c *C) {
+ if testing.Short() {
+ c.Skip("skipping test in short mode.")
+ }
+
s.testCheckoutBisect(c, "https://github.com/src-d/go-git.git")
}
@@ -1061,6 +1073,35 @@ func (s *WorktreeSuite) TestAddUntracked(c *C) {
c.Assert(obj.Size(), Equals, int64(3))
}
+func (s *WorktreeSuite) TestIgnored(c *C) {
+ fs := memfs.New()
+ w := &Worktree{
+ r: s.Repository,
+ Filesystem: fs,
+ }
+
+ w.Excludes = make([]gitignore.Pattern, 0)
+ w.Excludes = append(w.Excludes, gitignore.ParsePattern("foo", nil))
+
+ err := w.Checkout(&CheckoutOptions{Force: true})
+ c.Assert(err, IsNil)
+
+ idx, err := w.r.Storer.Index()
+ c.Assert(err, IsNil)
+ c.Assert(idx.Entries, HasLen, 9)
+
+ err = util.WriteFile(w.Filesystem, "foo", []byte("FOO"), 0755)
+ c.Assert(err, IsNil)
+
+ status, err := w.Status()
+ c.Assert(err, IsNil)
+ c.Assert(status, HasLen, 0)
+
+ file := status.File("foo")
+ c.Assert(file.Staging, Equals, Untracked)
+ c.Assert(file.Worktree, Equals, Untracked)
+}
+
func (s *WorktreeSuite) TestAddModified(c *C) {
fs := memfs.New()
w := &Worktree{
@@ -1115,6 +1156,40 @@ func (s *WorktreeSuite) TestAddUnmodified(c *C) {
c.Assert(err, IsNil)
}
+func (s *WorktreeSuite) TestAddRemoved(c *C) {
+ fs := memfs.New()
+ w := &Worktree{
+ r: s.Repository,
+ Filesystem: fs,
+ }
+
+ err := w.Checkout(&CheckoutOptions{Force: true})
+ c.Assert(err, IsNil)
+
+ idx, err := w.r.Storer.Index()
+ c.Assert(err, IsNil)
+ c.Assert(idx.Entries, HasLen, 9)
+
+ err = w.Filesystem.Remove("LICENSE")
+ c.Assert(err, IsNil)
+
+ hash, err := w.Add("LICENSE")
+ c.Assert(err, IsNil)
+ c.Assert(hash.String(), Equals, "c192bd6a24ea1ab01d78686e417c8bdc7c3d197f")
+
+ e, err := idx.Entry("LICENSE")
+ c.Assert(err, IsNil)
+ c.Assert(e.Hash, Equals, hash)
+ c.Assert(e.Mode, Equals, filemode.Regular)
+
+ status, err := w.Status()
+ c.Assert(err, IsNil)
+ c.Assert(status, HasLen, 1)
+
+ file := status.File("LICENSE")
+ c.Assert(file.Staging, Equals, Deleted)
+}
+
func (s *WorktreeSuite) TestAddSymlink(c *C) {
dir, err := ioutil.TempDir("", "checkout")
c.Assert(err, IsNil)
@@ -1141,7 +1216,124 @@ func (s *WorktreeSuite) TestAddSymlink(c *C) {
c.Assert(err, IsNil)
c.Assert(obj, NotNil)
c.Assert(obj.Size(), Equals, int64(3))
+}
+
+func (s *WorktreeSuite) TestAddDirectory(c *C) {
+ fs := memfs.New()
+ w := &Worktree{
+ r: s.Repository,
+ Filesystem: fs,
+ }
+
+ err := w.Checkout(&CheckoutOptions{Force: true})
+ c.Assert(err, IsNil)
+
+ idx, err := w.r.Storer.Index()
+ c.Assert(err, IsNil)
+ c.Assert(idx.Entries, HasLen, 9)
+
+ err = util.WriteFile(w.Filesystem, "qux/foo", []byte("FOO"), 0755)
+ c.Assert(err, IsNil)
+ err = util.WriteFile(w.Filesystem, "qux/baz/bar", []byte("BAR"), 0755)
+ c.Assert(err, IsNil)
+
+ h, err := w.Add("qux")
+ c.Assert(err, IsNil)
+ c.Assert(h.IsZero(), Equals, true)
+
+ idx, err = w.r.Storer.Index()
+ c.Assert(err, IsNil)
+ c.Assert(idx.Entries, HasLen, 11)
+
+ e, err := idx.Entry("qux/foo")
+ c.Assert(err, IsNil)
+ c.Assert(e.Mode, Equals, filemode.Executable)
+
+ e, err = idx.Entry("qux/baz/bar")
+ c.Assert(err, IsNil)
+ c.Assert(e.Mode, Equals, filemode.Executable)
+
+ status, err := w.Status()
+ c.Assert(err, IsNil)
+ c.Assert(status, HasLen, 2)
+
+ file := status.File("qux/foo")
+ c.Assert(file.Staging, Equals, Added)
+ c.Assert(file.Worktree, Equals, Unmodified)
+
+ file = status.File("qux/baz/bar")
+ c.Assert(file.Staging, Equals, Added)
+ c.Assert(file.Worktree, Equals, Unmodified)
+}
+
+func (s *WorktreeSuite) TestAddDirectoryErrorNotFound(c *C) {
+ r, _ := Init(memory.NewStorage(), memfs.New())
+ w, _ := r.Worktree()
+
+ h, err := w.Add("foo")
+ c.Assert(err, NotNil)
+ c.Assert(h.IsZero(), Equals, true)
+}
+
+func (s *WorktreeSuite) TestAddGlob(c *C) {
+ fs := memfs.New()
+ w := &Worktree{
+ r: s.Repository,
+ Filesystem: fs,
+ }
+
+ err := w.Checkout(&CheckoutOptions{Force: true})
+ c.Assert(err, IsNil)
+
+ idx, err := w.r.Storer.Index()
+ c.Assert(err, IsNil)
+ c.Assert(idx.Entries, HasLen, 9)
+
+ err = util.WriteFile(w.Filesystem, "qux/qux", []byte("QUX"), 0755)
+ c.Assert(err, IsNil)
+ err = util.WriteFile(w.Filesystem, "qux/baz", []byte("BAZ"), 0755)
+ c.Assert(err, IsNil)
+ err = util.WriteFile(w.Filesystem, "qux/bar/baz", []byte("BAZ"), 0755)
+ c.Assert(err, IsNil)
+
+ err = w.AddGlob(w.Filesystem.Join("qux", "b*"))
+ c.Assert(err, IsNil)
+
+ idx, err = w.r.Storer.Index()
+ c.Assert(err, IsNil)
+ c.Assert(idx.Entries, HasLen, 11)
+
+ e, err := idx.Entry("qux/baz")
+ c.Assert(err, IsNil)
+ c.Assert(e.Mode, Equals, filemode.Executable)
+ e, err = idx.Entry("qux/bar/baz")
+ c.Assert(err, IsNil)
+ c.Assert(e.Mode, Equals, filemode.Executable)
+
+ status, err := w.Status()
+ c.Assert(err, IsNil)
+ c.Assert(status, HasLen, 3)
+
+ file := status.File("qux/qux")
+ c.Assert(file.Staging, Equals, Untracked)
+ c.Assert(file.Worktree, Equals, Untracked)
+
+ file = status.File("qux/baz")
+ c.Assert(file.Staging, Equals, Added)
+ c.Assert(file.Worktree, Equals, Unmodified)
+
+ file = status.File("qux/bar/baz")
+ c.Assert(file.Staging, Equals, Added)
+ c.Assert(file.Worktree, Equals, Unmodified)
+}
+
+func (s *WorktreeSuite) TestAddGlobErrorNoMatches(c *C) {
+ r, _ := Init(memory.NewStorage(), memfs.New())
+ w, _ := r.Worktree()
+
+ err := w.AddGlob("foo")
+ c.Assert(err, Equals, ErrGlobNoMatches)
}
func (s *WorktreeSuite) TestRemove(c *C) {
@@ -1179,6 +1371,58 @@ func (s *WorktreeSuite) TestRemoveNotExistentEntry(c *C) {
c.Assert(err, NotNil)
}
+func (s *WorktreeSuite) TestRemoveDirectory(c *C) {
+ fs := memfs.New()
+ w := &Worktree{
+ r: s.Repository,
+ Filesystem: fs,
+ }
+
+ err := w.Checkout(&CheckoutOptions{Force: true})
+ c.Assert(err, IsNil)
+
+ hash, err := w.Remove("json")
+ c.Assert(hash.IsZero(), Equals, true)
+ c.Assert(err, IsNil)
+
+ status, err := w.Status()
+ c.Assert(err, IsNil)
+ c.Assert(status, HasLen, 2)
+ c.Assert(status.File("json/long.json").Staging, Equals, Deleted)
+ c.Assert(status.File("json/short.json").Staging, Equals, Deleted)
+
+ _, err = w.Filesystem.Stat("json")
+ c.Assert(os.IsNotExist(err), Equals, true)
+}
+
+func (s *WorktreeSuite) TestRemoveDirectoryUntracked(c *C) {
+ fs := memfs.New()
+ w := &Worktree{
+ r: s.Repository,
+ Filesystem: fs,
+ }
+
+ err := w.Checkout(&CheckoutOptions{Force: true})
+ c.Assert(err, IsNil)
+
+ err = util.WriteFile(w.Filesystem, "json/foo", []byte("FOO"), 0755)
+ c.Assert(err, IsNil)
+
+ hash, err := w.Remove("json")
+ c.Assert(hash.IsZero(), Equals, true)
+ c.Assert(err, IsNil)
+
+ status, err := w.Status()
+ c.Assert(err, IsNil)
+ c.Assert(status, HasLen, 3)
+ c.Assert(status.File("json/long.json").Staging, Equals, Deleted)
+ c.Assert(status.File("json/short.json").Staging, Equals, Deleted)
+ c.Assert(status.File("json/foo").Staging, Equals, Untracked)
+
+ _, err = w.Filesystem.Stat("json")
+ c.Assert(err, IsNil)
+}
+
func (s *WorktreeSuite) TestRemoveDeletedFromWorktree(c *C) {
fs := memfs.New()
w := &Worktree{
@@ -1202,6 +1446,74 @@ func (s *WorktreeSuite) TestRemoveDeletedFromWorktree(c *C) {
c.Assert(status.File("LICENSE").Staging, Equals, Deleted)
}
+func (s *WorktreeSuite) TestRemoveGlob(c *C) {
+ fs := memfs.New()
+ w := &Worktree{
+ r: s.Repository,
+ Filesystem: fs,
+ }
+
+ err := w.Checkout(&CheckoutOptions{Force: true})
+ c.Assert(err, IsNil)
+
+ err = w.RemoveGlob(w.Filesystem.Join("json", "l*"))
+ c.Assert(err, IsNil)
+
+ status, err := w.Status()
+ c.Assert(err, IsNil)
+ c.Assert(status, HasLen, 1)
+ c.Assert(status.File("json/long.json").Staging, Equals, Deleted)
+}
+
+func (s *WorktreeSuite) TestRemoveGlobDirectory(c *C) {
+ fs := memfs.New()
+ w := &Worktree{
+ r: s.Repository,
+ Filesystem: fs,
+ }
+
+ err := w.Checkout(&CheckoutOptions{Force: true})
+ c.Assert(err, IsNil)
+
+ err = w.RemoveGlob("js*")
+ c.Assert(err, IsNil)
+
+ status, err := w.Status()
+ c.Assert(err, IsNil)
+ c.Assert(status, HasLen, 2)
+ c.Assert(status.File("json/short.json").Staging, Equals, Deleted)
+ c.Assert(status.File("json/long.json").Staging, Equals, Deleted)
+
+ _, err = w.Filesystem.Stat("json")
+ c.Assert(os.IsNotExist(err), Equals, true)
+}
+
+func (s *WorktreeSuite) TestRemoveGlobDirectoryDeleted(c *C) {
+ fs := memfs.New()
+ w := &Worktree{
+ r: s.Repository,
+ Filesystem: fs,
+ }
+
+ err := w.Checkout(&CheckoutOptions{Force: true})
+ c.Assert(err, IsNil)
+
+ err = fs.Remove("json/short.json")
+ c.Assert(err, IsNil)
+
+ err = util.WriteFile(w.Filesystem, "json/foo", []byte("FOO"), 0755)
+ c.Assert(err, IsNil)
+
+ err = w.RemoveGlob("js*")
+ c.Assert(err, IsNil)
+
+ status, err := w.Status()
+ c.Assert(err, IsNil)
+ c.Assert(status, HasLen, 3)
+ c.Assert(status.File("json/short.json").Staging, Equals, Deleted)
+ c.Assert(status.File("json/long.json").Staging, Equals, Deleted)
+}
+
func (s *WorktreeSuite) TestMove(c *C) {
fs := memfs.New()
w := &Worktree{
@@ -1279,6 +1591,10 @@ func (s *WorktreeSuite) TestClean(c *C) {
c.Assert(len(status), Equals, 1)
+ fi, err := fs.Lstat("pkgA")
+ c.Assert(err, IsNil)
+ c.Assert(fi.IsDir(), Equals, true)
+
// Clean with Dir: true.
err = wt.Clean(&CleanOptions{Dir: true})
c.Assert(err, IsNil)
@@ -1287,6 +1603,11 @@ func (s *WorktreeSuite) TestClean(c *C) {
c.Assert(err, IsNil)
c.Assert(len(status), Equals, 0)
+
+ // The now-empty directory should be deleted as well.
+ _, err = fs.Lstat("pkgA")
+ c.Assert(err, ErrorMatches, ".*(no such file or directory|file does not exist).*")
}
func (s *WorktreeSuite) TestAlternatesRepo(c *C) {
@@ -1553,3 +1874,39 @@ func (s *WorktreeSuite) TestGrep(c *C) {
}
}
}
+
+func (s *WorktreeSuite) TestAddAndCommit(c *C) {
+ dir, err := ioutil.TempDir("", "plain-repo")
+ c.Assert(err, IsNil)
+ defer os.RemoveAll(dir)
+
+ repo, err := PlainInit(dir, false)
+ c.Assert(err, IsNil)
+
+ w, err := repo.Worktree()
+ c.Assert(err, IsNil)
+
+ _, err = w.Add(".")
+ c.Assert(err, IsNil)
+
+ _, err = w.Commit("Test Add And Commit", &CommitOptions{Author: &object.Signature{
+ Name: "foo",
+ Email: "foo@foo.foo",
+ When: time.Now(),
+ }})
+ c.Assert(err, IsNil)
+
+ iter, err := w.r.Log(&LogOptions{})
+ c.Assert(err, IsNil)
+ err = iter.ForEach(func(c *object.Commit) error {
+ files, err := c.Files()
+ if err != nil {
+ return err
+ }
+
+ err = files.ForEach(func(f *object.File) error {
+ return errors.New("Expected no files, got at least 1")
+ })
+ return err
+ })
+ c.Assert(err, IsNil)
+}
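
Following the TestIgnored pattern above, a sketch of the new Worktree.Excludes field; the pattern and path are illustrative assumptions:

    package main

    import (
            "log"

            git "gopkg.in/src-d/go-git.v4"
            "gopkg.in/src-d/go-git.v4/plumbing/format/gitignore"
    )

    func main() {
            repo, err := git.PlainOpen("/tmp/repo") // hypothetical path
            if err != nil {
                    log.Fatal(err)
            }
            w, err := repo.Worktree()
            if err != nil {
                    log.Fatal(err)
            }

            // Patterns appended here are applied together with the repository's
            // .gitignore patterns, so matching untracked files drop out of Status.
            w.Excludes = append(w.Excludes, gitignore.ParsePattern("*.tmp", nil))

            status, err := w.Status()
            if err != nil {
                    log.Fatal(err)
            }
            log.Println(status)
    }
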
diff --git a/worktree_windows.go b/worktree_windows.go
index d59448e..1bef6f7 100644
--- a/worktree_windows.go
+++ b/worktree_windows.go
@@ -3,6 +3,7 @@
package git
import (
+ "os"
"syscall"
"time"
@@ -18,3 +19,17 @@ func init() {
}
}
}
+
+func isSymlinkWindowsNonAdmin(err error) bool {
+ const ERROR_PRIVILEGE_NOT_HELD syscall.Errno = 1314
+
+ if err != nil {
+ if errLink, ok := err.(*os.LinkError); ok {
+ if errNo, ok := errLink.Err.(syscall.Errno); ok {
+ return errNo == ERROR_PRIVILEGE_NOT_HELD
+ }
+ }
+ }
+
+ return false
+}