Diffstat (limited to 'plumbing')
-rw-r--r--  plumbing/client/common.go  48
-rw-r--r--  plumbing/client/common/common.go  219
-rw-r--r--  plumbing/client/common/common_test.go  126
-rw-r--r--  plumbing/client/common_test.go  85
-rw-r--r--  plumbing/client/http/common.go  77
-rw-r--r--  plumbing/client/http/common_test.go  52
-rw-r--r--  plumbing/client/http/git_upload_pack.go  186
-rw-r--r--  plumbing/client/http/git_upload_pack_test.go  135
-rw-r--r--  plumbing/client/ssh/auth_method.go  159
-rw-r--r--  plumbing/client/ssh/auth_method_test.go  94
-rw-r--r--  plumbing/client/ssh/git_upload_pack.go  315
-rw-r--r--  plumbing/client/ssh/git_upload_pack_test.go  144
-rw-r--r--  plumbing/errors.go  35
-rw-r--r--  plumbing/format/config/common.go  97
-rw-r--r--  plumbing/format/config/common_test.go  86
-rw-r--r--  plumbing/format/config/decoder.go  37
-rw-r--r--  plumbing/format/config/decoder_test.go  90
-rw-r--r--  plumbing/format/config/doc.go  199
-rw-r--r--  plumbing/format/config/encoder.go  75
-rw-r--r--  plumbing/format/config/encoder_test.go  21
-rw-r--r--  plumbing/format/config/fixtures_test.go  90
-rw-r--r--  plumbing/format/config/option.go  83
-rw-r--r--  plumbing/format/config/option_test.go  33
-rw-r--r--  plumbing/format/config/section.go  87
-rw-r--r--  plumbing/format/config/section_test.go  71
-rw-r--r--  plumbing/format/idxfile/decoder.go  148
-rw-r--r--  plumbing/format/idxfile/decoder_test.go  69
-rw-r--r--  plumbing/format/idxfile/doc.go  132
-rw-r--r--  plumbing/format/idxfile/encoder.go  131
-rw-r--r--  plumbing/format/idxfile/encoder_test.go  48
-rw-r--r--  plumbing/format/idxfile/idxfile.go  62
-rw-r--r--  plumbing/format/index/decoder.go  446
-rw-r--r--  plumbing/format/index/decoder_test.go  196
-rw-r--r--  plumbing/format/index/doc.go  302
-rw-r--r--  plumbing/format/index/encoder.go  141
-rw-r--r--  plumbing/format/index/encoder_test.go  78
-rw-r--r--  plumbing/format/index/index.go  108
-rw-r--r--  plumbing/format/objfile/common_test.go  69
-rw-r--r--  plumbing/format/objfile/reader.go  118
-rw-r--r--  plumbing/format/objfile/reader_test.go  67
-rw-r--r--  plumbing/format/objfile/writer.go  109
-rw-r--r--  plumbing/format/objfile/writer_test.go  80
-rw-r--r--  plumbing/format/packfile/decoder.go  307
-rw-r--r--  plumbing/format/packfile/decoder_test.go  182
-rw-r--r--  plumbing/format/packfile/delta.go  181
-rw-r--r--  plumbing/format/packfile/doc.go  168
-rw-r--r--  plumbing/format/packfile/error.go  30
-rw-r--r--  plumbing/format/packfile/scanner.go  418
-rw-r--r--  plumbing/format/packfile/scanner_test.go  189
-rw-r--r--  plumbing/format/packp/advrefs/advrefs.go  58
-rw-r--r--  plumbing/format/packp/advrefs/advrefs_test.go  315
-rw-r--r--  plumbing/format/packp/advrefs/decoder.go  288
-rw-r--r--  plumbing/format/packp/advrefs/decoder_test.go  500
-rw-r--r--  plumbing/format/packp/advrefs/encoder.go  155
-rw-r--r--  plumbing/format/packp/advrefs/encoder_test.go  249
-rw-r--r--  plumbing/format/packp/capabilities.go  136
-rw-r--r--  plumbing/format/packp/capabilities_test.go  46
-rw-r--r--  plumbing/format/packp/doc.go  724
-rw-r--r--  plumbing/format/packp/pktline/encoder.go  123
-rw-r--r--  plumbing/format/packp/pktline/encoder_test.go  249
-rw-r--r--  plumbing/format/packp/pktline/scanner.go  133
-rw-r--r--  plumbing/format/packp/pktline/scanner_test.go  225
-rw-r--r--  plumbing/format/packp/ulreq/decoder.go  287
-rw-r--r--  plumbing/format/packp/ulreq/decoder_test.go  541
-rw-r--r--  plumbing/format/packp/ulreq/encoder.go  140
-rw-r--r--  plumbing/format/packp/ulreq/encoder_test.go  268
-rw-r--r--  plumbing/format/packp/ulreq/ulreq.go  56
-rw-r--r--  plumbing/format/packp/ulreq/ulreq_test.go  91
-rw-r--r--  plumbing/hash.go  58
-rw-r--r--  plumbing/hash_test.go  42
-rw-r--r--  plumbing/memory.go  59
-rw-r--r--  plumbing/memory_test.go  71
-rw-r--r--  plumbing/object.go  94
-rw-r--r--  plumbing/object_test.go  46
-rw-r--r--  plumbing/reference.go  146
-rw-r--r--  plumbing/reference_test.go  61
-rw-r--r--  plumbing/storer/index.go  9
-rw-r--r--  plumbing/storer/object.go  241
-rw-r--r--  plumbing/storer/object_test.go  150
-rw-r--r--  plumbing/storer/reference.go  109
-rw-r--r--  plumbing/storer/reference_test.go  67
81 files changed, 12160 insertions, 0 deletions
diff --git a/plumbing/client/common.go b/plumbing/client/common.go
new file mode 100644
index 0000000..6a99339
--- /dev/null
+++ b/plumbing/client/common.go
@@ -0,0 +1,48 @@
+// Package clients includes the implementations of the different transport protocols.
+//
+// go-git needs the packfile and the refs of the repo. The
+// `NewGitUploadPackService` function returns an object that allows you to
+// download them.
+//
+// go-git supports HTTP and SSH (see `Protocols`) for downloading the packfile
+// and the refs, but you can also install your own protocols (see
+// `InstallProtocol` below).
+//
+// Each protocol has its own implementation of
+// `NewGitUploadPackService`, but you should generally not use them
+// directly; use this package's `NewGitUploadPackService` instead.
+package clients
+
+import (
+ "fmt"
+
+ "gopkg.in/src-d/go-git.v4/plumbing/client/common"
+ "gopkg.in/src-d/go-git.v4/plumbing/client/http"
+ "gopkg.in/src-d/go-git.v4/plumbing/client/ssh"
+)
+
+type GitUploadPackServiceFactory func(common.Endpoint) common.GitUploadPackService
+
+// Protocols are the protocols supported by default.
+var Protocols = map[string]GitUploadPackServiceFactory{
+ "http": http.NewGitUploadPackService,
+ "https": http.NewGitUploadPackService,
+ "ssh": ssh.NewGitUploadPackService,
+}
+
+// InstallProtocol adds a new protocol or modifies an existing one.
+func InstallProtocol(scheme string, f GitUploadPackServiceFactory) {
+ Protocols[scheme] = f
+}
+
+// NewGitUploadPackService returns the appropriate upload pack service
+// among the set of known protocols: HTTP and SSH. See `InstallProtocol`
+// to add or modify protocols.
+func NewGitUploadPackService(endpoint common.Endpoint) (common.GitUploadPackService, error) {
+ f, ok := Protocols[endpoint.Scheme]
+ if !ok {
+ return nil, fmt.Errorf("unsupported scheme %q", endpoint.Scheme)
+ }
+
+ return f(endpoint), nil
+}
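A minimal usage sketch of this factory, assuming the import paths introduced in this commit (the package is named `clients` but lives under `plumbing/client`); error handling beyond the factory call is elided:

    import (
        clients "gopkg.in/src-d/go-git.v4/plumbing/client"
        "gopkg.in/src-d/go-git.v4/plumbing/client/common"
    )

    func dial(url string) (common.GitUploadPackService, error) {
        // normalizes SCP-like URLs ("git@host:path") to ssh:// form
        e, err := common.NewEndpoint(url)
        if err != nil {
            return nil, err
        }

        // picks the factory registered for the endpoint scheme
        return clients.NewGitUploadPackService(e)
    }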
diff --git a/plumbing/client/common/common.go b/plumbing/client/common/common.go
new file mode 100644
index 0000000..97f78c4
--- /dev/null
+++ b/plumbing/client/common/common.go
@@ -0,0 +1,219 @@
+// Package common contains interfaces and non-specific protocol entities
+package common
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/url"
+ "regexp"
+ "strings"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packp"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packp/advrefs"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packp/pktline"
+ "gopkg.in/src-d/go-git.v4/plumbing/storer"
+ "gopkg.in/src-d/go-git.v4/storage/memory"
+)
+
+var (
+ ErrRepositoryNotFound = errors.New("repository not found")
+ ErrAuthorizationRequired = errors.New("authorization required")
+ ErrEmptyGitUploadPack = errors.New("empty git-upload-pack given")
+ ErrInvalidAuthMethod = errors.New("invalid auth method")
+)
+
+const GitUploadPackServiceName = "git-upload-pack"
+
+type GitUploadPackService interface {
+ Connect() error
+ SetAuth(AuthMethod) error
+ Info() (*GitUploadPackInfo, error)
+ Fetch(*GitUploadPackRequest) (io.ReadCloser, error)
+ Disconnect() error
+}
+
+type AuthMethod interface {
+ Name() string
+ String() string
+}
+
+type Endpoint url.URL
+
+var (
+ isSchemeRegExp = regexp.MustCompile("^[^:]+://")
+ scpLikeUrlRegExp = regexp.MustCompile("^(?P<user>[^@]+@)?(?P<host>[^:]+):/?(?P<path>.+)$")
+)
+
+func NewEndpoint(endpoint string) (Endpoint, error) {
+ endpoint = transformSCPLikeIfNeeded(endpoint)
+
+ u, err := url.Parse(endpoint)
+ if err != nil {
+ return Endpoint{}, plumbing.NewPermanentError(err)
+ }
+
+ if !u.IsAbs() {
+ return Endpoint{}, plumbing.NewPermanentError(fmt.Errorf(
+ "invalid endpoint: %s", endpoint,
+ ))
+ }
+
+ return Endpoint(*u), nil
+}
+
+func transformSCPLikeIfNeeded(endpoint string) string {
+ if !isSchemeRegExp.MatchString(endpoint) && scpLikeUrlRegExp.MatchString(endpoint) {
+ m := scpLikeUrlRegExp.FindStringSubmatch(endpoint)
+ return fmt.Sprintf("ssh://%s%s/%s", m[1], m[2], m[3])
+ }
+
+ return endpoint
+}
+
+func (e *Endpoint) String() string {
+ u := url.URL(*e)
+ return u.String()
+}
+
+type GitUploadPackInfo struct {
+ Capabilities *packp.Capabilities
+ Refs memory.ReferenceStorage
+}
+
+func NewGitUploadPackInfo() *GitUploadPackInfo {
+ return &GitUploadPackInfo{
+ Capabilities: packp.NewCapabilities(),
+ Refs: make(memory.ReferenceStorage, 0),
+ }
+}
+
+func (i *GitUploadPackInfo) Decode(r io.Reader) error {
+ d := advrefs.NewDecoder(r)
+ ar := advrefs.New()
+ if err := d.Decode(ar); err != nil {
+ if err == advrefs.ErrEmpty {
+ return plumbing.NewPermanentError(err)
+ }
+ return plumbing.NewUnexpectedError(err)
+ }
+
+ i.Capabilities = ar.Capabilities
+
+ if err := i.addRefs(ar); err != nil {
+ return plumbing.NewUnexpectedError(err)
+ }
+
+ return nil
+}
+
+func (i *GitUploadPackInfo) addRefs(ar *advrefs.AdvRefs) error {
+ for name, hash := range ar.References {
+ ref := plumbing.NewReferenceFromStrings(name, hash.String())
+ i.Refs.SetReference(ref)
+ }
+
+ return i.addSymbolicRefs(ar)
+}
+
+func (i *GitUploadPackInfo) addSymbolicRefs(ar *advrefs.AdvRefs) error {
+ if !hasSymrefs(ar) {
+ return nil
+ }
+
+ for _, symref := range ar.Capabilities.Get("symref").Values {
+ chunks := strings.Split(symref, ":")
+ if len(chunks) != 2 {
+ err := fmt.Errorf("bad number of `:` in symref value (%q)", symref)
+ return plumbing.NewUnexpectedError(err)
+ }
+ name := plumbing.ReferenceName(chunks[0])
+ target := plumbing.ReferenceName(chunks[1])
+ ref := plumbing.NewSymbolicReference(name, target)
+ i.Refs.SetReference(ref)
+ }
+
+ return nil
+}
+
+func hasSymrefs(ar *advrefs.AdvRefs) bool {
+ return ar.Capabilities.Supports("symref")
+}
+
+func (i *GitUploadPackInfo) Head() *plumbing.Reference {
+ ref, _ := storer.ResolveReference(i.Refs, plumbing.HEAD)
+ return ref
+}
+
+func (i *GitUploadPackInfo) String() string {
+ return string(i.Bytes())
+}
+
+func (i *GitUploadPackInfo) Bytes() []byte {
+ var buf bytes.Buffer
+ e := pktline.NewEncoder(&buf)
+
+ _ = e.EncodeString("# service=git-upload-pack\n")
+
+ // inserting a flush-pkt here violates the protocol spec, but some
+ // servers, such as github.com, do it
+ e.Flush()
+
+ _ = e.Encodef("%s HEAD\x00%s\n", i.Head().Hash(), i.Capabilities.String())
+
+ for _, ref := range i.Refs {
+ if ref.Type() != plumbing.HashReference {
+ continue
+ }
+
+ _ = e.Encodef("%s %s\n", ref.Hash(), ref.Name())
+ }
+
+ e.Flush()
+
+ return buf.Bytes()
+}
+
+type GitUploadPackRequest struct {
+ Wants []plumbing.Hash
+ Haves []plumbing.Hash
+ Depth int
+}
+
+func (r *GitUploadPackRequest) Want(h ...plumbing.Hash) {
+ r.Wants = append(r.Wants, h...)
+}
+
+func (r *GitUploadPackRequest) Have(h ...plumbing.Hash) {
+ r.Haves = append(r.Haves, h...)
+}
+
+func (r *GitUploadPackRequest) String() string {
+ b, _ := ioutil.ReadAll(r.Reader())
+ return string(b)
+}
+
+func (r *GitUploadPackRequest) Reader() *strings.Reader {
+ var buf bytes.Buffer
+ e := pktline.NewEncoder(&buf)
+
+ for _, want := range r.Wants {
+ _ = e.Encodef("want %s\n", want)
+ }
+
+ for _, have := range r.Haves {
+ _ = e.Encodef("have %s\n", have)
+ }
+
+ if r.Depth != 0 {
+ _ = e.Encodef("deepen %d\n", r.Depth)
+ }
+
+ _ = e.Flush()
+ _ = e.EncodeString("done\n")
+
+ return strings.NewReader(buf.String())
+}
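For reference, the pkt-line framing produced by Reader prefixes every payload with its total length as four hex digits, counting the digits themselves: `want <40-hex-hash>\n` is 46 bytes, so the framed line is 4+46 = 50 = 0x0032 bytes. A minimal sketch of the rule, assuming payloads stay within the pkt-line length limit:

    // frame builds one pkt-line: four hex length digits, then the payload.
    func frame(payload string) string {
        return fmt.Sprintf("%04x%s", 4+len(payload), payload)
    }

    // frame("want 6ecf0ef2c2dffb796033e5a02219af86ec6584e5\n")
    // => "0032want 6ecf0ef2c2dffb796033e5a02219af86ec6584e5\n"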
diff --git a/plumbing/client/common/common_test.go b/plumbing/client/common/common_test.go
new file mode 100644
index 0000000..cf4d871
--- /dev/null
+++ b/plumbing/client/common/common_test.go
@@ -0,0 +1,126 @@
+package common
+
+import (
+ "bytes"
+ "encoding/base64"
+ "testing"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packp"
+
+ . "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type SuiteCommon struct{}
+
+var _ = Suite(&SuiteCommon{})
+
+func (s *SuiteCommon) TestNewEndpoint(c *C) {
+ e, err := NewEndpoint("ssh://git@github.com/user/repository.git")
+ c.Assert(err, IsNil)
+ c.Assert(e.String(), Equals, "ssh://git@github.com/user/repository.git")
+}
+
+func (s *SuiteCommon) TestNewEndpointSCPLike(c *C) {
+ e, err := NewEndpoint("git@github.com:user/repository.git")
+ c.Assert(err, IsNil)
+ c.Assert(e.String(), Equals, "ssh://git@github.com/user/repository.git")
+}
+
+func (s *SuiteCommon) TestNewEndpointWrongFormat(c *C) {
+ e, err := NewEndpoint("foo")
+ c.Assert(err, Not(IsNil))
+ c.Assert(e.Host, Equals, "")
+}
+
+const CapabilitiesFixture = "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEADmulti_ack thin-pack side-band side-band-64k ofs-delta shallow no-progress include-tag multi_ack_detailed no-done symref=HEAD:refs/heads/master agent=git/2:2.4.8~dbussink-fix-enterprise-tokens-compilation-1167-gc7006cf"
+
+func (s *SuiteCommon) TestCapabilitiesSymbolicReference(c *C) {
+ cap := packp.NewCapabilities()
+ cap.Decode(CapabilitiesFixture)
+ c.Assert(cap.SymbolicReference("HEAD"), Equals, "refs/heads/master")
+}
+
+const GitUploadPackInfoFixture = "MDAxZSMgc2VydmljZT1naXQtdXBsb2FkLXBhY2sKMDAwMDAxMGM2ZWNmMGVmMmMyZGZmYjc5NjAzM2U1YTAyMjE5YWY4NmVjNjU4NGU1IEhFQUQAbXVsdGlfYWNrIHRoaW4tcGFjayBzaWRlLWJhbmQgc2lkZS1iYW5kLTY0ayBvZnMtZGVsdGEgc2hhbGxvdyBuby1wcm9ncmVzcyBpbmNsdWRlLXRhZyBtdWx0aV9hY2tfZGV0YWlsZWQgbm8tZG9uZSBzeW1yZWY9SEVBRDpyZWZzL2hlYWRzL21hc3RlciBhZ2VudD1naXQvMjoyLjQuOH5kYnVzc2luay1maXgtZW50ZXJwcmlzZS10b2tlbnMtY29tcGlsYXRpb24tMTE2Ny1nYzcwMDZjZgowMDNmZThkM2ZmYWI1NTI4OTVjMTliOWZjZjdhYTI2NGQyNzdjZGUzMzg4MSByZWZzL2hlYWRzL2JyYW5jaAowMDNmNmVjZjBlZjJjMmRmZmI3OTYwMzNlNWEwMjIxOWFmODZlYzY1ODRlNSByZWZzL2hlYWRzL21hc3RlcgowMDNlYjhlNDcxZjU4YmNiY2E2M2IwN2JkYTIwZTQyODE5MDQwOWMyZGI0NyByZWZzL3B1bGwvMS9oZWFkCjAwMDA="
+
+func (s *SuiteCommon) TestGitUploadPackInfo(c *C) {
+ b, _ := base64.StdEncoding.DecodeString(GitUploadPackInfoFixture)
+
+ i := NewGitUploadPackInfo()
+ err := i.Decode(bytes.NewBuffer(b))
+ c.Assert(err, IsNil)
+
+ name := i.Capabilities.SymbolicReference("HEAD")
+ c.Assert(name, Equals, "refs/heads/master")
+ c.Assert(i.Refs, HasLen, 4)
+
+ ref := i.Refs[plumbing.ReferenceName(name)]
+ c.Assert(ref, NotNil)
+ c.Assert(ref.Hash().String(), Equals, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
+
+ ref = i.Refs[plumbing.HEAD]
+ c.Assert(ref, NotNil)
+ c.Assert(ref.Target(), Equals, plumbing.ReferenceName(name))
+}
+
+const GitUploadPackInfoNoHEADFixture = "MDAxZSMgc2VydmljZT1naXQtdXBsb2FkLXBhY2sKMDAwMDAwYmNkN2UxZmVlMjYxMjM0YmIzYTQzYzA5NmY1NTg3NDhhNTY5ZDc5ZWZmIHJlZnMvaGVhZHMvdjQAbXVsdGlfYWNrIHRoaW4tcGFjayBzaWRlLWJhbmQgc2lkZS1iYW5kLTY0ayBvZnMtZGVsdGEgc2hhbGxvdyBuby1wcm9ncmVzcyBpbmNsdWRlLXRhZyBtdWx0aV9hY2tfZGV0YWlsZWQgbm8tZG9uZSBhZ2VudD1naXQvMS45LjEKMDAwMA=="
+
+func (s *SuiteCommon) TestGitUploadPackInfoNoHEAD(c *C) {
+ b, _ := base64.StdEncoding.DecodeString(GitUploadPackInfoNoHEADFixture)
+
+ i := NewGitUploadPackInfo()
+ err := i.Decode(bytes.NewBuffer(b))
+ c.Assert(err, IsNil)
+
+ name := i.Capabilities.SymbolicReference("HEAD")
+ c.Assert(name, Equals, "")
+ c.Assert(i.Refs, HasLen, 1)
+
+ ref := i.Refs["refs/heads/v4"]
+ c.Assert(ref, NotNil)
+ c.Assert(ref.Hash().String(), Equals, "d7e1fee261234bb3a43c096f558748a569d79eff")
+}
+
+func (s *SuiteCommon) TestGitUploadPackInfoEmpty(c *C) {
+ b := bytes.NewBuffer(nil)
+
+ i := NewGitUploadPackInfo()
+ err := i.Decode(b)
+ c.Assert(err, ErrorMatches, "permanent.*empty.*")
+}
+
+func (s *SuiteCommon) TestGitUploadPackEncode(c *C) {
+ info := NewGitUploadPackInfo()
+ info.Capabilities.Add("symref", "HEAD:refs/heads/master")
+
+ ref := plumbing.ReferenceName("refs/heads/master")
+ hash := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
+ info.Refs = map[plumbing.ReferenceName]*plumbing.Reference{
+ plumbing.HEAD: plumbing.NewSymbolicReference(plumbing.HEAD, ref),
+ ref: plumbing.NewHashReference(ref, hash),
+ }
+
+ c.Assert(info.Head(), NotNil)
+ c.Assert(info.String(), Equals,
+ "001e# service=git-upload-pack\n"+
+ "000000506ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:refs/heads/master\n"+
+ "003f6ecf0ef2c2dffb796033e5a02219af86ec6584e5 refs/heads/master\n"+
+ "0000",
+ )
+}
+
+func (s *SuiteCommon) TestGitUploadPackRequest(c *C) {
+ r := &GitUploadPackRequest{}
+ r.Want(plumbing.NewHash("d82f291cde9987322c8a0c81a325e1ba6159684c"))
+ r.Want(plumbing.NewHash("2b41ef280fdb67a9b250678686a0c3e03b0a9989"))
+ r.Have(plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"))
+
+ c.Assert(r.String(), Equals,
+ "0032want d82f291cde9987322c8a0c81a325e1ba6159684c\n"+
+ "0032want 2b41ef280fdb67a9b250678686a0c3e03b0a9989\n"+
+ "0032have 6ecf0ef2c2dffb796033e5a02219af86ec6584e5\n0000"+
+ "0009done\n",
+ )
+}
diff --git a/plumbing/client/common_test.go b/plumbing/client/common_test.go
new file mode 100644
index 0000000..058c4d3
--- /dev/null
+++ b/plumbing/client/common_test.go
@@ -0,0 +1,85 @@
+package clients
+
+import (
+ "fmt"
+ "io"
+ "testing"
+
+ "gopkg.in/src-d/go-git.v4/plumbing/client/common"
+
+ . "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type SuiteCommon struct{}
+
+var _ = Suite(&SuiteCommon{})
+
+func (s *SuiteCommon) TestNewGitUploadPackServiceHTTP(c *C) {
+ e, err := common.NewEndpoint("http://github.com/src-d/go-git")
+ c.Assert(err, IsNil)
+
+ output, err := NewGitUploadPackService(e)
+ c.Assert(err, IsNil)
+ c.Assert(typeAsString(output), Equals, "*http.GitUploadPackService")
+
+ e, err = common.NewEndpoint("https://github.com/src-d/go-git")
+ c.Assert(err, IsNil)
+
+ output, err = NewGitUploadPackService(e)
+ c.Assert(err, IsNil)
+ c.Assert(typeAsString(output), Equals, "*http.GitUploadPackService")
+}
+
+func (s *SuiteCommon) TestNewGitUploadPackServiceSSH(c *C) {
+ e, err := common.NewEndpoint("ssh://github.com/src-d/go-git")
+ c.Assert(err, IsNil)
+
+ output, err := NewGitUploadPackService(e)
+ c.Assert(err, IsNil)
+ c.Assert(typeAsString(output), Equals, "*ssh.GitUploadPackService")
+}
+
+func (s *SuiteCommon) TestNewGitUploadPackServiceUnknown(c *C) {
+ e, err := common.NewEndpoint("unknown://github.com/src-d/go-git")
+ c.Assert(err, IsNil)
+
+ _, err = NewGitUploadPackService(e)
+ c.Assert(err, NotNil)
+}
+
+func (s *SuiteCommon) TestInstallProtocol(c *C) {
+ InstallProtocol("newscheme", newDummyProtocolService)
+ c.Assert(Protocols["newscheme"], NotNil)
+}
+
+type dummyProtocolService struct{}
+
+func newDummyProtocolService(common.Endpoint) common.GitUploadPackService {
+ return &dummyProtocolService{}
+}
+
+func (s *dummyProtocolService) Connect() error {
+ return nil
+}
+
+func (s *dummyProtocolService) SetAuth(auth common.AuthMethod) error {
+ return nil
+}
+
+func (s *dummyProtocolService) Info() (*common.GitUploadPackInfo, error) {
+ return nil, nil
+}
+
+func (s *dummyProtocolService) Fetch(r *common.GitUploadPackRequest) (io.ReadCloser, error) {
+ return nil, nil
+}
+
+func (s *dummyProtocolService) Disconnect() error {
+ return nil
+}
+
+func typeAsString(v interface{}) string {
+ return fmt.Sprintf("%T", v)
+}
diff --git a/plumbing/client/http/common.go b/plumbing/client/http/common.go
new file mode 100644
index 0000000..4c07876
--- /dev/null
+++ b/plumbing/client/http/common.go
@@ -0,0 +1,77 @@
+// Package http implements an HTTP client for go-git.
+package http
+
+import (
+ "fmt"
+ "net/http"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/client/common"
+)
+
+// HTTPAuthMethod is a concrete implementation of common.AuthMethod for HTTP services.
+type HTTPAuthMethod interface {
+ common.AuthMethod
+ setAuth(r *http.Request)
+}
+
+// BasicAuth represents HTTP basic authentication credentials.
+type BasicAuth struct {
+ username, password string
+}
+
+// NewBasicAuth returns a BasicAuth based on the given username and password.
+func NewBasicAuth(username, password string) *BasicAuth {
+ return &BasicAuth{username, password}
+}
+
+func (a *BasicAuth) setAuth(r *http.Request) {
+ r.SetBasicAuth(a.username, a.password)
+}
+
+// Name returns the name of the auth method.
+func (a *BasicAuth) Name() string {
+ return "http-basic-auth"
+}
+
+func (a *BasicAuth) String() string {
+ masked := "*******"
+ if a.password == "" {
+ masked = "<empty>"
+ }
+
+ return fmt.Sprintf("%s - %s:%s", a.Name(), a.username, masked)
+}
+
+// HTTPError is a dedicated error type for unexpected HTTP status codes.
+type HTTPError struct {
+ Response *http.Response
+}
+
+// NewHTTPError returns a new HTTPError based on an HTTP response; it
+// returns nil for 2xx status codes.
+func NewHTTPError(r *http.Response) error {
+ if r.StatusCode >= 200 && r.StatusCode < 300 {
+ return nil
+ }
+
+ switch r.StatusCode {
+ case 401:
+ return common.ErrAuthorizationRequired
+ case 404:
+ return common.ErrRepositoryNotFound
+ }
+
+ err := &HTTPError{r}
+ return plumbing.NewUnexpectedError(err)
+}
+
+// StatusCode returns the status code of the response
+func (e *HTTPError) StatusCode() int {
+ return e.Response.StatusCode
+}
+
+func (e *HTTPError) Error() string {
+ return fmt.Sprintf("unexpected requesting %q status code: %d",
+ e.Response.Request.URL, e.Response.StatusCode,
+ )
+}
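A hedged sketch of how a caller might branch on this error taxonomy; `res` stands for any *http.Response already obtained:

    if err := NewHTTPError(res); err != nil {
        switch err {
        case common.ErrAuthorizationRequired: // 401
            // ask for credentials and retry
        case common.ErrRepositoryNotFound: // 404
            // fail fast, the repository does not exist
        default:
            // a *plumbing.UnexpectedError wrapping an *HTTPError
        }
    }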
diff --git a/plumbing/client/http/common_test.go b/plumbing/client/http/common_test.go
new file mode 100644
index 0000000..287897d
--- /dev/null
+++ b/plumbing/client/http/common_test.go
@@ -0,0 +1,52 @@
+package http
+
+import (
+ "net/http"
+ "testing"
+
+ . "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type SuiteCommon struct{}
+
+var _ = Suite(&SuiteCommon{})
+
+func (s *SuiteCommon) TestNewBasicAuth(c *C) {
+ a := NewBasicAuth("foo", "qux")
+
+ c.Assert(a.Name(), Equals, "http-basic-auth")
+ c.Assert(a.String(), Equals, "http-basic-auth - foo:*******")
+}
+
+func (s *SuiteCommon) TestNewHTTPError200(c *C) {
+ res := &http.Response{StatusCode: 200}
+ err := NewHTTPError(res)
+ c.Assert(err, IsNil)
+}
+
+func (s *SuiteCommon) TestNewHTTPError401(c *C) {
+ s.testNewHTTPError(c, 401, "authorization required")
+}
+
+func (s *SuiteCommon) TestNewHTTPError404(c *C) {
+ s.testNewHTTPError(c, 404, "repository not found")
+}
+
+func (s *SuiteCommon) TestNewHTTPError40x(c *C) {
+ s.testNewHTTPError(c, 402, "unexpected client error.*")
+}
+
+func (s *SuiteCommon) testNewHTTPError(c *C, code int, msg string) {
+ req, _ := http.NewRequest("GET", "foo", nil)
+ res := &http.Response{
+ StatusCode: code,
+ Request: req,
+ }
+
+ err := NewHTTPError(res)
+ c.Assert(err, NotNil)
+ c.Assert(err, ErrorMatches, msg)
+}
diff --git a/plumbing/client/http/git_upload_pack.go b/plumbing/client/http/git_upload_pack.go
new file mode 100644
index 0000000..c1f4a0b
--- /dev/null
+++ b/plumbing/client/http/git_upload_pack.go
@@ -0,0 +1,186 @@
+package http
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/client/common"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packp/pktline"
+)
+
+// GitUploadPackService is the git-upload-pack service over HTTP.
+type GitUploadPackService struct {
+ client *http.Client
+ endpoint common.Endpoint
+ auth HTTPAuthMethod
+}
+
+// NewGitUploadPackService creates a client for a git-upload-pack service over
+// HTTP; the auth credentials are extracted from the URL, or can be provided
+// later using the SetAuth method.
+func NewGitUploadPackService(endpoint common.Endpoint) common.GitUploadPackService {
+ s := &GitUploadPackService{
+ client: http.DefaultClient,
+ endpoint: endpoint,
+ }
+
+ s.setBasicAuthFromEndpoint()
+ return s
+}
+
+// Connect has no effect; it exists only to satisfy the interface.
+func (s *GitUploadPackService) Connect() error {
+ return nil
+}
+
+func (s *GitUploadPackService) setBasicAuthFromEndpoint() {
+ info := s.endpoint.User
+ if info == nil {
+ return
+ }
+
+ p, ok := info.Password()
+ if !ok {
+ return
+ }
+
+ u := info.Username()
+ s.auth = NewBasicAuth(u, p)
+}
+
+// SetAuth sets the AuthMethod
+func (s *GitUploadPackService) SetAuth(auth common.AuthMethod) error {
+ httpAuth, ok := auth.(HTTPAuthMethod)
+ if !ok {
+ return common.ErrInvalidAuthMethod
+ }
+
+ s.auth = httpAuth
+ return nil
+}
+
+// Info returns the references info and capabilities from the service
+func (s *GitUploadPackService) Info() (*common.GitUploadPackInfo, error) {
+ url := fmt.Sprintf(
+ "%s/info/refs?service=%s",
+ s.endpoint.String(), common.GitUploadPackServiceName,
+ )
+
+ res, err := s.doRequest("GET", url, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ defer res.Body.Close()
+
+ i := common.NewGitUploadPackInfo()
+ return i, i.Decode(res.Body)
+}
+
+// Fetch sends the upload-pack request and returns a reader for the resulting packfile.
+func (s *GitUploadPackService) Fetch(r *common.GitUploadPackRequest) (io.ReadCloser, error) {
+ url := fmt.Sprintf(
+ "%s/%s",
+ s.endpoint.String(), common.GitUploadPackServiceName,
+ )
+
+ res, err := s.doRequest("POST", url, r.Reader())
+ if err != nil {
+ return nil, err
+ }
+
+ reader := newBufferedReadCloser(res.Body)
+ if _, err := reader.Peek(1); err != nil {
+ if err == io.ErrUnexpectedEOF {
+ return nil, common.ErrEmptyGitUploadPack
+ }
+
+ return nil, err
+ }
+
+ if err := discardResponseInfo(reader); err != nil {
+ return nil, err
+ }
+
+ return reader, nil
+}
+
+func discardResponseInfo(r io.Reader) error {
+ s := pktline.NewScanner(r)
+ for s.Scan() {
+ if bytes.Equal(s.Bytes(), []byte{'N', 'A', 'K', '\n'}) {
+ break
+ }
+ }
+
+ return s.Err()
+}
+
+func (s *GitUploadPackService) doRequest(method, url string, content *strings.Reader) (*http.Response, error) {
+ var body io.Reader
+ if content != nil {
+ // only assign a non-nil *strings.Reader; storing a typed nil in
+ // the io.Reader interface would make http.NewRequest believe the
+ // request has a body
+ body = content
+ }
+
+ req, err := http.NewRequest(method, url, body)
+ if err != nil {
+ return nil, plumbing.NewPermanentError(err)
+ }
+
+ s.applyHeadersToRequest(req, content)
+ s.applyAuthToRequest(req)
+
+ res, err := s.client.Do(req)
+ if err != nil {
+ return nil, plumbing.NewUnexpectedError(err)
+ }
+
+ if err := NewHTTPError(res); err != nil {
+ return nil, err
+ }
+
+ return res, nil
+}
+
+func (s *GitUploadPackService) applyHeadersToRequest(req *http.Request, content *strings.Reader) {
+ req.Header.Add("User-Agent", "git/1.0")
+ req.Header.Add("Host", "github.com")
+
+ if content == nil {
+ req.Header.Add("Accept", "*/*")
+ } else {
+ req.Header.Add("Accept", "application/x-git-upload-pack-result")
+ req.Header.Add("Content-Type", "application/x-git-upload-pack-request")
+ req.Header.Add("Content-Length", string(content.Len()))
+ }
+}
+
+func (s *GitUploadPackService) applyAuthToRequest(req *http.Request) {
+ if s.auth == nil {
+ return
+ }
+
+ s.auth.setAuth(req)
+}
+
+// Disconnect does nothing.
+func (s *GitUploadPackService) Disconnect() (err error) {
+ return nil
+}
+
+type bufferedReadCloser struct {
+ *bufio.Reader
+ closer io.Closer
+}
+
+func newBufferedReadCloser(r io.ReadCloser) *bufferedReadCloser {
+ return &bufferedReadCloser{bufio.NewReader(r), r}
+}
+
+func (r *bufferedReadCloser) Close() error {
+ return r.closer.Close()
+}
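Putting the pieces together, a minimal sketch of the smart-HTTP flow implemented above: Info issues the GET, Fetch the POST, and error handling is elided for brevity:

    e, _ := common.NewEndpoint("https://github.com/git-fixtures/basic")
    s := NewGitUploadPackService(e)

    info, _ := s.Info() // GET <repo>/info/refs?service=git-upload-pack

    req := &common.GitUploadPackRequest{}
    req.Want(info.Head().Hash())

    rc, _ := s.Fetch(req) // POST <repo>/git-upload-pack with req.Reader() as body
    defer rc.Close()      // rc yields the raw packfile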
diff --git a/plumbing/client/http/git_upload_pack_test.go b/plumbing/client/http/git_upload_pack_test.go
new file mode 100644
index 0000000..a50dbdf
--- /dev/null
+++ b/plumbing/client/http/git_upload_pack_test.go
@@ -0,0 +1,135 @@
+package http
+
+import (
+ "io/ioutil"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/client/common"
+)
+
+type RemoteSuite struct {
+ Endpoint common.Endpoint
+}
+
+var _ = Suite(&RemoteSuite{})
+
+func (s *RemoteSuite) SetUpSuite(c *C) {
+ var err error
+ s.Endpoint, err = common.NewEndpoint("https://github.com/git-fixtures/basic")
+ c.Assert(err, IsNil)
+}
+
+func (s *RemoteSuite) TestNewGitUploadPackServiceAuth(c *C) {
+ e, err := common.NewEndpoint("https://foo:bar@github.com/git-fixtures/basic")
+ c.Assert(err, IsNil)
+
+ r := NewGitUploadPackService(e)
+ auth := r.(*GitUploadPackService).auth
+
+ c.Assert(auth.String(), Equals, "http-basic-auth - foo:*******")
+}
+
+func (s *RemoteSuite) TestConnect(c *C) {
+ r := NewGitUploadPackService(s.Endpoint)
+ c.Assert(r.Connect(), IsNil)
+}
+
+func (s *RemoteSuite) TestSetAuth(c *C) {
+ auth := &BasicAuth{}
+ r := NewGitUploadPackService(s.Endpoint)
+ r.SetAuth(auth)
+ c.Assert(auth, Equals, r.(*GitUploadPackService).auth)
+}
+
+type mockAuth struct{}
+
+func (*mockAuth) Name() string { return "" }
+func (*mockAuth) String() string { return "" }
+
+func (s *RemoteSuite) TestSetAuthWrongType(c *C) {
+ r := NewGitUploadPackService(s.Endpoint)
+ c.Assert(r.SetAuth(&mockAuth{}), Equals, common.ErrInvalidAuthMethod)
+}
+
+func (s *RemoteSuite) TestInfoEmpty(c *C) {
+ endpoint, _ := common.NewEndpoint("https://github.com/git-fixture/empty")
+ r := NewGitUploadPackService(endpoint)
+ c.Assert(r.Connect(), IsNil)
+
+ info, err := r.Info()
+ c.Assert(err, Equals, common.ErrAuthorizationRequired)
+ c.Assert(info, IsNil)
+}
+
+func (s *RemoteSuite) TestInfoNotExists(c *C) {
+ endpoint, _ := common.NewEndpoint("https://github.com/git-fixture/not-exists")
+ r := NewGitUploadPackService(endpoint)
+ c.Assert(r.Connect(), IsNil)
+
+ info, err := r.Info()
+ c.Assert(err, Equals, common.ErrAuthorizationRequired)
+ c.Assert(info, IsNil)
+}
+
+func (s *RemoteSuite) TestDefaultBranch(c *C) {
+ r := NewGitUploadPackService(s.Endpoint)
+ c.Assert(r.Connect(), IsNil)
+
+ info, err := r.Info()
+ c.Assert(err, IsNil)
+ c.Assert(info.Capabilities.SymbolicReference("HEAD"), Equals, "refs/heads/master")
+}
+
+func (s *RemoteSuite) TestCapabilities(c *C) {
+ r := NewGitUploadPackService(s.Endpoint)
+ c.Assert(r.Connect(), IsNil)
+
+ info, err := r.Info()
+ c.Assert(err, IsNil)
+ c.Assert(info.Capabilities.Get("agent").Values, HasLen, 1)
+}
+
+func (s *RemoteSuite) TestFetch(c *C) {
+ r := NewGitUploadPackService(s.Endpoint)
+ c.Assert(r.Connect(), IsNil)
+
+ req := &common.GitUploadPackRequest{}
+ req.Want(plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"))
+
+ reader, err := r.Fetch(req)
+ c.Assert(err, IsNil)
+
+ b, err := ioutil.ReadAll(reader)
+ c.Assert(err, IsNil)
+ c.Assert(b, HasLen, 85374)
+}
+
+func (s *RemoteSuite) TestFetchNoChanges(c *C) {
+ r := NewGitUploadPackService(s.Endpoint)
+ c.Assert(r.Connect(), IsNil)
+
+ req := &common.GitUploadPackRequest{}
+ req.Want(plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"))
+ req.Have(plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"))
+
+ reader, err := r.Fetch(req)
+ c.Assert(err, Equals, common.ErrEmptyGitUploadPack)
+ c.Assert(reader, IsNil)
+}
+
+func (s *RemoteSuite) TestFetchMulti(c *C) {
+ r := NewGitUploadPackService(s.Endpoint)
+ c.Assert(r.Connect(), IsNil)
+
+ req := &common.GitUploadPackRequest{}
+ req.Want(plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"))
+ req.Want(plumbing.NewHash("e8d3ffab552895c19b9fcf7aa264d277cde33881"))
+
+ reader, err := r.Fetch(req)
+ c.Assert(err, IsNil)
+
+ b, err := ioutil.ReadAll(reader)
+ c.Assert(err, IsNil)
+ c.Assert(b, HasLen, 85585)
+}
diff --git a/plumbing/client/ssh/auth_method.go b/plumbing/client/ssh/auth_method.go
new file mode 100644
index 0000000..587f59a
--- /dev/null
+++ b/plumbing/client/ssh/auth_method.go
@@ -0,0 +1,159 @@
+package ssh
+
+import (
+ "fmt"
+ "net"
+ "os"
+
+ "golang.org/x/crypto/ssh"
+ "golang.org/x/crypto/ssh/agent"
+ "gopkg.in/src-d/go-git.v4/plumbing/client/common"
+)
+
+// AuthMethod is the interface all auth methods for the ssh client
+// must implement. The clientConfig method returns the ssh client
+// configuration needed to establish an ssh connection.
+type AuthMethod interface {
+ common.AuthMethod
+ clientConfig() *ssh.ClientConfig
+}
+
+// The names of the AuthMethod implementations. To be returned by the
+// Name() method. Most git servers only allow PublicKeysName and
+// PublicKeysCallbackName.
+const (
+ KeyboardInteractiveName = "ssh-keyboard-interactive"
+ PasswordName = "ssh-password"
+ PasswordCallbackName = "ssh-password-callback"
+ PublicKeysName = "ssh-public-keys"
+ PublicKeysCallbackName = "ssh-public-key-callback"
+)
+
+// KeyboardInteractive implements AuthMethod by using a
+// prompt/response sequence controlled by the server.
+type KeyboardInteractive struct {
+ User string
+ Challenge ssh.KeyboardInteractiveChallenge
+}
+
+func (a *KeyboardInteractive) Name() string {
+ return KeyboardInteractiveName
+}
+
+func (a *KeyboardInteractive) String() string {
+ return fmt.Sprintf("user: %s, name: %s", a.User, a.Name())
+}
+
+func (a *KeyboardInteractive) clientConfig() *ssh.ClientConfig {
+ return &ssh.ClientConfig{
+ User: a.User,
+ Auth: []ssh.AuthMethod{ssh.KeyboardInteractiveChallenge(a.Challenge)},
+ }
+}
+
+// Password implements AuthMethod by using the given password.
+type Password struct {
+ User string
+ Pass string
+}
+
+func (a *Password) Name() string {
+ return PasswordName
+}
+
+func (a *Password) String() string {
+ return fmt.Sprintf("user: %s, name: %s", a.User, a.Name())
+}
+
+func (a *Password) clientConfig() *ssh.ClientConfig {
+ return &ssh.ClientConfig{
+ User: a.User,
+ Auth: []ssh.AuthMethod{ssh.Password(a.Pass)},
+ }
+}
+
+// PasswordCallback implements AuthMethod by using a callback
+// to fetch the password.
+type PasswordCallback struct {
+ User string
+ Callback func() (pass string, err error)
+}
+
+func (a *PasswordCallback) Name() string {
+ return PasswordCallbackName
+}
+
+func (a *PasswordCallback) String() string {
+ return fmt.Sprintf("user: %s, name: %s", a.User, a.Name())
+}
+
+func (a *PasswordCallback) clientConfig() *ssh.ClientConfig {
+ return &ssh.ClientConfig{
+ User: a.User,
+ Auth: []ssh.AuthMethod{ssh.PasswordCallback(a.Callback)},
+ }
+}
+
+// PublicKeys implements AuthMethod by using the given
+// key pairs.
+type PublicKeys struct {
+ User string
+ Signer ssh.Signer
+}
+
+func (a *PublicKeys) Name() string {
+ return PublicKeysName
+}
+
+func (a *PublicKeys) String() string {
+ return fmt.Sprintf("user: %s, name: %s", a.User, a.Name())
+}
+
+func (a *PublicKeys) clientConfig() *ssh.ClientConfig {
+ return &ssh.ClientConfig{
+ User: a.User,
+ Auth: []ssh.AuthMethod{ssh.PublicKeys(a.Signer)},
+ }
+}
+
+// PublicKeysCallback implements AuthMethod by asking a
+// ssh.agent.Agent to act as a signer.
+type PublicKeysCallback struct {
+ User string
+ Callback func() (signers []ssh.Signer, err error)
+}
+
+func (a *PublicKeysCallback) Name() string {
+ return PublicKeysCallbackName
+}
+
+func (a *PublicKeysCallback) String() string {
+ return fmt.Sprintf("user: %s, name: %s", a.User, a.Name())
+}
+
+func (a *PublicKeysCallback) clientConfig() *ssh.ClientConfig {
+ return &ssh.ClientConfig{
+ User: a.User,
+ Auth: []ssh.AuthMethod{ssh.PublicKeysCallback(a.Callback)},
+ }
+}
+
+const DefaultSSHUsername = "git"
+
+// NewSSHAgentAuth opens a pipe to the SSH agent and uses the pipe as
+// the implementer of the public key callback function.
+func NewSSHAgentAuth(user string) (*PublicKeysCallback, error) {
+ if user == "" {
+ user = DefaultSSHUsername
+ }
+
+ pipe, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
+ if err != nil {
+ return nil, err
+ }
+
+ return &PublicKeysCallback{
+ User: user,
+ Callback: agent.NewClient(pipe).Signers,
+ }, nil
+}
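A short sketch of the agent-based flow, assuming an ssh-agent is reachable through SSH_AUTH_SOCK; `endpoint` is a placeholder for a previously parsed common.Endpoint:

    auth, err := NewSSHAgentAuth("git") // an empty user also defaults to "git"
    if err != nil {
        // no agent listening on SSH_AUTH_SOCK
    }

    s := NewGitUploadPackService(endpoint) // defined in git_upload_pack.go below
    _ = s.SetAuth(auth)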
diff --git a/plumbing/client/ssh/auth_method_test.go b/plumbing/client/ssh/auth_method_test.go
new file mode 100644
index 0000000..a87c950
--- /dev/null
+++ b/plumbing/client/ssh/auth_method_test.go
@@ -0,0 +1,94 @@
+package ssh
+
+import (
+ "fmt"
+ "testing"
+
+ . "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type SuiteCommon struct{}
+
+var _ = Suite(&SuiteCommon{})
+
+func (s *SuiteCommon) TestKeyboardInteractiveName(c *C) {
+ a := &KeyboardInteractive{
+ User: "test",
+ Challenge: nil,
+ }
+ c.Assert(a.Name(), Equals, KeyboardInteractiveName)
+}
+
+func (s *SuiteCommon) TestKeyboardInteractiveString(c *C) {
+ a := &KeyboardInteractive{
+ User: "test",
+ Challenge: nil,
+ }
+ c.Assert(a.String(), Equals, fmt.Sprintf("user: test, name: %s", KeyboardInteractiveName))
+}
+
+func (s *SuiteCommon) TestPasswordName(c *C) {
+ a := &Password{
+ User: "test",
+ Pass: "",
+ }
+ c.Assert(a.Name(), Equals, PasswordName)
+}
+
+func (s *SuiteCommon) TestPasswordString(c *C) {
+ a := &Password{
+ User: "test",
+ Pass: "",
+ }
+ c.Assert(a.String(), Equals, fmt.Sprintf("user: test, name: %s", PasswordName))
+}
+
+func (s *SuiteCommon) TestPasswordCallbackName(c *C) {
+ a := &PasswordCallback{
+ User: "test",
+ Callback: nil,
+ }
+ c.Assert(a.Name(), Equals, PasswordCallbackName)
+}
+
+func (s *SuiteCommon) TestPasswordCallbackString(c *C) {
+ a := &PasswordCallback{
+ User: "test",
+ Callback: nil,
+ }
+ c.Assert(a.String(), Equals, fmt.Sprintf("user: test, name: %s", PasswordCallbackName))
+}
+
+func (s *SuiteCommon) TestPublicKeysName(c *C) {
+ a := &PublicKeys{
+ User: "test",
+ Signer: nil,
+ }
+ c.Assert(a.Name(), Equals, PublicKeysName)
+}
+
+func (s *SuiteCommon) TestPublicKeysString(c *C) {
+ a := &PublicKeys{
+ User: "test",
+ Signer: nil,
+ }
+ c.Assert(a.String(), Equals, fmt.Sprintf("user: test, name: %s", PublicKeysName))
+}
+
+func (s *SuiteCommon) TestPublicKeysCallbackName(c *C) {
+ a := &PublicKeysCallback{
+ User: "test",
+ Callback: nil,
+ }
+ c.Assert(a.Name(), Equals, PublicKeysCallbackName)
+}
+
+func (s *SuiteCommon) TestPublicKeysCallbackString(c *C) {
+ a := &PublicKeysCallback{
+ User: "test",
+ Callback: nil,
+ }
+ c.Assert(a.String(), Equals, fmt.Sprintf("user: test, name: %s", PublicKeysCallbackName))
+}
diff --git a/plumbing/client/ssh/git_upload_pack.go b/plumbing/client/ssh/git_upload_pack.go
new file mode 100644
index 0000000..e2b73fd
--- /dev/null
+++ b/plumbing/client/ssh/git_upload_pack.go
@@ -0,0 +1,315 @@
+// Package ssh implements an SSH client for go-git.
+package ssh
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+
+ "gopkg.in/src-d/go-git.v4/plumbing/client/common"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packp/advrefs"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packp/pktline"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packp/ulreq"
+
+ "golang.org/x/crypto/ssh"
+)
+
+// New errors introduced by this package.
+var (
+ ErrInvalidAuthMethod = errors.New("invalid ssh auth method")
+ ErrAuthRequired = errors.New("cannot connect: auth required")
+ ErrNotConnected = errors.New("not connected")
+ ErrAlreadyConnected = errors.New("already connected")
+ ErrUploadPackAnswerFormat = errors.New("git-upload-pack bad answer format")
+ ErrUnsupportedVCS = errors.New("only git is supported")
+ ErrUnsupportedRepo = errors.New("only github.com is supported")
+
+ nak = []byte("NAK")
+ eol = []byte("\n")
+)
+
+// GitUploadPackService holds the service information.
+// The zero value is safe to use.
+type GitUploadPackService struct {
+ connected bool
+ endpoint common.Endpoint
+ client *ssh.Client
+ auth AuthMethod
+}
+
+// NewGitUploadPackService initialises a GitUploadPackService.
+func NewGitUploadPackService(endpoint common.Endpoint) common.GitUploadPackService {
+ return &GitUploadPackService{endpoint: endpoint}
+}
+
+// Connect connects to the SSH server. Unless an AuthMethod was set with the
+// SetAuth method, it defaults to an auth method based on PublicKeysCallback,
+// which connects to an SSH agent using the address stored in the
+// SSH_AUTH_SOCK environment variable.
+func (s *GitUploadPackService) Connect() error {
+ if s.connected {
+ return ErrAlreadyConnected
+ }
+
+ if err := s.setAuthFromEndpoint(); err != nil {
+ return err
+ }
+
+ var err error
+ s.client, err = ssh.Dial("tcp", s.getHostWithPort(), s.auth.clientConfig())
+ if err != nil {
+ return err
+ }
+
+ s.connected = true
+ return nil
+}
+
+func (s *GitUploadPackService) getHostWithPort() string {
+ host := s.endpoint.Host
+ if !strings.Contains(s.endpoint.Host, ":") {
+ host += ":22"
+ }
+
+ return host
+}
+
+func (s *GitUploadPackService) setAuthFromEndpoint() error {
+ var u string
+ if info := s.endpoint.User; info != nil {
+ u = info.Username()
+ }
+
+ var err error
+ s.auth, err = NewSSHAgentAuth(u)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// SetAuth sets the AuthMethod
+func (s *GitUploadPackService) SetAuth(auth common.AuthMethod) error {
+ var ok bool
+ s.auth, ok = auth.(AuthMethod)
+ if !ok {
+ return ErrInvalidAuthMethod
+ }
+
+ return nil
+}
+
+// Info returns the GitUploadPackInfo of the repository. The client must be
+// connected to the repository (using the Connect() method) before
+// using this method.
+func (s *GitUploadPackService) Info() (i *common.GitUploadPackInfo, err error) {
+ if !s.connected {
+ return nil, ErrNotConnected
+ }
+
+ session, err := s.client.NewSession()
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ // the session can be closed by the other endpoint,
+ // therefore we must ignore a close error.
+ _ = session.Close()
+ }()
+
+ out, err := session.Output(s.getCommand())
+ if err != nil {
+ return nil, err
+ }
+
+ i = common.NewGitUploadPackInfo()
+ return i, i.Decode(bytes.NewReader(out))
+}
+
+// Disconnect the SSH client.
+func (s *GitUploadPackService) Disconnect() (err error) {
+ if !s.connected {
+ return ErrNotConnected
+ }
+ s.connected = false
+ return s.client.Close()
+}
+
+// Fetch returns a packfile for a given upload request. It opens a new
+// SSH session on a connected GitUploadPackService, sends the given
+// upload request to the server and returns a reader for the received
+// packfile. Closing the returned reader will close the SSH session.
+func (s *GitUploadPackService) Fetch(req *common.GitUploadPackRequest) (rc io.ReadCloser, err error) {
+ if !s.connected {
+ return nil, ErrNotConnected
+ }
+
+ session, i, o, done, err := openSSHSession(s.client, s.getCommand())
+ if err != nil {
+ return nil, fmt.Errorf("cannot open SSH session: %s", err)
+ }
+
+ if err := talkPackProtocol(i, o, req); err != nil {
+ return nil, err
+ }
+
+ return &fetchSession{
+ Reader: o,
+ session: session,
+ done: done,
+ }, nil
+}
+
+func openSSHSession(c *ssh.Client, cmd string) (
+ *ssh.Session, io.WriteCloser, io.Reader, <-chan error, error) {
+
+ session, err := c.NewSession()
+ if err != nil {
+ return nil, nil, nil, nil, fmt.Errorf("cannot open SSH session: %s", err)
+ }
+
+ i, err := session.StdinPipe()
+ if err != nil {
+ return nil, nil, nil, nil, fmt.Errorf("cannot pipe remote stdin: %s", err)
+ }
+
+ o, err := session.StdoutPipe()
+ if err != nil {
+ return nil, nil, nil, nil, fmt.Errorf("cannot pipe remote stdout: %s", err)
+ }
+
+ done := make(chan error)
+ go func() {
+ done <- session.Run(cmd)
+ }()
+
+ return session, i, o, done, nil
+}
+
+// TODO support multi_ack mode
+// TODO support multi_ack_detailed mode
+// TODO support acks for common objects
+// TODO build a proper state machine for all these processing options
+func talkPackProtocol(w io.WriteCloser, r io.Reader,
+ req *common.GitUploadPackRequest) error {
+
+ if err := skipAdvRef(r); err != nil {
+ return fmt.Errorf("skipping advertised-refs: %s", err)
+ }
+
+ if err := sendUlReq(w, req); err != nil {
+ return fmt.Errorf("sending upload-req message: %s", err)
+ }
+
+ if err := sendHaves(w, req); err != nil {
+ return fmt.Errorf("sending haves message: %s", err)
+ }
+
+ if err := sendDone(w); err != nil {
+ return fmt.Errorf("sending done message: %s", err)
+ }
+
+ if err := w.Close(); err != nil {
+ return fmt.Errorf("closing input: %s", err)
+ }
+
+ if err := readNAK(r); err != nil {
+ return fmt.Errorf("reading NAK: %s", err)
+ }
+
+ return nil
+}
+
+func skipAdvRef(r io.Reader) error {
+ d := advrefs.NewDecoder(r)
+ ar := advrefs.New()
+
+ return d.Decode(ar)
+}
+
+func sendUlReq(w io.Writer, req *common.GitUploadPackRequest) error {
+ ur := ulreq.New()
+ ur.Wants = req.Wants
+ ur.Depth = ulreq.DepthCommits(req.Depth)
+ e := ulreq.NewEncoder(w)
+
+ return e.Encode(ur)
+}
+
+func sendHaves(w io.Writer, req *common.GitUploadPackRequest) error {
+ e := pktline.NewEncoder(w)
+ for _, have := range req.Haves {
+ if err := e.Encodef("have %s\n", have); err != nil {
+ return fmt.Errorf("sending haves for %q: err ", have, err)
+ }
+ }
+
+ if len(req.Haves) != 0 {
+ if err := e.Flush(); err != nil {
+ return fmt.Errorf("sending flush-pkt after haves: %s", err)
+ }
+ }
+
+ return nil
+}
+
+func sendDone(w io.Writer) error {
+ e := pktline.NewEncoder(w)
+
+ return e.Encodef("done\n")
+}
+
+func readNAK(r io.Reader) error {
+ s := pktline.NewScanner(r)
+ if !s.Scan() {
+ return s.Err()
+ }
+
+ b := s.Bytes()
+ b = bytes.TrimSuffix(b, eol)
+ if !bytes.Equal(b, nak) {
+ return fmt.Errorf("expecting NAK, found %q instead", string(b))
+ }
+
+ return nil
+}
+
+type fetchSession struct {
+ io.Reader
+ session *ssh.Session
+ done <-chan error
+}
+
+// Close closes the session and collects the exit status of the remote
+// SSH command.
+//
+// If both the remote command and the closing of the session complete
+// successfully, it returns nil.
+//
+// If the remote command completes unsuccessfully or is interrupted by a
+// signal, it returns the corresponding *ExitError.
+//
+// Otherwise, if closing the SSH session fails, it returns the close
+// error. Closing the session when the other end has already closed it
+// is not considered an error.
+func (f *fetchSession) Close() (err error) {
+ if err := <-f.done; err != nil {
+ return err
+ }
+
+ if err := f.session.Close(); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+func (s *GitUploadPackService) getCommand() string {
+ directory := s.endpoint.Path
+ directory = directory[1:]
+
+ return fmt.Sprintf("git-upload-pack '%s'", directory)
+}
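For orientation, the conversation driven by talkPackProtocol looks roughly like this on the wire; the pkt-line length prefixes shown are illustrative:

    C: 0032want 6ecf0ef2c2dffb796033e5a02219af86ec6584e5\n   (upload-request)
    C: 0000                                                  (flush-pkt)
    C: 0032have <40-hex-hash>\n                              (one per local object)
    C: 0000                                                  (only if any haves were sent)
    C: 0009done\n
    S: 0008NAK\n                                             (no common objects found)
    S: <raw packfile bytes>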
diff --git a/plumbing/client/ssh/git_upload_pack_test.go b/plumbing/client/ssh/git_upload_pack_test.go
new file mode 100644
index 0000000..4d5b2b1
--- /dev/null
+++ b/plumbing/client/ssh/git_upload_pack_test.go
@@ -0,0 +1,144 @@
+package ssh
+
+import (
+ "io/ioutil"
+ "os"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/client/common"
+)
+
+type RemoteSuite struct {
+ Endpoint common.Endpoint
+}
+
+var _ = Suite(&RemoteSuite{})
+
+func (s *RemoteSuite) SetUpSuite(c *C) {
+ var err error
+ s.Endpoint, err = common.NewEndpoint("git@github.com:git-fixtures/basic.git")
+ c.Assert(err, IsNil)
+
+ if os.Getenv("SSH_AUTH_SOCK") == "" {
+ c.Skip("SSH_AUTH_SOCK is not set")
+ }
+}
+
+// mockAuth is a mock implementation of client.common.AuthMethod,
+// used to test the detection of non-SSH auth methods.
+type mockAuth struct{}
+
+func (*mockAuth) Name() string { return "" }
+func (*mockAuth) String() string { return "" }
+
+func (s *RemoteSuite) TestSetAuthWrongType(c *C) {
+ r := NewGitUploadPackService(s.Endpoint)
+ c.Assert(r.SetAuth(&mockAuth{}), Equals, ErrInvalidAuthMethod)
+}
+
+func (s *RemoteSuite) TestAlreadyConnected(c *C) {
+ r := NewGitUploadPackService(s.Endpoint)
+ c.Assert(r.Connect(), IsNil)
+ defer func() {
+ c.Assert(r.Disconnect(), IsNil)
+ }()
+
+ c.Assert(r.Connect(), Equals, ErrAlreadyConnected)
+}
+
+func (s *RemoteSuite) TestDisconnect(c *C) {
+ r := NewGitUploadPackService(s.Endpoint)
+ c.Assert(r.Connect(), IsNil)
+ c.Assert(r.Disconnect(), IsNil)
+}
+
+func (s *RemoteSuite) TestDisconnectedWhenNonConnected(c *C) {
+ r := NewGitUploadPackService(s.Endpoint)
+ c.Assert(r.Disconnect(), Equals, ErrNotConnected)
+}
+
+func (s *RemoteSuite) TestAlreadyDisconnected(c *C) {
+ r := NewGitUploadPackService(s.Endpoint)
+ c.Assert(r.Connect(), IsNil)
+ c.Assert(r.Disconnect(), IsNil)
+ c.Assert(r.Disconnect(), Equals, ErrNotConnected)
+}
+
+func (s *RemoteSuite) TestSeveralConnections(c *C) {
+ r := NewGitUploadPackService(s.Endpoint)
+ c.Assert(r.Connect(), IsNil)
+ c.Assert(r.Disconnect(), IsNil)
+
+ c.Assert(r.Connect(), IsNil)
+ c.Assert(r.Disconnect(), IsNil)
+
+ c.Assert(r.Connect(), IsNil)
+ c.Assert(r.Disconnect(), IsNil)
+}
+
+func (s *RemoteSuite) TestInfoNotConnected(c *C) {
+ r := NewGitUploadPackService(s.Endpoint)
+ _, err := r.Info()
+ c.Assert(err, Equals, ErrNotConnected)
+}
+
+func (s *RemoteSuite) TestDefaultBranch(c *C) {
+ r := NewGitUploadPackService(s.Endpoint)
+ c.Assert(r.Connect(), IsNil)
+ defer func() { c.Assert(r.Disconnect(), IsNil) }()
+
+ info, err := r.Info()
+ c.Assert(err, IsNil)
+ c.Assert(info.Capabilities.SymbolicReference("HEAD"), Equals, "refs/heads/master")
+}
+
+func (s *RemoteSuite) TestCapabilities(c *C) {
+ r := NewGitUploadPackService(s.Endpoint)
+ c.Assert(r.Connect(), IsNil)
+ defer func() { c.Assert(r.Disconnect(), IsNil) }()
+
+ info, err := r.Info()
+ c.Assert(err, IsNil)
+ c.Assert(info.Capabilities.Get("agent").Values, HasLen, 1)
+}
+
+func (s *RemoteSuite) TestFetchNotConnected(c *C) {
+ r := NewGitUploadPackService(s.Endpoint)
+ pr := &common.GitUploadPackRequest{}
+ pr.Want(plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"))
+ _, err := r.Fetch(pr)
+ c.Assert(err, Equals, ErrNotConnected)
+}
+
+func (s *RemoteSuite) TestFetch(c *C) {
+ r := NewGitUploadPackService(s.Endpoint)
+ c.Assert(r.Connect(), IsNil)
+ defer func() { c.Assert(r.Disconnect(), IsNil) }()
+
+ req := &common.GitUploadPackRequest{}
+ req.Want(plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"))
+ req.Want(plumbing.NewHash("e8d3ffab552895c19b9fcf7aa264d277cde33881"))
+ reader, err := r.Fetch(req)
+ c.Assert(err, IsNil)
+ defer func() { c.Assert(reader.Close(), IsNil) }()
+
+ b, err := ioutil.ReadAll(reader)
+ c.Assert(err, IsNil)
+ c.Check(len(b), Equals, 85585)
+}
+
+func (s *RemoteSuite) TestFetchError(c *C) {
+ r := NewGitUploadPackService(s.Endpoint)
+ c.Assert(r.Connect(), IsNil)
+ defer func() { c.Assert(r.Disconnect(), IsNil) }()
+
+ req := &common.GitUploadPackRequest{}
+ req.Want(plumbing.NewHash("1111111111111111111111111111111111111111"))
+
+ reader, err := r.Fetch(req)
+ c.Assert(err, IsNil)
+
+ err = reader.Close()
+ c.Assert(err, Not(IsNil))
+}
diff --git a/plumbing/errors.go b/plumbing/errors.go
new file mode 100644
index 0000000..a3ebed3
--- /dev/null
+++ b/plumbing/errors.go
@@ -0,0 +1,35 @@
+package plumbing
+
+import "fmt"
+
+type PermanentError struct {
+ Err error
+}
+
+func NewPermanentError(err error) *PermanentError {
+ if err == nil {
+ return nil
+ }
+
+ return &PermanentError{Err: err}
+}
+
+func (e *PermanentError) Error() string {
+ return fmt.Sprintf("permanent client error: %s", e.Err.Error())
+}
+
+type UnexpectedError struct {
+ Err error
+}
+
+func NewUnexpectedError(err error) *UnexpectedError {
+ if err == nil {
+ return nil
+ }
+
+ return &UnexpectedError{Err: err}
+}
+
+func (e *UnexpectedError) Error() string {
+ return fmt.Sprintf("unexpected client error: %s", e.Err.Error())
+}
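A minimal sketch of how callers can branch on this error taxonomy, for instance to decide whether a retry is worthwhile:

    switch err.(type) {
    case *plumbing.PermanentError:
        // the request itself is invalid; retrying cannot help
    case *plumbing.UnexpectedError:
        // transient or unknown failure; a retry may succeed
    }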
diff --git a/plumbing/format/config/common.go b/plumbing/format/config/common.go
new file mode 100644
index 0000000..d2f1e5c
--- /dev/null
+++ b/plumbing/format/config/common.go
@@ -0,0 +1,97 @@
+package config
+
+// New creates a new config instance.
+func New() *Config {
+ return &Config{}
+}
+
+type Config struct {
+ Comment *Comment
+ Sections Sections
+ Includes Includes
+}
+
+type Includes []*Include
+
+// Include is a reference to an included configuration.
+type Include struct {
+ Path string
+ Config *Config
+}
+
+type Comment string
+
+const (
+ NoSubsection = ""
+)
+
+func (c *Config) Section(name string) *Section {
+ for i := len(c.Sections) - 1; i >= 0; i-- {
+ s := c.Sections[i]
+ if s.IsName(name) {
+ return s
+ }
+ }
+
+ s := &Section{Name: name}
+ c.Sections = append(c.Sections, s)
+ return s
+}
+
+// AddOption is a convenience method to add an option to a given
+// section and subsection.
+//
+// Use the NoSubsection constant for the subsection argument
+// if no subsection is wanted.
+func (c *Config) AddOption(section string, subsection string, key string, value string) *Config {
+ if subsection == "" {
+ c.Section(section).AddOption(key, value)
+ } else {
+ c.Section(section).Subsection(subsection).AddOption(key, value)
+ }
+
+ return c
+}
+
+// SetOption is a convenience method to set an option to a given
+// section and subsection.
+//
+// Use the NoSubsection constant for the subsection argument
+// if no subsection is wanted.
+func (c *Config) SetOption(section string, subsection string, key string, value string) *Config {
+ if subsection == "" {
+ c.Section(section).SetOption(key, value)
+ } else {
+ c.Section(section).Subsection(subsection).SetOption(key, value)
+ }
+
+ return c
+}
+
+func (c *Config) RemoveSection(name string) *Config {
+ result := Sections{}
+ for _, s := range c.Sections {
+ if !s.IsName(name) {
+ result = append(result, s)
+ }
+ }
+
+ c.Sections = result
+ return c
+}
+
+func (c *Config) RemoveSubsection(section string, subsection string) *Config {
+ for _, s := range c.Sections {
+ if s.IsName(section) {
+ result := Subsections{}
+ for _, ss := range s.Subsections {
+ if !ss.IsName(subsection) {
+ result = append(result, ss)
+ }
+ }
+ s.Subsections = result
+ }
+ }
+
+ return c
+}
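The two helpers mirror git's multivalued variables: AddOption always appends, while SetOption replaces an existing value. A hedged sketch, assuming the Section and Subsection helpers defined in section.go behave as their names suggest:

    cfg := New()
    cfg.AddOption("remote", "origin", "fetch", "+refs/heads/*:refs/remotes/origin/*")
    cfg.AddOption("remote", "origin", "fetch", "+refs/tags/*:refs/tags/*") // two values now
    cfg.SetOption("core", NoSubsection, "bare", "false")
    cfg.SetOption("core", NoSubsection, "bare", "true") // still a single value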
diff --git a/plumbing/format/config/common_test.go b/plumbing/format/config/common_test.go
new file mode 100644
index 0000000..365b53f
--- /dev/null
+++ b/plumbing/format/config/common_test.go
@@ -0,0 +1,86 @@
+package config
+
+import (
+ "testing"
+
+ . "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type CommonSuite struct{}
+
+var _ = Suite(&CommonSuite{})
+
+func (s *CommonSuite) TestConfig_SetOption(c *C) {
+ obtained := New().SetOption("section", NoSubsection, "key1", "value1")
+ expected := &Config{
+ Sections: []*Section{
+ {
+ Name: "section",
+ Options: []*Option{
+ {Key: "key1", Value: "value1"},
+ },
+ },
+ },
+ }
+ c.Assert(obtained, DeepEquals, expected)
+ obtained = obtained.SetOption("section", NoSubsection, "key1", "value1")
+ c.Assert(obtained, DeepEquals, expected)
+
+ obtained = New().SetOption("section", "subsection", "key1", "value1")
+ expected = &Config{
+ Sections: []*Section{
+ {
+ Name: "section",
+ Subsections: []*Subsection{
+ {
+ Name: "subsection",
+ Options: []*Option{
+ {Key: "key1", Value: "value1"},
+ },
+ },
+ },
+ },
+ },
+ }
+ c.Assert(obtained, DeepEquals, expected)
+ obtained = obtained.SetOption("section", "subsection", "key1", "value1")
+ c.Assert(obtained, DeepEquals, expected)
+}
+
+func (s *CommonSuite) TestConfig_AddOption(c *C) {
+ obtained := New().AddOption("section", NoSubsection, "key1", "value1")
+ expected := &Config{
+ Sections: []*Section{
+ {
+ Name: "section",
+ Options: []*Option{
+ {Key: "key1", Value: "value1"},
+ },
+ },
+ },
+ }
+ c.Assert(obtained, DeepEquals, expected)
+}
+
+func (s *CommonSuite) TestConfig_RemoveSection(c *C) {
+ sect := New().
+ AddOption("section1", NoSubsection, "key1", "value1").
+ AddOption("section2", NoSubsection, "key1", "value1")
+ expected := New().
+ AddOption("section1", NoSubsection, "key1", "value1")
+ c.Assert(sect.RemoveSection("other"), DeepEquals, sect)
+ c.Assert(sect.RemoveSection("section2"), DeepEquals, expected)
+}
+
+func (s *CommonSuite) TestConfig_RemoveSubsection(c *C) {
+ sect := New().
+ AddOption("section1", "sub1", "key1", "value1").
+ AddOption("section1", "sub2", "key1", "value1")
+ expected := New().
+ AddOption("section1", "sub1", "key1", "value1")
+ c.Assert(sect.RemoveSubsection("section1", "other"), DeepEquals, sect)
+ c.Assert(sect.RemoveSubsection("other", "other"), DeepEquals, sect)
+ c.Assert(sect.RemoveSubsection("section1", "sub2"), DeepEquals, expected)
+}
diff --git a/plumbing/format/config/decoder.go b/plumbing/format/config/decoder.go
new file mode 100644
index 0000000..0f02ce1
--- /dev/null
+++ b/plumbing/format/config/decoder.go
@@ -0,0 +1,37 @@
+package config
+
+import (
+ "io"
+
+ "github.com/src-d/gcfg"
+)
+
+// A Decoder reads and decodes config files from an input stream.
+type Decoder struct {
+ io.Reader
+}
+
+// NewDecoder returns a new decoder that reads from r.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{r}
+}
+
+// Decode reads the whole config from its input and stores it in the
+// value pointed to by config.
+func (d *Decoder) Decode(config *Config) error {
+ cb := func(s string, ss string, k string, v string, bv bool) error {
+ if ss == "" && k == "" {
+ config.Section(s)
+ return nil
+ }
+
+ if ss != "" && k == "" {
+ config.Section(s).Subsection(ss)
+ return nil
+ }
+
+ config.AddOption(s, ss, k, v)
+ return nil
+ }
+ return gcfg.ReadWithCallback(d, cb)
+}
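A minimal decoding sketch under the callback-based API above; gcfg syntax errors surface directly from Decode:

    raw := "[core]\n\tbare = false\n[remote \"origin\"]\n\turl = git@github.com:foo/bar.git\n"

    cfg := New()
    if err := NewDecoder(strings.NewReader(raw)).Decode(cfg); err != nil {
        // malformed input, e.g. an option before any section header
    }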
diff --git a/plumbing/format/config/decoder_test.go b/plumbing/format/config/decoder_test.go
new file mode 100644
index 0000000..412549f
--- /dev/null
+++ b/plumbing/format/config/decoder_test.go
@@ -0,0 +1,90 @@
+package config
+
+import (
+ "bytes"
+
+ . "gopkg.in/check.v1"
+)
+
+type DecoderSuite struct{}
+
+var _ = Suite(&DecoderSuite{})
+
+func (s *DecoderSuite) TestDecode(c *C) {
+ for idx, fixture := range fixtures {
+ r := bytes.NewReader([]byte(fixture.Raw))
+ d := NewDecoder(r)
+ cfg := &Config{}
+ err := d.Decode(cfg)
+ c.Assert(err, IsNil, Commentf("decoder error for fixture: %d", idx))
+ c.Assert(cfg, DeepEquals, fixture.Config, Commentf("bad result for fixture: %d", idx))
+ }
+}
+
+func (s *DecoderSuite) TestDecodeFailsWithIdentBeforeSection(c *C) {
+ t := `
+ key=value
+ [section]
+ key=value
+ `
+ decodeFails(c, t)
+}
+
+func (s *DecoderSuite) TestDecodeFailsWithEmptySectionName(c *C) {
+ t := `
+ []
+ key=value
+ `
+ decodeFails(c, t)
+}
+
+func (s *DecoderSuite) TestDecodeFailsWithEmptySubsectionName(c *C) {
+ t := `
+ [remote ""]
+ key=value
+ `
+ decodeFails(c, t)
+}
+
+func (s *DecoderSuite) TestDecodeFailsWithBadSubsectionName(c *C) {
+ t := `
+ [remote origin"]
+ key=value
+ `
+ decodeFails(c, t)
+ t = `
+ [remote "origin]
+ key=value
+ `
+ decodeFails(c, t)
+}
+
+func (s *DecoderSuite) TestDecodeFailsWithTrailingGarbage(c *C) {
+ t := `
+ [remote]garbage
+ key=value
+ `
+ decodeFails(c, t)
+ t = `
+ [remote "origin"]garbage
+ key=value
+ `
+ decodeFails(c, t)
+}
+
+func (s *DecoderSuite) TestDecodeFailsWithGarbage(c *C) {
+ decodeFails(c, "---")
+ decodeFails(c, "????")
+ decodeFails(c, "[sect\nkey=value")
+ decodeFails(c, "sect]\nkey=value")
+ decodeFails(c, `[section]key="value`)
+ decodeFails(c, `[section]key=value"`)
+}
+
+func decodeFails(c *C, text string) {
+ r := bytes.NewReader([]byte(text))
+ d := NewDecoder(r)
+ cfg := &Config{}
+ err := d.Decode(cfg)
+ c.Assert(err, NotNil)
+}
diff --git a/plumbing/format/config/doc.go b/plumbing/format/config/doc.go
new file mode 100644
index 0000000..dd77fbc
--- /dev/null
+++ b/plumbing/format/config/doc.go
@@ -0,0 +1,199 @@
+// Package config implements decoding/encoding of git config files.
+package config
+
+/*
+
+CONFIGURATION FILE
+------------------
+
+The Git configuration file contains a number of variables that affect
+the Git commands' behavior. The `.git/config` file in each repository
+is used to store the configuration for that repository, and
+`$HOME/.gitconfig` is used to store a per-user configuration as
+fallback values for the `.git/config` file. The file `/etc/gitconfig`
+can be used to store a system-wide default configuration.
+
+The configuration variables are used by both the Git plumbing
+and the porcelains. The variables are divided into sections, wherein
+the fully qualified variable name of the variable itself is the last
+dot-separated segment and the section name is everything before the last
+dot. The variable names are case-insensitive, allow only alphanumeric
+characters and `-`, and must start with an alphabetic character. Some
+variables may appear multiple times; we say then that the variable is
+multivalued.
+
+Syntax
+~~~~~~
+
+The syntax is fairly flexible and permissive; whitespace is mostly
+ignored. The '#' and ';' characters begin comments that extend to the
+end of the line, and blank lines are ignored.
+
+The file consists of sections and variables. A section begins with
+the name of the section in square brackets and continues until the next
+section begins. Section names are case-insensitive. Only alphanumeric
+characters, `-` and `.` are allowed in section names. Each variable
+must belong to some section, which means that there must be a section
+header before the first setting of a variable.
+
+Sections can be further divided into subsections. To begin a subsection
+put its name in double quotes, separated by space from the section name,
+in the section header, like in the example below:
+
+--------
+ [section "subsection"]
+
+--------
+
+Subsection names are case sensitive and can contain any characters except
+newline (doublequote `"` and backslash can be included by escaping them
+as `\"` and `\\`, respectively). Section headers cannot span multiple
+lines. Variables may belong directly to a section or to a given subsection.
+You can have `[section]` if you have `[section "subsection"]`, but you
+don't need to.
+
+There is also a deprecated `[section.subsection]` syntax. With this
+syntax, the subsection name is converted to lower-case and is also
+compared case sensitively. These subsection names follow the same
+restrictions as section names.
+
+All the other lines (and the remainder of the line after the section
+header) are recognized as setting variables, in the form
+'name = value' (or just 'name', which is a short-hand to say that
+the variable is the boolean "true").
+The variable names are case-insensitive, allow only alphanumeric characters
+and `-`, and must start with an alphabetic character.
+
+A line that defines a value can be continued to the next line by
+ending it with a `\`; the backslash and the end-of-line are
+stripped. Leading whitespaces after 'name =', the remainder of the
+line after the first comment character '#' or ';', and trailing
+whitespaces of the line are discarded unless they are enclosed in
+double quotes. Internal whitespaces within the value are retained
+verbatim.
+
+Inside double quotes, double quote `"` and backslash `\` characters
+must be escaped: use `\"` for `"` and `\\` for `\`.
+
+The following escape sequences (beside `\"` and `\\`) are recognized:
+`\n` for newline character (NL), `\t` for horizontal tabulation (HT, TAB)
+and `\b` for backspace (BS). Other char escape sequences (including octal
+escape sequences) are invalid.
+
+
+Includes
+~~~~~~~~
+
+You can include one config file from another by setting the special
+`include.path` variable to the name of the file to be included. The
+variable takes a pathname as its value, and is subject to tilde
+expansion.
+
+The included file is expanded immediately, as if its contents had been
+found at the location of the include directive. If the value of the
+`include.path` variable is a relative path, the path is considered to be
+relative to the configuration file in which the include directive was
+found. See below for examples.
+
+
+Example
+~~~~~~~
+
+ # Core variables
+ [core]
+ ; Don't trust file modes
+ filemode = false
+
+ # Our diff algorithm
+ [diff]
+ external = /usr/local/bin/diff-wrapper
+ renames = true
+
+ [branch "devel"]
+ remote = origin
+ merge = refs/heads/devel
+
+ # Proxy settings
+ [core]
+ gitProxy="ssh" for "kernel.org"
+ gitProxy=default-proxy ; for the rest
+
+ [include]
+ path = /path/to/foo.inc ; include by absolute path
+ path = foo ; expand "foo" relative to the current file
+ path = ~/foo ; expand "foo" in your `$HOME` directory
+
+
+Values
+~~~~~~
+
+Values of many variables are treated as a simple string, but there
+are variables that take values of specific types and there are rules
+as to how to spell them.
+
+boolean::
+
+ When a variable is said to take a boolean value, many
+ synonyms are accepted for 'true' and 'false'; these are all
+ case-insensitive.
+
+ true;; Boolean true can be spelled as `yes`, `on`, `true`,
+ or `1`. Also, a variable defined without `= <value>`
+ is taken as true.
+
+ false;; Boolean false can be spelled as `no`, `off`,
+ `false`, or `0`.
++
+When converting a value to the canonical form using the `--bool` type
+specifier, 'git config' will ensure that the output is "true" or
+"false" (spelled in lowercase).
+
+integer::
+ The value for many variables that specify various sizes can
+ be suffixed with `k`, `M`,... to mean "scale the number by
+ 1024", "by 1024x1024", etc.
+
+color::
+ The value for a variable that takes a color is a list of
+ colors (at most two, one for foreground and one for background)
+ and attributes (as many as you want), separated by spaces.
++
+The basic colors accepted are `normal`, `black`, `red`, `green`, `yellow`,
+`blue`, `magenta`, `cyan` and `white`. The first color given is the
+foreground; the second is the background.
++
+Colors may also be given as numbers between 0 and 255; these use ANSI
+256-color mode (but note that not all terminals may support this). If
+your terminal supports it, you may also specify 24-bit RGB values as
+hex, like `#ff0ab3`.
++
+The accepted attributes are `bold`, `dim`, `ul`, `blink`, `reverse`,
+`italic`, and `strike` (for crossed-out or "strikethrough" letters).
+The position of any attributes with respect to the colors
+(before, after, or in between), doesn't matter. Specific attributes may
+be turned off by prefixing them with `no` or `no-` (e.g., `noreverse`,
+`no-ul`, etc).
++
+For git's pre-defined color slots, the attributes are meant to be reset
+at the beginning of each item in the colored output. So setting
+`color.decorate.branch` to `black` will paint that branch name in a
+plain `black`, even if the previous thing on the same output line (e.g.
+opening parenthesis before the list of branch names in `log --decorate`
+output) is set to be painted with `bold` or some other attribute.
+However, custom log formats may do more complicated and layered
+coloring, and the negated forms may be useful there.
+
+pathname::
+ A variable that takes a pathname value can be given a
+ string that begins with "`~/`" or "`~user/`", and the usual
+ tilde expansion happens to such a string: `~/`
+ is expanded to the value of `$HOME`, and `~user/` to the
+ specified user's home directory.
+
+From:
+https://git-scm.com/docs/git-config
+https://raw.githubusercontent.com/git/git/659889482ac63411daea38b2c3d127842ea04e4d/Documentation/config.txt
+
+*/
diff --git a/plumbing/format/config/encoder.go b/plumbing/format/config/encoder.go
new file mode 100644
index 0000000..88bdf65
--- /dev/null
+++ b/plumbing/format/config/encoder.go
@@ -0,0 +1,75 @@
+package config
+
+import (
+ "fmt"
+ "io"
+)
+
+// An Encoder writes config files to an output stream.
+type Encoder struct {
+ w io.Writer
+}
+
+// NewEncoder returns a new encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{w}
+}
+
+// Encode writes the config in git config format to the stream of the encoder.
+func (e *Encoder) Encode(cfg *Config) error {
+ for _, s := range cfg.Sections {
+ if err := e.encodeSection(s); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (e *Encoder) encodeSection(s *Section) error {
+ if len(s.Options) > 0 {
+ if err := e.printf("[%s]\n", s.Name); err != nil {
+ return err
+ }
+
+ if err := e.encodeOptions(s.Options); err != nil {
+ return err
+ }
+ }
+
+ for _, ss := range s.Subsections {
+ if err := e.encodeSubsection(s.Name, ss); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (e *Encoder) encodeSubsection(sectionName string, s *Subsection) error {
+	// TODO: escape double quotes and backslashes in the subsection name
+ if err := e.printf("[%s \"%s\"]\n", sectionName, s.Name); err != nil {
+ return err
+ }
+
+ if err := e.encodeOptions(s.Options); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (e *Encoder) encodeOptions(opts Options) error {
+ for _, o := range opts {
+ if err := e.printf("\t%s = %s\n", o.Key, o.Value); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (e *Encoder) printf(msg string, args ...interface{}) error {
+ _, err := fmt.Fprintf(e.w, msg, args...)
+ return err
+}
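+
+// A minimal usage sketch (illustrative only, assuming the standard
+// bytes package): encoding a Config built with the fluent API.
+//
+//	cfg := New().AddOption("core", NoSubsection, "bare", "false")
+//	buf := &bytes.Buffer{}
+//	if err := NewEncoder(buf).Encode(cfg); err != nil {
+//		// handle the error
+//	}
+//	// buf.String() == "[core]\n\tbare = false\n"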
diff --git a/plumbing/format/config/encoder_test.go b/plumbing/format/config/encoder_test.go
new file mode 100644
index 0000000..5335b83
--- /dev/null
+++ b/plumbing/format/config/encoder_test.go
@@ -0,0 +1,21 @@
+package config
+
+import (
+ "bytes"
+
+ . "gopkg.in/check.v1"
+)
+
+type EncoderSuite struct{}
+
+var _ = Suite(&EncoderSuite{})
+
+func (s *EncoderSuite) TestEncode(c *C) {
+ for idx, fixture := range fixtures {
+ buf := &bytes.Buffer{}
+ e := NewEncoder(buf)
+ err := e.Encode(fixture.Config)
+ c.Assert(err, IsNil, Commentf("encoder error for fixture: %d", idx))
+ c.Assert(buf.String(), Equals, fixture.Text, Commentf("bad result for fixture: %d", idx))
+ }
+}
diff --git a/plumbing/format/config/fixtures_test.go b/plumbing/format/config/fixtures_test.go
new file mode 100644
index 0000000..12ff288
--- /dev/null
+++ b/plumbing/format/config/fixtures_test.go
@@ -0,0 +1,90 @@
+package config
+
+type Fixture struct {
+ Text string
+ Raw string
+ Config *Config
+}
+
+var fixtures = []*Fixture{
+ {
+ Raw: "",
+ Text: "",
+ Config: New(),
+ },
+ {
+ Raw: ";Comments only",
+ Text: "",
+ Config: New(),
+ },
+ {
+ Raw: "#Comments only",
+ Text: "",
+ Config: New(),
+ },
+ {
+ Raw: "[core]\nrepositoryformatversion=0",
+ Text: "[core]\n\trepositoryformatversion = 0\n",
+ Config: New().AddOption("core", "", "repositoryformatversion", "0"),
+ },
+ {
+ Raw: "[core]\n\trepositoryformatversion = 0\n",
+ Text: "[core]\n\trepositoryformatversion = 0\n",
+ Config: New().AddOption("core", "", "repositoryformatversion", "0"),
+ },
+ {
+		Raw:    ";Comment\n[core]\n;Comment\nrepositoryformatversion = 0\n",
+ Text: "[core]\n\trepositoryformatversion = 0\n",
+ Config: New().AddOption("core", "", "repositoryformatversion", "0"),
+ },
+ {
+		Raw:    "#Comment\n#Comment\n[core]\n#Comment\nrepositoryformatversion = 0\n",
+ Text: "[core]\n\trepositoryformatversion = 0\n",
+ Config: New().AddOption("core", "", "repositoryformatversion", "0"),
+ },
+ {
+ Raw: `
+ [sect1]
+ opt1 = value1
+ [sect1 "subsect1"]
+ opt2 = value2
+ `,
+ Text: `[sect1]
+ opt1 = value1
+[sect1 "subsect1"]
+ opt2 = value2
+`,
+ Config: New().
+ AddOption("sect1", "", "opt1", "value1").
+ AddOption("sect1", "subsect1", "opt2", "value2"),
+ },
+ {
+ Raw: `
+ [sect1]
+ opt1 = value1
+ [sect1 "subsect1"]
+ opt2 = value2
+ [sect1]
+ opt1 = value1b
+ [sect1 "subsect1"]
+ opt2 = value2b
+ [sect1 "subsect2"]
+ opt2 = value2
+ `,
+ Text: `[sect1]
+ opt1 = value1
+ opt1 = value1b
+[sect1 "subsect1"]
+ opt2 = value2
+ opt2 = value2b
+[sect1 "subsect2"]
+ opt2 = value2
+`,
+ Config: New().
+ AddOption("sect1", "", "opt1", "value1").
+ AddOption("sect1", "", "opt1", "value1b").
+ AddOption("sect1", "subsect1", "opt2", "value2").
+ AddOption("sect1", "subsect1", "opt2", "value2b").
+ AddOption("sect1", "subsect2", "opt2", "value2"),
+ },
+}
diff --git a/plumbing/format/config/option.go b/plumbing/format/config/option.go
new file mode 100644
index 0000000..dbb401c
--- /dev/null
+++ b/plumbing/format/config/option.go
@@ -0,0 +1,83 @@
+package config
+
+import (
+ "strings"
+)
+
+// An Option is a key/value pair as found in a git config file.
+type Option struct {
+	// Key preserves the original case of the key.
+	// Use IsKey to compare keys regardless of case.
+	Key string
+	// Value is the raw value as a string; it may not be normalized.
+	Value string
+}
+
+// Options is an ordered list of options; the same key may appear
+// multiple times.
+type Options []*Option
+
+// IsKey returns true if the given key matches
+// this option's key in a case-insensitive comparison.
+func (o *Option) IsKey(key string) bool {
+	return strings.EqualFold(o.Key, key)
+}
+
+// Get gets the value for the given key if set,
+// otherwise it returns the empty string.
+//
+// Note that there is no difference between an
+// unset key and a key set to the empty string.
+//
+// This matches git behaviour since git v1.8.1-rc1:
+// if there are multiple definitions of a key, the
+// last one wins.
+//
+// See: http://article.gmane.org/gmane.linux.kernel/1407184
+//
+// In order to get all possible values for the same key,
+// use GetAll.
+func (opts Options) Get(key string) string {
+ for i := len(opts) - 1; i >= 0; i-- {
+ o := opts[i]
+ if o.IsKey(key) {
+ return o.Value
+ }
+ }
+ return ""
+}
+
+// GetAll returns all possible values for the same key.
+func (opts Options) GetAll(key string) []string {
+ result := []string{}
+ for _, o := range opts {
+ if o.IsKey(key) {
+ result = append(result, o.Value)
+ }
+ }
+ return result
+}
+
+func (opts Options) withoutOption(key string) Options {
+ result := Options{}
+ for _, o := range opts {
+ if !o.IsKey(key) {
+ result = append(result, o)
+ }
+ }
+ return result
+}
+
+func (opts Options) withAddedOption(key string, value string) Options {
+ return append(opts, &Option{key, value})
+}
+
+func (opts Options) withSettedOption(key string, value string) Options {
+ for i := len(opts) - 1; i >= 0; i-- {
+ o := opts[i]
+ if o.IsKey(key) {
+ result := make(Options, len(opts))
+ copy(result, opts)
+ result[i] = &Option{key, value}
+ return result
+ }
+ }
+
+ return opts.withAddedOption(key, value)
+}
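+
+// A short sketch of the semantics documented above: with repeated keys,
+// Get returns the last value while GetAll preserves every one.
+//
+//	opts := Options{
+//		{Key: "url", Value: "first"},
+//		{Key: "url", Value: "second"},
+//	}
+//	opts.Get("url")    // "second" (the last definition wins)
+//	opts.GetAll("url") // []string{"first", "second"}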
diff --git a/plumbing/format/config/option_test.go b/plumbing/format/config/option_test.go
new file mode 100644
index 0000000..8588de1
--- /dev/null
+++ b/plumbing/format/config/option_test.go
@@ -0,0 +1,33 @@
+package config
+
+import (
+ . "gopkg.in/check.v1"
+)
+
+type OptionSuite struct{}
+
+var _ = Suite(&OptionSuite{})
+
+func (s *OptionSuite) TestOptions_GetAll(c *C) {
+ o := Options{
+ &Option{"k", "v"},
+ &Option{"ok", "v1"},
+ &Option{"K", "v2"},
+ }
+ c.Assert(o.GetAll("k"), DeepEquals, []string{"v", "v2"})
+ c.Assert(o.GetAll("K"), DeepEquals, []string{"v", "v2"})
+ c.Assert(o.GetAll("ok"), DeepEquals, []string{"v1"})
+	c.Assert(o.GetAll("nonexistent"), DeepEquals, []string{})
+
+ o = Options{}
+ c.Assert(o.GetAll("k"), DeepEquals, []string{})
+}
+
+func (s *OptionSuite) TestOption_IsKey(c *C) {
+ c.Assert((&Option{Key: "key"}).IsKey("key"), Equals, true)
+ c.Assert((&Option{Key: "key"}).IsKey("KEY"), Equals, true)
+ c.Assert((&Option{Key: "KEY"}).IsKey("key"), Equals, true)
+ c.Assert((&Option{Key: "key"}).IsKey("other"), Equals, false)
+ c.Assert((&Option{Key: "key"}).IsKey(""), Equals, false)
+ c.Assert((&Option{Key: ""}).IsKey("key"), Equals, false)
+}
diff --git a/plumbing/format/config/section.go b/plumbing/format/config/section.go
new file mode 100644
index 0000000..1844913
--- /dev/null
+++ b/plumbing/format/config/section.go
@@ -0,0 +1,87 @@
+package config
+
+import "strings"
+
+// A Section is a named collection of options and subsections.
+type Section struct {
+	Name        string
+	Options     Options
+	Subsections Subsections
+}
+
+// A Subsection is a named collection of options inside a section.
+type Subsection struct {
+	Name    string
+	Options Options
+}
+
+type Sections []*Section
+
+type Subsections []*Subsection
+
+// IsName reports whether the given name matches the section name.
+// Section names are case-insensitive.
+func (s *Section) IsName(name string) bool {
+	return strings.EqualFold(s.Name, name)
+}
+
+// Option returns the value for the given key, or the empty string if unset.
+func (s *Section) Option(key string) string {
+	return s.Options.Get(key)
+}
+
+// AddOption appends a new option to the section and returns the section.
+func (s *Section) AddOption(key string, value string) *Section {
+	s.Options = s.Options.withAddedOption(key, value)
+	return s
+}
+
+// SetOption replaces the last option with the given key, or appends a new
+// one if the key is not present, and returns the section.
+func (s *Section) SetOption(key string, value string) *Section {
+	s.Options = s.Options.withSettedOption(key, value)
+	return s
+}
+
+// RemoveOption removes all options with the given key and returns the section.
+func (s *Section) RemoveOption(key string) *Section {
+	s.Options = s.Options.withoutOption(key)
+	return s
+}
+
+// Subsection returns the subsection with the given name, creating it and
+// appending it to the section first if it does not exist yet.
+func (s *Section) Subsection(name string) *Subsection {
+	for i := len(s.Subsections) - 1; i >= 0; i-- {
+		ss := s.Subsections[i]
+		if ss.IsName(name) {
+			return ss
+		}
+	}
+
+	ss := &Subsection{Name: name}
+	s.Subsections = append(s.Subsections, ss)
+	return ss
+}
+
+// HasSubsection reports whether a subsection with the given name exists.
+func (s *Section) HasSubsection(name string) bool {
+	for _, ss := range s.Subsections {
+		if ss.IsName(name) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// IsName reports whether the given name matches the subsection name.
+// Unlike section names, subsection names are case-sensitive.
+func (s *Subsection) IsName(name string) bool {
+	return s.Name == name
+}
+
+// Option returns the value for the given key, or the empty string if unset.
+func (s *Subsection) Option(key string) string {
+	return s.Options.Get(key)
+}
+
+// AddOption appends a new option to the subsection and returns the subsection.
+func (s *Subsection) AddOption(key string, value string) *Subsection {
+	s.Options = s.Options.withAddedOption(key, value)
+	return s
+}
+
+// SetOption replaces the last option with the given key, or appends a new
+// one if the key is not present, and returns the subsection.
+func (s *Subsection) SetOption(key string, value string) *Subsection {
+	s.Options = s.Options.withSettedOption(key, value)
+	return s
+}
+
+// RemoveOption removes all options with the given key and returns the subsection.
+func (s *Subsection) RemoveOption(key string) *Subsection {
+	s.Options = s.Options.withoutOption(key)
+	return s
+}
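+
+// A short sketch of the get-or-create behaviour of Subsection: the first
+// call creates and appends the subsection, later calls with the same name
+// return the existing instance.
+//
+//	s := &Section{Name: "remote"}
+//	ss := s.Subsection("origin") // created and appended
+//	ss.SetOption("url", "https://example.com/repo.git")
+//	_ = s.Subsection("origin") == ss // true, the same instance is returned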
diff --git a/plumbing/format/config/section_test.go b/plumbing/format/config/section_test.go
new file mode 100644
index 0000000..cfd9f3f
--- /dev/null
+++ b/plumbing/format/config/section_test.go
@@ -0,0 +1,71 @@
+package config
+
+import (
+ . "gopkg.in/check.v1"
+)
+
+type SectionSuite struct{}
+
+var _ = Suite(&SectionSuite{})
+
+func (s *SectionSuite) TestSection_Option(c *C) {
+ sect := &Section{
+ Options: []*Option{
+ {Key: "key1", Value: "value1"},
+ {Key: "key2", Value: "value2"},
+ {Key: "key1", Value: "value3"},
+ },
+ }
+ c.Assert(sect.Option("otherkey"), Equals, "")
+ c.Assert(sect.Option("key2"), Equals, "value2")
+ c.Assert(sect.Option("key1"), Equals, "value3")
+}
+
+func (s *SectionSuite) TestSubsection_Option(c *C) {
+ sect := &Subsection{
+ Options: []*Option{
+ {Key: "key1", Value: "value1"},
+ {Key: "key2", Value: "value2"},
+ {Key: "key1", Value: "value3"},
+ },
+ }
+ c.Assert(sect.Option("otherkey"), Equals, "")
+ c.Assert(sect.Option("key2"), Equals, "value2")
+ c.Assert(sect.Option("key1"), Equals, "value3")
+}
+
+func (s *SectionSuite) TestSection_RemoveOption(c *C) {
+ sect := &Section{
+ Options: []*Option{
+ {Key: "key1", Value: "value1"},
+ {Key: "key2", Value: "value2"},
+ {Key: "key1", Value: "value3"},
+ },
+ }
+ c.Assert(sect.RemoveOption("otherkey"), DeepEquals, sect)
+
+ expected := &Section{
+ Options: []*Option{
+ {Key: "key2", Value: "value2"},
+ },
+ }
+ c.Assert(sect.RemoveOption("key1"), DeepEquals, expected)
+}
+
+func (s *SectionSuite) TestSubsection_RemoveOption(c *C) {
+ sect := &Subsection{
+ Options: []*Option{
+ {Key: "key1", Value: "value1"},
+ {Key: "key2", Value: "value2"},
+ {Key: "key1", Value: "value3"},
+ },
+ }
+ c.Assert(sect.RemoveOption("otherkey"), DeepEquals, sect)
+
+ expected := &Subsection{
+ Options: []*Option{
+ {Key: "key2", Value: "value2"},
+ },
+ }
+ c.Assert(sect.RemoveOption("key1"), DeepEquals, expected)
+}
diff --git a/plumbing/format/idxfile/decoder.go b/plumbing/format/idxfile/decoder.go
new file mode 100644
index 0000000..e3ffc4b
--- /dev/null
+++ b/plumbing/format/idxfile/decoder.go
@@ -0,0 +1,148 @@
+package idxfile
+
+import (
+ "bytes"
+ "errors"
+ "io"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/utils/binary"
+)
+
+var (
+	// ErrUnsupportedVersion is returned by Decode when the idx file version
+	// is not supported.
+	ErrUnsupportedVersion = errors.New("unsupported version")
+	// ErrMalformedIdxFile is returned by Decode when the idx file is corrupted.
+	ErrMalformedIdxFile = errors.New("malformed idx file")
+)
+
+// A Decoder reads and decodes idx files from an input stream.
+type Decoder struct {
+ io.Reader
+}
+
+// NewDecoder returns a new decoder that reads from r.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{r}
+}
+
+// Decode reads the whole idx object from its input and stores it in the
+// value pointed to by idx.
+func (d *Decoder) Decode(idx *Idxfile) error {
+ if err := validateHeader(d); err != nil {
+ return err
+ }
+
+ flow := []func(*Idxfile, io.Reader) error{
+ readVersion,
+ readFanout,
+ readObjectNames,
+ readCRC32,
+ readOffsets,
+ readChecksums,
+ }
+
+ for _, f := range flow {
+ if err := f(idx, d); err != nil {
+ return err
+ }
+ }
+
+ if !idx.isValid() {
+ return ErrMalformedIdxFile
+ }
+
+ return nil
+}
+
+func validateHeader(r io.Reader) error {
+ var h = make([]byte, 4)
+	if _, err := io.ReadFull(r, h); err != nil {
+ return err
+ }
+
+ if !bytes.Equal(h, idxHeader) {
+ return ErrMalformedIdxFile
+ }
+
+ return nil
+}
+
+func readVersion(idx *Idxfile, r io.Reader) error {
+ v, err := binary.ReadUint32(r)
+ if err != nil {
+ return err
+ }
+
+ if v > VersionSupported {
+ return ErrUnsupportedVersion
+ }
+
+ idx.Version = v
+ return nil
+}
+
+func readFanout(idx *Idxfile, r io.Reader) error {
+ var err error
+ for i := 0; i < 255; i++ {
+ idx.Fanout[i], err = binary.ReadUint32(r)
+ if err != nil {
+ return err
+ }
+ }
+
+ idx.ObjectCount, err = binary.ReadUint32(r)
+ return err
+}
+
+func readObjectNames(idx *Idxfile, r io.Reader) error {
+ c := int(idx.ObjectCount)
+ for i := 0; i < c; i++ {
+ var ref plumbing.Hash
+		if _, err := io.ReadFull(r, ref[:]); err != nil {
+ return err
+ }
+
+ idx.Entries = append(idx.Entries, Entry{Hash: ref})
+ }
+
+ return nil
+}
+
+func readCRC32(idx *Idxfile, r io.Reader) error {
+ c := int(idx.ObjectCount)
+ for i := 0; i < c; i++ {
+ if err := binary.Read(r, &idx.Entries[i].CRC32); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func readOffsets(idx *Idxfile, r io.Reader) error {
+ c := int(idx.ObjectCount)
+ for i := 0; i < c; i++ {
+ o, err := binary.ReadUint32(r)
+ if err != nil {
+ return err
+ }
+
+ idx.Entries[i].Offset = uint64(o)
+ }
+
+ return nil
+}
+
+func readChecksums(idx *Idxfile, r io.Reader) error {
+	if _, err := io.ReadFull(r, idx.PackfileChecksum[:]); err != nil {
+		return err
+	}
+
+	if _, err := io.ReadFull(r, idx.IdxChecksum[:]); err != nil {
+		return err
+	}
+
+	return nil
+}
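+
+// A minimal usage sketch (illustrative only; the pack file name is a
+// placeholder and error handling is elided):
+//
+//	f, _ := os.Open(".git/objects/pack/pack-<hash>.idx")
+//	defer f.Close()
+//
+//	idx := &Idxfile{}
+//	if err := NewDecoder(f).Decode(idx); err != nil {
+//		// handle the error
+//	}
+//	// idx.Entries now holds one hash/offset/CRC32 entry per packed object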
diff --git a/plumbing/format/idxfile/decoder_test.go b/plumbing/format/idxfile/decoder_test.go
new file mode 100644
index 0000000..5231e64
--- /dev/null
+++ b/plumbing/format/idxfile/decoder_test.go
@@ -0,0 +1,69 @@
+package idxfile
+
+import (
+ "bytes"
+ "fmt"
+ "testing"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/src-d/go-git.v4/fixtures"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
+ "gopkg.in/src-d/go-git.v4/storage/memory"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type IdxfileSuite struct {
+ fixtures.Suite
+}
+
+var _ = Suite(&IdxfileSuite{})
+
+func (s *IdxfileSuite) TestDecode(c *C) {
+ f := fixtures.Basic().One()
+
+ d := NewDecoder(f.Idx())
+ idx := &Idxfile{}
+ err := d.Decode(idx)
+ c.Assert(err, IsNil)
+
+ c.Assert(idx.Entries, HasLen, 31)
+ c.Assert(idx.Entries[0].Hash.String(), Equals, "1669dce138d9b841a518c64b10914d88f5e488ea")
+ c.Assert(idx.Entries[0].Offset, Equals, uint64(615))
+ c.Assert(idx.Entries[0].CRC32, Equals, uint32(3645019190))
+
+ c.Assert(fmt.Sprintf("%x", idx.IdxChecksum), Equals, "fb794f1ec720b9bc8e43257451bd99c4be6fa1c9")
+ c.Assert(fmt.Sprintf("%x", idx.PackfileChecksum), Equals, f.PackfileHash.String())
+}
+
+func (s *IdxfileSuite) TestDecodeCRCs(c *C) {
+ f := fixtures.Basic().ByTag("ofs-delta").One()
+
+ scanner := packfile.NewScanner(f.Packfile())
+ storage := memory.NewStorage()
+
+ pd, err := packfile.NewDecoder(scanner, storage)
+ c.Assert(err, IsNil)
+ _, err = pd.Decode()
+ c.Assert(err, IsNil)
+
+ i := &Idxfile{Version: VersionSupported}
+
+ offsets := pd.Offsets()
+ for h, crc := range pd.CRCs() {
+ i.Add(h, uint64(offsets[h]), crc)
+ }
+
+ buf := bytes.NewBuffer(nil)
+ e := NewEncoder(buf)
+ _, err = e.Encode(i)
+ c.Assert(err, IsNil)
+
+ idx := &Idxfile{}
+
+ d := NewDecoder(buf)
+ err = d.Decode(idx)
+ c.Assert(err, IsNil)
+
+ c.Assert(idx.Entries, DeepEquals, i.Entries)
+}
diff --git a/plumbing/format/idxfile/doc.go b/plumbing/format/idxfile/doc.go
new file mode 100644
index 0000000..8a76853
--- /dev/null
+++ b/plumbing/format/idxfile/doc.go
@@ -0,0 +1,132 @@
+// Package idxfile implements an encoder and a decoder of idx files.
+package idxfile
+
+/*
+== Original (version 1) pack-*.idx files have the following format:
+
+ - The header consists of 256 4-byte network byte order
+ integers. N-th entry of this table records the number of
+ objects in the corresponding pack, the first byte of whose
+ object name is less than or equal to N. This is called the
+ 'first-level fan-out' table.
+
+ - The header is followed by sorted 24-byte entries, one entry
+ per object in the pack. Each entry is:
+
+ 4-byte network byte order integer, recording where the
+ object is stored in the packfile as the offset from the
+ beginning.
+
+ 20-byte object name.
+
+ - The file is concluded with a trailer:
+
+ A copy of the 20-byte SHA1 checksum at the end of
+ corresponding packfile.
+
+ 20-byte SHA1-checksum of all of the above.
+
+Pack Idx file:
+
+ -- +--------------------------------+
+fanout | fanout[0] = 2 (for example) |-.
+table +--------------------------------+ |
+ | fanout[1] | |
+ +--------------------------------+ |
+ | fanout[2] | |
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
+ | fanout[255] = total objects |---.
+ -- +--------------------------------+ | |
+main | offset | | |
+index | object name 00XXXXXXXXXXXXXXXX | | |
+table +--------------------------------+ | |
+ | offset | | |
+ | object name 00XXXXXXXXXXXXXXXX | | |
+ +--------------------------------+<+ |
+ .-| offset | |
+ | | object name 01XXXXXXXXXXXXXXXX | |
+ | +--------------------------------+ |
+ | | offset | |
+ | | object name 01XXXXXXXXXXXXXXXX | |
+ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
+ | | offset | |
+ | | object name FFXXXXXXXXXXXXXXXX | |
+ --| +--------------------------------+<--+
+trailer | | packfile checksum |
+ | +--------------------------------+
+ | | idxfile checksum |
+ | +--------------------------------+
+ .-------.
+ |
+Pack file entry: <+
+
+ packed object header:
+ 1-byte size extension bit (MSB)
+ type (next 3 bit)
+ size0 (lower 4-bit)
+ n-byte sizeN (as long as MSB is set, each 7-bit)
+ size0..sizeN form 4+7+7+..+7 bit integer, size0
+ is the least significant part, and sizeN is the
+ most significant part.
+ packed object data:
+ If it is not DELTA, then deflated bytes (the size above
+ is the size before compression).
+ If it is REF_DELTA, then
+ 20-byte base object name SHA1 (the size above is the
+ size of the delta data that follows).
+ delta data, deflated.
+ If it is OFS_DELTA, then
+ n-byte offset (see below) interpreted as a negative
+ offset from the type-byte of the header of the
+ ofs-delta entry (the size above is the size of
+ the delta data that follows).
+ delta data, deflated.
+
+ offset encoding:
+ n bytes with MSB set in all but the last one.
+ The offset is then the number constructed by
+ concatenating the lower 7 bit of each byte, and
+ for n >= 2 adding 2^7 + 2^14 + ... + 2^(7*(n-1))
+ to the result.
+
+
+
+== Version 2 pack-*.idx files support packs larger than 4 GiB, and
+ have some other reorganizations. They have the format:
+
+ - A 4-byte magic number '\377tOc' which is an unreasonable
+ fanout[0] value.
+
+ - A 4-byte version number (= 2)
+
+ - A 256-entry fan-out table just like v1.
+
+ - A table of sorted 20-byte SHA1 object names. These are
+ packed together without offset values to reduce the cache
+ footprint of the binary search for a specific object name.
+
+ - A table of 4-byte CRC32 values of the packed object data.
+ This is new in v2 so compressed data can be copied directly
+ from pack to pack during repacking without undetected
+ data corruption.
+
+ - A table of 4-byte offset values (in network byte order).
+ These are usually 31-bit pack file offsets, but large
+ offsets are encoded as an index into the next table with
+ the msbit set.
+
+ - A table of 8-byte offset entries (empty for pack files less
+ than 2 GiB). Pack files are organized with heavily used
+ objects toward the front, so most object references should
+ not need to refer to this table.
+
+ - The same trailer as a v1 pack file:
+
+ A copy of the 20-byte SHA1 checksum at the end of
+ corresponding packfile.
+
+ 20-byte SHA1-checksum of all of the above.
+
+From:
+https://www.kernel.org/pub/software/scm/git/docs/v1.7.5/technical/pack-protocol.txt
+*/
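+
+// The offset encoding described above can be decoded with a small loop.
+// This sketch is illustrative only and is not used by the package; note
+// the increment that accounts for the 2^7 + 2^14 + ... correction:
+//
+//	func decodeOffset(bs []byte) int64 {
+//		var offset int64
+//		for i, b := range bs {
+//			if i > 0 {
+//				offset++ // becomes 2^(7*i) once shifted below
+//			}
+//			offset = offset<<7 | int64(b&0x7f)
+//		}
+//		return offset
+//	}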
diff --git a/plumbing/format/idxfile/encoder.go b/plumbing/format/idxfile/encoder.go
new file mode 100644
index 0000000..164414a
--- /dev/null
+++ b/plumbing/format/idxfile/encoder.go
@@ -0,0 +1,131 @@
+package idxfile
+
+import (
+ "crypto/sha1"
+ "hash"
+ "io"
+ "sort"
+
+ "gopkg.in/src-d/go-git.v4/utils/binary"
+)
+
+// An Encoder writes idx files to an output stream.
+type Encoder struct {
+ io.Writer
+ hash hash.Hash
+}
+
+// NewEncoder returns a new encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+ h := sha1.New()
+ mw := io.MultiWriter(w, h)
+ return &Encoder{mw, h}
+}
+
+// Encode writes the idx in an idx file format to the stream of the encoder.
+func (e *Encoder) Encode(idx *Idxfile) (int, error) {
+ idx.Entries.Sort()
+
+ flow := []func(*Idxfile) (int, error){
+ e.encodeHeader,
+ e.encodeFanout,
+ e.encodeHashes,
+ e.encodeCRC32,
+ e.encodeOffsets,
+ e.encodeChecksums,
+ }
+
+ sz := 0
+ for _, f := range flow {
+ i, err := f(idx)
+ sz += i
+
+ if err != nil {
+ return sz, err
+ }
+ }
+
+ return sz, nil
+}
+
+func (e *Encoder) encodeHeader(idx *Idxfile) (int, error) {
+ c, err := e.Write(idxHeader)
+ if err != nil {
+ return c, err
+ }
+
+ return c + 4, binary.WriteUint32(e, idx.Version)
+}
+
+func (e *Encoder) encodeFanout(idx *Idxfile) (int, error) {
+ fanout := idx.calculateFanout()
+ for _, c := range fanout {
+ if err := binary.WriteUint32(e, c); err != nil {
+ return 0, err
+ }
+ }
+
+ return 1024, nil
+}
+
+func (e *Encoder) encodeHashes(idx *Idxfile) (int, error) {
+ sz := 0
+ for _, ent := range idx.Entries {
+ i, err := e.Write(ent.Hash[:])
+ sz += i
+
+ if err != nil {
+ return sz, err
+ }
+ }
+
+ return sz, nil
+}
+
+func (e *Encoder) encodeCRC32(idx *Idxfile) (int, error) {
+ sz := 0
+ for _, ent := range idx.Entries {
+ err := binary.Write(e, ent.CRC32)
+ sz += 4
+
+ if err != nil {
+ return sz, err
+ }
+ }
+
+ return sz, nil
+}
+
+func (e *Encoder) encodeOffsets(idx *Idxfile) (int, error) {
+ sz := 0
+ for _, ent := range idx.Entries {
+ if err := binary.WriteUint32(e, uint32(ent.Offset)); err != nil {
+ return sz, err
+ }
+
+		sz += 4
+	}
+
+ return sz, nil
+}
+
+func (e *Encoder) encodeChecksums(idx *Idxfile) (int, error) {
+ if _, err := e.Write(idx.PackfileChecksum[:]); err != nil {
+ return 0, err
+ }
+
+ copy(idx.IdxChecksum[:], e.hash.Sum(nil)[:20])
+ if _, err := e.Write(idx.IdxChecksum[:]); err != nil {
+ return 0, err
+ }
+
+ return 40, nil
+}
+
+// An EntryList is a sortable list of idx entries, ordered by hash.
+type EntryList []Entry
+
+func (p EntryList) Len() int { return len(p) }
+func (p EntryList) Less(i, j int) bool { return p[i].Hash.String() < p[j].Hash.String() }
+func (p EntryList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+func (p EntryList) Sort() { sort.Sort(p) }
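+
+// A minimal usage sketch (illustrative only; hash, offset and crc are
+// placeholders for real values): building an Idxfile in memory and
+// writing it in idx v2 format.
+//
+//	idx := &Idxfile{Version: VersionSupported}
+//	idx.Add(hash, offset, crc) // one call per packed object
+//
+//	var buf bytes.Buffer
+//	if _, err := NewEncoder(&buf).Encode(idx); err != nil {
+//		// handle the error
+//	}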
diff --git a/plumbing/format/idxfile/encoder_test.go b/plumbing/format/idxfile/encoder_test.go
new file mode 100644
index 0000000..9a53863
--- /dev/null
+++ b/plumbing/format/idxfile/encoder_test.go
@@ -0,0 +1,48 @@
+package idxfile
+
+import (
+ "bytes"
+ "io/ioutil"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/src-d/go-git.v4/fixtures"
+ "gopkg.in/src-d/go-git.v4/plumbing"
+)
+
+func (s *IdxfileSuite) TestEncode(c *C) {
+ expected := &Idxfile{}
+ expected.Add(plumbing.NewHash("4bfc730165c370df4a012afbb45ba3f9c332c0d4"), 82, 82)
+ expected.Add(plumbing.NewHash("8fa2238efdae08d83c12ee176fae65ff7c99af46"), 42, 42)
+
+ buf := bytes.NewBuffer(nil)
+ e := NewEncoder(buf)
+ _, err := e.Encode(expected)
+ c.Assert(err, IsNil)
+
+ idx := &Idxfile{}
+ d := NewDecoder(buf)
+ err = d.Decode(idx)
+ c.Assert(err, IsNil)
+
+ c.Assert(idx.Entries, DeepEquals, expected.Entries)
+}
+
+func (s *IdxfileSuite) TestDecodeEncode(c *C) {
+ fixtures.ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
+ expected, err := ioutil.ReadAll(f.Idx())
+ c.Assert(err, IsNil)
+
+ idx := &Idxfile{}
+ d := NewDecoder(bytes.NewBuffer(expected))
+ err = d.Decode(idx)
+ c.Assert(err, IsNil)
+
+ result := bytes.NewBuffer(nil)
+ e := NewEncoder(result)
+ size, err := e.Encode(idx)
+ c.Assert(err, IsNil)
+
+ c.Assert(size, Equals, len(expected))
+ c.Assert(result.Bytes(), DeepEquals, expected)
+ })
+}
diff --git a/plumbing/format/idxfile/idxfile.go b/plumbing/format/idxfile/idxfile.go
new file mode 100644
index 0000000..8329c23
--- /dev/null
+++ b/plumbing/format/idxfile/idxfile.go
@@ -0,0 +1,62 @@
+package idxfile
+
+import "gopkg.in/src-d/go-git.v4/plumbing"
+
+const (
+ // VersionSupported is the only idx version supported.
+ VersionSupported = 2
+)
+
+var (
+ idxHeader = []byte{255, 't', 'O', 'c'}
+)
+
+// An Idxfile represents an idx file in memory.
+type Idxfile struct {
+ Version uint32
+ Fanout [255]uint32
+ ObjectCount uint32
+ Entries EntryList
+ PackfileChecksum [20]byte
+ IdxChecksum [20]byte
+}
+
+// An Entry represents data about an object in the packfile: its hash,
+// offset and CRC32 checksum.
+type Entry struct {
+ Hash plumbing.Hash
+ CRC32 uint32
+ Offset uint64
+}
+
+// Add appends a new entry with the given hash, offset and CRC32 to the index.
+func (idx *Idxfile) Add(h plumbing.Hash, offset uint64, crc32 uint32) {
+ idx.Entries = append(idx.Entries, Entry{
+ Hash: h,
+ Offset: offset,
+ CRC32: crc32,
+ })
+}
+
+func (idx *Idxfile) isValid() bool {
+ fanout := idx.calculateFanout()
+ for k, c := range idx.Fanout {
+ if fanout[k] != c {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (idx *Idxfile) calculateFanout() [256]uint32 {
+ fanout := [256]uint32{}
+ for _, e := range idx.Entries {
+ fanout[e.Hash[0]]++
+ }
+
+ for i := 1; i < 256; i++ {
+ fanout[i] += fanout[i-1]
+ }
+
+ return fanout
+}
diff --git a/plumbing/format/index/decoder.go b/plumbing/format/index/decoder.go
new file mode 100644
index 0000000..9069c9e
--- /dev/null
+++ b/plumbing/format/index/decoder.go
@@ -0,0 +1,446 @@
+package index
+
+import (
+ "bytes"
+ "crypto/sha1"
+ "errors"
+ "hash"
+ "io"
+ "io/ioutil"
+ "strconv"
+ "time"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/utils/binary"
+)
+
+var (
+	// DecodeVersionSupported is the range of supported index versions.
+	DecodeVersionSupported = struct{ Min, Max uint32 }{Min: 2, Max: 4}
+
+	// ErrMalformedSignature is returned by Decode when the index header is
+	// malformed.
+	ErrMalformedSignature = errors.New("malformed index signature file")
+	// ErrInvalidChecksum is returned by Decode if the SHA1 hash of the read
+	// content does not match the stored checksum.
+	ErrInvalidChecksum = errors.New("invalid checksum")
+
+	errUnknownExtension = errors.New("unknown extension")
+)
+
+const (
+ entryHeaderLength = 62
+ entryExtended = 0x4000
+ entryValid = 0x8000
+ nameMask = 0xfff
+ intentToAddMask = 1 << 13
+ skipWorkTreeMask = 1 << 14
+)
+
+// A Decoder reads and decodes index files from an input stream.
+type Decoder struct {
+ r io.Reader
+ hash hash.Hash
+ lastEntry *Entry
+}
+
+// NewDecoder returns a new decoder that reads from r.
+func NewDecoder(r io.Reader) *Decoder {
+ h := sha1.New()
+ return &Decoder{
+ r: io.TeeReader(r, h),
+ hash: h,
+ }
+}
+
+// Decode reads the whole index object from its input and stores it in the
+// value pointed to by idx.
+func (d *Decoder) Decode(idx *Index) error {
+ var err error
+ idx.Version, err = validateHeader(d.r)
+ if err != nil {
+ return err
+ }
+
+ entryCount, err := binary.ReadUint32(d.r)
+ if err != nil {
+ return err
+ }
+
+ if err := d.readEntries(idx, int(entryCount)); err != nil {
+ return err
+ }
+
+ return d.readExtensions(idx)
+}
+
+func (d *Decoder) readEntries(idx *Index, count int) error {
+ for i := 0; i < count; i++ {
+ e, err := d.readEntry(idx)
+ if err != nil {
+ return err
+ }
+
+ d.lastEntry = e
+ idx.Entries = append(idx.Entries, *e)
+ }
+
+ return nil
+}
+
+func (d *Decoder) readEntry(idx *Index) (*Entry, error) {
+ e := &Entry{}
+
+ var msec, mnsec, sec, nsec uint32
+ var flags uint16
+
+ flow := []interface{}{
+ &sec, &nsec,
+ &msec, &mnsec,
+ &e.Dev,
+ &e.Inode,
+ &e.Mode,
+ &e.UID,
+ &e.GID,
+ &e.Size,
+ &e.Hash,
+ &flags,
+ }
+
+ if err := binary.Read(d.r, flow...); err != nil {
+ return nil, err
+ }
+
+ read := entryHeaderLength
+ e.CreatedAt = time.Unix(int64(sec), int64(nsec))
+ e.ModifiedAt = time.Unix(int64(msec), int64(mnsec))
+ e.Stage = Stage(flags>>12) & 0x3
+
+ if flags&entryExtended != 0 {
+ extended, err := binary.ReadUint16(d.r)
+ if err != nil {
+ return nil, err
+ }
+
+ read += 2
+ e.IntentToAdd = extended&intentToAddMask != 0
+ e.SkipWorktree = extended&skipWorkTreeMask != 0
+ }
+
+ if err := d.readEntryName(idx, e, flags); err != nil {
+ return nil, err
+ }
+
+ return e, d.padEntry(idx, e, read)
+}
+
+func (d *Decoder) readEntryName(idx *Index, e *Entry, flags uint16) error {
+ var name string
+ var err error
+
+	switch idx.Version {
+	case 2, 3:
+		name, err = d.doReadEntryName(flags & nameMask)
+	case 4:
+		name, err = d.doReadEntryNameV4()
+	default:
+		return ErrUnsupportedVersion
+	}
+
+ if err != nil {
+ return err
+ }
+
+ e.Name = name
+ return nil
+}
+
+func (d *Decoder) doReadEntryNameV4() (string, error) {
+ l, err := binary.ReadVariableWidthInt(d.r)
+ if err != nil {
+ return "", err
+ }
+
+ var base string
+ if d.lastEntry != nil {
+ base = d.lastEntry.Name[:len(d.lastEntry.Name)-int(l)]
+ }
+
+ name, err := binary.ReadUntil(d.r, '\x00')
+ if err != nil {
+ return "", err
+ }
+
+ return base + string(name), nil
+}
+
+func (d *Decoder) doReadEntryName(length uint16) (string, error) {
+	name := make([]byte, length)
+ if err := binary.Read(d.r, &name); err != nil {
+ return "", err
+ }
+
+ return string(name), nil
+}
+
+// Index entries are padded out to the next 8 byte alignment
+// for historical reasons related to how C Git read the files.
+func (d *Decoder) padEntry(idx *Index, e *Entry, read int) error {
+ if idx.Version == 4 {
+ return nil
+ }
+
+ entrySize := read + len(e.Name)
+ padLen := 8 - entrySize%8
+ if _, err := io.CopyN(ioutil.Discard, d.r, int64(padLen)); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// TODO: support the 'Split index' and 'Untracked cache' extensions, taking
+// into account that they are not supported by jgit or libgit2.
+func (d *Decoder) readExtensions(idx *Index) error {
+ var expected []byte
+ var err error
+
+ var header [4]byte
+ for {
+ expected = d.hash.Sum(nil)
+
+ var n int
+ if n, err = io.ReadFull(d.r, header[:]); err != nil {
+ if n == 0 {
+ err = io.EOF
+ }
+
+ break
+ }
+
+ err = d.readExtension(idx, header[:])
+ if err != nil {
+ break
+ }
+ }
+
+ if err != errUnknownExtension {
+ return err
+ }
+
+ return d.readChecksum(expected, header)
+}
+
+func (d *Decoder) readExtension(idx *Index, header []byte) error {
+ switch {
+ case bytes.Equal(header, treeExtSignature):
+ r, err := d.getExtensionReader()
+ if err != nil {
+ return err
+ }
+
+ idx.Cache = &Tree{}
+ d := &treeExtensionDecoder{r}
+ if err := d.Decode(idx.Cache); err != nil {
+ return err
+ }
+ case bytes.Equal(header, resolveUndoExtSignature):
+ r, err := d.getExtensionReader()
+ if err != nil {
+ return err
+ }
+
+ idx.ResolveUndo = &ResolveUndo{}
+ d := &resolveUndoDecoder{r}
+ if err := d.Decode(idx.ResolveUndo); err != nil {
+ return err
+ }
+ default:
+ return errUnknownExtension
+ }
+
+ return nil
+}
+
+func (d *Decoder) getExtensionReader() (io.Reader, error) {
+	size, err := binary.ReadUint32(d.r)
+	if err != nil {
+		return nil, err
+	}
+
+	return &io.LimitedReader{R: d.r, N: int64(size)}, nil
+}
+
+func (d *Decoder) readChecksum(expected []byte, alreadyRead [4]byte) error {
+ var h plumbing.Hash
+ copy(h[:4], alreadyRead[:])
+
+ if err := binary.Read(d.r, h[4:]); err != nil {
+ return err
+ }
+
+	if !bytes.Equal(h[:], expected) {
+ return ErrInvalidChecksum
+ }
+
+ return nil
+}
+
+func validateHeader(r io.Reader) (version uint32, err error) {
+ var s = make([]byte, 4)
+ if _, err := io.ReadFull(r, s); err != nil {
+ return 0, err
+ }
+
+ if !bytes.Equal(s, indexSignature) {
+ return 0, ErrMalformedSignature
+ }
+
+ version, err = binary.ReadUint32(r)
+ if err != nil {
+ return 0, err
+ }
+
+ if version < DecodeVersionSupported.Min || version > DecodeVersionSupported.Max {
+ return 0, ErrUnsupportedVersion
+ }
+
+ return
+}
+
+type treeExtensionDecoder struct {
+ r io.Reader
+}
+
+func (d *treeExtensionDecoder) Decode(t *Tree) error {
+ for {
+ e, err := d.readEntry()
+ if err != nil {
+ if err == io.EOF {
+ return nil
+ }
+
+ return err
+ }
+
+ if e == nil {
+ continue
+ }
+
+ t.Entries = append(t.Entries, *e)
+ }
+}
+
+func (d *treeExtensionDecoder) readEntry() (*TreeEntry, error) {
+ e := &TreeEntry{}
+
+ path, err := binary.ReadUntil(d.r, '\x00')
+ if err != nil {
+ return nil, err
+ }
+
+ e.Path = string(path)
+
+ count, err := binary.ReadUntil(d.r, ' ')
+ if err != nil {
+ return nil, err
+ }
+
+ i, err := strconv.Atoi(string(count))
+ if err != nil {
+ return nil, err
+ }
+
+ // An entry can be in an invalidated state and is represented by having a
+ // negative number in the entry_count field.
+ if i == -1 {
+ return nil, nil
+ }
+
+ e.Entries = i
+ trees, err := binary.ReadUntil(d.r, '\n')
+ if err != nil {
+ return nil, err
+ }
+
+ i, err = strconv.Atoi(string(trees))
+ if err != nil {
+ return nil, err
+ }
+
+ e.Trees = i
+
+ if err := binary.Read(d.r, &e.Hash); err != nil {
+ return nil, err
+ }
+
+ return e, nil
+}
+
+type resolveUndoDecoder struct {
+ r io.Reader
+}
+
+func (d *resolveUndoDecoder) Decode(ru *ResolveUndo) error {
+ for {
+ e, err := d.readEntry()
+ if err != nil {
+ if err == io.EOF {
+ return nil
+ }
+
+ return err
+ }
+
+ ru.Entries = append(ru.Entries, *e)
+ }
+}
+
+func (d *resolveUndoDecoder) readEntry() (*ResolveUndoEntry, error) {
+ e := &ResolveUndoEntry{
+		Stages: make(map[Stage]plumbing.Hash),
+ }
+
+ path, err := binary.ReadUntil(d.r, '\x00')
+ if err != nil {
+ return nil, err
+ }
+
+ e.Path = string(path)
+
+ for i := 0; i < 3; i++ {
+ if err := d.readStage(e, Stage(i+1)); err != nil {
+ return nil, err
+ }
+ }
+
+ for s := range e.Stages {
+ var hash plumbing.Hash
+ if err := binary.Read(d.r, hash[:]); err != nil {
+ return nil, err
+ }
+
+ e.Stages[s] = hash
+ }
+
+ return e, nil
+}
+
+func (d *resolveUndoDecoder) readStage(e *ResolveUndoEntry, s Stage) error {
+ ascii, err := binary.ReadUntil(d.r, '\x00')
+ if err != nil {
+ return err
+ }
+
+ stage, err := strconv.ParseInt(string(ascii), 8, 64)
+ if err != nil {
+ return err
+ }
+
+ if stage != 0 {
+ e.Stages[s] = plumbing.ZeroHash
+ }
+
+ return nil
+}
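+
+// A minimal usage sketch (illustrative only, assuming the standard os
+// package; error handling elided): decoding the index of a repository.
+//
+//	f, _ := os.Open(".git/index")
+//	defer f.Close()
+//
+//	idx := &Index{}
+//	if err := NewDecoder(f).Decode(idx); err != nil {
+//		// handle the error
+//	}
+//	// idx.Cache and idx.ResolveUndo are set only when the corresponding
+//	// extensions are present in the file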
diff --git a/plumbing/format/index/decoder_test.go b/plumbing/format/index/decoder_test.go
new file mode 100644
index 0000000..44ecb69
--- /dev/null
+++ b/plumbing/format/index/decoder_test.go
@@ -0,0 +1,196 @@
+package index
+
+import (
+ "testing"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/src-d/go-git.v4/fixtures"
+ "gopkg.in/src-d/go-git.v4/plumbing"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type IdxfileSuite struct {
+ fixtures.Suite
+}
+
+var _ = Suite(&IdxfileSuite{})
+
+func (s *IdxfileSuite) TestDecode(c *C) {
+ f, err := fixtures.Basic().One().DotGit().Open("index")
+ c.Assert(err, IsNil)
+
+ idx := &Index{}
+ d := NewDecoder(f)
+ err = d.Decode(idx)
+ c.Assert(err, IsNil)
+
+ c.Assert(idx.Version, Equals, uint32(2))
+ c.Assert(idx.Entries, HasLen, 9)
+}
+
+func (s *IdxfileSuite) TestDecodeEntries(c *C) {
+ f, err := fixtures.Basic().One().DotGit().Open("index")
+ c.Assert(err, IsNil)
+
+ idx := &Index{}
+ d := NewDecoder(f)
+ err = d.Decode(idx)
+ c.Assert(err, IsNil)
+
+ c.Assert(idx.Entries, HasLen, 9)
+
+ e := idx.Entries[0]
+ c.Assert(e.CreatedAt.Unix(), Equals, int64(1473350251))
+ c.Assert(e.CreatedAt.Nanosecond(), Equals, 12059307)
+ c.Assert(e.ModifiedAt.Unix(), Equals, int64(1473350251))
+ c.Assert(e.ModifiedAt.Nanosecond(), Equals, 12059307)
+ c.Assert(e.Dev, Equals, uint32(38))
+ c.Assert(e.Inode, Equals, uint32(1715795))
+ c.Assert(e.UID, Equals, uint32(1000))
+ c.Assert(e.GID, Equals, uint32(100))
+ c.Assert(e.Size, Equals, uint32(189))
+ c.Assert(e.Hash.String(), Equals, "32858aad3c383ed1ff0a0f9bdf231d54a00c9e88")
+ c.Assert(e.Name, Equals, ".gitignore")
+ c.Assert(e.Mode.String(), Equals, "-rw-r--r--")
+
+ e = idx.Entries[1]
+ c.Assert(e.Name, Equals, "CHANGELOG")
+}
+
+func (s *IdxfileSuite) TestDecodeCacheTree(c *C) {
+ f, err := fixtures.Basic().One().DotGit().Open("index")
+ c.Assert(err, IsNil)
+
+ idx := &Index{}
+ d := NewDecoder(f)
+ err = d.Decode(idx)
+ c.Assert(err, IsNil)
+
+ c.Assert(idx.Entries, HasLen, 9)
+ c.Assert(idx.Cache.Entries, HasLen, 5)
+
+ for i, expected := range expectedEntries {
+ c.Assert(idx.Cache.Entries[i].Path, Equals, expected.Path)
+ c.Assert(idx.Cache.Entries[i].Entries, Equals, expected.Entries)
+ c.Assert(idx.Cache.Entries[i].Trees, Equals, expected.Trees)
+ c.Assert(idx.Cache.Entries[i].Hash.String(), Equals, expected.Hash.String())
+ }
+}
+
+var expectedEntries = []TreeEntry{
+ {Path: "", Entries: 9, Trees: 4, Hash: plumbing.NewHash("a8d315b2b1c615d43042c3a62402b8a54288cf5c")},
+ {Path: "go", Entries: 1, Trees: 0, Hash: plumbing.NewHash("a39771a7651f97faf5c72e08224d857fc35133db")},
+ {Path: "php", Entries: 1, Trees: 0, Hash: plumbing.NewHash("586af567d0bb5e771e49bdd9434f5e0fb76d25fa")},
+ {Path: "json", Entries: 2, Trees: 0, Hash: plumbing.NewHash("5a877e6a906a2743ad6e45d99c1793642aaf8eda")},
+ {Path: "vendor", Entries: 1, Trees: 0, Hash: plumbing.NewHash("cf4aa3b38974fb7d81f367c0830f7d78d65ab86b")},
+}
+
+func (s *IdxfileSuite) TestDecodeMergeConflict(c *C) {
+ f, err := fixtures.Basic().ByTag("merge-conflict").One().DotGit().Open("index")
+ c.Assert(err, IsNil)
+
+ idx := &Index{}
+ d := NewDecoder(f)
+ err = d.Decode(idx)
+ c.Assert(err, IsNil)
+
+ c.Assert(idx.Version, Equals, uint32(2))
+ c.Assert(idx.Entries, HasLen, 13)
+
+ expected := []struct {
+ Stage Stage
+ Hash string
+ }{
+ {AncestorMode, "880cd14280f4b9b6ed3986d6671f907d7cc2a198"},
+ {OurMode, "d499a1a0b79b7d87a35155afd0c1cce78b37a91c"},
+ {TheirMode, "14f8e368114f561c38e134f6e68ea6fea12d77ed"},
+ }
+
+	// staged files
+ for i, e := range idx.Entries[4:7] {
+ c.Assert(e.Stage, Equals, expected[i].Stage)
+ c.Assert(e.CreatedAt.Unix(), Equals, int64(0))
+ c.Assert(e.CreatedAt.Nanosecond(), Equals, 0)
+ c.Assert(e.ModifiedAt.Unix(), Equals, int64(0))
+ c.Assert(e.ModifiedAt.Nanosecond(), Equals, 0)
+ c.Assert(e.Dev, Equals, uint32(0))
+ c.Assert(e.Inode, Equals, uint32(0))
+ c.Assert(e.UID, Equals, uint32(0))
+ c.Assert(e.GID, Equals, uint32(0))
+ c.Assert(e.Size, Equals, uint32(0))
+ c.Assert(e.Hash.String(), Equals, expected[i].Hash)
+ c.Assert(e.Name, Equals, "go/example.go")
+ }
+}
+
+func (s *IdxfileSuite) TestDecodeExtendedV3(c *C) {
+ f, err := fixtures.Basic().ByTag("intent-to-add").One().DotGit().Open("index")
+ c.Assert(err, IsNil)
+
+ idx := &Index{}
+ d := NewDecoder(f)
+ err = d.Decode(idx)
+ c.Assert(err, IsNil)
+
+ c.Assert(idx.Version, Equals, uint32(3))
+ c.Assert(idx.Entries, HasLen, 11)
+
+ c.Assert(idx.Entries[6].Name, Equals, "intent-to-add")
+ c.Assert(idx.Entries[6].IntentToAdd, Equals, true)
+ c.Assert(idx.Entries[6].SkipWorktree, Equals, false)
+}
+
+func (s *IdxfileSuite) TestDecodeResolveUndo(c *C) {
+ f, err := fixtures.Basic().ByTag("resolve-undo").One().DotGit().Open("index")
+ c.Assert(err, IsNil)
+
+ idx := &Index{}
+ d := NewDecoder(f)
+ err = d.Decode(idx)
+ c.Assert(err, IsNil)
+
+ c.Assert(idx.Version, Equals, uint32(2))
+ c.Assert(idx.Entries, HasLen, 8)
+
+ ru := idx.ResolveUndo
+ c.Assert(ru.Entries, HasLen, 2)
+ c.Assert(ru.Entries[0].Path, Equals, "go/example.go")
+ c.Assert(ru.Entries[0].Stages, HasLen, 3)
+ c.Assert(ru.Entries[0].Stages[AncestorMode], Not(Equals), plumbing.ZeroHash)
+ c.Assert(ru.Entries[0].Stages[OurMode], Not(Equals), plumbing.ZeroHash)
+ c.Assert(ru.Entries[0].Stages[TheirMode], Not(Equals), plumbing.ZeroHash)
+ c.Assert(ru.Entries[1].Path, Equals, "haskal/haskal.hs")
+ c.Assert(ru.Entries[1].Stages, HasLen, 2)
+ c.Assert(ru.Entries[1].Stages[OurMode], Not(Equals), plumbing.ZeroHash)
+ c.Assert(ru.Entries[1].Stages[TheirMode], Not(Equals), plumbing.ZeroHash)
+}
+
+func (s *IdxfileSuite) TestDecodeV4(c *C) {
+ f, err := fixtures.Basic().ByTag("index-v4").One().DotGit().Open("index")
+ c.Assert(err, IsNil)
+
+ idx := &Index{}
+ d := NewDecoder(f)
+ err = d.Decode(idx)
+ c.Assert(err, IsNil)
+
+ c.Assert(idx.Version, Equals, uint32(4))
+ c.Assert(idx.Entries, HasLen, 11)
+
+ names := []string{
+ ".gitignore", "CHANGELOG", "LICENSE", "binary.jpg", "go/example.go",
+ "haskal/haskal.hs", "intent-to-add", "json/long.json",
+ "json/short.json", "php/crappy.php", "vendor/foo.go",
+ }
+
+ for i, e := range idx.Entries {
+ c.Assert(e.Name, Equals, names[i])
+ }
+
+ c.Assert(idx.Entries[6].Name, Equals, "intent-to-add")
+ c.Assert(idx.Entries[6].IntentToAdd, Equals, true)
+ c.Assert(idx.Entries[6].SkipWorktree, Equals, false)
+}
diff --git a/plumbing/format/index/doc.go b/plumbing/format/index/doc.go
new file mode 100644
index 0000000..3a72606
--- /dev/null
+++ b/plumbing/format/index/doc.go
@@ -0,0 +1,302 @@
+// Package index implements an encoder and a decoder of index format files.
+package index
+
+/*
+Git index format
+================
+
+== The Git index file has the following format
+
+ All binary numbers are in network byte order. Version 2 is described
+ here unless stated otherwise.
+
+ - A 12-byte header consisting of
+
+ 4-byte signature:
+ The signature is { 'D', 'I', 'R', 'C' } (stands for "dircache")
+
+ 4-byte version number:
+ The current supported versions are 2, 3 and 4.
+
+ 32-bit number of index entries.
+
+ - A number of sorted index entries (see below).
+
+ - Extensions
+
+ Extensions are identified by signature. Optional extensions can
+ be ignored if Git does not understand them.
+
+ Git currently supports cached tree and resolve undo extensions.
+
+ 4-byte extension signature. If the first byte is 'A'..'Z' the
+ extension is optional and can be ignored.
+
+ 32-bit size of the extension
+
+ Extension data
+
+ - 160-bit SHA-1 over the content of the index file before this
+ checksum.
+
+== Index entry
+
+ Index entries are sorted in ascending order on the name field,
+ interpreted as a string of unsigned bytes (i.e. memcmp() order, no
+ localization, no special casing of directory separator '/'). Entries
+ with the same name are sorted by their stage field.
+
+ 32-bit ctime seconds, the last time a file's metadata changed
+ this is stat(2) data
+
+ 32-bit ctime nanosecond fractions
+ this is stat(2) data
+
+ 32-bit mtime seconds, the last time a file's data changed
+ this is stat(2) data
+
+ 32-bit mtime nanosecond fractions
+ this is stat(2) data
+
+ 32-bit dev
+ this is stat(2) data
+
+ 32-bit ino
+ this is stat(2) data
+
+ 32-bit mode, split into (high to low bits)
+
+ 4-bit object type
+ valid values in binary are 1000 (regular file), 1010 (symbolic link)
+ and 1110 (gitlink)
+
+ 3-bit unused
+
+ 9-bit unix permission. Only 0755 and 0644 are valid for regular files.
+ Symbolic links and gitlinks have value 0 in this field.
+
+ 32-bit uid
+ this is stat(2) data
+
+ 32-bit gid
+ this is stat(2) data
+
+ 32-bit file size
+ This is the on-disk size from stat(2), truncated to 32-bit.
+
+ 160-bit SHA-1 for the represented object
+
+ A 16-bit 'flags' field split into (high to low bits)
+
+ 1-bit assume-valid flag
+
+ 1-bit extended flag (must be zero in version 2)
+
+ 2-bit stage (during merge)
+
+ 12-bit name length if the length is less than 0xFFF; otherwise 0xFFF
+ is stored in this field.
+
+ (Version 3 or later) A 16-bit field, only applicable if the
+ "extended flag" above is 1, split into (high to low bits).
+
+ 1-bit reserved for future
+
+ 1-bit skip-worktree flag (used by sparse checkout)
+
+ 1-bit intent-to-add flag (used by "git add -N")
+
+ 13-bit unused, must be zero
+
+ Entry path name (variable length) relative to top level directory
+ (without leading slash). '/' is used as path separator. The special
+ path components ".", ".." and ".git" (without quotes) are disallowed.
+ Trailing slash is also disallowed.
+
+ The exact encoding is undefined, but the '.' and '/' characters
+ are encoded in 7-bit ASCII and the encoding cannot contain a NUL
+ byte (iow, this is a UNIX pathname).
+
+ (Version 4) In version 4, the entry path name is prefix-compressed
+ relative to the path name for the previous entry (the very first
+ entry is encoded as if the path name for the previous entry is an
+ empty string). At the beginning of an entry, an integer N in the
+ variable width encoding (the same encoding as the offset is encoded
+ for OFS_DELTA pack entries; see pack-format.txt) is stored, followed
+ by a NUL-terminated string S. Removing N bytes from the end of the
+ path name for the previous entry, and replacing it with the string S
+ yields the path name for this entry.
+
+ 1-8 nul bytes as necessary to pad the entry to a multiple of eight bytes
+ while keeping the name NUL-terminated.
+
+ (Version 4) In version 4, the padding after the pathname does not
+ exist.
+
+ Interpretation of index entries in split index mode is completely
+ different. See below for details.
+
+== Extensions
+
+=== Cached tree
+
+ Cached tree extension contains pre-computed hashes for trees that can
+ be derived from the index. It helps speed up tree object generation
+ from index for a new commit.
+
+ When a path is updated in index, the path must be invalidated and
+ removed from tree cache.
+
+ The signature for this extension is { 'T', 'R', 'E', 'E' }.
+
+ A series of entries fill the entire extension; each of which
+ consists of:
+
+ - NUL-terminated path component (relative to its parent directory);
+
+ - ASCII decimal number of entries in the index that is covered by the
+ tree this entry represents (entry_count);
+
+ - A space (ASCII 32);
+
+ - ASCII decimal number that represents the number of subtrees this
+ tree has;
+
+ - A newline (ASCII 10); and
+
+ - 160-bit object name for the object that would result from writing
+ this span of index as a tree.
+
+ An entry can be in an invalidated state and is represented by having
+ a negative number in the entry_count field. In this case, there is no
+ object name and the next entry starts immediately after the newline.
+ When writing an invalid entry, -1 should always be used as entry_count.
+
+ The entries are written out in the top-down, depth-first order. The
+ first entry represents the root level of the repository, followed by the
+ first subtree--let's call this A--of the root level (with its name
+ relative to the root level), followed by the first subtree of A (with
+ its name relative to A), ...
+
+=== Resolve undo
+
+ A conflict is represented in the index as a set of higher stage entries.
+ When a conflict is resolved (e.g. with "git add path"), these higher
+ stage entries will be removed and a stage-0 entry with proper resolution
+ is added.
+
+ When these higher stage entries are removed, they are saved in the
+ resolve undo extension, so that conflicts can be recreated (e.g. with
+ "git checkout -m"), in case users want to redo a conflict resolution
+ from scratch.
+
+ The signature for this extension is { 'R', 'E', 'U', 'C' }.
+
+ A series of entries fill the entire extension; each of which
+ consists of:
+
+ - NUL-terminated pathname the entry describes (relative to the root of
+ the repository, i.e. full pathname);
+
+ - Three NUL-terminated ASCII octal numbers, entry mode of entries in
+ stage 1 to 3 (a missing stage is represented by "0" in this field);
+ and
+
+ - At most three 160-bit object names of the entry in stages from 1 to 3
+ (nothing is written for a missing stage).
+
+=== Split index
+
+ In split index mode, the majority of index entries could be stored
+ in a separate file. This extension records the changes to be made on
+ top of that to produce the final index.
+
+ The signature for this extension is { 'l', 'i', 'n', 'k' }.
+
+ The extension consists of:
+
+ - 160-bit SHA-1 of the shared index file. The shared index file path
+ is $GIT_DIR/sharedindex.<SHA-1>. If all 160 bits are zero, the
+ index does not require a shared index file.
+
+ - An ewah-encoded delete bitmap, each bit represents an entry in the
+ shared index. If a bit is set, its corresponding entry in the
+ shared index will be removed from the final index. Note: because
+ a delete operation changes index entry positions, while the original
+ positions are still needed during the replace phase, it is best to
+ just mark entries for removal, then do a mass deletion after
+ replacement.
+
+ - An ewah-encoded replace bitmap, each bit represents an entry in
+ the shared index. If a bit is set, its corresponding entry in the
+ shared index will be replaced with an entry in this index
+ file. All replaced entries are stored in sorted order in this
+ index. The first "1" bit in the replace bitmap corresponds to the
+ first index entry, the second "1" bit to the second entry and so
+ on. Replaced entries may have empty path names to save space.
+
+ The remaining index entries after replaced ones will be added to the
+ final index. These added entries are also sorted by entry name then
+ stage.
+
+=== Untracked cache
+
+ Untracked cache saves the untracked file list and necessary data to
+ verify the cache. The signature for this extension is { 'U', 'N',
+ 'T', 'R' }.
+
+ The extension starts with
+
+ - A sequence of NUL-terminated strings, preceded by the size of the
+ sequence in variable width encoding. Each string describes the
+ environment where the cache can be used.
+
+ - Stat data of $GIT_DIR/info/exclude. See "Index entry" section from
+ ctime field until "file size".
+
+ - Stat data of core.excludesfile
+
+ - 32-bit dir_flags (see struct dir_struct)
+
+ - 160-bit SHA-1 of $GIT_DIR/info/exclude. Null SHA-1 means the file
+ does not exist.
+
+ - 160-bit SHA-1 of core.excludesfile. Null SHA-1 means the file does
+ not exist.
+
+ - NUL-terminated string of per-dir exclude file name. This usually
+ is ".gitignore".
+
+ - The number of following directory blocks, variable width
+ encoding. If this number is zero, the extension ends here with a
+ following NUL.
+
+ - A number of directory blocks in depth-first-search order, each
+ consists of
+
+ - The number of untracked entries, variable width encoding.
+
+ - The number of sub-directory blocks, variable width encoding.
+
+ - The directory name terminated by NUL.
+
+ - A number of untracked file/dir names terminated by NUL.
+
+The remaining data of each directory block is grouped by type:
+
+ - An ewah bitmap, the n-th bit marks whether the n-th directory has
+ valid untracked cache entries.
+
+ - An ewah bitmap, the n-th bit records "check-only" bit of
+ read_directory_recursive() for the n-th directory.
+
+ - An ewah bitmap, the n-th bit indicates whether SHA-1 and stat data
+ is valid for the n-th directory and exists in the next data.
+
+ - An array of stat data. The n-th data corresponds with the n-th
+ "one" bit in the previous ewah bitmap.
+
+ - An array of SHA-1. The n-th SHA-1 corresponds with the n-th "one" bit
+ in the previous ewah bitmap.
+
+ - One NUL.
+*/
diff --git a/plumbing/format/index/encoder.go b/plumbing/format/index/encoder.go
new file mode 100644
index 0000000..94fbc68
--- /dev/null
+++ b/plumbing/format/index/encoder.go
@@ -0,0 +1,141 @@
+package index
+
+import (
+ "bytes"
+ "crypto/sha1"
+ "errors"
+ "hash"
+ "io"
+ "time"
+
+ "gopkg.in/src-d/go-git.v4/utils/binary"
+)
+
+var (
+	// EncodeVersionSupported is the only index version supported by Encode
+ EncodeVersionSupported uint32 = 2
+
+	// ErrInvalidTimestamp is returned by Encode if an Index contains an
+	// Entry with negative timestamp values
+ ErrInvalidTimestamp = errors.New("negative timestamps are not allowed")
+)
+
+// An Encoder writes an Index to an output stream.
+type Encoder struct {
+ w io.Writer
+ hash hash.Hash
+}
+
+// NewEncoder returns a new encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+ h := sha1.New()
+ mw := io.MultiWriter(w, h)
+ return &Encoder{mw, h}
+}
+
+// Encode writes the Index to the stream of the encoder.
+func (e *Encoder) Encode(idx *Index) error {
+ // TODO: support versions v3 and v4
+ // TODO: support extensions
+ if idx.Version != EncodeVersionSupported {
+ return ErrUnsupportedVersion
+ }
+
+ if err := e.encodeHeader(idx); err != nil {
+ return err
+ }
+
+ if err := e.encodeEntries(idx); err != nil {
+ return err
+ }
+
+ return e.encodeFooter()
+}
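+
+// encodeToBuffer is a hypothetical usage sketch, not part of the API added by
+// this change: it serializes an Index into an in-memory buffer and returns
+// the raw bytes, footer checksum included.
+func encodeToBuffer(idx *Index) ([]byte, error) {
+	var buf bytes.Buffer
+	if err := NewEncoder(&buf).Encode(idx); err != nil {
+		return nil, err
+	}
+
+	return buf.Bytes(), nil
+}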
+
+func (e *Encoder) encodeHeader(idx *Index) error {
+ return binary.Write(e.w,
+ indexSignature,
+ idx.Version,
+ uint32(len(idx.Entries)),
+ )
+}
+
+func (e *Encoder) encodeEntries(idx *Index) error {
+ for _, entry := range idx.Entries {
+ if err := e.encodeEntry(&entry); err != nil {
+ return err
+ }
+
+ wrote := entryHeaderLength + len(entry.Name)
+ if err := e.padEntry(wrote); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (e *Encoder) encodeEntry(entry *Entry) error {
+ if entry.IntentToAdd || entry.SkipWorktree {
+ return ErrUnsupportedVersion
+ }
+
+ sec, nsec, err := e.timeToUint32(&entry.CreatedAt)
+ if err != nil {
+ return err
+ }
+
+ msec, mnsec, err := e.timeToUint32(&entry.ModifiedAt)
+ if err != nil {
+ return err
+ }
+
+ flags := uint16(entry.Stage&0x3) << 12
+ if l := len(entry.Name); l < nameMask {
+ flags |= uint16(l)
+ } else {
+ flags |= nameMask
+ }
+
+ flow := []interface{}{
+ sec, nsec,
+ msec, mnsec,
+ entry.Dev,
+ entry.Inode,
+ entry.Mode,
+ entry.UID,
+ entry.GID,
+ entry.Size,
+ entry.Hash[:],
+ flags,
+ }
+
+ if err := binary.Write(e.w, flow...); err != nil {
+ return err
+ }
+
+ return binary.Write(e.w, []byte(entry.Name))
+}
+
+func (e *Encoder) timeToUint32(t *time.Time) (uint32, uint32, error) {
+ if t.IsZero() {
+ return 0, 0, nil
+ }
+
+ if t.Unix() < 0 || t.UnixNano() < 0 {
+ return 0, 0, ErrInvalidTimestamp
+ }
+
+ return uint32(t.Unix()), uint32(t.Nanosecond()), nil
+}
+
+func (e *Encoder) padEntry(wrote int) error {
+ padLen := 8 - wrote%8
+
+ _, err := e.w.Write(bytes.Repeat([]byte{'\x00'}, padLen))
+ return err
+}
+
+func (e *Encoder) encodeFooter() error {
+ return binary.Write(e.w, e.hash.Sum(nil))
+}
diff --git a/plumbing/format/index/encoder_test.go b/plumbing/format/index/encoder_test.go
new file mode 100644
index 0000000..6770985
--- /dev/null
+++ b/plumbing/format/index/encoder_test.go
@@ -0,0 +1,78 @@
+package index
+
+import (
+ "bytes"
+ "strings"
+ "time"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/src-d/go-git.v4/plumbing"
+)
+
+func (s *IdxfileSuite) TestEncode(c *C) {
+ idx := &Index{
+ Version: 2,
+ Entries: []Entry{{
+ CreatedAt: time.Now(),
+ ModifiedAt: time.Now(),
+ Dev: 4242,
+ Inode: 424242,
+ UID: 84,
+ GID: 8484,
+ Size: 42,
+ Stage: TheirMode,
+ Hash: plumbing.NewHash("e25b29c8946e0e192fae2edc1dabf7be71e8ecf3"),
+ Name: "foo",
+ }, {
+ CreatedAt: time.Now(),
+ ModifiedAt: time.Now(),
+ Name: strings.Repeat(" ", 20),
+ Size: 82,
+ }},
+ }
+
+ buf := bytes.NewBuffer(nil)
+ e := NewEncoder(buf)
+ err := e.Encode(idx)
+ c.Assert(err, IsNil)
+
+ output := &Index{}
+ d := NewDecoder(buf)
+ err = d.Decode(output)
+ c.Assert(err, IsNil)
+
+ c.Assert(idx, DeepEquals, output)
+}
+
+func (s *IdxfileSuite) TestEncodeUnsupportedVersion(c *C) {
+ idx := &Index{Version: 3}
+
+ buf := bytes.NewBuffer(nil)
+ e := NewEncoder(buf)
+ err := e.Encode(idx)
+ c.Assert(err, Equals, ErrUnsupportedVersion)
+}
+
+func (s *IdxfileSuite) TestEncodeWithIntentToAddUnsupportedVersion(c *C) {
+ idx := &Index{
+ Version: 2,
+ Entries: []Entry{{IntentToAdd: true}},
+ }
+
+ buf := bytes.NewBuffer(nil)
+ e := NewEncoder(buf)
+ err := e.Encode(idx)
+ c.Assert(err, Equals, ErrUnsupportedVersion)
+}
+
+func (s *IdxfileSuite) TestEncodeWithSkipWorktreeUnsupportedVersion(c *C) {
+ idx := &Index{
+ Version: 2,
+ Entries: []Entry{{SkipWorktree: true}},
+ }
+
+ buf := bytes.NewBuffer(nil)
+ e := NewEncoder(buf)
+ err := e.Encode(idx)
+ c.Assert(err, Equals, ErrUnsupportedVersion)
+}
diff --git a/plumbing/format/index/index.go b/plumbing/format/index/index.go
new file mode 100644
index 0000000..4a3c798
--- /dev/null
+++ b/plumbing/format/index/index.go
@@ -0,0 +1,108 @@
+package index
+
+import (
+ "errors"
+ "os"
+ "time"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+)
+
+var (
+	// ErrUnsupportedVersion is returned by Decode when the index file
+	// version is not supported.
+	ErrUnsupportedVersion = errors.New("unsupported version")
+
+ indexSignature = []byte{'D', 'I', 'R', 'C'}
+ treeExtSignature = []byte{'T', 'R', 'E', 'E'}
+ resolveUndoExtSignature = []byte{'R', 'E', 'U', 'C'}
+)
+
+// Stage during merge
+type Stage int
+
+const (
+ // Merged is the default stage, fully merged
+	Merged Stage = 0
+ // AncestorMode is the base revision
+ AncestorMode Stage = 1
+ // OurMode is the first tree revision, ours
+ OurMode Stage = 2
+ // TheirMode is the second tree revision, theirs
+ TheirMode Stage = 3
+)
+
+// Index contains the information about which objects are currently checked
+// out in the worktree, along with metadata about the working files. Changes
+// in the worktree are detected using this Index. The Index is also used
+// during merges.
+type Index struct {
+ Version uint32
+ Entries []Entry
+ Cache *Tree
+ ResolveUndo *ResolveUndo
+}
+
+// Entry represents a single file (or stage of a file) in the cache. An entry
+// represents exactly one stage of a file. If a file path is unmerged then
+// multiple Entry instances may appear for the same path name.
+type Entry struct {
+ // Hash is the SHA1 of the represented file
+ Hash plumbing.Hash
+ // Name is the Entry path name relative to top level directory
+ Name string
+ // CreatedAt time when the tracked path was created
+ CreatedAt time.Time
+ // ModifiedAt time when the tracked path was changed
+ ModifiedAt time.Time
+ // Dev and Inode of the tracked path
+ Dev, Inode uint32
+ // Mode of the path
+ Mode os.FileMode
+ // UID and GID, userid and group id of the owner
+ UID, GID uint32
+ // Size is the length in bytes for regular files
+ Size uint32
+	// Stage defines the merge stage this entry represents
+ // https://git-scm.com/book/en/v2/Git-Tools-Advanced-Merging
+ Stage Stage
+ // SkipWorktree used in sparse checkouts
+ // https://git-scm.com/docs/git-read-tree#_sparse_checkout
+ SkipWorktree bool
+	// IntentToAdd records only the fact that the path will be added later
+ // https://git-scm.com/docs/git-add ("git add -N")
+ IntentToAdd bool
+}
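+
+// A hypothetical sketch of how an unmerged path shows up in the index: the
+// same name appears once per stage, and resolving the conflict replaces the
+// three entries with a single Merged one.
+//
+//	conflicted := []Entry{
+//		{Name: "file.go", Stage: AncestorMode},
+//		{Name: "file.go", Stage: OurMode},
+//		{Name: "file.go", Stage: TheirMode},
+//	}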
+
+// Tree contains pre-computed hashes for trees that can be derived from the
+// index. It helps speed up tree object generation from index for a new commit.
+type Tree struct {
+ Entries []TreeEntry
+}
+
+// TreeEntry entry of a cached Tree
+type TreeEntry struct {
+ // Path component (relative to its parent directory)
+ Path string
+ // Entries is the number of entries in the index that is covered by the tree
+ // this entry represents
+ Entries int
+ // Trees is the number that represents the number of subtrees this tree has
+ Trees int
+ // Hash object name for the object that would result from writing this span
+ // of index as a tree.
+ Hash plumbing.Hash
+}
+
+// ResolveUndo is used when a conflict is resolved (e.g. with "git add path"):
+// the higher stage entries are removed and a stage-0 entry with the proper
+// resolution is added. The removed higher stage entries are saved in the
+// resolve undo extension.
+type ResolveUndo struct {
+ Entries []ResolveUndoEntry
+}
+
+// ResolveUndoEntry contains the information about a conflict when it is resolved
+type ResolveUndoEntry struct {
+ Path string
+ Stages map[Stage]plumbing.Hash
+}
diff --git a/plumbing/format/objfile/common_test.go b/plumbing/format/objfile/common_test.go
new file mode 100644
index 0000000..7c8b75c
--- /dev/null
+++ b/plumbing/format/objfile/common_test.go
@@ -0,0 +1,69 @@
+package objfile
+
+import (
+ "encoding/base64"
+ "testing"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/src-d/go-git.v4/plumbing"
+)
+
+type objfileFixture struct {
+ hash string // hash of data
+ t plumbing.ObjectType // object type
+ content string // base64-encoded content
+ data string // base64-encoded objfile data
+}
+
+var objfileFixtures = []objfileFixture{
+ {
+ "e69de29bb2d1d6434b8b29ae775ad8c2e48c5391",
+ plumbing.BlobObject,
+ base64.StdEncoding.EncodeToString([]byte("")),
+ "eAFLyslPUjBgAAAJsAHw",
+ },
+ {
+ "a8a940627d132695a9769df883f85992f0ff4a43",
+ plumbing.BlobObject,
+ base64.StdEncoding.EncodeToString([]byte("this is a test")),
+ "eAFLyslPUjA0YSjJyCxWAKJEhZLU4hIAUDYHOg==",
+ },
+ {
+ "4dc2174801ac4a3d36886210fd086fbe134cf7b2",
+ plumbing.BlobObject,
+ base64.StdEncoding.EncodeToString([]byte("this\nis\n\n\na\nmultiline\n\ntest.\n")),
+ "eAFLyslPUjCyZCjJyCzmAiIurkSu3NKcksyczLxULq6S1OISPS4A1I8LMQ==",
+ },
+ {
+ "13e6f47dd57798bfdc728d91f5c6d7f40c5bb5fc",
+ plumbing.BlobObject,
+ base64.StdEncoding.EncodeToString([]byte("this tests\r\nCRLF\r\nencoded files.\r\n")),
+ "eAFLyslPUjA2YSjJyCxWKEktLinm5XIO8nHj5UrNS85PSU1RSMvMSS3W4+UCABp3DNE=",
+ },
+ {
+ "72a7bc4667ab068e954172437b993d9fbaa137cb",
+ plumbing.BlobObject,
+ base64.StdEncoding.EncodeToString([]byte("test@example.com")),
+ "eAFLyslPUjA0YyhJLS5xSK1IzC3ISdVLzs8FAGVtCIA=",
+ },
+ {
+ "bb2b40e85ec0455d1de72daff71583f0dd72a33f",
+ plumbing.BlobObject,
+ base64.StdEncoding.EncodeToString([]byte("package main\r\n\r\nimport (\r\n\t\"fmt\"\r\n\t\"io\"\r\n\t\"os\"\r\n\r\n\t\"gopkg.in/src-d/go-git.v3\"\r\n)\r\n\r\nfunc main() {\r\n\tfmt.Printf(\"Retrieving %q ...\\n\", os.Args[2])\r\n\tr, err := git.NewRepository(os.Args[2], nil)\r\n\tif err != nil {\r\n\t\tpanic(err)\r\n\t}\r\n\r\n\tif err := r.Pull(\"origin\", \"refs/heads/master\"); err != nil {\r\n\t\tpanic(err)\r\n\t}\r\n\r\n\tdumpCommits(r)\r\n}\r\n\r\nfunc dumpCommits(r *git.Repository) {\r\n\titer := r.Commits()\r\n\tdefer iter.Close()\r\n\r\n\tfor {\r\n\t\tcommit, err := iter.Next()\r\n\t\tif err != nil {\r\n\t\t\tif err == io.EOF {\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\r\n\t\t\tpanic(err)\r\n\t\t}\r\n\r\n\t\tfmt.Println(commit)\r\n\t}\r\n}\r\n")),
+ "eAGNUU1LAzEU9JpC/0NcEFJps2ARQdmDFD3W0qt6SHez8dHdZH1JqyL+d/Oy/aDgQVh47LzJTGayatyKX99MzzpVrpXRvFVgh4PhANrOYeBiOGBZ3YaMJrg0nI+D/o3r1kaCzT2Wkyo3bmIgyO00rkfEqDe2TIJixL/jgagjFwg21CJb6oCgt2ANv3jnUsoXm4258/IejX++eo0CDMdcI/LbgpPuXH8sdec8BIdf4sgccwsN0aFO9POCgGTIOmWhFFGE9j/p1jtWFEW52DSNyByCAXLPUNc+f9Oq8nmrfNCYje7+o1lt2m7m2haCF2SVnFL6kw2/pBzHEH0rEH0oI8q9BF220nWEaSdnjfNaRDDCtcM+WZnsDgUl4lx/BuKxv6rYY0XBwcmHp8deh7EVarWmQ7uC2Glre/TweI0VvTk5xaTx+wWX66Gs",
+ },
+ {
+ "e94db0f9ffca44dc7bade6a3591f544183395a7c",
+ plumbing.TreeObject,
+ "MTAwNjQ0IFRlc3QgMS50eHQAqKlAYn0TJpWpdp34g/hZkvD/SkMxMDA2NDQgVGVzdCAyLnR4dABNwhdIAaxKPTaIYhD9CG++E0z3sjEwMDY0NCBUZXN0IDMudHh0ABPm9H3Vd5i/3HKNkfXG1/QMW7X8MTAwNjQ0IFRlc3QgNC50eHQAcqe8RmerBo6VQXJDe5k9n7qhN8sxMDA2NDQgVGVzdCA1LnR4dAC7K0DoXsBFXR3nLa/3FYPw3XKjPw==",
+ "eAErKUpNVTC0NGAwNDAwMzFRCEktLlEw1CupKGFYsdIhqVZYberKsrk/mn9ETvrw38sZWZURWJXvIXEPxjVetmYdSQJ/OfL3Cft834SsyhisSvjZl9qr5TP23ynqnfj12PUvPNFb/yCrMgGrKlq+xy19NVvfVMci5+qZtvN3LTQ/jazKFKxqt7bDi7gDrrGyz3XXfxdt/nC3aLE9AA2STmk=",
+ },
+ {
+ "9d7f8a56eaf92469dee8a856e716a03387ddb076",
+ plumbing.CommitObject,
+ "dHJlZSBlOTRkYjBmOWZmY2E0NGRjN2JhZGU2YTM1OTFmNTQ0MTgzMzk1YTdjCmF1dGhvciBKb3NodWEgU2pvZGluZyA8am9zaHVhLnNqb2RpbmdAc2NqYWxsaWFuY2UuY29tPiAxNDU2NTMxNTgzIC0wODAwCmNvbW1pdHRlciBKb3NodWEgU2pvZGluZyA8am9zaHVhLnNqb2RpbmdAc2NqYWxsaWFuY2UuY29tPiAxNDU2NTMxNTgzIC0wODAwCgpUZXN0IENvbW1pdAo=",
+ "eAGtjksOgjAUAF33FO8CktZ+aBNjTNy51Qs8Xl8FAjSh5f4SvILLmcVkKM/zUOEi3amuzMDBxE6mkBKhMZHaDiM71DaoZI1RXutgsSWBW+3zCs9c+g3hNeY4LB+4jgc35cf3QiNO04ALcUN5voEy1lmtrNdwll5Ksdt9oPIfUuLNpcLjCIov3ApFmQ==",
+ },
+}
+
+func Test(t *testing.T) { TestingT(t) }
diff --git a/plumbing/format/objfile/reader.go b/plumbing/format/objfile/reader.go
new file mode 100644
index 0000000..e7e119c
--- /dev/null
+++ b/plumbing/format/objfile/reader.go
@@ -0,0 +1,118 @@
+package objfile
+
+import (
+ "compress/zlib"
+ "errors"
+ "io"
+ "strconv"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
+)
+
+var (
+ ErrClosed = errors.New("objfile: already closed")
+ ErrHeader = errors.New("objfile: invalid header")
+ ErrNegativeSize = errors.New("objfile: negative object size")
+)
+
+// Reader reads and decodes compressed objfile data from a provided io.Reader.
+// Reader implements io.ReadCloser. Close should be called when finished with
+// the Reader. Close will not close the underlying io.Reader.
+type Reader struct {
+ multi io.Reader
+ zlib io.ReadCloser
+ hasher plumbing.Hasher
+}
+
+// NewReader returns a new Reader reading from r.
+func NewReader(r io.Reader) (*Reader, error) {
+ zlib, err := zlib.NewReader(r)
+ if err != nil {
+ return nil, packfile.ErrZLib.AddDetails(err.Error())
+ }
+
+ return &Reader{
+ zlib: zlib,
+ }, nil
+}
+
+// Header reads the type and size of the object, and prepares the Reader to
+// read the object's content
+func (r *Reader) Header() (t plumbing.ObjectType, size int64, err error) {
+ var raw []byte
+ raw, err = r.readUntil(' ')
+ if err != nil {
+ return
+ }
+
+ t, err = plumbing.ParseObjectType(string(raw))
+ if err != nil {
+ return
+ }
+
+ raw, err = r.readUntil(0)
+ if err != nil {
+ return
+ }
+
+ size, err = strconv.ParseInt(string(raw), 10, 64)
+ if err != nil {
+ err = ErrHeader
+ return
+ }
+
+ defer r.prepareForRead(t, size)
+ return
+}
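+
+// readToWriter is a hypothetical usage sketch, not part of this change: it
+// shows the intended call order (Header before Read, Hash after the content
+// has been consumed) while copying the object contents to dst.
+func readToWriter(src io.Reader, dst io.Writer) (plumbing.Hash, error) {
+	r, err := NewReader(src)
+	if err != nil {
+		return plumbing.ZeroHash, err
+	}
+	defer r.Close()
+
+	if _, _, err := r.Header(); err != nil {
+		return plumbing.ZeroHash, err
+	}
+
+	if _, err := io.Copy(dst, r); err != nil {
+		return plumbing.ZeroHash, err
+	}
+
+	return r.Hash(), nil
+}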
+
+// readUntil reads one byte at a time from r until it encounters delim or an
+// error.
+func (r *Reader) readUntil(delim byte) ([]byte, error) {
+ var buf [1]byte
+ value := make([]byte, 0, 16)
+ for {
+ if n, err := r.zlib.Read(buf[:]); err != nil && (err != io.EOF || n == 0) {
+ if err == io.EOF {
+ return nil, ErrHeader
+ }
+ return nil, err
+ }
+
+ if buf[0] == delim {
+ return value, nil
+ }
+
+ value = append(value, buf[0])
+ }
+}
+
+func (r *Reader) prepareForRead(t plumbing.ObjectType, size int64) {
+ r.hasher = plumbing.NewHasher(t, size)
+ r.multi = io.TeeReader(r.zlib, r.hasher)
+}
+
+// Read reads len(p) bytes into p from the object data stream. It returns
+// the number of bytes read (0 <= n <= len(p)) and any error encountered. Even
+// if Read returns n < len(p), it may use all of p as scratch space during the
+// call.
+//
+// If Read encounters the end of the data stream it will return err == io.EOF,
+// either in the current call if n > 0 or in a subsequent call.
+func (r *Reader) Read(p []byte) (n int, err error) {
+ return r.multi.Read(p)
+}
+
+// Hash returns the hash of the object data stream that has been read so far.
+func (r *Reader) Hash() plumbing.Hash {
+ return r.hasher.Sum()
+}
+
+// Close releases any resources consumed by the Reader. Calling Close does not
+// close the wrapped io.Reader originally passed to NewReader.
+func (r *Reader) Close() error {
+ if err := r.zlib.Close(); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/plumbing/format/objfile/reader_test.go b/plumbing/format/objfile/reader_test.go
new file mode 100644
index 0000000..715792d
--- /dev/null
+++ b/plumbing/format/objfile/reader_test.go
@@ -0,0 +1,67 @@
+package objfile
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "io/ioutil"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/src-d/go-git.v4/plumbing"
+)
+
+type SuiteReader struct{}
+
+var _ = Suite(&SuiteReader{})
+
+func (s *SuiteReader) TestReadObjfile(c *C) {
+ for k, fixture := range objfileFixtures {
+ com := fmt.Sprintf("test %d: ", k)
+ hash := plumbing.NewHash(fixture.hash)
+ content, _ := base64.StdEncoding.DecodeString(fixture.content)
+ data, _ := base64.StdEncoding.DecodeString(fixture.data)
+
+ testReader(c, bytes.NewReader(data), hash, fixture.t, content, com)
+ }
+}
+
+func testReader(c *C, source io.Reader, hash plumbing.Hash, t plumbing.ObjectType, content []byte, com string) {
+ r, err := NewReader(source)
+ c.Assert(err, IsNil)
+
+ typ, size, err := r.Header()
+ c.Assert(err, IsNil)
+ c.Assert(typ, Equals, t)
+ c.Assert(content, HasLen, int(size))
+
+ rc, err := ioutil.ReadAll(r)
+ c.Assert(err, IsNil)
+	c.Assert(rc, DeepEquals, content, Commentf("%scontent=%s, expected=%s", com, base64.StdEncoding.EncodeToString(rc), base64.StdEncoding.EncodeToString(content)))
+
+	c.Assert(r.Hash(), Equals, hash) // Test Hash() before close
+	c.Assert(r.Close(), IsNil)
+}
+
+func (s *SuiteReader) TestReadEmptyObjfile(c *C) {
+ source := bytes.NewReader([]byte{})
+ _, err := NewReader(source)
+ c.Assert(err, NotNil)
+}
+
+func (s *SuiteReader) TestReadGarbage(c *C) {
+ source := bytes.NewReader([]byte("!@#$RO!@NROSADfinq@o#irn@oirfn"))
+ _, err := NewReader(source)
+ c.Assert(err, NotNil)
+}
+
+func (s *SuiteReader) TestReadCorruptZLib(c *C) {
+ data, _ := base64.StdEncoding.DecodeString("eAFLysaalPUjBgAAAJsAHw")
+ source := bytes.NewReader(data)
+ r, err := NewReader(source)
+ c.Assert(err, IsNil)
+
+ _, _, err = r.Header()
+ c.Assert(err, NotNil)
+}
diff --git a/plumbing/format/objfile/writer.go b/plumbing/format/objfile/writer.go
new file mode 100644
index 0000000..44563d2
--- /dev/null
+++ b/plumbing/format/objfile/writer.go
@@ -0,0 +1,109 @@
+package objfile
+
+import (
+ "compress/zlib"
+ "errors"
+ "io"
+ "strconv"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+)
+
+var (
+ ErrOverflow = errors.New("objfile: declared data length exceeded (overflow)")
+)
+
+// Writer writes and encodes data in compressed objfile format to a provided
+// io.Writer. Close should be called when finished with the Writer. Close
+// will not close the underlying io.Writer.
+type Writer struct {
+ raw io.Writer
+ zlib io.WriteCloser
+ hasher plumbing.Hasher
+ multi io.Writer
+
+ closed bool
+ pending int64 // number of unwritten bytes
+}
+
+// NewWriter returns a new Writer writing to w.
+//
+// The returned Writer implements io.WriteCloser. Close should be called when
+// finished with the Writer. Close will not close the underlying io.Writer.
+func NewWriter(w io.Writer) *Writer {
+ return &Writer{
+ raw: w,
+ zlib: zlib.NewWriter(w),
+ }
+}
+
+// WriteHeader writes the type and the size and prepares to accept the object's
+// contents. If an invalid t is provided, plumbing.ErrInvalidType is returned. If a
+// negative size is provided, ErrNegativeSize is returned.
+func (w *Writer) WriteHeader(t plumbing.ObjectType, size int64) error {
+ if !t.Valid() {
+ return plumbing.ErrInvalidType
+ }
+ if size < 0 {
+ return ErrNegativeSize
+ }
+
+ b := t.Bytes()
+ b = append(b, ' ')
+ b = append(b, []byte(strconv.FormatInt(size, 10))...)
+ b = append(b, 0)
+
+ defer w.prepareForWrite(t, size)
+ _, err := w.zlib.Write(b)
+
+ return err
+}
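+
+// writeObject is a hypothetical usage sketch, not part of this change: it
+// writes a complete object (header plus content) and returns its hash. The
+// declared size must match len(content); writing more returns ErrOverflow.
+func writeObject(dst io.Writer, t plumbing.ObjectType, content []byte) (plumbing.Hash, error) {
+	w := NewWriter(dst)
+	defer w.Close()
+
+	if err := w.WriteHeader(t, int64(len(content))); err != nil {
+		return plumbing.ZeroHash, err
+	}
+
+	if _, err := w.Write(content); err != nil {
+		return plumbing.ZeroHash, err
+	}
+
+	return w.Hash(), nil
+}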
+
+func (w *Writer) prepareForWrite(t plumbing.ObjectType, size int64) {
+ w.pending = size
+
+ w.hasher = plumbing.NewHasher(t, size)
+ w.multi = io.MultiWriter(w.zlib, w.hasher)
+}
+
+// Write writes the object's contents. Write returns the error ErrOverflow if
+// more than size bytes are written after WriteHeader.
+func (w *Writer) Write(p []byte) (n int, err error) {
+ if w.closed {
+ return 0, ErrClosed
+ }
+
+	overflow := false
+	if int64(len(p)) > w.pending {
+		p = p[0:w.pending]
+		overflow = true
+	}
+
+	n, err = w.multi.Write(p)
+	w.pending -= int64(n)
+	if err == nil && overflow {
+		err = ErrOverflow
+		return
+	}
+
+ return
+}
+
+// Hash returns the hash of the object data stream that has been written so far.
+// It can be called before or after Close.
+func (w *Writer) Hash() plumbing.Hash {
+ return w.hasher.Sum() // Not yet closed, return hash of data written so far
+}
+
+// Close releases any resources consumed by the Writer.
+//
+// Calling Close does not close the wrapped io.Writer originally passed to
+// NewWriter.
+func (w *Writer) Close() error {
+ if err := w.zlib.Close(); err != nil {
+ return err
+ }
+
+ w.closed = true
+ return nil
+}
diff --git a/plumbing/format/objfile/writer_test.go b/plumbing/format/objfile/writer_test.go
new file mode 100644
index 0000000..46dbea6
--- /dev/null
+++ b/plumbing/format/objfile/writer_test.go
@@ -0,0 +1,80 @@
+package objfile
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "io"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/src-d/go-git.v4/plumbing"
+)
+
+type SuiteWriter struct{}
+
+var _ = Suite(&SuiteWriter{})
+
+func (s *SuiteWriter) TestWriteObjfile(c *C) {
+ for k, fixture := range objfileFixtures {
+ buffer := bytes.NewBuffer(nil)
+
+ com := fmt.Sprintf("test %d: ", k)
+ hash := plumbing.NewHash(fixture.hash)
+ content, _ := base64.StdEncoding.DecodeString(fixture.content)
+
+ // Write the data out to the buffer
+ testWriter(c, buffer, hash, fixture.t, content)
+
+ // Read the data back in from the buffer to be sure it matches
+ testReader(c, buffer, hash, fixture.t, content, com)
+ }
+}
+
+func testWriter(c *C, dest io.Writer, hash plumbing.Hash, t plumbing.ObjectType, content []byte) {
+ size := int64(len(content))
+ w := NewWriter(dest)
+
+ err := w.WriteHeader(t, size)
+ c.Assert(err, IsNil)
+
+ written, err := io.Copy(w, bytes.NewReader(content))
+ c.Assert(err, IsNil)
+ c.Assert(written, Equals, size)
+
+ c.Assert(w.Hash(), Equals, hash)
+ c.Assert(w.Close(), IsNil)
+}
+
+func (s *SuiteWriter) TestWriteOverflow(c *C) {
+ buf := bytes.NewBuffer(nil)
+ w := NewWriter(buf)
+
+ err := w.WriteHeader(plumbing.BlobObject, 8)
+ c.Assert(err, IsNil)
+
+ n, err := w.Write([]byte("1234"))
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 4)
+
+ n, err = w.Write([]byte("56789"))
+ c.Assert(err, Equals, ErrOverflow)
+ c.Assert(n, Equals, 4)
+}
+
+func (s *SuiteWriter) TestNewWriterInvalidType(c *C) {
+ buf := bytes.NewBuffer(nil)
+ w := NewWriter(buf)
+
+ err := w.WriteHeader(plumbing.InvalidObject, 8)
+ c.Assert(err, Equals, plumbing.ErrInvalidType)
+}
+
+func (s *SuiteWriter) TestNewWriterInvalidSize(c *C) {
+ buf := bytes.NewBuffer(nil)
+ w := NewWriter(buf)
+
+ err := w.WriteHeader(plumbing.BlobObject, -1)
+ c.Assert(err, Equals, ErrNegativeSize)
+ err = w.WriteHeader(plumbing.BlobObject, -1651860)
+ c.Assert(err, Equals, ErrNegativeSize)
+}
diff --git a/plumbing/format/packfile/decoder.go b/plumbing/format/packfile/decoder.go
new file mode 100644
index 0000000..470e59b
--- /dev/null
+++ b/plumbing/format/packfile/decoder.go
@@ -0,0 +1,307 @@
+package packfile
+
+import (
+ "bytes"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/storer"
+)
+
+// Format specifies if the packfile uses ref-deltas or ofs-deltas.
+type Format int
+
+// Possible values of the Format type.
+const (
+ UnknownFormat Format = iota
+ OFSDeltaFormat
+ REFDeltaFormat
+)
+
+var (
+ // ErrMaxObjectsLimitReached is returned by Decode when the number
+ // of objects in the packfile is higher than
+ // Decoder.MaxObjectsLimit.
+ ErrMaxObjectsLimitReached = NewError("max. objects limit reached")
+ // ErrInvalidObject is returned by Decode when an invalid object is
+ // found in the packfile.
+ ErrInvalidObject = NewError("invalid git object")
+	// ErrPackEntryNotFound is returned by Decode when a reference in
+	// the packfile references an unknown object.
+ ErrPackEntryNotFound = NewError("can't find a pack entry")
+ // ErrZLib is returned by Decode when there was an error unzipping
+ // the packfile contents.
+ ErrZLib = NewError("zlib reading error")
+ // ErrCannotRecall is returned by RecallByOffset or RecallByHash if the object
+ // to recall cannot be returned.
+ ErrCannotRecall = NewError("cannot recall object")
+	// ErrNonSeekable is returned by NewDecoder when a non-seekable scanner
+	// is used without an ObjectStorer, or by ReadObjectAt when the scanner
+	// is not seekable
+	ErrNonSeekable = NewError("non-seekable scanner")
+	// ErrRollback is returned when rolling back a transaction fails after a
+	// SetObject error
+	ErrRollback = NewError("rollback error, during set error")
+)
+
+// Decoder reads and decodes packfiles from an input stream.
+type Decoder struct {
+ s *Scanner
+ o storer.ObjectStorer
+ tx storer.Transaction
+
+ offsetToHash map[int64]plumbing.Hash
+ hashToOffset map[plumbing.Hash]int64
+ crcs map[plumbing.Hash]uint32
+}
+
+// NewDecoder returns a new Decoder that reads from the given Scanner, storing
+// the decoded objects in o. A nil o is only allowed when the Scanner is
+// seekable.
+func NewDecoder(s *Scanner, o storer.ObjectStorer) (*Decoder, error) {
+ if !s.IsSeekable && o == nil {
+ return nil, ErrNonSeekable
+ }
+
+ return &Decoder{
+ s: s,
+ o: o,
+
+		offsetToHash: make(map[int64]plumbing.Hash),
+		hashToOffset: make(map[plumbing.Hash]int64),
+		crcs:         make(map[plumbing.Hash]uint32),
+ }, nil
+}
+
+// Decode reads all the objects in the packfile, storing them in the
+// ObjectStorer when one was provided, and returns the packfile checksum.
+func (d *Decoder) Decode() (checksum plumbing.Hash, err error) {
+ if err := d.doDecode(); err != nil {
+ return plumbing.ZeroHash, err
+ }
+
+ return d.s.Checksum()
+}
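+
+// decodePackfile is a hypothetical usage sketch, not part of this change: it
+// wires a Scanner and an ObjectStorer into a Decoder, decodes every object
+// and returns the packfile checksum.
+func decodePackfile(s *Scanner, o storer.ObjectStorer) (plumbing.Hash, error) {
+	d, err := NewDecoder(s, o)
+	if err != nil {
+		return plumbing.ZeroHash, err
+	}
+	defer d.Close()
+
+	return d.Decode()
+}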
+
+func (d *Decoder) doDecode() error {
+ _, count, err := d.s.Header()
+ if err != nil {
+ return err
+ }
+
+ _, isTxStorer := d.o.(storer.Transactioner)
+ switch {
+ case d.o == nil:
+ return d.readObjects(int(count))
+ case isTxStorer:
+ return d.readObjectsWithObjectStorerTx(int(count))
+ default:
+ return d.readObjectsWithObjectStorer(int(count))
+ }
+}
+
+func (d *Decoder) readObjects(count int) error {
+ for i := 0; i < count; i++ {
+ if _, err := d.ReadObject(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *Decoder) readObjectsWithObjectStorer(count int) error {
+ for i := 0; i < count; i++ {
+ obj, err := d.ReadObject()
+ if err != nil {
+ return err
+ }
+
+ if _, err := d.o.SetObject(obj); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *Decoder) readObjectsWithObjectStorerTx(count int) error {
+	d.tx = d.o.(storer.Transactioner).Begin()
+
+	for i := 0; i < count; i++ {
+		obj, err := d.ReadObject()
+		if err != nil {
+			return err
+		}
+
+		if _, err := d.tx.SetObject(obj); err != nil {
+			if rerr := d.tx.Rollback(); rerr != nil {
+				return ErrRollback.AddDetails(
+					"error: %s, during tx.Set error: %s", rerr, err,
+				)
+			}
+
+			return err
+		}
+	}
+
+	return d.tx.Commit()
+}
+
+// ReadObject reads an object from the stream and returns it
+func (d *Decoder) ReadObject() (plumbing.Object, error) {
+ h, err := d.s.NextObjectHeader()
+ if err != nil {
+ return nil, err
+ }
+
+ obj := d.newObject()
+ obj.SetSize(h.Length)
+ obj.SetType(h.Type)
+ var crc uint32
+ switch h.Type {
+ case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
+ crc, err = d.fillRegularObjectContent(obj)
+ case plumbing.REFDeltaObject:
+ crc, err = d.fillREFDeltaObjectContent(obj, h.Reference)
+ case plumbing.OFSDeltaObject:
+ crc, err = d.fillOFSDeltaObjectContent(obj, h.OffsetReference)
+ default:
+ err = ErrInvalidObject.AddDetails("type %q", h.Type)
+ }
+
+ if err != nil {
+ return obj, err
+ }
+
+ hash := obj.Hash()
+ d.setOffset(hash, h.Offset)
+ d.setCRC(hash, crc)
+
+ return obj, nil
+}
+
+func (d *Decoder) newObject() plumbing.Object {
+ if d.o == nil {
+ return &plumbing.MemoryObject{}
+ }
+
+ return d.o.NewObject()
+}
+
+// ReadObjectAt reads an object at the given location
+func (d *Decoder) ReadObjectAt(offset int64) (plumbing.Object, error) {
+ if !d.s.IsSeekable {
+ return nil, ErrNonSeekable
+ }
+
+ beforeJump, err := d.s.Seek(offset)
+ if err != nil {
+ return nil, err
+ }
+
+ defer func() {
+ _, seekErr := d.s.Seek(beforeJump)
+ if err == nil {
+ err = seekErr
+ }
+ }()
+
+ return d.ReadObject()
+}
+
+func (d *Decoder) fillRegularObjectContent(obj plumbing.Object) (uint32, error) {
+ w, err := obj.Writer()
+ if err != nil {
+ return 0, err
+ }
+
+ _, crc, err := d.s.NextObject(w)
+ return crc, err
+}
+
+func (d *Decoder) fillREFDeltaObjectContent(obj plumbing.Object, ref plumbing.Hash) (uint32, error) {
+ buf := bytes.NewBuffer(nil)
+ _, crc, err := d.s.NextObject(buf)
+ if err != nil {
+ return 0, err
+ }
+
+ base, err := d.recallByHash(ref)
+ if err != nil {
+ return 0, err
+ }
+
+ obj.SetType(base.Type())
+ return crc, ApplyDelta(obj, base, buf.Bytes())
+}
+
+func (d *Decoder) fillOFSDeltaObjectContent(obj plumbing.Object, offset int64) (uint32, error) {
+ buf := bytes.NewBuffer(nil)
+ _, crc, err := d.s.NextObject(buf)
+ if err != nil {
+ return 0, err
+ }
+
+ base, err := d.recallByOffset(offset)
+ if err != nil {
+ return 0, err
+ }
+
+ obj.SetType(base.Type())
+ return crc, ApplyDelta(obj, base, buf.Bytes())
+}
+
+func (d *Decoder) setOffset(h plumbing.Hash, offset int64) {
+ d.offsetToHash[offset] = h
+ d.hashToOffset[h] = offset
+}
+
+func (d *Decoder) setCRC(h plumbing.Hash, crc uint32) {
+ d.crcs[h] = crc
+}
+
+func (d *Decoder) recallByOffset(o int64) (plumbing.Object, error) {
+ if d.s.IsSeekable {
+ return d.ReadObjectAt(o)
+ }
+
+ if h, ok := d.offsetToHash[o]; ok {
+ return d.tx.Object(plumbing.AnyObject, h)
+ }
+
+ return nil, plumbing.ErrObjectNotFound
+}
+
+func (d *Decoder) recallByHash(h plumbing.Hash) (plumbing.Object, error) {
+ if d.s.IsSeekable {
+ if o, ok := d.hashToOffset[h]; ok {
+ return d.ReadObjectAt(o)
+ }
+ }
+
+ obj, err := d.tx.Object(plumbing.AnyObject, h)
+ if err != plumbing.ErrObjectNotFound {
+ return obj, err
+ }
+
+ return nil, plumbing.ErrObjectNotFound
+}
+
+// SetOffsets sets the hash-to-offset map, required when using ReadObjectAt
+// without first decoding the full packfile
+func (d *Decoder) SetOffsets(offsets map[plumbing.Hash]int64) {
+ d.hashToOffset = offsets
+}
+
+// Offsets returns the offset of each object read so far, keyed by hash
+func (d *Decoder) Offsets() map[plumbing.Hash]int64 {
+ return d.hashToOffset
+}
+
+// CRCs returns the CRC-32 of each object read, keyed by hash
+func (d *Decoder) CRCs() map[plumbing.Hash]uint32 {
+ return d.crcs
+}
+
+// Close closes the Scanner; usually this means the rest of the reader is
+// read and discarded
+func (d *Decoder) Close() error {
+ return d.s.Close()
+}
diff --git a/plumbing/format/packfile/decoder_test.go b/plumbing/format/packfile/decoder_test.go
new file mode 100644
index 0000000..e510cf2
--- /dev/null
+++ b/plumbing/format/packfile/decoder_test.go
@@ -0,0 +1,182 @@
+package packfile
+
+import (
+ "io"
+ "testing"
+
+ "gopkg.in/src-d/go-git.v4/fixtures"
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
+ "gopkg.in/src-d/go-git.v4/storage/memory"
+
+ . "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type ReaderSuite struct {
+ fixtures.Suite
+}
+
+var _ = Suite(&ReaderSuite{})
+
+func (s *ReaderSuite) TestNewDecodeNonSeekable(c *C) {
+ scanner := NewScanner(nil)
+ d, err := NewDecoder(scanner, nil)
+
+ c.Assert(d, IsNil)
+ c.Assert(err, NotNil)
+}
+
+func (s *ReaderSuite) TestDecode(c *C) {
+ fixtures.Basic().ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
+ scanner := NewScanner(f.Packfile())
+ storage := memory.NewStorage()
+
+ d, err := NewDecoder(scanner, storage)
+ c.Assert(err, IsNil)
+ defer d.Close()
+
+ ch, err := d.Decode()
+ c.Assert(err, IsNil)
+ c.Assert(ch, Equals, f.PackfileHash)
+
+ assertObjects(c, storage, expectedHashes)
+ })
+}
+
+func (s *ReaderSuite) TestDecodeInMemory(c *C) {
+ fixtures.Basic().ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
+ scanner := NewScanner(f.Packfile())
+ d, err := NewDecoder(scanner, nil)
+ c.Assert(err, IsNil)
+
+ ch, err := d.Decode()
+ c.Assert(err, IsNil)
+ c.Assert(ch, Equals, f.PackfileHash)
+ })
+}
+
+var expectedHashes = []string{
+ "918c48b83bd081e863dbe1b80f8998f058cd8294",
+ "af2d6a6954d532f8ffb47615169c8fdf9d383a1a",
+ "1669dce138d9b841a518c64b10914d88f5e488ea",
+ "a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69",
+ "b8e471f58bcbca63b07bda20e428190409c2db47",
+ "35e85108805c84807bc66a02d91535e1e24b38b9",
+ "b029517f6300c2da0f4b651b8642506cd6aaf45d",
+ "32858aad3c383ed1ff0a0f9bdf231d54a00c9e88",
+ "d3ff53e0564a9f87d8e84b6e28e5060e517008aa",
+ "c192bd6a24ea1ab01d78686e417c8bdc7c3d197f",
+ "d5c0f4ab811897cadf03aec358ae60d21f91c50d",
+ "49c6bb89b17060d7b4deacb7b338fcc6ea2352a9",
+ "cf4aa3b38974fb7d81f367c0830f7d78d65ab86b",
+ "9dea2395f5403188298c1dabe8bdafe562c491e3",
+ "586af567d0bb5e771e49bdd9434f5e0fb76d25fa",
+ "9a48f23120e880dfbe41f7c9b7b708e9ee62a492",
+ "5a877e6a906a2743ad6e45d99c1793642aaf8eda",
+ "c8f1d8c61f9da76f4cb49fd86322b6e685dba956",
+ "a8d315b2b1c615d43042c3a62402b8a54288cf5c",
+ "a39771a7651f97faf5c72e08224d857fc35133db",
+ "880cd14280f4b9b6ed3986d6671f907d7cc2a198",
+ "fb72698cab7617ac416264415f13224dfd7a165e",
+ "4d081c50e250fa32ea8b1313cf8bb7c2ad7627fd",
+ "eba74343e2f15d62adedfd8c883ee0262b5c8021",
+ "c2d30fa8ef288618f65f6eed6e168e0d514886f4",
+ "8dcef98b1d52143e1e2dbc458ffe38f925786bf2",
+ "aa9b383c260e1d05fbbf6b30a02914555e20c725",
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
+ "dbd3641b371024f44d0e469a9c8f5457b0660de1",
+ "e8d3ffab552895c19b9fcf7aa264d277cde33881",
+ "7e59600739c96546163833214c36459e324bad0a",
+}
+
+func (s *ReaderSuite) TestDecodeCRCs(c *C) {
+ f := fixtures.Basic().ByTag("ofs-delta").One()
+
+ scanner := NewScanner(f.Packfile())
+ storage := memory.NewStorage()
+
+ d, err := NewDecoder(scanner, storage)
+ c.Assert(err, IsNil)
+ _, err = d.Decode()
+ c.Assert(err, IsNil)
+
+ var sum uint64
+ for _, crc := range d.CRCs() {
+ sum += uint64(crc)
+ }
+
+ c.Assert(int(sum), Equals, 78022211966)
+}
+
+func (s *ReaderSuite) TestReadObjectAt(c *C) {
+ f := fixtures.Basic().One()
+ scanner := NewScanner(f.Packfile())
+ d, err := NewDecoder(scanner, nil)
+ c.Assert(err, IsNil)
+
+ // when the packfile is ref-delta based, the offsets are required
+ if f.Is("ref-delta") {
+ offsets := getOffsetsFromIdx(f.Idx())
+ d.SetOffsets(offsets)
+ }
+
+	// the object at offset 186 is a delta, so it should be recalled
+	// without having been read before.
+ obj, err := d.ReadObjectAt(186)
+ c.Assert(err, IsNil)
+ c.Assert(obj.Hash().String(), Equals, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
+}
+
+func (s *ReaderSuite) TestOffsets(c *C) {
+ f := fixtures.Basic().One()
+ scanner := NewScanner(f.Packfile())
+ d, err := NewDecoder(scanner, nil)
+ c.Assert(err, IsNil)
+
+ c.Assert(d.Offsets(), HasLen, 0)
+
+ _, err = d.Decode()
+ c.Assert(err, IsNil)
+
+ c.Assert(d.Offsets(), HasLen, 31)
+}
+
+func (s *ReaderSuite) TestSetOffsets(c *C) {
+ f := fixtures.Basic().One()
+ scanner := NewScanner(f.Packfile())
+ d, err := NewDecoder(scanner, nil)
+ c.Assert(err, IsNil)
+
+ h := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
+ d.SetOffsets(map[plumbing.Hash]int64{h: 42})
+
+ o := d.Offsets()
+ c.Assert(o, HasLen, 1)
+ c.Assert(o[h], Equals, int64(42))
+}
+
+func assertObjects(c *C, s *memory.Storage, expects []string) {
+ c.Assert(len(expects), Equals, len(s.Objects))
+ for _, exp := range expects {
+ obt, err := s.Object(plumbing.AnyObject, plumbing.NewHash(exp))
+ c.Assert(err, IsNil)
+ c.Assert(obt.Hash().String(), Equals, exp)
+ }
+}
+
+func getOffsetsFromIdx(r io.Reader) map[plumbing.Hash]int64 {
+ idx := &idxfile.Idxfile{}
+ err := idxfile.NewDecoder(r).Decode(idx)
+ if err != nil {
+ panic(err)
+ }
+
+ offsets := make(map[plumbing.Hash]int64)
+ for _, e := range idx.Entries {
+ offsets[e.Hash] = int64(e.Offset)
+ }
+
+ return offsets
+}
diff --git a/plumbing/format/packfile/delta.go b/plumbing/format/packfile/delta.go
new file mode 100644
index 0000000..2493a39
--- /dev/null
+++ b/plumbing/format/packfile/delta.go
@@ -0,0 +1,181 @@
+package packfile
+
+import (
+ "io/ioutil"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+)
+
+// See https://github.com/git/git/blob/49fa3dc76179e04b0833542fa52d0f287a4955ac/delta.h
+// https://github.com/git/git/blob/c2c5f6b1e479f2c38e0e01345350620944e3527f/patch-delta.c,
+// and https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js
+// for details about the delta format.
+
+const deltaSizeMin = 4
+
+// ApplyDelta writes to target the result of applying the modification deltas in delta to base.
+func ApplyDelta(target, base plumbing.Object, delta []byte) error {
+ r, err := base.Reader()
+ if err != nil {
+ return err
+ }
+
+ w, err := target.Writer()
+ if err != nil {
+ return err
+ }
+
+ src, err := ioutil.ReadAll(r)
+ if err != nil {
+ return err
+ }
+
+ dst := PatchDelta(src, delta)
+ target.SetSize(int64(len(dst)))
+
+ if _, err := w.Write(dst); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// PatchDelta returns the result of applying the modification deltas in delta to src.
+func PatchDelta(src, delta []byte) []byte {
+ if len(delta) < deltaSizeMin {
+ return nil
+ }
+
+ srcSz, delta := decodeLEB128(delta)
+ if srcSz != uint(len(src)) {
+ return nil
+ }
+
+ targetSz, delta := decodeLEB128(delta)
+ remainingTargetSz := targetSz
+
+ var dest []byte
+ var cmd byte
+ for {
+ cmd = delta[0]
+ delta = delta[1:]
+ if isCopyFromSrc(cmd) {
+ var offset, sz uint
+ offset, delta = decodeOffset(cmd, delta)
+ sz, delta = decodeSize(cmd, delta)
+ if invalidSize(sz, targetSz) ||
+ invalidOffsetSize(offset, sz, srcSz) {
+ break
+ }
+ dest = append(dest, src[offset:offset+sz]...)
+ remainingTargetSz -= sz
+ } else if isCopyFromDelta(cmd) {
+ sz := uint(cmd) // cmd is the size itself
+ if invalidSize(sz, targetSz) {
+ break
+ }
+ dest = append(dest, delta[0:sz]...)
+ remainingTargetSz -= sz
+ delta = delta[sz:]
+ } else {
+ return nil
+ }
+
+ if remainingTargetSz <= 0 {
+ break
+ }
+ }
+
+ return dest
+}
+
+// Decodes a number encoded as an unsigned LEB128 at the start of some
+// binary data and returns the decoded number and the rest of the
+// stream.
+//
+// This must be called twice on the delta data buffer, first to get the
+// expected source buffer size, and again to get the target buffer size.
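+//
+// For example, the bytes {0x91, 0x2e} decode to 17 | (46 << 7) = 5905: 0x91
+// has the continuation bit set and payload 17, while 0x2e is a final byte
+// with payload 46.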
+func decodeLEB128(input []byte) (uint, []byte) {
+ var num, sz uint
+ var b byte
+ for {
+ b = input[sz]
+		num |= (uint(b) & payload) << (sz * 7) // concatenates 7-bit chunks
+ sz++
+
+ if uint(b)&continuation == 0 || sz == uint(len(input)) {
+ break
+ }
+ }
+
+ return num, input[sz:]
+}
+
+const (
+ payload = 0x7f // 0111 1111
+ continuation = 0x80 // 1000 0000
+)
+
+func isCopyFromSrc(cmd byte) bool {
+ return (cmd & 0x80) != 0
+}
+
+func isCopyFromDelta(cmd byte) bool {
+ return (cmd&0x80) == 0 && cmd != 0
+}
+
+func decodeOffset(cmd byte, delta []byte) (uint, []byte) {
+ var offset uint
+ if (cmd & 0x01) != 0 {
+ offset = uint(delta[0])
+ delta = delta[1:]
+ }
+ if (cmd & 0x02) != 0 {
+ offset |= uint(delta[0]) << 8
+ delta = delta[1:]
+ }
+ if (cmd & 0x04) != 0 {
+ offset |= uint(delta[0]) << 16
+ delta = delta[1:]
+ }
+ if (cmd & 0x08) != 0 {
+ offset |= uint(delta[0]) << 24
+ delta = delta[1:]
+ }
+
+ return offset, delta
+}
+
+func decodeSize(cmd byte, delta []byte) (uint, []byte) {
+ var sz uint
+ if (cmd & 0x10) != 0 {
+ sz = uint(delta[0])
+ delta = delta[1:]
+ }
+ if (cmd & 0x20) != 0 {
+ sz |= uint(delta[0]) << 8
+ delta = delta[1:]
+ }
+ if (cmd & 0x40) != 0 {
+ sz |= uint(delta[0]) << 16
+ delta = delta[1:]
+ }
+ if sz == 0 {
+ sz = 0x10000
+ }
+
+ return sz, delta
+}
+
+func invalidSize(sz, targetSz uint) bool {
+ return sz > targetSz
+}
+
+func invalidOffsetSize(offset, sz, srcSz uint) bool {
+ return sumOverflows(offset, sz) ||
+ offset+sz > srcSz
+}
+
+func sumOverflows(a, b uint) bool {
+ return a+b < a
+}
diff --git a/plumbing/format/packfile/doc.go b/plumbing/format/packfile/doc.go
new file mode 100644
index 0000000..0b173ca
--- /dev/null
+++ b/plumbing/format/packfile/doc.go
@@ -0,0 +1,168 @@
+// Package packfile implements an encoder/decoder for the Git packfile format
+package packfile
+
+/*
+GIT pack format
+===============
+
+== pack-*.pack files have the following format:
+
+ - A header appears at the beginning and consists of the following:
+
+ 4-byte signature:
+ The signature is: {'P', 'A', 'C', 'K'}
+
+ 4-byte version number (network byte order):
+ GIT currently accepts version number 2 or 3 but
+ generates version 2 only.
+
+ 4-byte number of objects contained in the pack (network byte order)
+
+ Observation: we cannot have more than 4G versions ;-) and
+ more than 4G objects in a pack.
+
+ - The header is followed by number of object entries, each of
+ which looks like this:
+
+ (undeltified representation)
+ n-byte type and length (3-bit type, (n-1)*7+4-bit length)
+ compressed data
+
+ (deltified representation)
+ n-byte type and length (3-bit type, (n-1)*7+4-bit length)
+ 20-byte base object name
+ compressed delta data
+
+ Observation: length of each object is encoded in a variable
+ length format and is not constrained to 32-bit or anything.
+
+ - The trailer records 20-byte SHA1 checksum of all of the above.
+
+== Original (version 1) pack-*.idx files have the following format:
+
+ - The header consists of 256 4-byte network byte order
+ integers. N-th entry of this table records the number of
+ objects in the corresponding pack, the first byte of whose
+ object name is less than or equal to N. This is called the
+ 'first-level fan-out' table.
+
+ - The header is followed by sorted 24-byte entries, one entry
+ per object in the pack. Each entry is:
+
+ 4-byte network byte order integer, recording where the
+ object is stored in the packfile as the offset from the
+ beginning.
+
+ 20-byte object name.
+
+ - The file is concluded with a trailer:
+
+ A copy of the 20-byte SHA1 checksum at the end of
+ corresponding packfile.
+
+ 20-byte SHA1-checksum of all of the above.
+
+Pack Idx file:
+
+ -- +--------------------------------+
+fanout | fanout[0] = 2 (for example) |-.
+table +--------------------------------+ |
+ | fanout[1] | |
+ +--------------------------------+ |
+ | fanout[2] | |
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
+ | fanout[255] = total objects |---.
+ -- +--------------------------------+ | |
+main | offset | | |
+index | object name 00XXXXXXXXXXXXXXXX | | |
+table +--------------------------------+ | |
+ | offset | | |
+ | object name 00XXXXXXXXXXXXXXXX | | |
+ +--------------------------------+<+ |
+ .-| offset | |
+ | | object name 01XXXXXXXXXXXXXXXX | |
+ | +--------------------------------+ |
+ | | offset | |
+ | | object name 01XXXXXXXXXXXXXXXX | |
+ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
+ | | offset | |
+ | | object name FFXXXXXXXXXXXXXXXX | |
+ --| +--------------------------------+<--+
+trailer | | packfile checksum |
+ | +--------------------------------+
+ | | idxfile checksum |
+ | +--------------------------------+
+ .-------.
+ |
+Pack file entry: <+
+
+ packed object header:
+ 1-byte size extension bit (MSB)
+ type (next 3 bit)
+ size0 (lower 4-bit)
+ n-byte sizeN (as long as MSB is set, each 7-bit)
+ size0..sizeN form 4+7+7+..+7 bit integer, size0
+ is the least significant part, and sizeN is the
+ most significant part.
+ packed object data:
+ If it is not DELTA, then deflated bytes (the size above
+ is the size before compression).
+ If it is REF_DELTA, then
+ 20-byte base object name SHA1 (the size above is the
+ size of the delta data that follows).
+ delta data, deflated.
+ If it is OFS_DELTA, then
+ n-byte offset (see below) interpreted as a negative
+ offset from the type-byte of the header of the
+ ofs-delta entry (the size above is the size of
+ the delta data that follows).
+ delta data, deflated.
+
+ offset encoding:
+ n bytes with MSB set in all but the last one.
+ The offset is then the number constructed by
+ concatenating the lower 7 bit of each byte, and
+ for n >= 2 adding 2^7 + 2^14 + ... + 2^(7*(n-1))
+ to the result.
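+
+     For example, the two-byte sequence {0x81, 0x00} encodes
+     ((1 << 7) | 0) + 2^7 = 256.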
+
+
+
+== Version 2 pack-*.idx files support packs larger than 4 GiB, and
+ have some other reorganizations. They have the format:
+
+ - A 4-byte magic number '\377tOc' which is an unreasonable
+ fanout[0] value.
+
+ - A 4-byte version number (= 2)
+
+ - A 256-entry fan-out table just like v1.
+
+ - A table of sorted 20-byte SHA1 object names. These are
+ packed together without offset values to reduce the cache
+ footprint of the binary search for a specific object name.
+
+ - A table of 4-byte CRC32 values of the packed object data.
+ This is new in v2 so compressed data can be copied directly
+ from pack to pack during repacking without undetected
+ data corruption.
+
+ - A table of 4-byte offset values (in network byte order).
+ These are usually 31-bit pack file offsets, but large
+ offsets are encoded as an index into the next table with
+ the msbit set.
+
+ - A table of 8-byte offset entries (empty for pack files less
+ than 2 GiB). Pack files are organized with heavily used
+ objects toward the front, so most object references should
+ not need to refer to this table.
+
+ - The same trailer as a v1 pack file:
+
+ A copy of the 20-byte SHA1 checksum at the end of
+ corresponding packfile.
+
+ 20-byte SHA1-checksum of all of the above.
+
+From:
+https://www.kernel.org/pub/software/scm/git/docs/v1.7.5/technical/pack-format.txt
+*/
diff --git a/plumbing/format/packfile/error.go b/plumbing/format/packfile/error.go
new file mode 100644
index 0000000..c0b9163
--- /dev/null
+++ b/plumbing/format/packfile/error.go
@@ -0,0 +1,30 @@
+package packfile
+
+import "fmt"
+
+// Error specifies errors returned during packfile parsing.
+type Error struct {
+ reason, details string
+}
+
+// NewError returns a new error.
+func NewError(reason string) *Error {
+ return &Error{reason: reason}
+}
+
+// Error returns a text representation of the error.
+func (e *Error) Error() string {
+ if e.details == "" {
+ return e.reason
+ }
+
+ return fmt.Sprintf("%s: %s", e.reason, e.details)
+}
+
+// AddDetails adds details to an error, with additional text.
+func (e *Error) AddDetails(format string, args ...interface{}) *Error {
+ return &Error{
+ reason: e.reason,
+ details: fmt.Sprintf(format, args...),
+ }
+}
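+
+// A usage sketch: sentinel errors in this package are created once with
+// NewError and enriched at the call site, as in
+//
+//	return ErrZLib.AddDetails("%s", err)
+//
+// AddDetails returns a copy, so the package-level sentinel is never mutated.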
diff --git a/plumbing/format/packfile/scanner.go b/plumbing/format/packfile/scanner.go
new file mode 100644
index 0000000..130bb94
--- /dev/null
+++ b/plumbing/format/packfile/scanner.go
@@ -0,0 +1,418 @@
+package packfile
+
+import (
+ "bufio"
+ "bytes"
+ "compress/zlib"
+ "fmt"
+ "hash"
+ "hash/crc32"
+ "io"
+ "io/ioutil"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/utils/binary"
+)
+
+var (
+	// ErrEmptyPackfile is returned by Header when no data is found in the packfile
+	ErrEmptyPackfile = NewError("empty packfile")
+	// ErrBadSignature is returned by Header when the signature in the packfile is incorrect.
+	ErrBadSignature = NewError("malformed pack file signature")
+	// ErrUnsupportedVersion is returned by Header when the packfile version is
+	// different than VersionSupported.
+	ErrUnsupportedVersion = NewError("unsupported packfile version")
+	// ErrSeekNotSupported is returned when the underlying reader does not
+	// support seeking
+	ErrSeekNotSupported = NewError("seek not supported")
+
+const (
+ // VersionSupported is the packfile version supported by this parser.
+ VersionSupported uint32 = 2
+)
+
+// ObjectHeader contains the information related to the object; it is
+// collected from the bytes preceding the object's content.
+type ObjectHeader struct {
+ Type plumbing.ObjectType
+ Offset int64
+ Length int64
+ Reference plumbing.Hash
+ OffsetReference int64
+}
+
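+// Scanner reads a packfile from an input stream, giving sequential access to
+// its header and to each object entry it contains.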
+type Scanner struct {
+ r reader
+ crc hash.Hash32
+
+ // pendingObject is used to detect if an object has been read, or still
+ // is waiting to be read
+ pendingObject *ObjectHeader
+ version, objects uint32
+
+	// IsSeekable reports whether this Scanner supports Seek; to have a
+	// seekable Scanner, an r implementing io.Seeker is required
+	IsSeekable bool
+}
+
+// NewScanner returns a new Scanner based on a reader; if the given reader
+// implements io.ReadSeeker the Scanner will also be seekable
+func NewScanner(r io.Reader) *Scanner {
+ seeker, ok := r.(io.ReadSeeker)
+ if !ok {
+ seeker = &trackableReader{Reader: r}
+ }
+
+ crc := crc32.NewIEEE()
+ return &Scanner{
+ r: &teeReader{
+			newBufferedSeeker(seeker),
+ crc,
+ },
+ crc: crc,
+ IsSeekable: ok,
+ }
+}
+
+// Header reads the whole packfile header (signature, version and object count).
+// It returns the version and the object count and performs checks on the
+// validity of the signature and the version fields.
+func (s *Scanner) Header() (version, objects uint32, err error) {
+ if s.version != 0 {
+ return s.version, s.objects, nil
+ }
+
+ sig, err := s.readSignature()
+ if err != nil {
+ if err == io.EOF {
+ err = ErrEmptyPackfile
+ }
+
+ return
+ }
+
+ if !s.isValidSignature(sig) {
+ err = ErrBadSignature
+ return
+ }
+
+ version, err = s.readVersion()
+ s.version = version
+ if err != nil {
+ return
+ }
+
+ if !s.isSupportedVersion(version) {
+ err = ErrUnsupportedVersion.AddDetails("%d", version)
+ return
+ }
+
+ objects, err = s.readCount()
+ s.objects = objects
+ return
+}
+
+// readSignature reads and returns the signature field in the packfile.
+func (s *Scanner) readSignature() ([]byte, error) {
+ var sig = make([]byte, 4)
+ if _, err := io.ReadFull(s.r, sig); err != nil {
+ return []byte{}, err
+ }
+
+ return sig, nil
+}
+
+// isValidSignature returns if sig is a valid packfile signature.
+func (s *Scanner) isValidSignature(sig []byte) bool {
+ return bytes.Equal(sig, []byte{'P', 'A', 'C', 'K'})
+}
+
+// readVersion reads and returns the version field of a packfile.
+func (s *Scanner) readVersion() (uint32, error) {
+ return binary.ReadUint32(s.r)
+}
+
+// isSupportedVersion returns whether version v is supported by the parser.
+// The current supported version is VersionSupported, defined above.
+func (s *Scanner) isSupportedVersion(v uint32) bool {
+ return v == VersionSupported
+}
+
+// readCount reads and returns the count of objects field of a packfile.
+func (s *Scanner) readCount() (uint32, error) {
+ return binary.ReadUint32(s.r)
+}
+
+// NextObjectHeader returns the ObjectHeader for the next object in the reader
+func (s *Scanner) NextObjectHeader() (*ObjectHeader, error) {
+ if err := s.doPending(); err != nil {
+ return nil, err
+ }
+
+ s.crc.Reset()
+
+ h := &ObjectHeader{}
+ s.pendingObject = h
+
+ var err error
+ h.Offset, err = s.r.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return nil, err
+ }
+
+ h.Type, h.Length, err = s.readObjectTypeAndLength()
+ if err != nil {
+ return nil, err
+ }
+
+ switch h.Type {
+ case plumbing.OFSDeltaObject:
+ no, err := binary.ReadVariableWidthInt(s.r)
+ if err != nil {
+ return nil, err
+ }
+
+ h.OffsetReference = h.Offset - no
+ case plumbing.REFDeltaObject:
+ var err error
+ h.Reference, err = binary.ReadHash(s.r)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return h, nil
+}
+
+func (s *Scanner) doPending() error {
+ if s.version == 0 {
+ var err error
+ s.version, s.objects, err = s.Header()
+ if err != nil {
+ return err
+ }
+ }
+
+ return s.discardObjectIfNeeded()
+}
+
+func (s *Scanner) discardObjectIfNeeded() error {
+ if s.pendingObject == nil {
+ return nil
+ }
+
+ h := s.pendingObject
+ n, _, err := s.NextObject(ioutil.Discard)
+ if err != nil {
+ return err
+ }
+
+ if n != h.Length {
+ return fmt.Errorf(
+ "error discarding object, discarded %d, expected %d",
+ n, h.Length,
+ )
+ }
+
+ return nil
+}
+
+// readObjectTypeAndLength reads and returns the object type and the
+// length field from an object entry in a packfile.
+func (s *Scanner) readObjectTypeAndLength() (plumbing.ObjectType, int64, error) {
+ t, c, err := s.readType()
+ if err != nil {
+ return t, 0, err
+ }
+
+ l, err := s.readLength(c)
+
+ return t, l, err
+}
+
+const (
+ maskType = uint8(112) // 0111 0000
+ maskFirstLength = uint8(15) // 0000 1111
+	maskContinue = uint8(128) // 1000 0000
+	firstLengthBits = uint8(4) // the first byte has 4 bits to store the length
+	maskLength = uint8(127) // 0111 1111
+	lengthBits = uint8(7) // subsequent bytes have 7 bits to store the length
+)
+
+func (s *Scanner) readType() (plumbing.ObjectType, byte, error) {
+ var c byte
+ var err error
+ if c, err = s.r.ReadByte(); err != nil {
+ return plumbing.ObjectType(0), 0, err
+ }
+
+ typ := parseType(c)
+
+ return typ, c, nil
+}
+
+func parseType(b byte) plumbing.ObjectType {
+ return plumbing.ObjectType((b & maskType) >> firstLengthBits)
+}
+
+// the length is encoded in the last 4 bits of the first byte and in
+// the last 7 bits of subsequent bytes. The last byte has a 0 MSB.
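+//
+// For example, the first byte 0x95 (binary 1001 0101) carries 4 length bits
+// with value 5 and has the MSB set, so one more byte is read; if that byte is
+// 0x0a, the total length is 5 + (10 << 4) = 165.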
+func (s *Scanner) readLength(first byte) (int64, error) {
+ length := int64(first & maskFirstLength)
+
+ c := first
+ shift := firstLengthBits
+ var err error
+ for c&maskContinue > 0 {
+ if c, err = s.r.ReadByte(); err != nil {
+ return 0, err
+ }
+
+ length += int64(c&maskLength) << shift
+ shift += lengthBits
+ }
+
+ return length, nil
+}
+
+// NextObject writes the content of the next object into the reader, returns
+// the number of bytes written, the CRC32 of the content and an error, if any
+func (s *Scanner) NextObject(w io.Writer) (written int64, crc32 uint32, err error) {
+ defer s.crc.Reset()
+
+ s.pendingObject = nil
+ written, err = s.copyObject(w)
+ crc32 = s.crc.Sum32()
+ return
+}
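+
+// scanObjects is a hypothetical usage sketch, not part of this change: it
+// walks every object entry in a packfile, discarding the inflated contents,
+// and returns the headers seen.
+func scanObjects(r io.Reader) ([]*ObjectHeader, error) {
+	s := NewScanner(r)
+
+	_, count, err := s.Header()
+	if err != nil {
+		return nil, err
+	}
+
+	headers := make([]*ObjectHeader, 0, count)
+	for i := uint32(0); i < count; i++ {
+		h, err := s.NextObjectHeader()
+		if err != nil {
+			return nil, err
+		}
+
+		if _, _, err := s.NextObject(ioutil.Discard); err != nil {
+			return nil, err
+		}
+
+		headers = append(headers, h)
+	}
+
+	return headers, nil
+}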
+
+// copyObject inflates the zlib stream of an object entry in the packfile and
+// writes the uncompressed content to w. The return values are named so the
+// deferred Close error is actually propagated.
+func (s *Scanner) copyObject(w io.Writer) (n int64, err error) {
+	zr, err := zlib.NewReader(s.r)
+	if err != nil {
+		return -1, fmt.Errorf("zlib reading error: %s", err)
+	}
+
+	defer func() {
+		closeErr := zr.Close()
+		if err == nil {
+			err = closeErr
+		}
+	}()
+
+	n, err = io.Copy(w, zr)
+	return
+}
+
+// Seek sets a new offset from start; it returns the previous position.
+func (s *Scanner) Seek(offset int64) (previous int64, err error) {
+ // if seeking, we assume that you are not interested in the header
+ if s.version == 0 {
+ s.version = VersionSupported
+ }
+
+ previous, err = s.r.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return -1, err
+ }
+
+ _, err = s.r.Seek(offset, io.SeekStart)
+ return previous, err
+}
+
+// Checksum returns the checksum of the packfile
+func (s *Scanner) Checksum() (plumbing.Hash, error) {
+ err := s.discardObjectIfNeeded()
+ if err != nil {
+ return plumbing.ZeroHash, err
+ }
+
+ return binary.ReadHash(s.r)
+}
+
+// Close reads the reader until io.EOF
+func (s *Scanner) Close() error {
+ _, err := io.Copy(ioutil.Discard, s.r)
+ return err
+}
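+
+// A typical scanning session, sketched from the tests in this patch (error
+// handling elided for brevity):
+//
+//	s := NewScanner(r)
+//	_, count, _ := s.Header()
+//	for i := uint32(0); i < count; i++ {
+//		_, _ = s.NextObjectHeader()
+//		_, _, _ = s.NextObject(buf) // optional: unread objects are discarded lazily
+//	}
+//	checksum, _ := s.Checksum()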
+
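+// trackableReader wraps an io.Reader counting the bytes read, so the current
+// position can be reported through its restricted Seek implementation.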
+type trackableReader struct {
+ count int64
+ io.Reader
+}
+
+// Read reads up to len(p) bytes into p.
+func (r *trackableReader) Read(p []byte) (n int, err error) {
+ n, err = r.Reader.Read(p)
+ r.count += int64(n)
+
+ return
+}
+
+// Seek only supports io.SeekCurrent, any other operation fails
+func (r *trackableReader) Seek(offset int64, whence int) (int64, error) {
+ if whence != io.SeekCurrent {
+ return -1, ErrSeekNotSupported
+ }
+
+ return r.count, nil
+}
+
+func newByteReadSeeker(r io.ReadSeeker) *bufferedSeeker {
+ return &bufferedSeeker{
+ r: r,
+ Reader: *bufio.NewReader(r),
+ }
+}
+
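+// bufferedSeeker wraps an io.ReadSeeker with a bufio.Reader while keeping it
+// seekable: relative seeks are corrected by the number of buffered bytes and
+// any other seek resets the buffer.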
+type bufferedSeeker struct {
+ r io.ReadSeeker
+ bufio.Reader
+}
+
+func (r *bufferedSeeker) Seek(offset int64, whence int) (int64, error) {
+ if whence == io.SeekCurrent {
+ current, err := r.r.Seek(offset, whence)
+ if err != nil {
+ return current, err
+ }
+
+ return current - int64(r.Buffered()), nil
+ }
+
+ defer r.Reader.Reset(r.r)
+ return r.r.Seek(offset, whence)
+}
+
+type reader interface {
+ io.Reader
+ io.ByteReader
+ io.Seeker
+}
+
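+// teeReader mirrors everything read from the wrapped reader into w (a CRC32
+// hash), so checksums are computed as a side effect of reading.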
+type teeReader struct {
+ reader
+ w hash.Hash32
+}
+
+// Read mirrors everything read into the hash; n is kept as the number of
+// bytes read from the underlying reader, even if the hash write fails.
+func (r *teeReader) Read(p []byte) (n int, err error) {
+ n, err = r.reader.Read(p)
+ if n > 0 {
+ if _, werr := r.w.Write(p[:n]); werr != nil {
+ return n, werr
+ }
+ }
+ return
+}
+
+func (r *teeReader) ReadByte() (b byte, err error) {
+ b, err = r.reader.ReadByte()
+ if err == nil {
+ _, err := r.w.Write([]byte{b})
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ return
+}
diff --git a/plumbing/format/packfile/scanner_test.go b/plumbing/format/packfile/scanner_test.go
new file mode 100644
index 0000000..8e9a593
--- /dev/null
+++ b/plumbing/format/packfile/scanner_test.go
@@ -0,0 +1,189 @@
+package packfile
+
+import (
+ "bytes"
+ "io"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/src-d/go-git.v4/fixtures"
+ "gopkg.in/src-d/go-git.v4/plumbing"
+)
+
+type ScannerSuite struct {
+ fixtures.Suite
+}
+
+var _ = Suite(&ScannerSuite{})
+
+func (s *ScannerSuite) TestHeader(c *C) {
+ r := fixtures.Basic().One().Packfile()
+ p := NewScanner(r)
+
+ version, objects, err := p.Header()
+ c.Assert(err, IsNil)
+ c.Assert(version, Equals, VersionSupported)
+ c.Assert(objects, Equals, uint32(31))
+}
+
+func (s *ScannerSuite) TestNextObjectHeaderWithoutHeader(c *C) {
+ r := fixtures.Basic().One().Packfile()
+ p := NewScanner(r)
+
+ h, err := p.NextObjectHeader()
+ c.Assert(err, IsNil)
+ c.Assert(h, DeepEquals, &expectedHeadersOFS[0])
+
+ version, objects, err := p.Header()
+ c.Assert(err, IsNil)
+ c.Assert(version, Equals, VersionSupported)
+ c.Assert(objects, Equals, uint32(31))
+}
+
+func (s *ScannerSuite) TestNextObjectHeaderREFDelta(c *C) {
+ s.testNextObjectHeader(c, "ref-delta", expectedHeadersREF)
+}
+
+func (s *ScannerSuite) TestNextObjectHeaderOFSDelta(c *C) {
+ s.testNextObjectHeader(c, "ofs-delta", expectedHeadersOFS)
+}
+
+func (s *ScannerSuite) testNextObjectHeader(c *C, tag string, expected []ObjectHeader) {
+ r := fixtures.Basic().ByTag(tag).One().Packfile()
+ p := NewScanner(r)
+
+ _, objects, err := p.Header()
+ c.Assert(err, IsNil)
+
+ for i := 0; i < int(objects); i++ {
+ h, err := p.NextObjectHeader()
+ c.Assert(err, IsNil)
+ c.Assert(*h, DeepEquals, expected[i])
+
+ buf := bytes.NewBuffer(nil)
+ n, _, err := p.NextObject(buf)
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, h.Length)
+ }
+
+ n, err := p.Checksum()
+ c.Assert(err, IsNil)
+ c.Assert(n, HasLen, 20)
+}
+
+func (s *ScannerSuite) TestNextObjectHeaderWithOutReadObject(c *C) {
+ f := fixtures.Basic().ByTag("ref-delta").One()
+ r := f.Packfile()
+ p := NewScanner(r)
+
+ _, objects, err := p.Header()
+ c.Assert(err, IsNil)
+
+ for i := 0; i < int(objects); i++ {
+ h, err := p.NextObjectHeader()
+ c.Assert(err, IsNil)
+ c.Assert(*h, DeepEquals, expectedHeadersREF[i])
+ }
+
+ err = p.discardObjectIfNeeded()
+ c.Assert(err, IsNil)
+
+ n, err := p.Checksum()
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, f.PackfileHash)
+}
+
+func (s *ScannerSuite) TestNextObjectHeaderWithOutReadObjectNonSeekable(c *C) {
+ f := fixtures.Basic().ByTag("ref-delta").One()
+ r := io.MultiReader(f.Packfile())
+ p := NewScanner(r)
+
+ _, objects, err := p.Header()
+ c.Assert(err, IsNil)
+
+ for i := 0; i < int(objects); i++ {
+ h, err := p.NextObjectHeader()
+ c.Assert(err, IsNil)
+ c.Assert(*h, DeepEquals, expectedHeadersREF[i])
+ }
+
+ err = p.discardObjectIfNeeded()
+ c.Assert(err, IsNil)
+
+ n, err := p.Checksum()
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, f.PackfileHash)
+}
+
+var expectedHeadersOFS = []ObjectHeader{
+ {Type: plumbing.CommitObject, Offset: 12, Length: 254},
+ {Type: plumbing.OFSDeltaObject, Offset: 186, Length: 93, OffsetReference: 12},
+ {Type: plumbing.CommitObject, Offset: 286, Length: 242},
+ {Type: plumbing.CommitObject, Offset: 449, Length: 242},
+ {Type: plumbing.CommitObject, Offset: 615, Length: 333},
+ {Type: plumbing.CommitObject, Offset: 838, Length: 332},
+ {Type: plumbing.CommitObject, Offset: 1063, Length: 244},
+ {Type: plumbing.CommitObject, Offset: 1230, Length: 243},
+ {Type: plumbing.CommitObject, Offset: 1392, Length: 187},
+ {Type: plumbing.BlobObject, Offset: 1524, Length: 189},
+ {Type: plumbing.BlobObject, Offset: 1685, Length: 18},
+ {Type: plumbing.BlobObject, Offset: 1713, Length: 1072},
+ {Type: plumbing.BlobObject, Offset: 2351, Length: 76110},
+ {Type: plumbing.BlobObject, Offset: 78050, Length: 2780},
+ {Type: plumbing.BlobObject, Offset: 78882, Length: 217848},
+ {Type: plumbing.BlobObject, Offset: 80725, Length: 706},
+ {Type: plumbing.BlobObject, Offset: 80998, Length: 11488},
+ {Type: plumbing.BlobObject, Offset: 84032, Length: 78},
+ {Type: plumbing.TreeObject, Offset: 84115, Length: 272},
+ {Type: plumbing.OFSDeltaObject, Offset: 84375, Length: 43, OffsetReference: 84115},
+ {Type: plumbing.TreeObject, Offset: 84430, Length: 38},
+ {Type: plumbing.TreeObject, Offset: 84479, Length: 75},
+ {Type: plumbing.TreeObject, Offset: 84559, Length: 38},
+ {Type: plumbing.TreeObject, Offset: 84608, Length: 34},
+ {Type: plumbing.BlobObject, Offset: 84653, Length: 9},
+ {Type: plumbing.OFSDeltaObject, Offset: 84671, Length: 6, OffsetReference: 84375},
+ {Type: plumbing.OFSDeltaObject, Offset: 84688, Length: 9, OffsetReference: 84375},
+ {Type: plumbing.OFSDeltaObject, Offset: 84708, Length: 6, OffsetReference: 84375},
+ {Type: plumbing.OFSDeltaObject, Offset: 84725, Length: 5, OffsetReference: 84115},
+ {Type: plumbing.OFSDeltaObject, Offset: 84741, Length: 8, OffsetReference: 84375},
+ {Type: plumbing.OFSDeltaObject, Offset: 84760, Length: 4, OffsetReference: 84741},
+}
+
+var expectedHeadersREF = []ObjectHeader{
+ {Type: plumbing.CommitObject, Offset: 12, Length: 254},
+ {Type: plumbing.REFDeltaObject, Offset: 186, Length: 93,
+ Reference: plumbing.NewHash("e8d3ffab552895c19b9fcf7aa264d277cde33881")},
+ {Type: plumbing.CommitObject, Offset: 304, Length: 242},
+ {Type: plumbing.CommitObject, Offset: 467, Length: 242},
+ {Type: plumbing.CommitObject, Offset: 633, Length: 333},
+ {Type: plumbing.CommitObject, Offset: 856, Length: 332},
+ {Type: plumbing.CommitObject, Offset: 1081, Length: 243},
+ {Type: plumbing.CommitObject, Offset: 1243, Length: 244},
+ {Type: plumbing.CommitObject, Offset: 1410, Length: 187},
+ {Type: plumbing.BlobObject, Offset: 1542, Length: 189},
+ {Type: plumbing.BlobObject, Offset: 1703, Length: 18},
+ {Type: plumbing.BlobObject, Offset: 1731, Length: 1072},
+ {Type: plumbing.BlobObject, Offset: 2369, Length: 76110},
+ {Type: plumbing.TreeObject, Offset: 78068, Length: 38},
+ {Type: plumbing.BlobObject, Offset: 78117, Length: 2780},
+ {Type: plumbing.TreeObject, Offset: 79049, Length: 75},
+ {Type: plumbing.BlobObject, Offset: 79129, Length: 217848},
+ {Type: plumbing.BlobObject, Offset: 80972, Length: 706},
+ {Type: plumbing.TreeObject, Offset: 81265, Length: 38},
+ {Type: plumbing.BlobObject, Offset: 81314, Length: 11488},
+ {Type: plumbing.TreeObject, Offset: 84752, Length: 34},
+ {Type: plumbing.BlobObject, Offset: 84797, Length: 78},
+ {Type: plumbing.TreeObject, Offset: 84880, Length: 271},
+ {Type: plumbing.REFDeltaObject, Offset: 85141, Length: 6,
+ Reference: plumbing.NewHash("a8d315b2b1c615d43042c3a62402b8a54288cf5c")},
+ {Type: plumbing.REFDeltaObject, Offset: 85176, Length: 37,
+ Reference: plumbing.NewHash("fb72698cab7617ac416264415f13224dfd7a165e")},
+ {Type: plumbing.BlobObject, Offset: 85244, Length: 9},
+ {Type: plumbing.REFDeltaObject, Offset: 85262, Length: 9,
+ Reference: plumbing.NewHash("fb72698cab7617ac416264415f13224dfd7a165e")},
+ {Type: plumbing.REFDeltaObject, Offset: 85300, Length: 6,
+ Reference: plumbing.NewHash("fb72698cab7617ac416264415f13224dfd7a165e")},
+ {Type: plumbing.TreeObject, Offset: 85335, Length: 110},
+ {Type: plumbing.REFDeltaObject, Offset: 85448, Length: 8,
+ Reference: plumbing.NewHash("eba74343e2f15d62adedfd8c883ee0262b5c8021")},
+ {Type: plumbing.TreeObject, Offset: 85485, Length: 73},
+}
diff --git a/plumbing/format/packp/advrefs/advrefs.go b/plumbing/format/packp/advrefs/advrefs.go
new file mode 100644
index 0000000..4d7c897
--- /dev/null
+++ b/plumbing/format/packp/advrefs/advrefs.go
@@ -0,0 +1,58 @@
+// Package advrefs implements encoding and decoding advertised-refs
+// messages from a git-upload-pack command.
+package advrefs
+
+import (
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packp"
+)
+
+const (
+ hashSize = 40
+ head = "HEAD"
+ noHead = "capabilities^{}"
+)
+
+var (
+ sp = []byte(" ")
+ null = []byte("\x00")
+ eol = []byte("\n")
+ peeled = []byte("^{}")
+ shallow = []byte("shallow ")
+ noHeadMark = []byte(" capabilities^{}\x00")
+)
+
+// AdvRefs values represent the information transmitted on an
+// advertised-refs message. Values of this type are not zero-value
+// safe; use the New function instead.
+//
+// When using these messages over (smart) HTTP, you have to add a pktline
+// before the whole thing with the following payload:
+//
+// "# service=$servicename" LF
+//
+// Moreover, some (if not all) git HTTP smart servers will send a flush-pkt
+// just after the first pkt-line.
+//
+// To accommodate both situations, the Prefix field allows you to store
+// any data you want to send before the actual pktlines. When decoding,
+// it will also be filled with whatever is found before the references.
+type AdvRefs struct {
+ Prefix [][]byte // payloads of the prefix
+ Head *plumbing.Hash
+ Capabilities *packp.Capabilities
+ References map[string]plumbing.Hash
+ Peeled map[string]plumbing.Hash
+ Shallows []plumbing.Hash
+}
+
+// New returns a pointer to a new AdvRefs value, ready to be used.
+func New() *AdvRefs {
+ return &AdvRefs{
+ Prefix: [][]byte{},
+ Capabilities: packp.NewCapabilities(),
+ References: make(map[string]plumbing.Hash),
+ Peeled: make(map[string]plumbing.Hash),
+ Shallows: []plumbing.Hash{},
+ }
+}
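+
+// A minimal decoding sketch (assuming r is an io.Reader holding the raw
+// advertised-refs message; see Decoder in this package):
+//
+//	ar := New()
+//	if err := NewDecoder(r).Decode(ar); err != nil {
+//		// handle the error
+//	}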
diff --git a/plumbing/format/packp/advrefs/advrefs_test.go b/plumbing/format/packp/advrefs/advrefs_test.go
new file mode 100644
index 0000000..2639b6e
--- /dev/null
+++ b/plumbing/format/packp/advrefs/advrefs_test.go
@@ -0,0 +1,315 @@
+package advrefs_test
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+ "testing"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packp/advrefs"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packp/pktline"
+
+ . "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type SuiteDecodeEncode struct{}
+
+var _ = Suite(&SuiteDecodeEncode{})
+
+func (s *SuiteDecodeEncode) test(c *C, in []string, exp []string) {
+ var err error
+ var input io.Reader
+ {
+ var buf bytes.Buffer
+ p := pktline.NewEncoder(&buf)
+ err = p.EncodeString(in...)
+ c.Assert(err, IsNil)
+ input = &buf
+ }
+
+ var expected []byte
+ {
+ var buf bytes.Buffer
+ p := pktline.NewEncoder(&buf)
+ err = p.EncodeString(exp...)
+ c.Assert(err, IsNil)
+
+ expected = buf.Bytes()
+ }
+
+ var obtained []byte
+ {
+ ar := advrefs.New()
+ d := advrefs.NewDecoder(input)
+ err = d.Decode(ar)
+ c.Assert(err, IsNil)
+
+ var buf bytes.Buffer
+ e := advrefs.NewEncoder(&buf)
+ err := e.Encode(ar)
+ c.Assert(err, IsNil)
+
+ obtained = buf.Bytes()
+ }
+
+ c.Assert(obtained, DeepEquals, expected,
+ Commentf("input = %v\nobtained = %q\nexpected = %q\n",
+ in, string(obtained), string(expected)))
+}
+
+func (s *SuiteDecodeEncode) TestNoHead(c *C) {
+ input := []string{
+ "0000000000000000000000000000000000000000 capabilities^{}\x00",
+ pktline.FlushString,
+ }
+
+ expected := []string{
+ "0000000000000000000000000000000000000000 capabilities^{}\x00\n",
+ pktline.FlushString,
+ }
+
+ s.test(c, input, expected)
+}
+
+func (s *SuiteDecodeEncode) TestNoHeadSmart(c *C) {
+ input := []string{
+ "# service=git-upload-pack\n",
+ "0000000000000000000000000000000000000000 capabilities^{}\x00",
+ pktline.FlushString,
+ }
+
+ expected := []string{
+ "# service=git-upload-pack\n",
+ "0000000000000000000000000000000000000000 capabilities^{}\x00\n",
+ pktline.FlushString,
+ }
+
+ s.test(c, input, expected)
+}
+
+func (s *SuiteDecodeEncode) TestNoHeadSmartBug(c *C) {
+ input := []string{
+ "# service=git-upload-pack\n",
+ pktline.FlushString,
+ "0000000000000000000000000000000000000000 capabilities^{}\x00\n",
+ pktline.FlushString,
+ }
+
+ expected := []string{
+ "# service=git-upload-pack\n",
+ pktline.FlushString,
+ "0000000000000000000000000000000000000000 capabilities^{}\x00\n",
+ pktline.FlushString,
+ }
+
+ s.test(c, input, expected)
+}
+
+func (s *SuiteDecodeEncode) TestRefs(c *C) {
+ input := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack",
+ "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master",
+ "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n",
+ "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree",
+ pktline.FlushString,
+ }
+
+ expected := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00multi_ack ofs-delta symref=HEAD:/refs/heads/master\n",
+ "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
+ "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n",
+ "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n",
+ pktline.FlushString,
+ }
+
+ s.test(c, input, expected)
+}
+
+func (s *SuiteDecodeEncode) TestPeeled(c *C) {
+ input := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack",
+ "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n",
+ "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}",
+ "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
+ "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree",
+ "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n",
+ pktline.FlushString,
+ }
+
+ expected := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00multi_ack ofs-delta symref=HEAD:/refs/heads/master\n",
+ "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
+ "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n",
+ "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n",
+ "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n",
+ "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}\n",
+ pktline.FlushString,
+ }
+
+ s.test(c, input, expected)
+}
+
+func (s *SuiteDecodeEncode) TestAll(c *C) {
+ input := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack\n",
+ "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
+ "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree",
+ "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n",
+ "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n",
+ "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}",
+ "shallow 1111111111111111111111111111111111111111",
+ "shallow 2222222222222222222222222222222222222222\n",
+ pktline.FlushString,
+ }
+
+ expected := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00multi_ack ofs-delta symref=HEAD:/refs/heads/master\n",
+ "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
+ "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n",
+ "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n",
+ "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n",
+ "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}\n",
+ "shallow 1111111111111111111111111111111111111111\n",
+ "shallow 2222222222222222222222222222222222222222\n",
+ pktline.FlushString,
+ }
+
+ s.test(c, input, expected)
+}
+
+func (s *SuiteDecodeEncode) TestAllSmart(c *C) {
+ input := []string{
+ "# service=git-upload-pack\n",
+ pktline.FlushString,
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack\n",
+ "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
+ "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n",
+ "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n",
+ "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n",
+ "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}\n",
+ "shallow 1111111111111111111111111111111111111111\n",
+ "shallow 2222222222222222222222222222222222222222\n",
+ pktline.FlushString,
+ }
+
+ expected := []string{
+ "# service=git-upload-pack\n",
+ pktline.FlushString,
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00multi_ack ofs-delta symref=HEAD:/refs/heads/master\n",
+ "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
+ "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n",
+ "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n",
+ "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n",
+ "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}\n",
+ "shallow 1111111111111111111111111111111111111111\n",
+ "shallow 2222222222222222222222222222222222222222\n",
+ pktline.FlushString,
+ }
+
+ s.test(c, input, expected)
+}
+
+func (s *SuiteDecodeEncode) TestAllSmartBug(c *C) {
+ input := []string{
+ "# service=git-upload-pack\n",
+ pktline.FlushString,
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack\n",
+ "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
+ "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n",
+ "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n",
+ "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n",
+ "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}\n",
+ "shallow 1111111111111111111111111111111111111111\n",
+ "shallow 2222222222222222222222222222222222222222\n",
+ pktline.FlushString,
+ }
+
+ expected := []string{
+ "# service=git-upload-pack\n",
+ pktline.FlushString,
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00multi_ack ofs-delta symref=HEAD:/refs/heads/master\n",
+ "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
+ "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n",
+ "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n",
+ "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n",
+ "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}\n",
+ "shallow 1111111111111111111111111111111111111111\n",
+ "shallow 2222222222222222222222222222222222222222\n",
+ pktline.FlushString,
+ }
+
+ s.test(c, input, expected)
+}
+
+func ExampleDecoder_Decode() {
+ // Here is a raw advertised-refs message.
+ raw := "" +
+ "0065a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 HEAD\x00multi_ack ofs-delta symref=HEAD:/refs/heads/master\n" +
+ "003fa6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n" +
+ "00441111111111111111111111111111111111111111 refs/tags/v2.6.11-tree\n" +
+ "00475555555555555555555555555555555555555555 refs/tags/v2.6.11-tree^{}\n" +
+ "0035shallow 5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c\n" +
+ "0000"
+
+ // Use the raw message as our input.
+ input := strings.NewReader(raw)
+
+ // Create an advrefs.Decoder reading from our input.
+ d := advrefs.NewDecoder(input)
+
+ // Decode the input into a newly allocated AdvRefs value.
+ ar := advrefs.New()
+ _ = d.Decode(ar) // error check ignored for brevity
+
+ // Do something interesting with the AdvRefs, e.g. print its contents.
+ fmt.Println("head =", ar.Head)
+ fmt.Println("capabilities =", ar.Capabilities.String())
+ fmt.Println("...")
+ fmt.Println("shallows =", ar.Shallows)
+ // Output: head = a6930aaee06755d1bdcfd943fbf614e4d92bb0c7
+ // capabilities = multi_ack ofs-delta symref=HEAD:/refs/heads/master
+ // ...
+ // shallows = [5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c]
+}
+
+func ExampleEncoder_Encode() {
+ // Create an AdvRefs with the contents you want...
+ ar := advrefs.New()
+
+ // ...add a hash for the HEAD...
+ head := plumbing.NewHash("1111111111111111111111111111111111111111")
+ ar.Head = &head
+
+ // ...add some server capabilities...
+ ar.Capabilities.Add("symref", "HEAD:/refs/heads/master")
+ ar.Capabilities.Add("ofs-delta")
+ ar.Capabilities.Add("multi_ack")
+
+ // ...add a couple of references...
+ ar.References["refs/heads/master"] = plumbing.NewHash("2222222222222222222222222222222222222222")
+ ar.References["refs/tags/v1"] = plumbing.NewHash("3333333333333333333333333333333333333333")
+
+ // ...including a peeled ref...
+ ar.Peeled["refs/tags/v1"] = plumbing.NewHash("4444444444444444444444444444444444444444")
+
+ // ...and finally add a shallow
+ ar.Shallows = append(ar.Shallows, plumbing.NewHash("5555555555555555555555555555555555555555"))
+
+ // Encode the AdvRefs to a bytes.Buffer.
+ // You can encode into stdout too, but you will not be able
+ // to see the '\x00' after "HEAD".
+ var buf bytes.Buffer
+ e := advrefs.NewEncoder(&buf)
+ _ = e.Encode(ar) // error checks ignored for brevity
+
+ // Print the contents of the buffer as a quoted string.
+ // Printing it as a non-quoted string would be prettier, but you
+ // would miss the '\x00' after "HEAD".
+ fmt.Printf("%q", buf.String())
+ // Output:
+ // "00651111111111111111111111111111111111111111 HEAD\x00multi_ack ofs-delta symref=HEAD:/refs/heads/master\n003f2222222222222222222222222222222222222222 refs/heads/master\n003a3333333333333333333333333333333333333333 refs/tags/v1\n003d4444444444444444444444444444444444444444 refs/tags/v1^{}\n0035shallow 5555555555555555555555555555555555555555\n0000"
+}
diff --git a/plumbing/format/packp/advrefs/decoder.go b/plumbing/format/packp/advrefs/decoder.go
new file mode 100644
index 0000000..b654882
--- /dev/null
+++ b/plumbing/format/packp/advrefs/decoder.go
@@ -0,0 +1,288 @@
+package advrefs
+
+import (
+ "bytes"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packp/pktline"
+)
+
+// A Decoder reads and decodes AdvRefs values from an input stream.
+type Decoder struct {
+ s *pktline.Scanner // a pkt-line scanner from the input stream
+ line []byte // current pkt-line contents, use nextLine() to make it advance
+ nLine int // current pkt-line number for debugging, begins at 1
+ hash plumbing.Hash // last hash read
+ err error // sticky error, use the error() method to fill this out
+ data *AdvRefs // parsed data is stored here
+}
+
+// ErrEmpty is returned by Decode when there was no advertised-refs message at all.
+var ErrEmpty = errors.New("empty advertised-ref message")
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// Will not read more data from r than necessary.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{
+ s: pktline.NewScanner(r),
+ }
+}
+
+// Decode reads the next advertised-refs message from its input and
+// stores it in the value pointed to by v.
+func (d *Decoder) Decode(v *AdvRefs) error {
+ d.data = v
+
+ for state := decodePrefix; state != nil; {
+ state = state(d)
+ }
+
+ return d.err
+}
+
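+// decoderStateFn is a state function of the decoder state machine: each state
+// consumes part of the input and returns the next state, or nil when decoding
+// is done or the sticky error has been set.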
+type decoderStateFn func(*Decoder) decoderStateFn
+
+// error fills out the decoder's sticky error.
+func (d *Decoder) error(format string, a ...interface{}) {
+ d.err = fmt.Errorf("pkt-line %d: %s", d.nLine,
+ fmt.Sprintf(format, a...))
+}
+
+// Reads a new pkt-line from the scanner, makes its payload available as
+// d.line and increments d.nLine. A successful invocation returns true;
+// otherwise, false is returned and the sticky error is filled out
+// accordingly. Trims eols at the end of the payloads.
+func (d *Decoder) nextLine() bool {
+ d.nLine++
+
+ if !d.s.Scan() {
+ if d.err = d.s.Err(); d.err != nil {
+ return false
+ }
+
+ if d.nLine == 1 {
+ d.err = ErrEmpty
+ return false
+ }
+
+ d.error("EOF")
+ return false
+ }
+
+ d.line = d.s.Bytes()
+ d.line = bytes.TrimSuffix(d.line, eol)
+
+ return true
+}
+
+// The HTTP smart prefix is often followed by a flush-pkt.
+func decodePrefix(d *Decoder) decoderStateFn {
+ if ok := d.nextLine(); !ok {
+ return nil
+ }
+
+ if isPrefix(d.line) {
+ tmp := make([]byte, len(d.line))
+ copy(tmp, d.line)
+ d.data.Prefix = append(d.data.Prefix, tmp)
+ if ok := d.nextLine(); !ok {
+ return nil
+ }
+ }
+
+ if isFlush(d.line) {
+ d.data.Prefix = append(d.data.Prefix, pktline.Flush)
+ if ok := d.nextLine(); !ok {
+ return nil
+ }
+ }
+
+ return decodeFirstHash
+}
+
+func isPrefix(payload []byte) bool {
+ return len(payload) > 0 && payload[0] == '#'
+}
+
+func isFlush(payload []byte) bool {
+ return len(payload) == 0
+}
+
+// If the first hash is zero, then a no-refs message is coming. Otherwise, a
+// list-of-refs is coming, and the hash will be followed by the first
+// advertised ref.
+func decodeFirstHash(p *Decoder) decoderStateFn {
+ if len(p.line) < hashSize {
+ p.error("cannot read hash, pkt-line too short")
+ return nil
+ }
+
+ if _, err := hex.Decode(p.hash[:], p.line[:hashSize]); err != nil {
+ p.error("invalid hash text: %s", err)
+ return nil
+ }
+
+ p.line = p.line[hashSize:]
+
+ if p.hash.IsZero() {
+ return decodeSkipNoRefs
+ }
+
+ return decodeFirstRef
+}
+
+// Skips SP "capabilities^{}" NUL
+func decodeSkipNoRefs(p *Decoder) decoderStateFn {
+ if len(p.line) < len(noHeadMark) {
+ p.error("too short zero-id ref")
+ return nil
+ }
+
+ if !bytes.HasPrefix(p.line, noHeadMark) {
+ p.error("malformed zero-id ref")
+ return nil
+ }
+
+ p.line = p.line[len(noHeadMark):]
+
+ return decodeCaps
+}
+
+// decodeFirstRef decodes the ref name; expects SP refname NUL.
+func decodeFirstRef(l *Decoder) decoderStateFn {
+ if len(l.line) < 3 {
+ l.error("line too short after hash")
+ return nil
+ }
+
+ if !bytes.HasPrefix(l.line, sp) {
+ l.error("no space after hash")
+ return nil
+ }
+ l.line = l.line[1:]
+
+ chunks := bytes.SplitN(l.line, null, 2)
+ if len(chunks) < 2 {
+ l.error("NULL not found")
+ return nil
+ }
+ ref := chunks[0]
+ l.line = chunks[1]
+
+ if bytes.Equal(ref, []byte(head)) {
+ l.data.Head = &l.hash
+ } else {
+ l.data.References[string(ref)] = l.hash
+ }
+
+ return decodeCaps
+}
+
+func decodeCaps(p *Decoder) decoderStateFn {
+ if len(p.line) == 0 {
+ return decodeOtherRefs
+ }
+
+ for _, c := range bytes.Split(p.line, sp) {
+ name, values := readCapability(c)
+ p.data.Capabilities.Add(name, values...)
+ }
+
+ return decodeOtherRefs
+}
+
+// Capabilities are a single string or a name=value, e.g. "ofs-delta" or
+// "symref=HEAD:refs/heads/master". Even though we are only going to read
+// at most one value, we return a slice of values, as Capabilities.Add
+// receives that.
+func readCapability(data []byte) (name string, values []string) {
+ pair := bytes.SplitN(data, []byte{'='}, 2)
+ if len(pair) == 2 {
+ values = append(values, string(pair[1]))
+ }
+
+ return string(pair[0]), values
+}
+
+// The refs are either tips (obj-id SP refname) or peeled ones (obj-id SP refname^{}).
+// If there are no refs, then there might be a shallow or a flush-pkt.
+func decodeOtherRefs(p *Decoder) decoderStateFn {
+ if ok := p.nextLine(); !ok {
+ return nil
+ }
+
+ if bytes.HasPrefix(p.line, shallow) {
+ return decodeShallow
+ }
+
+ if len(p.line) == 0 {
+ return nil
+ }
+
+ saveTo := p.data.References
+ if bytes.HasSuffix(p.line, peeled) {
+ p.line = bytes.TrimSuffix(p.line, peeled)
+ saveTo = p.data.Peeled
+ }
+
+ ref, hash, err := readRef(p.line)
+ if err != nil {
+ p.error("%s", err)
+ return nil
+ }
+ saveTo[ref] = hash
+
+ return decodeOtherRefs
+}
+
+// readRef parses an "obj-id SP refname" chunk, returning the ref name and its hash.
+func readRef(data []byte) (string, plumbing.Hash, error) {
+ chunks := bytes.Split(data, sp)
+ switch {
+ case len(chunks) == 1:
+ return "", plumbing.ZeroHash, fmt.Errorf("malformed ref data: no space was found")
+ case len(chunks) > 2:
+ return "", plumbing.ZeroHash, fmt.Errorf("malformed ref data: more than one space found")
+ default:
+ return string(chunks[1]), plumbing.NewHash(string(chunks[0])), nil
+ }
+}
+
+// Keeps reading shallows until a flush-pkt is found
+func decodeShallow(p *Decoder) decoderStateFn {
+ if !bytes.HasPrefix(p.line, shallow) {
+ prefix := p.line
+ if len(prefix) > len(shallow) {
+ prefix = prefix[:len(shallow)]
+ }
+ p.error("malformed shallow prefix, found %q... instead", prefix)
+ return nil
+ }
+ p.line = bytes.TrimPrefix(p.line, shallow)
+
+ if len(p.line) != hashSize {
+ p.error(
+ "malformed shallow hash: wrong length, expected 40 bytes, read %d bytes",
+ len(p.line))
+ return nil
+ }
+
+ text := p.line[:hashSize]
+ var h plumbing.Hash
+ if _, err := hex.Decode(h[:], text); err != nil {
+ p.error("invalid hash text: %s", err)
+ return nil
+ }
+
+ p.data.Shallows = append(p.data.Shallows, h)
+
+ if ok := p.nextLine(); !ok {
+ return nil
+ }
+
+ if len(p.line) == 0 {
+ return nil // successful parse of the advertised-refs message
+ }
+
+ return decodeShallow
+}
diff --git a/plumbing/format/packp/advrefs/decoder_test.go b/plumbing/format/packp/advrefs/decoder_test.go
new file mode 100644
index 0000000..03867d3
--- /dev/null
+++ b/plumbing/format/packp/advrefs/decoder_test.go
@@ -0,0 +1,500 @@
+package advrefs_test
+
+import (
+ "bytes"
+ "io"
+ "strings"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packp"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packp/advrefs"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packp/pktline"
+
+ . "gopkg.in/check.v1"
+)
+
+type SuiteDecoder struct{}
+
+var _ = Suite(&SuiteDecoder{})
+
+func (s *SuiteDecoder) TestEmpty(c *C) {
+ ar := advrefs.New()
+ var buf bytes.Buffer
+ d := advrefs.NewDecoder(&buf)
+
+ err := d.Decode(ar)
+ c.Assert(err, Equals, advrefs.ErrEmpty)
+}
+
+func (s *SuiteDecoder) TestShortForHash(c *C) {
+ payloads := []string{
+ "6ecf0ef2c2dffb796",
+ pktline.FlushString,
+ }
+ r := toPktLines(c, payloads)
+ testDecoderErrorMatches(c, r, ".*too short")
+}
+
+func toPktLines(c *C, payloads []string) io.Reader {
+ var buf bytes.Buffer
+ e := pktline.NewEncoder(&buf)
+ err := e.EncodeString(payloads...)
+ c.Assert(err, IsNil)
+
+ return &buf
+}
+
+func testDecoderErrorMatches(c *C, input io.Reader, pattern string) {
+ ar := advrefs.New()
+ d := advrefs.NewDecoder(input)
+
+ err := d.Decode(ar)
+ c.Assert(err, ErrorMatches, pattern)
+}
+
+func (s *SuiteDecoder) TestInvalidFirstHash(c *C) {
+ payloads := []string{
+ "6ecf0ef2c2dffb796alberto2219af86ec6584e5 HEAD\x00multi_ack thin-pack\n",
+ pktline.FlushString,
+ }
+ r := toPktLines(c, payloads)
+ testDecoderErrorMatches(c, r, ".*invalid hash.*")
+}
+
+func (s *SuiteDecoder) TestZeroId(c *C) {
+ payloads := []string{
+ "0000000000000000000000000000000000000000 capabilities^{}\x00multi_ack thin-pack\n",
+ pktline.FlushString,
+ }
+ ar := testDecodeOK(c, payloads)
+ c.Assert(ar.Head, IsNil)
+}
+
+func testDecodeOK(c *C, payloads []string) *advrefs.AdvRefs {
+ var buf bytes.Buffer
+ e := pktline.NewEncoder(&buf)
+ err := e.EncodeString(payloads...)
+ c.Assert(err, IsNil)
+
+ ar := advrefs.New()
+ d := advrefs.NewDecoder(&buf)
+
+ err = d.Decode(ar)
+ c.Assert(err, IsNil)
+
+ return ar
+}
+
+func (s *SuiteDecoder) TestMalformedZeroId(c *C) {
+ payloads := []string{
+ "0000000000000000000000000000000000000000 wrong\x00multi_ack thin-pack\n",
+ pktline.FlushString,
+ }
+ r := toPktLines(c, payloads)
+ testDecoderErrorMatches(c, r, ".*malformed zero-id.*")
+}
+
+func (s *SuiteDecoder) TestShortZeroId(c *C) {
+ payloads := []string{
+ "0000000000000000000000000000000000000000 capabi",
+ pktline.FlushString,
+ }
+ r := toPktLines(c, payloads)
+ testDecoderErrorMatches(c, r, ".*too short zero-id.*")
+}
+
+func (s *SuiteDecoder) TestHead(c *C) {
+ payloads := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00",
+ pktline.FlushString,
+ }
+ ar := testDecodeOK(c, payloads)
+ c.Assert(*ar.Head, Equals,
+ plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"))
+}
+
+func (s *SuiteDecoder) TestFirstIsNotHead(c *C) {
+ payloads := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 refs/heads/master\x00",
+ pktline.FlushString,
+ }
+ ar := testDecodeOK(c, payloads)
+ c.Assert(ar.Head, IsNil)
+ c.Assert(ar.References["refs/heads/master"], Equals,
+ plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"))
+}
+
+func (s *SuiteDecoder) TestShortRef(c *C) {
+ payloads := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 H",
+ pktline.FlushString,
+ }
+ r := toPktLines(c, payloads)
+ testDecoderErrorMatches(c, r, ".*too short.*")
+}
+
+func (s *SuiteDecoder) TestNoNULL(c *C) {
+ payloads := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEADofs-delta multi_ack",
+ pktline.FlushString,
+ }
+ r := toPktLines(c, payloads)
+ testDecoderErrorMatches(c, r, ".*NULL not found.*")
+}
+
+func (s *SuiteDecoder) TestNoSpaceAfterHash(c *C) {
+ payloads := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5-HEAD\x00",
+ pktline.FlushString,
+ }
+ r := toPktLines(c, payloads)
+ testDecoderErrorMatches(c, r, ".*no space after hash.*")
+}
+
+func (s *SuiteDecoder) TestNoCaps(c *C) {
+ payloads := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00",
+ pktline.FlushString,
+ }
+ ar := testDecodeOK(c, payloads)
+ c.Assert(ar.Capabilities.IsEmpty(), Equals, true)
+}
+
+func (s *SuiteDecoder) TestCaps(c *C) {
+ for _, test := range [...]struct {
+ input []string
+ capabilities []packp.Capability
+ }{
+ {
+ input: []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00",
+ pktline.FlushString,
+ },
+ capabilities: []packp.Capability{},
+ },
+ {
+ input: []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00\n",
+ pktline.FlushString,
+ },
+ capabilities: []packp.Capability{},
+ },
+ {
+ input: []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta",
+ pktline.FlushString,
+ },
+ capabilities: []packp.Capability{
+ {
+ Name: "ofs-delta",
+ Values: []string(nil),
+ },
+ },
+ },
+ {
+ input: []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta multi_ack",
+ pktline.FlushString,
+ },
+ capabilities: []packp.Capability{
+ {Name: "ofs-delta", Values: []string(nil)},
+ {Name: "multi_ack", Values: []string(nil)},
+ },
+ },
+ {
+ input: []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta multi_ack\n",
+ pktline.FlushString,
+ },
+ capabilities: []packp.Capability{
+ {Name: "ofs-delta", Values: []string(nil)},
+ {Name: "multi_ack", Values: []string(nil)},
+ },
+ },
+ {
+ input: []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:refs/heads/master agent=foo=bar\n",
+ pktline.FlushString,
+ },
+ capabilities: []packp.Capability{
+ {Name: "symref", Values: []string{"HEAD:refs/heads/master"}},
+ {Name: "agent", Values: []string{"foo=bar"}},
+ },
+ },
+ {
+ input: []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:refs/heads/master agent=foo=bar agent=new-agent\n",
+ pktline.FlushString,
+ },
+ capabilities: []packp.Capability{
+ {Name: "symref", Values: []string{"HEAD:refs/heads/master"}},
+ {Name: "agent", Values: []string{"foo=bar", "new-agent"}},
+ },
+ },
+ } {
+ ar := testDecodeOK(c, test.input)
+ for _, fixCap := range test.capabilities {
+ c.Assert(ar.Capabilities.Supports(fixCap.Name), Equals, true,
+ Commentf("input = %q, capability = %q", test.input, fixCap.Name))
+ c.Assert(ar.Capabilities.Get(fixCap.Name).Values, DeepEquals, fixCap.Values,
+ Commentf("input = %q, capability = %q", test.input, fixCap.Name))
+ }
+ }
+}
+
+func (s *SuiteDecoder) TestWithPrefix(c *C) {
+ payloads := []string{
+ "# this is a prefix\n",
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00foo\n",
+ pktline.FlushString,
+ }
+ ar := testDecodeOK(c, payloads)
+ c.Assert(len(ar.Prefix), Equals, 1)
+ c.Assert(ar.Prefix[0], DeepEquals, []byte("# this is a prefix"))
+}
+
+func (s *SuiteDecoder) TestWithPrefixAndFlush(c *C) {
+ payloads := []string{
+ "# this is a prefix\n",
+ pktline.FlushString,
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00foo\n",
+ pktline.FlushString,
+ }
+ ar := testDecodeOK(c, payloads)
+ c.Assert(len(ar.Prefix), Equals, 2)
+ c.Assert(ar.Prefix[0], DeepEquals, []byte("# this is a prefix"))
+ c.Assert(ar.Prefix[1], DeepEquals, []byte(pktline.FlushString))
+}
+
+func (s *SuiteDecoder) TestOtherRefs(c *C) {
+ for _, test := range [...]struct {
+ input []string
+ references map[string]plumbing.Hash
+ peeled map[string]plumbing.Hash
+ }{
+ {
+ input: []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n",
+ pktline.FlushString,
+ },
+ references: make(map[string]plumbing.Hash),
+ peeled: make(map[string]plumbing.Hash),
+ }, {
+ input: []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n",
+ "1111111111111111111111111111111111111111 ref/foo",
+ pktline.FlushString,
+ },
+ references: map[string]plumbing.Hash{
+ "ref/foo": plumbing.NewHash("1111111111111111111111111111111111111111"),
+ },
+ peeled: make(map[string]plumbing.Hash),
+ }, {
+ input: []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n",
+ "1111111111111111111111111111111111111111 ref/foo\n",
+ pktline.FlushString,
+ },
+ references: map[string]plumbing.Hash{
+ "ref/foo": plumbing.NewHash("1111111111111111111111111111111111111111"),
+ },
+ peeled: make(map[string]plumbing.Hash),
+ }, {
+ input: []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n",
+ "1111111111111111111111111111111111111111 ref/foo\n",
+ "2222222222222222222222222222222222222222 ref/bar",
+ pktline.FlushString,
+ },
+ references: map[string]plumbing.Hash{
+ "ref/foo": plumbing.NewHash("1111111111111111111111111111111111111111"),
+ "ref/bar": plumbing.NewHash("2222222222222222222222222222222222222222"),
+ },
+ peeled: make(map[string]plumbing.Hash),
+ }, {
+ input: []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n",
+ "1111111111111111111111111111111111111111 ref/foo^{}\n",
+ pktline.FlushString,
+ },
+ references: make(map[string]plumbing.Hash),
+ peeled: map[string]plumbing.Hash{
+ "ref/foo": plumbing.NewHash("1111111111111111111111111111111111111111"),
+ },
+ }, {
+ input: []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n",
+ "1111111111111111111111111111111111111111 ref/foo\n",
+ "2222222222222222222222222222222222222222 ref/bar^{}",
+ pktline.FlushString,
+ },
+ references: map[string]plumbing.Hash{
+ "ref/foo": plumbing.NewHash("1111111111111111111111111111111111111111"),
+ },
+ peeled: map[string]plumbing.Hash{
+ "ref/bar": plumbing.NewHash("2222222222222222222222222222222222222222"),
+ },
+ }, {
+ input: []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n",
+ "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
+ "51b8b4fb32271d39fbdd760397406177b2b0fd36 refs/pull/10/head\n",
+ "02b5a6031ba7a8cbfde5d65ff9e13ecdbc4a92ca refs/pull/100/head\n",
+ "c284c212704c43659bf5913656b8b28e32da1621 refs/pull/100/merge\n",
+ "3d6537dce68c8b7874333a1720958bd8db3ae8ca refs/pull/101/merge\n",
+ "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11\n",
+ "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11^{}\n",
+ "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n",
+ "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n",
+ pktline.FlushString,
+ },
+ references: map[string]plumbing.Hash{
+ "refs/heads/master": plumbing.NewHash("a6930aaee06755d1bdcfd943fbf614e4d92bb0c7"),
+ "refs/pull/10/head": plumbing.NewHash("51b8b4fb32271d39fbdd760397406177b2b0fd36"),
+ "refs/pull/100/head": plumbing.NewHash("02b5a6031ba7a8cbfde5d65ff9e13ecdbc4a92ca"),
+ "refs/pull/100/merge": plumbing.NewHash("c284c212704c43659bf5913656b8b28e32da1621"),
+ "refs/pull/101/merge": plumbing.NewHash("3d6537dce68c8b7874333a1720958bd8db3ae8ca"),
+ "refs/tags/v2.6.11": plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c"),
+ "refs/tags/v2.6.11-tree": plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c"),
+ },
+ peeled: map[string]plumbing.Hash{
+ "refs/tags/v2.6.11": plumbing.NewHash("c39ae07f393806ccf406ef966e9a15afc43cc36a"),
+ "refs/tags/v2.6.11-tree": plumbing.NewHash("c39ae07f393806ccf406ef966e9a15afc43cc36a"),
+ },
+ },
+ } {
+ ar := testDecodeOK(c, test.input)
+ comment := Commentf("input = %v\n", test.input)
+ c.Assert(ar.References, DeepEquals, test.references, comment)
+ c.Assert(ar.Peeled, DeepEquals, test.peeled, comment)
+ }
+}
+
+func (s *SuiteDecoder) TestMalformedOtherRefsNoSpace(c *C) {
+ payloads := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00multi_ack thin-pack\n",
+ "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8crefs/tags/v2.6.11\n",
+ pktline.FlushString,
+ }
+ r := toPktLines(c, payloads)
+ testDecoderErrorMatches(c, r, ".*malformed ref data.*")
+}
+
+func (s *SuiteDecoder) TestMalformedOtherRefsMultipleSpaces(c *C) {
+ payloads := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00multi_ack thin-pack\n",
+ "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags v2.6.11\n",
+ pktline.FlushString,
+ }
+ r := toPktLines(c, payloads)
+ testDecoderErrorMatches(c, r, ".*malformed ref data.*")
+}
+
+func (s *SuiteDecoder) TestShallow(c *C) {
+ for _, test := range [...]struct {
+ input []string
+ shallows []plumbing.Hash
+ }{
+ {
+ input: []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n",
+ "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
+ "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n",
+ "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n",
+ pktline.FlushString,
+ },
+ shallows: []plumbing.Hash{},
+ }, {
+ input: []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n",
+ "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
+ "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n",
+ "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n",
+ "shallow 1111111111111111111111111111111111111111\n",
+ pktline.FlushString,
+ },
+ shallows: []plumbing.Hash{plumbing.NewHash("1111111111111111111111111111111111111111")},
+ }, {
+ input: []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n",
+ "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
+ "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n",
+ "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n",
+ "shallow 1111111111111111111111111111111111111111\n",
+ "shallow 2222222222222222222222222222222222222222\n",
+ pktline.FlushString,
+ },
+ shallows: []plumbing.Hash{
+ plumbing.NewHash("1111111111111111111111111111111111111111"),
+ plumbing.NewHash("2222222222222222222222222222222222222222"),
+ },
+ },
+ } {
+ ar := testDecodeOK(c, test.input)
+ comment := Commentf("input = %v\n", test.input)
+ c.Assert(ar.Shallows, DeepEquals, test.shallows, comment)
+ }
+}
+
+func (s *SuiteDecoder) TestInvalidShallowHash(c *C) {
+ payloads := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n",
+ "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
+ "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n",
+ "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n",
+ "shallow 11111111alcortes111111111111111111111111\n",
+ "shallow 2222222222222222222222222222222222222222\n",
+ pktline.FlushString,
+ }
+ r := toPktLines(c, payloads)
+ testDecoderErrorMatches(c, r, ".*invalid hash text.*")
+}
+
+func (s *SuiteDecoder) TestGarbageAfterShallow(c *C) {
+ payloads := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n",
+ "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
+ "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n",
+ "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n",
+ "shallow 1111111111111111111111111111111111111111\n",
+ "shallow 2222222222222222222222222222222222222222\n",
+ "b5be40b90dbaa6bd337f3b77de361bfc0723468b refs/tags/v4.4",
+ pktline.FlushString,
+ }
+ r := toPktLines(c, payloads)
+ testDecoderErrorMatches(c, r, ".*malformed shallow prefix.*")
+}
+
+func (s *SuiteDecoder) TestMalformedShallowHash(c *C) {
+ payloads := []string{
+ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n",
+ "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n",
+ "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n",
+ "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n",
+ "shallow 1111111111111111111111111111111111111111\n",
+ "shallow 2222222222222222222222222222222222222222 malformed\n",
+ pktline.FlushString,
+ }
+ r := toPktLines(c, payloads)
+ testDecoderErrorMatches(c, r, ".*malformed shallow hash.*")
+}
+
+func (s *SuiteDecoder) TestEOFRefs(c *C) {
+ input := strings.NewReader("" +
+ "005b6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n" +
+ "003fa6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n" +
+ "00355dc01c595e6c6ec9ccda4f6ffbf614e4d92bb0c7 refs/foo\n",
+ )
+ testDecoderErrorMatches(c, input, ".*invalid pkt-len.*")
+}
+
+func (s *SuiteDecoder) TestEOFShallows(c *C) {
+ input := strings.NewReader("" +
+ "005b6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n" +
+ "003fa6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n" +
+ "00445dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n" +
+ "0047c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n" +
+ "0035shallow 1111111111111111111111111111111111111111\n" +
+ "0034shallow 222222222222222222222222")
+ testDecoderErrorMatches(c, input, ".*unexpected EOF.*")
+}
diff --git a/plumbing/format/packp/advrefs/encoder.go b/plumbing/format/packp/advrefs/encoder.go
new file mode 100644
index 0000000..8c52f14
--- /dev/null
+++ b/plumbing/format/packp/advrefs/encoder.go
@@ -0,0 +1,155 @@
+package advrefs
+
+import (
+ "bytes"
+ "io"
+ "sort"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packp"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packp/pktline"
+)
+
+// An Encoder writes AdvRefs values to an output stream.
+type Encoder struct {
+ data *AdvRefs // data to encode
+ pe *pktline.Encoder // where to write the encoded data
+ err error // sticky error
+}
+
+// NewEncoder returns a new encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ pe: pktline.NewEncoder(w),
+ }
+}
+
+// Encode writes the AdvRefs encoding of v to the stream.
+//
+// All the payloads will end with a newline character. Capabilities,
+// references and shallows are written in alphabetical order, except for
+// peeled references, which always follow their corresponding references.
+func (e *Encoder) Encode(v *AdvRefs) error {
+ e.data = v
+
+ for state := encodePrefix; state != nil; {
+ state = state(e)
+ }
+
+ return e.err
+}
+
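+// encoderStateFn is a state function of the encoder state machine: each state
+// writes a portion of the message and returns the next state, or nil when
+// done or on a sticky error.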
+type encoderStateFn func(*Encoder) encoderStateFn
+
+func encodePrefix(e *Encoder) encoderStateFn {
+ for _, p := range e.data.Prefix {
+ if bytes.Equal(p, pktline.Flush) {
+ if e.err = e.pe.Flush(); e.err != nil {
+ return nil
+ }
+ continue
+ }
+ if e.err = e.pe.Encodef("%s\n", string(p)); e.err != nil {
+ return nil
+ }
+ }
+
+ return encodeFirstLine
+}
+
+// Adds the first pkt-line payload: head hash, head ref and capabilities.
+// Also handles the special case when no HEAD ref is found.
+func encodeFirstLine(e *Encoder) encoderStateFn {
+ head := formatHead(e.data.Head)
+ separator := formatSeparator(e.data.Head)
+ capabilities := formatCaps(e.data.Capabilities)
+
+ if e.err = e.pe.Encodef("%s %s\x00%s\n", head, separator, capabilities); e.err != nil {
+ return nil
+ }
+
+ return encodeRefs
+}
+
+func formatHead(h *plumbing.Hash) string {
+ if h == nil {
+ return plumbing.ZeroHash.String()
+ }
+
+ return h.String()
+}
+
+func formatSeparator(h *plumbing.Hash) string {
+ if h == nil {
+ return noHead
+ }
+
+ return head
+}
+
+func formatCaps(c *packp.Capabilities) string {
+ if c == nil {
+ return ""
+ }
+
+ c.Sort()
+
+ return c.String()
+}
+
+// Adds the (sorted) refs: hash SP refname EOL
+// and their peeled refs if any.
+func encodeRefs(e *Encoder) encoderStateFn {
+ refs := sortRefs(e.data.References)
+ for _, r := range refs {
+ hash := e.data.References[r]
+ if e.err = e.pe.Encodef("%s %s\n", hash.String(), r); e.err != nil {
+ return nil
+ }
+
+ if hash, ok := e.data.Peeled[r]; ok {
+ if e.err = e.pe.Encodef("%s %s^{}\n", hash.String(), r); e.err != nil {
+ return nil
+ }
+ }
+ }
+
+ return encodeShallow
+}
+
+func sortRefs(m map[string]plumbing.Hash) []string {
+ ret := make([]string, 0, len(m))
+ for k := range m {
+ ret = append(ret, k)
+ }
+ sort.Strings(ret)
+
+ return ret
+}
+
+// Adds the (sorted) shallows: "shallow" SP hash EOL
+func encodeShallow(e *Encoder) encoderStateFn {
+ sorted := sortShallows(e.data.Shallows)
+ for _, hash := range sorted {
+ if e.err = e.pe.Encodef("shallow %s\n", hash); e.err != nil {
+ return nil
+ }
+ }
+
+ return encodeFlush
+}
+
+func sortShallows(c []plumbing.Hash) []string {
+ ret := []string{}
+ for _, h := range c {
+ ret = append(ret, h.String())
+ }
+ sort.Strings(ret)
+
+ return ret
+}
+
+func encodeFlush(e *Encoder) encoderStateFn {
+ e.err = e.pe.Flush()
+ return nil
+}
diff --git a/plumbing/format/packp/advrefs/encoder_test.go b/plumbing/format/packp/advrefs/encoder_test.go
new file mode 100644
index 0000000..b4b085c
--- /dev/null
+++ b/plumbing/format/packp/advrefs/encoder_test.go
@@ -0,0 +1,249 @@
+package advrefs_test
+
+import (
+ "bytes"
+ "strings"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packp"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packp/advrefs"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packp/pktline"
+
+ . "gopkg.in/check.v1"
+)
+
+type SuiteEncoder struct{}
+
+var _ = Suite(&SuiteEncoder{})
+
+// returns a byte slice with the pkt-lines for the given payloads.
+func pktlines(c *C, payloads ...[]byte) []byte {
+ var buf bytes.Buffer
+ e := pktline.NewEncoder(&buf)
+ err := e.Encode(payloads...)
+ c.Assert(err, IsNil, Commentf("building pktlines for %v\n", payloads))
+
+ return buf.Bytes()
+}
+
+func testEncode(c *C, input *advrefs.AdvRefs, expected []byte) {
+ var buf bytes.Buffer
+ e := advrefs.NewEncoder(&buf)
+ err := e.Encode(input)
+ c.Assert(err, IsNil)
+ obtained := buf.Bytes()
+
+ comment := Commentf("\nobtained = %s\nexpected = %s\n", string(obtained), string(expected))
+
+ c.Assert(obtained, DeepEquals, expected, comment)
+}
+
+func (s *SuiteEncoder) TestZeroValue(c *C) {
+ ar := &advrefs.AdvRefs{}
+
+ expected := pktlines(c,
+ []byte("0000000000000000000000000000000000000000 capabilities^{}\x00\n"),
+ pktline.Flush,
+ )
+
+ testEncode(c, ar, expected)
+}
+
+func (s *SuiteEncoder) TestHead(c *C) {
+ hash := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
+ ar := &advrefs.AdvRefs{
+ Head: &hash,
+ }
+
+ expected := pktlines(c,
+ []byte("6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00\n"),
+ pktline.Flush,
+ )
+
+ testEncode(c, ar, expected)
+}
+
+func (s *SuiteEncoder) TestCapsNoHead(c *C) {
+ capabilities := packp.NewCapabilities()
+ capabilities.Add("symref", "HEAD:/refs/heads/master")
+ capabilities.Add("ofs-delta")
+ capabilities.Add("multi_ack")
+ ar := &advrefs.AdvRefs{
+ Capabilities: capabilities,
+ }
+
+ expected := pktlines(c,
+ []byte("0000000000000000000000000000000000000000 capabilities^{}\x00multi_ack ofs-delta symref=HEAD:/refs/heads/master\n"),
+ pktline.Flush,
+ )
+
+ testEncode(c, ar, expected)
+}
+
+func (s *SuiteEncoder) TestCapsWithHead(c *C) {
+ hash := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
+ capabilities := packp.NewCapabilities()
+ capabilities.Add("symref", "HEAD:/refs/heads/master")
+ capabilities.Add("ofs-delta")
+ capabilities.Add("multi_ack")
+ ar := &advrefs.AdvRefs{
+ Head: &hash,
+ Capabilities: capabilities,
+ }
+
+ expected := pktlines(c,
+ []byte("6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00multi_ack ofs-delta symref=HEAD:/refs/heads/master\n"),
+ pktline.Flush,
+ )
+
+ testEncode(c, ar, expected)
+}
+
+func (s *SuiteEncoder) TestRefs(c *C) {
+ references := map[string]plumbing.Hash{
+ "refs/heads/master": plumbing.NewHash("a6930aaee06755d1bdcfd943fbf614e4d92bb0c7"),
+ "refs/tags/v2.6.12-tree": plumbing.NewHash("1111111111111111111111111111111111111111"),
+ "refs/tags/v2.7.13-tree": plumbing.NewHash("3333333333333333333333333333333333333333"),
+ "refs/tags/v2.6.13-tree": plumbing.NewHash("2222222222222222222222222222222222222222"),
+ "refs/tags/v2.6.11-tree": plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c"),
+ }
+ ar := &advrefs.AdvRefs{
+ References: references,
+ }
+
+ expected := pktlines(c,
+ []byte("0000000000000000000000000000000000000000 capabilities^{}\x00\n"),
+ []byte("a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n"),
+ []byte("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n"),
+ []byte("1111111111111111111111111111111111111111 refs/tags/v2.6.12-tree\n"),
+ []byte("2222222222222222222222222222222222222222 refs/tags/v2.6.13-tree\n"),
+ []byte("3333333333333333333333333333333333333333 refs/tags/v2.7.13-tree\n"),
+ pktline.Flush,
+ )
+
+ testEncode(c, ar, expected)
+}
+
+func (s *SuiteEncoder) TestPeeled(c *C) {
+ references := map[string]plumbing.Hash{
+ "refs/heads/master": plumbing.NewHash("a6930aaee06755d1bdcfd943fbf614e4d92bb0c7"),
+ "refs/tags/v2.6.12-tree": plumbing.NewHash("1111111111111111111111111111111111111111"),
+ "refs/tags/v2.7.13-tree": plumbing.NewHash("3333333333333333333333333333333333333333"),
+ "refs/tags/v2.6.13-tree": plumbing.NewHash("2222222222222222222222222222222222222222"),
+ "refs/tags/v2.6.11-tree": plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c"),
+ }
+ peeled := map[string]plumbing.Hash{
+ "refs/tags/v2.7.13-tree": plumbing.NewHash("4444444444444444444444444444444444444444"),
+ "refs/tags/v2.6.12-tree": plumbing.NewHash("5555555555555555555555555555555555555555"),
+ }
+ ar := &advrefs.AdvRefs{
+ References: references,
+ Peeled: peeled,
+ }
+
+ expected := pktlines(c,
+ []byte("0000000000000000000000000000000000000000 capabilities^{}\x00\n"),
+ []byte("a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n"),
+ []byte("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n"),
+ []byte("1111111111111111111111111111111111111111 refs/tags/v2.6.12-tree\n"),
+ []byte("5555555555555555555555555555555555555555 refs/tags/v2.6.12-tree^{}\n"),
+ []byte("2222222222222222222222222222222222222222 refs/tags/v2.6.13-tree\n"),
+ []byte("3333333333333333333333333333333333333333 refs/tags/v2.7.13-tree\n"),
+ []byte("4444444444444444444444444444444444444444 refs/tags/v2.7.13-tree^{}\n"),
+ pktline.Flush,
+ )
+
+ testEncode(c, ar, expected)
+}
+
+func (s *SuiteEncoder) TestShallow(c *C) {
+ shallows := []plumbing.Hash{
+ plumbing.NewHash("1111111111111111111111111111111111111111"),
+ plumbing.NewHash("4444444444444444444444444444444444444444"),
+ plumbing.NewHash("3333333333333333333333333333333333333333"),
+ plumbing.NewHash("2222222222222222222222222222222222222222"),
+ }
+ ar := &advrefs.AdvRefs{
+ Shallows: shallows,
+ }
+
+ expected := pktlines(c,
+ []byte("0000000000000000000000000000000000000000 capabilities^{}\x00\n"),
+ []byte("shallow 1111111111111111111111111111111111111111\n"),
+ []byte("shallow 2222222222222222222222222222222222222222\n"),
+ []byte("shallow 3333333333333333333333333333333333333333\n"),
+ []byte("shallow 4444444444444444444444444444444444444444\n"),
+ pktline.Flush,
+ )
+
+ testEncode(c, ar, expected)
+}
+
+func (s *SuiteEncoder) TestAll(c *C) {
+ hash := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
+
+ capabilities := packp.NewCapabilities()
+ capabilities.Add("symref", "HEAD:/refs/heads/master")
+ capabilities.Add("ofs-delta")
+ capabilities.Add("multi_ack")
+
+ references := map[string]plumbing.Hash{
+ "refs/heads/master": plumbing.NewHash("a6930aaee06755d1bdcfd943fbf614e4d92bb0c7"),
+ "refs/tags/v2.6.12-tree": plumbing.NewHash("1111111111111111111111111111111111111111"),
+ "refs/tags/v2.7.13-tree": plumbing.NewHash("3333333333333333333333333333333333333333"),
+ "refs/tags/v2.6.13-tree": plumbing.NewHash("2222222222222222222222222222222222222222"),
+ "refs/tags/v2.6.11-tree": plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c"),
+ }
+
+ peeled := map[string]plumbing.Hash{
+ "refs/tags/v2.7.13-tree": plumbing.NewHash("4444444444444444444444444444444444444444"),
+ "refs/tags/v2.6.12-tree": plumbing.NewHash("5555555555555555555555555555555555555555"),
+ }
+
+ shallows := []plumbing.Hash{
+ plumbing.NewHash("1111111111111111111111111111111111111111"),
+ plumbing.NewHash("4444444444444444444444444444444444444444"),
+ plumbing.NewHash("3333333333333333333333333333333333333333"),
+ plumbing.NewHash("2222222222222222222222222222222222222222"),
+ }
+
+ ar := &advrefs.AdvRefs{
+ Head: &hash,
+ Capabilities: capabilities,
+ References: references,
+ Peeled: peeled,
+ Shallows: shallows,
+ }
+
+ expected := pktlines(c,
+ []byte("6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00multi_ack ofs-delta symref=HEAD:/refs/heads/master\n"),
+ []byte("a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n"),
+ []byte("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n"),
+ []byte("1111111111111111111111111111111111111111 refs/tags/v2.6.12-tree\n"),
+ []byte("5555555555555555555555555555555555555555 refs/tags/v2.6.12-tree^{}\n"),
+ []byte("2222222222222222222222222222222222222222 refs/tags/v2.6.13-tree\n"),
+ []byte("3333333333333333333333333333333333333333 refs/tags/v2.7.13-tree\n"),
+ []byte("4444444444444444444444444444444444444444 refs/tags/v2.7.13-tree^{}\n"),
+ []byte("shallow 1111111111111111111111111111111111111111\n"),
+ []byte("shallow 2222222222222222222222222222222222222222\n"),
+ []byte("shallow 3333333333333333333333333333333333333333\n"),
+ []byte("shallow 4444444444444444444444444444444444444444\n"),
+ pktline.Flush,
+ )
+
+ testEncode(c, ar, expected)
+}
+
+func (s *SuiteEncoder) TestErrorTooLong(c *C) {
+ references := map[string]plumbing.Hash{
+ strings.Repeat("a", pktline.MaxPayloadSize): plumbing.NewHash("a6930aaee06755d1bdcfd943fbf614e4d92bb0c7"),
+ }
+ ar := &advrefs.AdvRefs{
+ References: references,
+ }
+
+ var buf bytes.Buffer
+ e := advrefs.NewEncoder(&buf)
+ err := e.Encode(ar)
+ c.Assert(err, ErrorMatches, ".*payload is too long.*")
+}
diff --git a/plumbing/format/packp/capabilities.go b/plumbing/format/packp/capabilities.go
new file mode 100644
index 0000000..d77c2fa
--- /dev/null
+++ b/plumbing/format/packp/capabilities.go
@@ -0,0 +1,136 @@
+package packp
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// Capabilities contains all the server capabilities
+// https://github.com/git/git/blob/master/Documentation/technical/protocol-capabilities.txt
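+//
+// A typical round trip, as a sketch (the order shown is the insertion
+// order that Add preserves):
+//
+//	caps := NewCapabilities()
+//	caps.Decode("multi_ack symref=HEAD:refs/heads/master agent=git/2.10.0")
+//	caps.Supports("multi_ack")     // true
+//	caps.SymbolicReference("HEAD") // "refs/heads/master"
+//	caps.String()                  // "multi_ack symref=HEAD:refs/heads/master agent=git/2.10.0"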
+type Capabilities struct {
+ m map[string]*Capability
+ o []string
+}
+
+// Capability represents a server capability
+type Capability struct {
+ Name string
+ Values []string
+}
+
+// NewCapabilities returns a new Capabilities struct
+func NewCapabilities() *Capabilities {
+ return &Capabilities{
+ m: make(map[string]*Capability),
+ }
+}
+
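+// IsEmpty returns true if the collection contains no capabilities.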
+func (c *Capabilities) IsEmpty() bool {
+ return len(c.o) == 0
+}
+
+// Decode decodes a space-separated capability list as read from the wire.
+func (c *Capabilities) Decode(raw string) {
+ params := strings.Split(raw, " ")
+ for _, p := range params {
+ s := strings.SplitN(p, "=", 2)
+
+ var value string
+ if len(s) == 2 {
+ value = s[1]
+ }
+
+ c.Add(s[0], value)
+ }
+}
+
+// Get returns the values for a capability
+func (c *Capabilities) Get(capability string) *Capability {
+ return c.m[capability]
+}
+
+// Set sets a capability, replacing any previously stored values.
+func (c *Capabilities) Set(capability string, values ...string) {
+ if _, ok := c.m[capability]; ok {
+ delete(c.m, capability)
+ }
+
+ c.Add(capability, values...)
+}
+
+// Add adds a capability; any given values are appended to it.
+func (c *Capabilities) Add(capability string, values ...string) {
+ if !c.Supports(capability) {
+ c.m[capability] = &Capability{Name: capability}
+ c.o = append(c.o, capability)
+ }
+
+ if len(values) == 0 {
+ return
+ }
+
+ c.m[capability].Values = append(c.m[capability].Values, values...)
+}
+
+// Supports returns true if capability is present
+func (c *Capabilities) Supports(capability string) bool {
+ _, ok := c.m[capability]
+ return ok
+}
+
+// SymbolicReference returns the target of the given symbolic
+// reference, as advertised by the symref capability.
+func (c *Capabilities) SymbolicReference(sym string) string {
+ if !c.Supports("symref") {
+ return ""
+ }
+
+ for _, symref := range c.Get("symref").Values {
+ parts := strings.Split(symref, ":")
+ if len(parts) != 2 {
+ continue
+ }
+
+ if parts[0] == sym {
+ return parts[1]
+ }
+ }
+
+ return ""
+}
+
+// Sort sorts the capabilities in increasing order of their names.
+func (c *Capabilities) Sort() {
+ sort.Strings(c.o)
+}
+
+func (c *Capabilities) String() string {
+ if len(c.o) == 0 {
+ return ""
+ }
+
+ var o string
+ for _, key := range c.o {
+ cap := c.m[key]
+
+ added := false
+ for _, value := range cap.Values {
+ if value == "" {
+ continue
+ }
+
+ added = true
+ o += fmt.Sprintf("%s=%s ", key, value)
+ }
+
+		if !added { // covers both no values and only-empty values
+ o += key + " "
+ }
+ }
+
+ if len(o) == 0 {
+ return o
+ }
+
+ return o[:len(o)-1]
+}
diff --git a/plumbing/format/packp/capabilities_test.go b/plumbing/format/packp/capabilities_test.go
new file mode 100644
index 0000000..e42a0c7
--- /dev/null
+++ b/plumbing/format/packp/capabilities_test.go
@@ -0,0 +1,46 @@
+package packp
+
+import (
+ "testing"
+
+ . "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type SuiteCapabilities struct{}
+
+var _ = Suite(&SuiteCapabilities{})
+
+func (s *SuiteCapabilities) TestDecode(c *C) {
+ cap := NewCapabilities()
+ cap.Decode("symref=foo symref=qux thin-pack")
+
+ c.Assert(cap.m, HasLen, 2)
+ c.Assert(cap.Get("symref").Values, DeepEquals, []string{"foo", "qux"})
+ c.Assert(cap.Get("thin-pack").Values, DeepEquals, []string{""})
+}
+
+func (s *SuiteCapabilities) TestSet(c *C) {
+ cap := NewCapabilities()
+ cap.Add("symref", "foo", "qux")
+ cap.Set("symref", "bar")
+
+ c.Assert(cap.m, HasLen, 1)
+ c.Assert(cap.Get("symref").Values, DeepEquals, []string{"bar"})
+}
+
+func (s *SuiteCapabilities) TestSetEmpty(c *C) {
+ cap := NewCapabilities()
+ cap.Set("foo", "bar")
+
+ c.Assert(cap.Get("foo").Values, HasLen, 1)
+}
+
+func (s *SuiteCapabilities) TestAdd(c *C) {
+ cap := NewCapabilities()
+ cap.Add("symref", "foo", "qux")
+ cap.Add("thin-pack")
+
+ c.Assert(cap.String(), Equals, "symref=foo symref=qux thin-pack")
+}
diff --git a/plumbing/format/packp/doc.go b/plumbing/format/packp/doc.go
new file mode 100644
index 0000000..4950d1d
--- /dev/null
+++ b/plumbing/format/packp/doc.go
@@ -0,0 +1,724 @@
+package packp
+
+/*
+
+A nice way to trace the real data transmitted and received by git is to use:
+
+GIT_TRACE_PACKET=true git ls-remote http://github.com/src-d/go-git
+GIT_TRACE_PACKET=true git clone http://github.com/src-d/go-git
+
+Here follows a copy of the current protocol specification at the time of
+this writing.
+
+(Please note that most HTTP git servers will add a flush-pkt after the
+first pkt-line when using the smart HTTP protocol.)
+
+
+Documentation Common to Pack and Http Protocols
+===============================================
+
+ABNF Notation
+-------------
+
+ABNF notation as described by RFC 5234 is used within the protocol documents,
+except the following replacement core rules are used:
+----
+ HEXDIG = DIGIT / "a" / "b" / "c" / "d" / "e" / "f"
+----
+
+We also define the following common rules:
+----
+ NUL = %x00
+ zero-id = 40*"0"
+  obj-id    =  40*(HEXDIG)
+
+ refname = "HEAD"
+ refname /= "refs/" <see discussion below>
+----
+
+A refname is a hierarchical octet string beginning with "refs/" and
+not violating the 'git-check-ref-format' command's validation rules.
+More specifically:
+
+. They can include slash `/` for hierarchical (directory)
+ grouping, but no slash-separated component can begin with a
+ dot `.`.
+
+. They must contain at least one `/`. This enforces the presence of a
+ category like `heads/`, `tags/` etc. but the actual names are not
+ restricted.
+
+. They cannot have two consecutive dots `..` anywhere.
+
+. They cannot have ASCII control characters (i.e. bytes whose
+ values are lower than \040, or \177 `DEL`), space, tilde `~`,
+ caret `^`, colon `:`, question-mark `?`, asterisk `*`,
+ or open bracket `[` anywhere.
+
+. They cannot end with a slash `/` or a dot `.`.
+
+. They cannot end with the sequence `.lock`.
+
+. They cannot contain a sequence `@{`.
+
+. They cannot contain a `\\`.
+
+
+pkt-line Format
+---------------
+
+Much (but not all) of the payload is described around pkt-lines.
+
+A pkt-line is a variable length binary string. The first four bytes
+of the line, the pkt-len, indicates the total length of the line,
+in hexadecimal. The pkt-len includes the 4 bytes used to contain
+the length's hexadecimal representation.
+
+A pkt-line MAY contain binary data, so implementors MUST ensure
+pkt-line parsing/formatting routines are 8-bit clean.
+
+A non-binary line SHOULD be terminated by an LF, which, if present,
+MUST be included in the total length. Receivers MUST treat pkt-lines
+with non-binary data the same whether or not they contain the trailing
+LF (stripping the LF if present, and not complaining when it is
+missing).
+
+The maximum length of a pkt-line's data component is 65516 bytes.
+Implementations MUST NOT send a pkt-line whose length exceeds 65520
+(65516 bytes of payload + 4 bytes of length data).
+
+Implementations SHOULD NOT send an empty pkt-line ("0004").
+
+A pkt-line with a length field of 0 ("0000"), called a flush-pkt,
+is a special case and MUST be handled differently than an empty
+pkt-line ("0004").
+
+----
+ pkt-line = data-pkt / flush-pkt
+
+ data-pkt = pkt-len pkt-payload
+ pkt-len = 4*(HEXDIG)
+ pkt-payload = (pkt-len - 4)*(OCTET)
+
+ flush-pkt = "0000"
+----
+
+Examples (as C-style strings):
+
+----
+ pkt-line actual value
+ ---------------------------------
+ "0006a\n" "a\n"
+ "0005a" "a"
+ "000bfoobar\n" "foobar\n"
+ "0004" ""
+----
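+
+These length rules are mechanical enough to sketch in a few lines of
+Go; a stand-alone illustration (assuming the standard errors and fmt
+imports), not the implementation used by the pktline package below:
+
+----
+  func encodePktLine(payload []byte) ([]byte, error) {
+      if len(payload) > 65516 {
+          return nil, errors.New("payload is too long")
+      }
+      // The pkt-len counts itself: payload length plus 4 length bytes.
+      return append([]byte(fmt.Sprintf("%04x", len(payload)+4)), payload...), nil
+  }
+----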
+
+Packfile transfer protocols
+===========================
+
+Git supports transferring data in packfiles over the ssh://, git://, http:// and
+file:// transports. There exist two sets of protocols, one for pushing
+data from a client to a server and another for fetching data from a
+server to a client. The three transports (ssh, git, file) use the same
+protocol to transfer data. http is documented in http-protocol.txt.
+
+The processes invoked in the canonical Git implementation are 'upload-pack'
+on the server side and 'fetch-pack' on the client side for fetching data;
+then 'receive-pack' on the server and 'send-pack' on the client for pushing
+data. The protocol functions to have a server tell a client what is
+currently on the server, then for the two to negotiate the smallest amount
+of data to send in order to fully update one or the other.
+
+pkt-line Format
+---------------
+
+The descriptions below build on the pkt-line format described in
+protocol-common.txt. When the grammar indicates `PKT-LINE(...)`, unless
+otherwise noted the usual pkt-line LF rules apply: the sender SHOULD
+include an LF, but the receiver MUST NOT complain if it is not present.
+
+Transports
+----------
+There are three transports over which the packfile protocol is
+initiated. The Git transport is a simple, unauthenticated server that
+takes the command (almost always 'upload-pack', though Git
+servers can be configured to be globally writable, in which case 'receive-
+pack' initiation is also allowed) with which the client wishes to
+communicate and executes it and connects it to the requesting
+process.
+
+In the SSH transport, the client just runs the 'upload-pack'
+or 'receive-pack' process on the server over the SSH protocol and then
+communicates with that invoked process over the SSH connection.
+
+The file:// transport runs the 'upload-pack' or 'receive-pack'
+process locally and communicates with it over a pipe.
+
+Git Transport
+-------------
+
+The Git transport starts off by sending the command and repository
+on the wire using the pkt-line format, followed by a NUL byte and a
+hostname parameter, terminated by a NUL byte.
+
+ 0032git-upload-pack /project.git\0host=myserver.com\0
+
+--
+ git-proto-request = request-command SP pathname NUL [ host-parameter NUL ]
+ request-command = "git-upload-pack" / "git-receive-pack" /
+ "git-upload-archive" ; case sensitive
+ pathname = *( %x01-ff ) ; exclude NUL
+ host-parameter = "host=" hostname [ ":" port ]
+--
+
+Only host-parameter is allowed in the git-proto-request. Clients
+MUST NOT attempt to send additional parameters. It is used for the
+git-daemon name based virtual hosting. See --interpolated-path
+option to git daemon, with the %H/%CH format characters.
+
+Basically what the Git client is doing to connect to an 'upload-pack'
+process on the server side over the Git protocol is this:
+
+ $ echo -e -n \
+ "0039git-upload-pack /schacon/gitbook.git\0host=example.com\0" |
+ nc -v example.com 9418
+
+If the server refuses the request for some reasons, it could abort
+gracefully with an error message.
+
+----
+ error-line = PKT-LINE("ERR" SP explanation-text)
+----
+
+
+SSH Transport
+-------------
+
+Initiating the upload-pack or receive-pack processes over SSH is
+executing the binary on the server via SSH remote execution.
+It is basically equivalent to running this:
+
+ $ ssh git.example.com "git-upload-pack '/project.git'"
+
+For a server to support Git pushing and pulling for a given user over
+SSH, that user needs to be able to execute one or both of those
+commands via the SSH shell that they are provided on login. On some
+systems, that shell access is limited to only being able to run those
+two commands, or even just one of them.
+
+In an ssh:// format URI, the path is absolute in the URI, so the '/' after
+the host name (or port number) is sent as an argument, which is then
+read by the remote git-upload-pack exactly as is, so it's effectively
+an absolute path in the remote filesystem.
+
+ git clone ssh://user@example.com/project.git
+ |
+ v
+ ssh user@example.com "git-upload-pack '/project.git'"
+
+In a "user@host:path" format URI, its relative to the user's home
+directory, because the Git client will run:
+
+ git clone user@example.com:project.git
+ |
+ v
+ ssh user@example.com "git-upload-pack 'project.git'"
+
+The exception is if a '~' is used, in which case
+we execute it without the leading '/'.
+
+   ssh://user@example.com/~alice/project.git
+ |
+ v
+ ssh user@example.com "git-upload-pack '~alice/project.git'"
+
+A few things to remember here:
+
+- The "command name" is spelled with dash (e.g. git-upload-pack), but
+ this can be overridden by the client;
+
+- The repository path is always quoted with single quotes.
+
+Fetching Data From a Server
+---------------------------
+
+When one Git repository wants to get data that a second repository
+has, the first can 'fetch' from the second. This operation determines
+what data the server has that the client does not, and then streams that
+data down to the client in packfile format.
+
+
+Reference Discovery
+-------------------
+
+When the client initially connects the server will immediately respond
+with a listing of each reference it has (all branches and tags) along
+with the object name that each reference currently points to.
+
+ $ echo -e -n "0039git-upload-pack /schacon/gitbook.git\0host=example.com\0" |
+ nc -v example.com 9418
+ 00887217a7c7e582c46cec22a130adf4b9d7d950fba0 HEAD\0multi_ack thin-pack
+ side-band side-band-64k ofs-delta shallow no-progress include-tag
+ 00441d3fcd5ced445d1abc402225c0b8a1299641f497 refs/heads/integration
+ 003f7217a7c7e582c46cec22a130adf4b9d7d950fba0 refs/heads/master
+ 003cb88d2441cac0977faf98efc80305012112238d9d refs/tags/v0.9
+ 003c525128480b96c89e6418b1e40909bf6c5b2d580f refs/tags/v1.0
+ 003fe92df48743b7bc7d26bcaabfddde0a1e20cae47c refs/tags/v1.0^{}
+ 0000
+
+The returned response is a pkt-line stream describing each ref and
+its current value. The stream MUST be sorted by name according to
+the C locale ordering.
+
+If HEAD is a valid ref, HEAD MUST appear as the first advertised
+ref. If HEAD is not a valid ref, HEAD MUST NOT appear in the
+advertisement list at all, but other refs may still appear.
+
+The stream MUST include capability declarations behind a NUL on the
+first ref. The peeled value of a ref (that is "ref^{}") MUST be
+immediately after the ref itself, if presented. A conforming server
+MUST peel the ref if it's an annotated tag.
+
+----
+ advertised-refs = (no-refs / list-of-refs)
+ *shallow
+ flush-pkt
+
+ no-refs = PKT-LINE(zero-id SP "capabilities^{}"
+ NUL capability-list)
+
+ list-of-refs = first-ref *other-ref
+ first-ref = PKT-LINE(obj-id SP refname
+ NUL capability-list)
+
+ other-ref = PKT-LINE(other-tip / other-peeled)
+ other-tip = obj-id SP refname
+ other-peeled = obj-id SP refname "^{}"
+
+ shallow = PKT-LINE("shallow" SP obj-id)
+
+ capability-list = capability *(SP capability)
+ capability = 1*(LC_ALPHA / DIGIT / "-" / "_")
+ LC_ALPHA = %x61-7A
+----
+
+Server and client MUST use lowercase for obj-id, both MUST treat obj-id
+as case-insensitive.
+
+See protocol-capabilities.txt for a list of allowed server capabilities
+and descriptions.
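+
+As an illustration, the capability list behind the NUL of the first
+advertised ref can be parsed with the Capabilities type from this
+change; a sketch, assuming the standard strings import:
+
+----
+  line := "7217a7c7e582c46cec22a130adf4b9d7d950fba0 HEAD\x00multi_ack thin-pack"
+  parts := strings.SplitN(line, "\x00", 2)
+  caps := packp.NewCapabilities()
+  caps.Decode(parts[1])
+  caps.Supports("thin-pack") // true
+----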
+
+Packfile Negotiation
+--------------------
+After reference and capabilities discovery, the client can decide to
+terminate the connection by sending a flush-pkt, telling the server it can
+now gracefully terminate and disconnect, when it does not need any pack
+data. This can happen with the ls-remote command, and also when the
+client is already up-to-date.
+
+Otherwise, it enters the negotiation phase, where the client and
+server determine what the minimal packfile necessary for transport is,
+by telling the server what objects it wants, its shallow objects
+(if any), and the maximum commit depth it wants (if any). The client
+will also send a list of the capabilities it wants to be in effect,
+out of what the server said it could do with the first 'want' line.
+
+----
+ upload-request = want-list
+ *shallow-line
+ *1depth-request
+ flush-pkt
+
+ want-list = first-want
+ *additional-want
+
+ shallow-line = PKT-LINE("shallow" SP obj-id)
+
+ depth-request = PKT-LINE("deepen" SP depth) /
+ PKT-LINE("deepen-since" SP timestamp) /
+ PKT-LINE("deepen-not" SP ref)
+
+ first-want = PKT-LINE("want" SP obj-id SP capability-list)
+ additional-want = PKT-LINE("want" SP obj-id)
+
+ depth = 1*DIGIT
+----
+
+Clients MUST send all the obj-ids it wants from the reference
+discovery phase as 'want' lines. Clients MUST send at least one
+'want' command in the request body. Clients MUST NOT mention an
+obj-id in a 'want' command which did not appear in the response
+obtained through ref discovery.
+
+The client MUST write all obj-ids which it only has shallow copies
+of (meaning that it does not have the parents of a commit) as
+'shallow' lines so that the server is aware of the limitations of
+the client's history.
+
+The client now sends the maximum commit history depth it wants for
+this transaction, which is the number of commits it wants from the
+tip of the history, if any, as a 'deepen' line. A depth of 0 is the
+same as not making a depth request. The client does not want to receive
+any commits beyond this depth, nor does it want objects needed only to
+complete those commits. Commits whose parents are not received as a
+result are defined as shallow and marked as such in the server. This
+information is sent back to the client in the next step.
+
+Once all the 'want's and 'shallow's (and optional 'deepen') are
+transferred, clients MUST send a flush-pkt, to tell the server side
+that it is done sending the list.
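+
+As a sketch, a client could emit such a request with the pktline
+encoder from this change (w, hash1 and hash2 are hypothetical):
+
+----
+  e := pktline.NewEncoder(w)
+  _ = e.Encodef("want %s multi_ack ofs-delta\n", hash1) // first-want carries the capabilities
+  _ = e.Encodef("want %s\n", hash2)
+  _ = e.Encodef("deepen %d\n", 3)
+  _ = e.Flush() // done sending the list
+----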
+
+Otherwise, if the client sent a positive depth request, the server
+will determine which commits will and will not be shallow and
+send this information to the client. If the client did not request
+a positive depth, this step is skipped.
+
+----
+ shallow-update = *shallow-line
+ *unshallow-line
+ flush-pkt
+
+ shallow-line = PKT-LINE("shallow" SP obj-id)
+
+ unshallow-line = PKT-LINE("unshallow" SP obj-id)
+----
+
+If the client has requested a positive depth, the server will compute
+the set of commits which are no deeper than the desired depth. The set
+of commits starts at the client's wants.
+
+The server writes 'shallow' lines for each
+commit whose parents will not be sent as a result. The server writes
+an 'unshallow' line for each commit which the client has indicated is
+shallow, but is no longer shallow at the currently requested depth
+(that is, its parents will now be sent). The server MUST NOT mark
+as unshallow anything which the client has not indicated was shallow.
+
+Now the client will send a list of the obj-ids it has using 'have'
+lines, so the server can make a packfile that only contains the objects
+that the client needs. In multi_ack mode, the canonical implementation
+will send up to 32 of these at a time, then will send a flush-pkt. The
+canonical implementation will skip ahead and send the next 32 immediately,
+so that there is always a block of 32 "in-flight on the wire" at a time.
+
+----
+ upload-haves = have-list
+ compute-end
+
+ have-list = *have-line
+ have-line = PKT-LINE("have" SP obj-id)
+ compute-end = flush-pkt / PKT-LINE("done")
+----
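+
+The client side of this loop maps directly onto the pktline encoder
+from this change; a sketch, where haves is a hypothetical slice of
+hex obj-ids:
+
+----
+  e := pktline.NewEncoder(w)
+  for _, h := range haves {
+      _ = e.Encodef("have %s\n", h)
+  }
+  _ = e.Flush()                // let the server ACK/NAK this block...
+  _ = e.EncodeString("done\n") // ...or signal the end of negotiation
+----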
+
+If the server reads 'have' lines, it then will respond by ACKing any
+of the obj-ids the client said it had that the server also has. The
+server will ACK obj-ids differently depending on which ack mode is
+chosen by the client.
+
+In multi_ack mode:
+
+ * the server will respond with 'ACK obj-id continue' for any common
+ commits.
+
+ * once the server has found an acceptable common base commit and is
+ ready to make a packfile, it will blindly ACK all 'have' obj-ids
+ back to the client.
+
+ * the server will then send a 'NAK' and then wait for another response
+ from the client - either a 'done' or another list of 'have' lines.
+
+In multi_ack_detailed mode:
+
+ * the server will differentiate the ACKs where it is signaling
+ that it is ready to send data with 'ACK obj-id ready' lines, and
+ signals the identified common commits with 'ACK obj-id common' lines.
+
+Without either multi_ack or multi_ack_detailed:
+
+ * upload-pack sends "ACK obj-id" on the first common object it finds.
+ After that it says nothing until the client gives it a "done".
+
+ * upload-pack sends "NAK" on a flush-pkt if no common object
+ has been found yet. If one has been found, and thus an ACK
+ was already sent, it's silent on the flush-pkt.
+
+After the client has gotten enough ACK responses that it can determine
+that the server has enough information to send an efficient packfile
+(in the canonical implementation, this is determined when it has received
+enough ACKs that it can color everything left in the --date-order queue
+as common with the server, or the --date-order queue is empty), or the
+client determines that it wants to give up (in the canonical implementation,
+this is determined when the client sends 256 'have' lines without getting
+any of them ACKed by the server - meaning there is nothing in common and
+the server should just send all of its objects), then the client will send
+a 'done' command. The 'done' command signals to the server that the client
+is ready to receive its packfile data.
+
+However, the 256 limit *only* turns on in the canonical client
+implementation if we have received at least one "ACK %s continue"
+during a prior round. This helps to ensure that at least one common
+ancestor is found before we give up entirely.
+
+Once the 'done' line is read from the client, the server will either
+send a final 'ACK obj-id' or it will send a 'NAK'. 'obj-id' is the object
+name of the last commit determined to be common. The server only sends
+ACK after 'done' if there is at least one common base and multi_ack or
+multi_ack_detailed is enabled. The server always sends NAK after 'done'
+if there is no common base found.
+
+Then the server will start sending its packfile data.
+
+----
+ server-response = *ack_multi ack / nak
+ ack_multi = PKT-LINE("ACK" SP obj-id ack_status)
+ ack_status = "continue" / "common" / "ready"
+ ack = PKT-LINE("ACK" SP obj-id)
+ nak = PKT-LINE("NAK")
+----
+
+A simple clone may look like this (with no 'have' lines):
+
+----
+ C: 0054want 74730d410fcb6603ace96f1dc55ea6196122532d multi_ack \
+ side-band-64k ofs-delta\n
+ C: 0032want 7d1665144a3a975c05f1f43902ddaf084e784dbe\n
+ C: 0032want 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a\n
+ C: 0032want 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01\n
+ C: 0032want 74730d410fcb6603ace96f1dc55ea6196122532d\n
+ C: 0000
+ C: 0009done\n
+
+ S: 0008NAK\n
+ S: [PACKFILE]
+----
+
+An incremental update (fetch) response might look like this:
+
+----
+ C: 0054want 74730d410fcb6603ace96f1dc55ea6196122532d multi_ack \
+ side-band-64k ofs-delta\n
+ C: 0032want 7d1665144a3a975c05f1f43902ddaf084e784dbe\n
+ C: 0032want 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a\n
+ C: 0000
+ C: 0032have 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01\n
+ C: [30 more have lines]
+ C: 0032have 74730d410fcb6603ace96f1dc55ea6196122532d\n
+ C: 0000
+
+ S: 003aACK 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01 continue\n
+ S: 003aACK 74730d410fcb6603ace96f1dc55ea6196122532d continue\n
+ S: 0008NAK\n
+
+ C: 0009done\n
+
+ S: 0031ACK 74730d410fcb6603ace96f1dc55ea6196122532d\n
+ S: [PACKFILE]
+----
+
+
+Packfile Data
+-------------
+
+Now that the client and server have finished negotiating the minimal
+amount of data that needs to be sent to the client, the server
+will construct and send the required data in packfile format.
+
+See pack-format.txt for what the packfile itself actually looks like.
+
+If 'side-band' or 'side-band-64k' capabilities have been specified by
+the client, the server will send the packfile data multiplexed.
+
+Each packet starts with the pkt-line length of the data that follows,
+followed by a single byte specifying the sideband the following data
+is coming in on.
+
+In 'side-band' mode, it will send up to 999 data bytes plus 1 control
+code, for a total of up to 1000 bytes in a pkt-line. In 'side-band-64k'
+mode it will send up to 65519 data bytes plus 1 control code, for a
+total of up to 65520 bytes in a pkt-line.
+
+The sideband byte will be a '1', '2' or a '3'. Sideband '1' will contain
+packfile data, sideband '2' will be used for progress information that the
+client will generally print to stderr and sideband '3' is used for error
+information.
+
+If no 'side-band' capability was specified, the server will stream the
+entire packfile without multiplexing.
+
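+A demultiplexer for this scheme is a small loop over the pktline
+scanner from this change; a sketch, assuming the standard io and fmt
+imports:
+
+----
+  func demux(r io.Reader, pack, progress io.Writer) error {
+      s := pktline.NewScanner(r)
+      for s.Scan() {
+          p := s.Bytes()
+          switch {
+          case len(p) == 0: // flush-pkt: end of the multiplexed stream
+              return s.Err()
+          case p[0] == 1: // packfile data
+              if _, err := pack.Write(p[1:]); err != nil {
+                  return err
+              }
+          case p[0] == 2: // progress, usually copied to stderr
+              if _, err := progress.Write(p[1:]); err != nil {
+                  return err
+              }
+          case p[0] == 3: // error message
+              return fmt.Errorf("remote error: %s", p[1:])
+          }
+      }
+      return s.Err()
+  }
+----
+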
+
+Pushing Data To a Server
+------------------------
+
+Pushing data to a server will invoke the 'receive-pack' process on the
+server, which will allow the client to tell it which references it should
+update and then send all the data the server will need for those new
+references to be complete. Once all the data is received and validated,
+the server will then update its references to what the client specified.
+
+Authentication
+--------------
+
+The protocol itself contains no authentication mechanisms. That is to be
+handled by the transport, such as SSH, before the 'receive-pack' process is
+invoked. If 'receive-pack' is configured over the Git transport, those
+repositories will be writable by anyone who can access that port (9418) as
+that transport is unauthenticated.
+
+Reference Discovery
+-------------------
+
+The reference discovery phase is done nearly the same way as it is in the
+fetching protocol. Each reference obj-id and name on the server is sent
+in packet-line format to the client, followed by a flush-pkt. The only
+real difference is that the capability listing is different - the only
+possible values are 'report-status', 'delete-refs', 'ofs-delta' and
+'push-options'.
+
+Reference Update Request and Packfile Transfer
+----------------------------------------------
+
+Once the client knows what references the server is at, it can send a
+list of reference update requests. For each reference on the server
+that it wants to update, it sends a line listing the obj-id currently on
+the server, the obj-id the client would like to update it to and the name
+of the reference.
+
+This list is followed by a flush-pkt. Then the push options are transmitted
+one per packet followed by another flush-pkt. After that the packfile that
+should contain all the objects that the server will need to complete the new
+references will be sent.
+
+----
+ update-request = *shallow ( command-list | push-cert ) [packfile]
+
+ shallow = PKT-LINE("shallow" SP obj-id)
+
+ command-list = PKT-LINE(command NUL capability-list)
+ *PKT-LINE(command)
+ flush-pkt
+
+ command = create / delete / update
+ create = zero-id SP new-id SP name
+ delete = old-id SP zero-id SP name
+ update = old-id SP new-id SP name
+
+ old-id = obj-id
+ new-id = obj-id
+
+ push-cert = PKT-LINE("push-cert" NUL capability-list LF)
+ PKT-LINE("certificate version 0.1" LF)
+ PKT-LINE("pusher" SP ident LF)
+ PKT-LINE("pushee" SP url LF)
+ PKT-LINE("nonce" SP nonce LF)
+ PKT-LINE(LF)
+ *PKT-LINE(command LF)
+ *PKT-LINE(gpg-signature-lines LF)
+ PKT-LINE("push-cert-end" LF)
+
+ packfile = "PACK" 28*(OCTET)
+----
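+
+A sketch of sending a single update command with the pktline encoder
+from this change (w, oldID and newID are hypothetical):
+
+----
+  e := pktline.NewEncoder(w)
+  _ = e.Encodef("%s %s %s\x00report-status ofs-delta\n", oldID, newID, "refs/heads/master")
+  _ = e.Flush()
+  // ...followed by the packfile bytes, written straight to w.
+----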
+
+If the receiving end does not support delete-refs, the sending end MUST
+NOT ask for the delete command.
+
+If the receiving end does not support push-cert, the sending end
+MUST NOT send a push-cert command. When a push-cert command is
+sent, command-list MUST NOT be sent; the commands recorded in the
+push certificate are used instead.
+
+The packfile MUST NOT be sent if the only command used is 'delete'.
+
+A packfile MUST be sent if either create or update command is used,
+even if the server already has all the necessary objects. In this
+case the client MUST send an empty packfile. The only time this
+is likely to happen is if the client is creating
+a new branch or a tag that points to an existing obj-id.
+
+The server will receive the packfile, unpack it, then validate each
+reference that is being updated that it hasn't changed while the request
+was being processed (the obj-id is still the same as the old-id), and
+it will run any update hooks to make sure that the update is acceptable.
+If all of that is fine, the server will then update the references.
+
+Push Certificate
+----------------
+
+A push certificate begins with a set of header lines. After the
+header and an empty line, the protocol commands follow, one per
+line. Note that the trailing LF in push-cert PKT-LINEs is _not_
+optional; it must be present.
+
+Currently, the following header fields are defined:
+
+`pusher` ident::
+ Identify the GPG key in "Human Readable Name <email@address>"
+ format.
+
+`pushee` url::
+ The repository URL (anonymized, if the URL contains
+ authentication material) the user who ran `git push`
+ intended to push into.
+
+`nonce` nonce::
+ The 'nonce' string the receiving repository asked the
+ pushing user to include in the certificate, to prevent
+ replay attacks.
+
+The GPG signature lines are a detached signature for the contents
+recorded in the push certificate before the signature block begins.
+The detached signature is used to certify that the commands were
+given by the pusher, who must be the signer.
+
+Report Status
+-------------
+
+After receiving the pack data from the sender, the receiver sends a
+report if 'report-status' capability is in effect.
+It is a short listing of what happened in that update. It will first
+list the status of the packfile unpacking as either 'unpack ok' or
+'unpack [error]'. Then it will list the status for each of the references
+that it tried to update. Each line is either 'ok [refname]' if the
+update was successful, or 'ng [refname] [error]' if the update was not.
+
+----
+ report-status = unpack-status
+ 1*(command-status)
+ flush-pkt
+
+ unpack-status = PKT-LINE("unpack" SP unpack-result)
+ unpack-result = "ok" / error-msg
+
+ command-status = command-ok / command-fail
+ command-ok = PKT-LINE("ok" SP refname)
+ command-fail = PKT-LINE("ng" SP refname SP error-msg)
+
+  error-msg       =  1*(OCTET) ; where not "ok"
+----
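+
+Reading the report back with the pktline scanner from this change is
+a short loop; a sketch:
+
+----
+  s := pktline.NewScanner(r)
+  for s.Scan() && len(s.Bytes()) > 0 { // stop at the flush-pkt
+      fmt.Printf("%s", s.Bytes()) // "unpack ok", then one ok/ng line per ref
+  }
+----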
+
+Updates can be unsuccessful for a number of reasons. The reference can have
+changed since the reference discovery phase was originally sent, meaning
+someone pushed in the meantime. The reference being pushed could be a
+non-fast-forward reference and the update hooks or configuration could be
+set to not allow that, etc. Also, some references can be updated while others
+can be rejected.
+
+An example client/server communication might look like this:
+
+----
+ S: 007c74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/local\0report-status delete-refs ofs-delta\n
+ S: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe refs/heads/debug\n
+ S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/master\n
+ S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/team\n
+ S: 0000
+
+ C: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe 74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/debug\n
+ C: 003e74730d410fcb6603ace96f1dc55ea6196122532d 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a refs/heads/master\n
+ C: 0000
+ C: [PACKDATA]
+
+ S: 000eunpack ok\n
+ S: 0018ok refs/heads/debug\n
+ S: 002ang refs/heads/master non-fast-forward\n
+----
+*/
diff --git a/plumbing/format/packp/pktline/encoder.go b/plumbing/format/packp/pktline/encoder.go
new file mode 100644
index 0000000..0a88a9b
--- /dev/null
+++ b/plumbing/format/packp/pktline/encoder.go
@@ -0,0 +1,123 @@
+// Package pktline implements reading payloads from pkt-lines and encoding pkt-lines from payloads.
+package pktline
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// An Encoder writes pkt-lines to an output stream.
+type Encoder struct {
+ w io.Writer
+}
+
+const (
+ // MaxPayloadSize is the maximum payload size of a pkt-line in bytes.
+ MaxPayloadSize = 65516
+)
+
+var (
+ // FlushPkt are the contents of a flush-pkt pkt-line.
+ FlushPkt = []byte{'0', '0', '0', '0'}
+ // Flush is the payload to use with the Encode method to encode a flush-pkt.
+ Flush = []byte{}
+ // FlushString is the payload to use with the EncodeString method to encode a flush-pkt.
+ FlushString = ""
+ // ErrPayloadTooLong is returned by the Encode methods when any of the
+ // provided payloads is bigger than MaxPayloadSize.
+ ErrPayloadTooLong = errors.New("payload is too long")
+)
+
+// NewEncoder returns a new encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ w: w,
+ }
+}
+
+// Flush encodes a flush-pkt to the output stream.
+func (e *Encoder) Flush() error {
+ _, err := e.w.Write(FlushPkt)
+ return err
+}
+
+// Encode encodes a pkt-line with the payload specified and writes it to
+// the output stream. If several payloads are specified, each of them
+// is written in its own pkt-line.
+func (e *Encoder) Encode(payloads ...[]byte) error {
+ for _, p := range payloads {
+ if err := e.encodeLine(p); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (e *Encoder) encodeLine(p []byte) error {
+ if len(p) > MaxPayloadSize {
+ return ErrPayloadTooLong
+ }
+
+ if bytes.Equal(p, Flush) {
+ if err := e.Flush(); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ n := len(p) + 4
+ if _, err := e.w.Write(asciiHex16(n)); err != nil {
+ return err
+ }
+ if _, err := e.w.Write(p); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// asciiHex16 returns the hexadecimal ascii representation of the 16
+// least significant bits of n. The length of the returned slice will
+// always be 4. Example: if n is 1234 (0x4d2), the return value will be
+// []byte{'0', '4', 'd', '2'}.
+func asciiHex16(n int) []byte {
+ var ret [4]byte
+ ret[0] = byteToASCIIHex(byte(n & 0xf000 >> 12))
+ ret[1] = byteToASCIIHex(byte(n & 0x0f00 >> 8))
+ ret[2] = byteToASCIIHex(byte(n & 0x00f0 >> 4))
+ ret[3] = byteToASCIIHex(byte(n & 0x000f))
+
+ return ret[:]
+}
+
+// byteToASCIIHex turns a byte into its hexadecimal ascii
+// representation. Example: from 11 (0xb) to 'b'.
+func byteToASCIIHex(n byte) byte {
+ if n < 10 {
+ return '0' + n
+ }
+
+ return 'a' - 10 + n
+}
+
+// EncodeString works similarly to Encode, but payloads are specified as strings.
+func (e *Encoder) EncodeString(payloads ...string) error {
+ for _, p := range payloads {
+ if err := e.Encode([]byte(p)); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Encodef encodes a single pkt-line, formatting the payload according
+// to the given format specifier and arguments.
+func (e *Encoder) Encodef(format string, a ...interface{}) error {
+ return e.EncodeString(
+ fmt.Sprintf(format, a...),
+ )
+}
diff --git a/plumbing/format/packp/pktline/encoder_test.go b/plumbing/format/packp/pktline/encoder_test.go
new file mode 100644
index 0000000..cd97593
--- /dev/null
+++ b/plumbing/format/packp/pktline/encoder_test.go
@@ -0,0 +1,249 @@
+package pktline_test
+
+import (
+ "bytes"
+ "os"
+ "strings"
+ "testing"
+
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packp/pktline"
+
+ . "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type SuiteEncoder struct{}
+
+var _ = Suite(&SuiteEncoder{})
+
+func (s *SuiteEncoder) TestFlush(c *C) {
+ var buf bytes.Buffer
+ e := pktline.NewEncoder(&buf)
+
+ err := e.Flush()
+ c.Assert(err, IsNil)
+
+ obtained := buf.Bytes()
+ c.Assert(obtained, DeepEquals, pktline.FlushPkt)
+}
+
+func (s *SuiteEncoder) TestEncode(c *C) {
+ for i, test := range [...]struct {
+ input [][]byte
+ expected []byte
+ }{
+ {
+ input: [][]byte{
+ []byte("hello\n"),
+ },
+ expected: []byte("000ahello\n"),
+ }, {
+ input: [][]byte{
+ []byte("hello\n"),
+ pktline.Flush,
+ },
+ expected: []byte("000ahello\n0000"),
+ }, {
+ input: [][]byte{
+ []byte("hello\n"),
+ []byte("world!\n"),
+ []byte("foo"),
+ },
+ expected: []byte("000ahello\n000bworld!\n0007foo"),
+ }, {
+ input: [][]byte{
+ []byte("hello\n"),
+ pktline.Flush,
+ []byte("world!\n"),
+ []byte("foo"),
+ pktline.Flush,
+ },
+ expected: []byte("000ahello\n0000000bworld!\n0007foo0000"),
+ }, {
+ input: [][]byte{
+ []byte(strings.Repeat("a", pktline.MaxPayloadSize)),
+ },
+ expected: []byte(
+ "fff0" + strings.Repeat("a", pktline.MaxPayloadSize)),
+ }, {
+ input: [][]byte{
+ []byte(strings.Repeat("a", pktline.MaxPayloadSize)),
+ []byte(strings.Repeat("b", pktline.MaxPayloadSize)),
+ },
+ expected: []byte(
+ "fff0" + strings.Repeat("a", pktline.MaxPayloadSize) +
+ "fff0" + strings.Repeat("b", pktline.MaxPayloadSize)),
+ },
+ } {
+ comment := Commentf("input %d = %v\n", i, test.input)
+
+ var buf bytes.Buffer
+ e := pktline.NewEncoder(&buf)
+
+ err := e.Encode(test.input...)
+ c.Assert(err, IsNil, comment)
+
+ c.Assert(buf.Bytes(), DeepEquals, test.expected, comment)
+ }
+}
+
+func (s *SuiteEncoder) TestEncodeErrPayloadTooLong(c *C) {
+ for i, input := range [...][][]byte{
+ {
+ []byte(strings.Repeat("a", pktline.MaxPayloadSize+1)),
+ },
+ {
+ []byte("hello world!"),
+ []byte(strings.Repeat("a", pktline.MaxPayloadSize+1)),
+ },
+ {
+ []byte("hello world!"),
+ []byte(strings.Repeat("a", pktline.MaxPayloadSize+1)),
+ []byte("foo"),
+ },
+ } {
+ comment := Commentf("input %d = %v\n", i, input)
+
+ var buf bytes.Buffer
+ e := pktline.NewEncoder(&buf)
+
+ err := e.Encode(input...)
+ c.Assert(err, Equals, pktline.ErrPayloadTooLong, comment)
+ }
+}
+
+func (s *SuiteEncoder) TestEncodeStrings(c *C) {
+ for i, test := range [...]struct {
+ input []string
+ expected []byte
+ }{
+ {
+ input: []string{
+ "hello\n",
+ },
+ expected: []byte("000ahello\n"),
+ }, {
+ input: []string{
+ "hello\n",
+ pktline.FlushString,
+ },
+ expected: []byte("000ahello\n0000"),
+ }, {
+ input: []string{
+ "hello\n",
+ "world!\n",
+ "foo",
+ },
+ expected: []byte("000ahello\n000bworld!\n0007foo"),
+ }, {
+ input: []string{
+ "hello\n",
+ pktline.FlushString,
+ "world!\n",
+ "foo",
+ pktline.FlushString,
+ },
+ expected: []byte("000ahello\n0000000bworld!\n0007foo0000"),
+ }, {
+ input: []string{
+ strings.Repeat("a", pktline.MaxPayloadSize),
+ },
+ expected: []byte(
+ "fff0" + strings.Repeat("a", pktline.MaxPayloadSize)),
+ }, {
+ input: []string{
+ strings.Repeat("a", pktline.MaxPayloadSize),
+ strings.Repeat("b", pktline.MaxPayloadSize),
+ },
+ expected: []byte(
+ "fff0" + strings.Repeat("a", pktline.MaxPayloadSize) +
+ "fff0" + strings.Repeat("b", pktline.MaxPayloadSize)),
+ },
+ } {
+ comment := Commentf("input %d = %v\n", i, test.input)
+
+ var buf bytes.Buffer
+ e := pktline.NewEncoder(&buf)
+
+ err := e.EncodeString(test.input...)
+ c.Assert(err, IsNil, comment)
+ c.Assert(buf.Bytes(), DeepEquals, test.expected, comment)
+ }
+}
+
+func (s *SuiteEncoder) TestEncodeStringErrPayloadTooLong(c *C) {
+ for i, input := range [...][]string{
+ {
+ strings.Repeat("a", pktline.MaxPayloadSize+1),
+ },
+ {
+ "hello world!",
+ strings.Repeat("a", pktline.MaxPayloadSize+1),
+ },
+ {
+ "hello world!",
+ strings.Repeat("a", pktline.MaxPayloadSize+1),
+ "foo",
+ },
+ } {
+ comment := Commentf("input %d = %v\n", i, input)
+
+ var buf bytes.Buffer
+ e := pktline.NewEncoder(&buf)
+
+ err := e.EncodeString(input...)
+ c.Assert(err, Equals, pktline.ErrPayloadTooLong, comment)
+ }
+}
+
+func (s *SuiteEncoder) TestEncodef(c *C) {
+ format := " %s %d\n"
+ str := "foo"
+ d := 42
+
+ var buf bytes.Buffer
+ e := pktline.NewEncoder(&buf)
+
+ err := e.Encodef(format, str, d)
+ c.Assert(err, IsNil)
+
+ expected := []byte("000c foo 42\n")
+ c.Assert(buf.Bytes(), DeepEquals, expected)
+}
+
+func ExampleEncoder() {
+ // Create an encoder that writes pktlines to stdout.
+ e := pktline.NewEncoder(os.Stdout)
+
+ // Encode some data as a new pkt-line.
+ _ = e.Encode([]byte("data\n")) // error checks removed for brevity
+
+ // Encode a flush-pkt.
+ _ = e.Flush()
+
+ // Encode a couple of byte slices and a flush in one go. Each of
+ // them will end up as payloads of their own pktlines.
+ _ = e.Encode(
+ []byte("hello\n"),
+ []byte("world!\n"),
+ pktline.Flush,
+ )
+
+ // You can also encode strings:
+ _ = e.EncodeString(
+ "foo\n",
+ "bar\n",
+ pktline.FlushString,
+ )
+
+ // You can also format and encode a payload:
+ _ = e.Encodef(" %s %d\n", "foo", 42)
+ // Output:
+ // 0009data
+ // 0000000ahello
+ // 000bworld!
+ // 00000008foo
+ // 0008bar
+ // 0000000c foo 42
+}
diff --git a/plumbing/format/packp/pktline/scanner.go b/plumbing/format/packp/pktline/scanner.go
new file mode 100644
index 0000000..3ce2adf
--- /dev/null
+++ b/plumbing/format/packp/pktline/scanner.go
@@ -0,0 +1,133 @@
+package pktline
+
+import (
+ "errors"
+ "io"
+)
+
+const (
+ lenSize = 4
+)
+
+// ErrInvalidPktLen is returned by Err() when an invalid pkt-len is found.
+var ErrInvalidPktLen = errors.New("invalid pkt-len found")
+
+// Scanner provides a convenient interface for reading the payloads of a
+// series of pkt-lines. It takes an io.Reader providing the source,
+// which then can be tokenized through repeated calls to the Scan
+// method.
+//
+// After each Scan call, the Bytes method will return the payload of the
+// corresponding pkt-line on a shared buffer, which will be 65516 bytes
+// or smaller. Flush pkt-lines are represented by empty byte slices.
+//
+// Scanning stops at EOF or the first I/O error.
+type Scanner struct {
+ r io.Reader // The reader provided by the client
+ err error // Sticky error
+ payload []byte // Last pkt-payload
+ len [lenSize]byte // Last pkt-len
+}
+
+// NewScanner returns a new Scanner to read from r.
+func NewScanner(r io.Reader) *Scanner {
+ return &Scanner{
+ r: r,
+ }
+}
+
+// Err returns the first error encountered by the Scanner.
+func (s *Scanner) Err() error {
+ return s.err
+}
+
+// Scan advances the Scanner to the next pkt-line, whose payload will
+// then be available through the Bytes method. Scanning stops at EOF
+// or the first I/O error. After Scan returns false, the Err method
+// will return any error that occurred during scanning, except that if
+// it was io.EOF, Err will return nil.
+func (s *Scanner) Scan() bool {
+ var l int
+ l, s.err = s.readPayloadLen()
+ if s.err == io.EOF {
+ s.err = nil
+ return false
+ }
+ if s.err != nil {
+ return false
+ }
+
+ if cap(s.payload) < l {
+ s.payload = make([]byte, 0, l)
+ }
+
+ if _, s.err = io.ReadFull(s.r, s.payload[:l]); s.err != nil {
+ return false
+ }
+ s.payload = s.payload[:l]
+
+ return true
+}
+
+// Bytes returns the most recent payload generated by a call to Scan.
+// The underlying array may point to data that will be overwritten by a
+// subsequent call to Scan. It does no allocation.
+func (s *Scanner) Bytes() []byte {
+ return s.payload
+}
+
+// readPayloadLen returns the payload length by reading the pkt-len
+// and subtracting the pkt-len size.
+func (s *Scanner) readPayloadLen() (int, error) {
+ if _, err := io.ReadFull(s.r, s.len[:]); err != nil {
+ if err == io.EOF {
+ return 0, err
+ }
+ return 0, ErrInvalidPktLen
+ }
+
+ n, err := hexDecode(s.len)
+ if err != nil {
+ return 0, err
+ }
+
+ switch {
+ case n == 0:
+ return 0, nil
+ case n <= lenSize:
+ return 0, ErrInvalidPktLen
+ case n > MaxPayloadSize+lenSize:
+ return 0, ErrInvalidPktLen
+ default:
+ return n - lenSize, nil
+ }
+}
+
+// hexDecode turns the hexadecimal representation of a number in a byte
+// slice into a number. It substitutes for strconv.ParseUint(string(buf),
+// 16, 16) and/or hex.Decode, to avoid generating new strings, thus
+// helping the GC.
+func hexDecode(buf [lenSize]byte) (int, error) {
+ var ret int
+ for i := 0; i < lenSize; i++ {
+ n, err := asciiHexToByte(buf[i])
+ if err != nil {
+ return 0, ErrInvalidPktLen
+ }
+ ret = 16*ret + int(n)
+ }
+ return ret, nil
+}
+
+// asciiHexToByte turns the hexadecimal ascii representation of a byte
+// into its numerical value. Example: from 'b' to 11 (0xb).
+func asciiHexToByte(b byte) (byte, error) {
+ switch {
+ case b >= '0' && b <= '9':
+ return b - '0', nil
+ case b >= 'a' && b <= 'f':
+ return b - 'a' + 10, nil
+ default:
+ return 0, ErrInvalidPktLen
+ }
+}
diff --git a/plumbing/format/packp/pktline/scanner_test.go b/plumbing/format/packp/pktline/scanner_test.go
new file mode 100644
index 0000000..c5395cf
--- /dev/null
+++ b/plumbing/format/packp/pktline/scanner_test.go
@@ -0,0 +1,225 @@
+package pktline_test
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packp/pktline"
+
+ . "gopkg.in/check.v1"
+)
+
+type SuiteScanner struct{}
+
+var _ = Suite(&SuiteScanner{})
+
+func (s *SuiteScanner) TestInvalid(c *C) {
+ for _, test := range [...]string{
+ "0001", "0002", "0003", "0004",
+ "0001asdfsadf", "0004foo",
+ "fff1", "fff2",
+ "gorka",
+ "0", "003",
+ " 5a", "5 a", "5 \n",
+ "-001", "-000",
+ } {
+ r := strings.NewReader(test)
+ sc := pktline.NewScanner(r)
+ _ = sc.Scan()
+ c.Assert(sc.Err(), ErrorMatches, pktline.ErrInvalidPktLen.Error(),
+ Commentf("data = %q", test))
+ }
+}
+
+func (s *SuiteScanner) TestEmptyReader(c *C) {
+ r := strings.NewReader("")
+ sc := pktline.NewScanner(r)
+ hasPayload := sc.Scan()
+ c.Assert(hasPayload, Equals, false)
+ c.Assert(sc.Err(), Equals, nil)
+}
+
+func (s *SuiteScanner) TestFlush(c *C) {
+ var buf bytes.Buffer
+ e := pktline.NewEncoder(&buf)
+ err := e.Flush()
+ c.Assert(err, IsNil)
+
+ sc := pktline.NewScanner(&buf)
+ c.Assert(sc.Scan(), Equals, true)
+
+ payload := sc.Bytes()
+ c.Assert(len(payload), Equals, 0)
+}
+
+func (s *SuiteScanner) TestPktLineTooShort(c *C) {
+ r := strings.NewReader("010cfoobar")
+
+ sc := pktline.NewScanner(r)
+
+ c.Assert(sc.Scan(), Equals, false)
+ c.Assert(sc.Err(), ErrorMatches, "unexpected EOF")
+}
+
+func (s *SuiteScanner) TestScanAndPayload(c *C) {
+ for _, test := range [...]string{
+ "a",
+ "a\n",
+ strings.Repeat("a", 100),
+ strings.Repeat("a", 100) + "\n",
+ strings.Repeat("\x00", 100),
+ strings.Repeat("\x00", 100) + "\n",
+ strings.Repeat("a", pktline.MaxPayloadSize),
+ strings.Repeat("a", pktline.MaxPayloadSize-1) + "\n",
+ } {
+ var buf bytes.Buffer
+ e := pktline.NewEncoder(&buf)
+ err := e.EncodeString(test)
+ c.Assert(err, IsNil,
+ Commentf("input len=%x, contents=%.10q\n", len(test), test))
+
+ sc := pktline.NewScanner(&buf)
+ c.Assert(sc.Scan(), Equals, true,
+ Commentf("test = %.20q...", test))
+
+ obtained := sc.Bytes()
+ c.Assert(obtained, DeepEquals, []byte(test),
+ Commentf("in = %.20q out = %.20q", test, string(obtained)))
+ }
+}
+
+func (s *SuiteScanner) TestSkip(c *C) {
+ for _, test := range [...]struct {
+ input []string
+ n int
+ expected []byte
+ }{
+ {
+ input: []string{
+ "first",
+ "second",
+ "third"},
+ n: 1,
+ expected: []byte("second"),
+ },
+ {
+ input: []string{
+ "first",
+ "second",
+ "third"},
+ n: 2,
+ expected: []byte("third"),
+ },
+ } {
+ var buf bytes.Buffer
+ e := pktline.NewEncoder(&buf)
+ err := e.EncodeString(test.input...)
+ c.Assert(err, IsNil)
+
+ sc := pktline.NewScanner(&buf)
+ for i := 0; i < test.n; i++ {
+ c.Assert(sc.Scan(), Equals, true,
+ Commentf("scan error = %s", sc.Err()))
+ }
+ c.Assert(sc.Scan(), Equals, true,
+ Commentf("scan error = %s", sc.Err()))
+
+ obtained := sc.Bytes()
+ c.Assert(obtained, DeepEquals, test.expected,
+ Commentf("\nin = %.20q\nout = %.20q\nexp = %.20q",
+ test.input, obtained, test.expected))
+ }
+}
+
+func (s *SuiteScanner) TestEOF(c *C) {
+ var buf bytes.Buffer
+ e := pktline.NewEncoder(&buf)
+ err := e.EncodeString("first", "second")
+ c.Assert(err, IsNil)
+
+ sc := pktline.NewScanner(&buf)
+ for sc.Scan() {
+ }
+ c.Assert(sc.Err(), IsNil)
+}
+
+// A section is several non-flush-pkt lines followed by a flush-pkt, which
+// is how the git protocol sends long messages.
+func (s *SuiteScanner) TestReadSomeSections(c *C) {
+ nSections := 2
+ nLines := 4
+ data := sectionsExample(c, nSections, nLines)
+ sc := pktline.NewScanner(data)
+
+ sectionCounter := 0
+ lineCounter := 0
+ for sc.Scan() {
+ if len(sc.Bytes()) == 0 {
+ sectionCounter++
+ }
+ lineCounter++
+ }
+ c.Assert(sc.Err(), IsNil)
+ c.Assert(sectionCounter, Equals, nSections)
+ c.Assert(lineCounter, Equals, (1+nLines)*nSections)
+}
+
+// sectionsExample returns nSections sections, each of them with nLines
+// pkt-lines (not counting the flush-pkt):
+//
+// 0009 0.0\n
+// 0009 0.1\n
+// ...
+// 0000
+// and so on
+func sectionsExample(c *C, nSections, nLines int) io.Reader {
+ var buf bytes.Buffer
+ e := pktline.NewEncoder(&buf)
+
+ for section := 0; section < nSections; section++ {
+ ss := []string{}
+ for line := 0; line < nLines; line++ {
+ line := fmt.Sprintf(" %d.%d\n", section, line)
+ ss = append(ss, line)
+ }
+ err := e.EncodeString(ss...)
+ c.Assert(err, IsNil)
+ err = e.Flush()
+ c.Assert(err, IsNil)
+ }
+
+ return &buf
+}
+
+func ExampleScanner() {
+ // A reader is needed as input.
+ input := strings.NewReader("000ahello\n" +
+ "000bworld!\n" +
+ "0000",
+ )
+
+ // Create the scanner...
+ s := pktline.NewScanner(input)
+
+ // and scan every pkt-line found in the input.
+ for s.Scan() {
+ payload := s.Bytes()
+ if len(payload) == 0 { // zero sized payloads correspond to flush-pkts.
+ fmt.Println("FLUSH-PKT DETECTED\n")
+ } else { // otherwise, you will be able to access the full payload.
+ fmt.Printf("PAYLOAD = %q\n", string(payload))
+ }
+ }
+
+ // this will catch any error when reading from the input, if any.
+ if s.Err() != nil {
+ fmt.Println(s.Err())
+ }
+
+ // Output:
+ // PAYLOAD = "hello\n"
+ // PAYLOAD = "world!\n"
+ // FLUSH-PKT DETECTED
+}
diff --git a/plumbing/format/packp/ulreq/decoder.go b/plumbing/format/packp/ulreq/decoder.go
new file mode 100644
index 0000000..9083e04
--- /dev/null
+++ b/plumbing/format/packp/ulreq/decoder.go
@@ -0,0 +1,287 @@
+package ulreq
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "strconv"
+ "time"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packp/pktline"
+)
+
+const (
+ hashSize = 40
+)
+
+var (
+ eol = []byte("\n")
+ sp = []byte(" ")
+ want = []byte("want ")
+ shallow = []byte("shallow ")
+ deepen = []byte("deepen")
+ deepenCommits = []byte("deepen ")
+ deepenSince = []byte("deepen-since ")
+ deepenReference = []byte("deepen-not ")
+)
+
+// A Decoder reads and decodes upload-request values from an input stream.
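+//
+// A minimal decoding round trip, as a sketch (New and the UlReq type
+// are defined elsewhere in this package; r is some io.Reader holding
+// an encoded upload-request):
+//
+//	req := New()
+//	if err := NewDecoder(r).Decode(req); err != nil {
+//		// the request was malformed
+//	}
+//	_ = req.Wants // the hashes the client asked for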
+type Decoder struct {
+ s *pktline.Scanner // a pkt-line scanner from the input stream
+	line  []byte           // current pkt-line contents, use nextLine() to make it advance
+	nLine int              // current pkt-line number for debugging, begins at 1
+	err   error            // sticky error, use the error() method to fill this out
+ data *UlReq // parsed data is stored here
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// Will not read more data from r than necessary.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{
+ s: pktline.NewScanner(r),
+ }
+}
+
+// Decode reads the next upload-request from its input and
+// stores it in the value pointed to by v.
+func (d *Decoder) Decode(v *UlReq) error {
+ d.data = v
+
+ for state := decodeFirstWant; state != nil; {
+ state = state(d)
+ }
+
+ return d.err
+}
+
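+// decoderStateFn is one state of the decoding state machine: each state
+// consumes a piece of the request and returns the next state, or nil to
+// stop, either on success or after filling out the sticky error.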
+type decoderStateFn func(*Decoder) decoderStateFn
+
+// error fills out the decoder's sticky error.
+func (d *Decoder) error(format string, a ...interface{}) {
+ d.err = fmt.Errorf("pkt-line %d: %s", d.nLine,
+ fmt.Sprintf(format, a...))
+}
+
+// nextLine reads a new pkt-line from the scanner, makes its payload
+// available as d.line and increments d.nLine. A successful invocation
+// returns true; otherwise, false is returned and the sticky error is
+// filled out accordingly. Trims eols at the end of the payloads.
+func (d *Decoder) nextLine() bool {
+ d.nLine++
+
+ if !d.s.Scan() {
+ if d.err = d.s.Err(); d.err != nil {
+ return false
+ }
+
+ d.error("EOF")
+ return false
+ }
+
+ d.line = d.s.Bytes()
+ d.line = bytes.TrimSuffix(d.line, eol)
+
+ return true
+}
+
+// Expected format: want <hash>[ capabilities]
+func decodeFirstWant(d *Decoder) decoderStateFn {
+ if ok := d.nextLine(); !ok {
+ return nil
+ }
+
+ if !bytes.HasPrefix(d.line, want) {
+ d.error("missing 'want ' prefix")
+ return nil
+ }
+ d.line = bytes.TrimPrefix(d.line, want)
+
+ hash, ok := d.readHash()
+ if !ok {
+ return nil
+ }
+ d.data.Wants = append(d.data.Wants, hash)
+
+ return decodeCaps
+}
+
+func (d *Decoder) readHash() (plumbing.Hash, bool) {
+ if len(d.line) < hashSize {
+ d.err = fmt.Errorf("malformed hash: %v", d.line)
+ return plumbing.ZeroHash, false
+ }
+
+ var hash plumbing.Hash
+ if _, err := hex.Decode(hash[:], d.line[:hashSize]); err != nil {
+ d.error("invalid hash text: %s", err)
+ return plumbing.ZeroHash, false
+ }
+ d.line = d.line[hashSize:]
+
+ return hash, true
+}
+
+// Expected format: sp cap1 sp cap2 sp cap3...
+func decodeCaps(d *Decoder) decoderStateFn {
+ if len(d.line) == 0 {
+ return decodeOtherWants
+ }
+
+ d.line = bytes.TrimPrefix(d.line, sp)
+
+ for _, c := range bytes.Split(d.line, sp) {
+ name, values := readCapability(c)
+ d.data.Capabilities.Add(name, values...)
+ }
+
+ return decodeOtherWants
+}
+
+// Capabilities are a single token or a name=value pair.
+// Even though we are only going to read at most one value, we return
+// a slice of values, as Capabilities.Add receives that.
+func readCapability(data []byte) (name string, values []string) {
+ pair := bytes.SplitN(data, []byte{'='}, 2)
+ if len(pair) == 2 {
+ values = append(values, string(pair[1]))
+ }
+
+ return string(pair[0]), values
+}
+
+// Expected format: want <hash>
+func decodeOtherWants(d *Decoder) decoderStateFn {
+ if ok := d.nextLine(); !ok {
+ return nil
+ }
+
+ if bytes.HasPrefix(d.line, shallow) {
+ return decodeShallow
+ }
+
+ if bytes.HasPrefix(d.line, deepen) {
+ return decodeDeepen
+ }
+
+ if len(d.line) == 0 {
+ return nil
+ }
+
+ if !bytes.HasPrefix(d.line, want) {
+ d.error("unexpected payload while expecting a want: %q", d.line)
+ return nil
+ }
+ d.line = bytes.TrimPrefix(d.line, want)
+
+ hash, ok := d.readHash()
+ if !ok {
+ return nil
+ }
+ d.data.Wants = append(d.data.Wants, hash)
+
+ return decodeOtherWants
+}
+
+// Expected format: shallow <hash>
+func decodeShallow(d *Decoder) decoderStateFn {
+ if bytes.HasPrefix(d.line, deepen) {
+ return decodeDeepen
+ }
+
+ if len(d.line) == 0 {
+ return nil
+ }
+
+ if !bytes.HasPrefix(d.line, shallow) {
+ d.error("unexpected payload while expecting a shallow: %q", d.line)
+ return nil
+ }
+ d.line = bytes.TrimPrefix(d.line, shallow)
+
+ hash, ok := d.readHash()
+ if !ok {
+ return nil
+ }
+ d.data.Shallows = append(d.data.Shallows, hash)
+
+ if ok := d.nextLine(); !ok {
+ return nil
+ }
+
+ return decodeShallow
+}
+
+// Expected format: deepen <n> / deepen-since <unix-timestamp> / deepen-not <ref>
+func decodeDeepen(d *Decoder) decoderStateFn {
+ if bytes.HasPrefix(d.line, deepenCommits) {
+ return decodeDeepenCommits
+ }
+
+ if bytes.HasPrefix(d.line, deepenSince) {
+ return decodeDeepenSince
+ }
+
+ if bytes.HasPrefix(d.line, deepenReference) {
+ return decodeDeepenReference
+ }
+
+ if len(d.line) == 0 {
+ return nil
+ }
+
+ d.error("unexpected deepen specification: %q", d.line)
+ return nil
+}
+
+func decodeDeepenCommits(d *Decoder) decoderStateFn {
+ d.line = bytes.TrimPrefix(d.line, deepenCommits)
+
+ var n int
+ if n, d.err = strconv.Atoi(string(d.line)); d.err != nil {
+ return nil
+ }
+ if n < 0 {
+ d.err = fmt.Errorf("negative depth")
+ return nil
+ }
+ d.data.Depth = DepthCommits(n)
+
+ return decodeFlush
+}
+
+func decodeDeepenSince(d *Decoder) decoderStateFn {
+ d.line = bytes.TrimPrefix(d.line, deepenSince)
+
+ var secs int64
+ secs, d.err = strconv.ParseInt(string(d.line), 10, 64)
+ if d.err != nil {
+ return nil
+ }
+ t := time.Unix(secs, 0).UTC()
+ d.data.Depth = DepthSince(t)
+
+ return decodeFlush
+}
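
The deepen-since payload is a Unix timestamp in seconds; a tiny sketch of the conversion used above:

package main

import (
	"fmt"
	"time"
)

func main() {
	secs := int64(1420167845) // as carried by "deepen-since 1420167845"
	t := time.Unix(secs, 0).UTC()
	fmt.Println(t) // 2015-01-02 03:04:05 +0000 UTC
}
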
+
+func decodeDeepenReference(d *Decoder) decoderStateFn {
+ d.line = bytes.TrimPrefix(d.line, deepenReference)
+
+ d.data.Depth = DepthReference(string(d.line))
+
+ return decodeFlush
+}
+
+func decodeFlush(d *Decoder) decoderStateFn {
+ if ok := d.nextLine(); !ok {
+ return nil
+ }
+
+ if len(d.line) != 0 {
+ d.err = fmt.Errorf("unexpected payload while expecting a flush-pkt: %q", d.line)
+ }
+
+ return nil
+}
diff --git a/plumbing/format/packp/ulreq/decoder_test.go b/plumbing/format/packp/ulreq/decoder_test.go
new file mode 100644
index 0000000..01e4f90
--- /dev/null
+++ b/plumbing/format/packp/ulreq/decoder_test.go
@@ -0,0 +1,541 @@
+package ulreq
+
+import (
+ "bytes"
+ "io"
+ "sort"
+ "time"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packp/pktline"
+
+ . "gopkg.in/check.v1"
+)
+
+type SuiteDecoder struct{}
+
+var _ = Suite(&SuiteDecoder{})
+
+func (s *SuiteDecoder) TestEmpty(c *C) {
+ ur := New()
+ var buf bytes.Buffer
+ d := NewDecoder(&buf)
+
+ err := d.Decode(ur)
+ c.Assert(err, ErrorMatches, "pkt-line 1: EOF")
+}
+
+func (s *SuiteDecoder) TestNoWant(c *C) {
+ payloads := []string{
+ "foobar",
+ pktline.FlushString,
+ }
+ r := toPktLines(c, payloads)
+ testDecoderErrorMatches(c, r, ".*missing 'want '.*")
+}
+
+func toPktLines(c *C, payloads []string) io.Reader {
+ var buf bytes.Buffer
+ e := pktline.NewEncoder(&buf)
+ err := e.EncodeString(payloads...)
+ c.Assert(err, IsNil)
+
+ return &buf
+}
+
+func testDecoderErrorMatches(c *C, input io.Reader, pattern string) {
+ ur := New()
+ d := NewDecoder(input)
+
+ err := d.Decode(ur)
+ c.Assert(err, ErrorMatches, pattern)
+}
+
+func (s *SuiteDecoder) TestInvalidFirstHash(c *C) {
+ payloads := []string{
+ "want 6ecf0ef2c2dffb796alberto2219af86ec6584e5\n",
+ pktline.FlushString,
+ }
+ r := toPktLines(c, payloads)
+ testDecoderErrorMatches(c, r, ".*invalid hash.*")
+}
+
+func (s *SuiteDecoder) TestWantOK(c *C) {
+ payloads := []string{
+ "want 1111111111111111111111111111111111111111",
+ pktline.FlushString,
+ }
+ ur := testDecodeOK(c, payloads)
+
+ c.Assert(ur.Wants, DeepEquals, []plumbing.Hash{
+ plumbing.NewHash("1111111111111111111111111111111111111111"),
+ })
+}
+
+func testDecodeOK(c *C, payloads []string) *UlReq {
+ var buf bytes.Buffer
+ e := pktline.NewEncoder(&buf)
+ err := e.EncodeString(payloads...)
+ c.Assert(err, IsNil)
+
+ ur := New()
+ d := NewDecoder(&buf)
+
+ err = d.Decode(ur)
+ c.Assert(err, IsNil)
+
+ return ur
+}
+
+func (s *SuiteDecoder) TestWantWithCapabilities(c *C) {
+ payloads := []string{
+ "want 1111111111111111111111111111111111111111 ofs-delta multi_ack",
+ pktline.FlushString,
+ }
+ ur := testDecodeOK(c, payloads)
+ c.Assert(ur.Wants, DeepEquals, []plumbing.Hash{
+ plumbing.NewHash("1111111111111111111111111111111111111111")})
+
+ c.Assert(ur.Capabilities.Supports("ofs-delta"), Equals, true)
+ c.Assert(ur.Capabilities.Supports("multi_ack"), Equals, true)
+}
+
+func (s *SuiteDecoder) TestManyWantsNoCapabilities(c *C) {
+ payloads := []string{
+ "want 3333333333333333333333333333333333333333",
+ "want 4444444444444444444444444444444444444444",
+ "want 1111111111111111111111111111111111111111",
+ "want 2222222222222222222222222222222222222222",
+ pktline.FlushString,
+ }
+ ur := testDecodeOK(c, payloads)
+
+ expected := []plumbing.Hash{
+ plumbing.NewHash("1111111111111111111111111111111111111111"),
+ plumbing.NewHash("2222222222222222222222222222222222222222"),
+ plumbing.NewHash("3333333333333333333333333333333333333333"),
+ plumbing.NewHash("4444444444444444444444444444444444444444"),
+ }
+
+ sort.Sort(byHash(ur.Wants))
+ sort.Sort(byHash(expected))
+ c.Assert(ur.Wants, DeepEquals, expected)
+}
+
+type byHash []plumbing.Hash
+
+func (a byHash) Len() int { return len(a) }
+func (a byHash) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byHash) Less(i, j int) bool {
+ ii := [20]byte(a[i])
+ jj := [20]byte(a[j])
+ return bytes.Compare(ii[:], jj[:]) < 0
+}
+
+func (s *SuiteDecoder) TestManyWantsBadWant(c *C) {
+ payloads := []string{
+ "want 3333333333333333333333333333333333333333",
+ "want 4444444444444444444444444444444444444444",
+ "foo",
+ "want 2222222222222222222222222222222222222222",
+ pktline.FlushString,
+ }
+ r := toPktLines(c, payloads)
+ testDecoderErrorMatches(c, r, ".*unexpected payload.*")
+}
+
+func (s *SuiteDecoder) TestManyWantsInvalidHash(c *C) {
+ payloads := []string{
+ "want 3333333333333333333333333333333333333333",
+ "want 4444444444444444444444444444444444444444",
+ "want 1234567890abcdef",
+ "want 2222222222222222222222222222222222222222",
+ pktline.FlushString,
+ }
+ r := toPktLines(c, payloads)
+ testDecoderErrorMatches(c, r, ".*malformed hash.*")
+}
+
+func (s *SuiteDecoder) TestManyWantsWithCapabilities(c *C) {
+ payloads := []string{
+ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
+ "want 4444444444444444444444444444444444444444",
+ "want 1111111111111111111111111111111111111111",
+ "want 2222222222222222222222222222222222222222",
+ pktline.FlushString,
+ }
+ ur := testDecodeOK(c, payloads)
+
+ expected := []plumbing.Hash{
+ plumbing.NewHash("1111111111111111111111111111111111111111"),
+ plumbing.NewHash("2222222222222222222222222222222222222222"),
+ plumbing.NewHash("3333333333333333333333333333333333333333"),
+ plumbing.NewHash("4444444444444444444444444444444444444444"),
+ }
+
+ sort.Sort(byHash(ur.Wants))
+ sort.Sort(byHash(expected))
+ c.Assert(ur.Wants, DeepEquals, expected)
+
+ c.Assert(ur.Capabilities.Supports("ofs-delta"), Equals, true)
+ c.Assert(ur.Capabilities.Supports("multi_ack"), Equals, true)
+}
+
+func (s *SuiteDecoder) TestSingleShallowSingleWant(c *C) {
+ payloads := []string{
+ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
+ "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ pktline.FlushString,
+ }
+ ur := testDecodeOK(c, payloads)
+
+ expectedWants := []plumbing.Hash{
+ plumbing.NewHash("3333333333333333333333333333333333333333"),
+ }
+
+ expectedShallows := []plumbing.Hash{
+ plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
+ }
+
+ c.Assert(ur.Wants, DeepEquals, expectedWants)
+ c.Assert(ur.Capabilities.Supports("ofs-delta"), Equals, true)
+ c.Assert(ur.Capabilities.Supports("multi_ack"), Equals, true)
+
+ c.Assert(ur.Shallows, DeepEquals, expectedShallows)
+}
+
+func (s *SuiteDecoder) TestSingleShallowManyWants(c *C) {
+ payloads := []string{
+ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
+ "want 4444444444444444444444444444444444444444",
+ "want 1111111111111111111111111111111111111111",
+ "want 2222222222222222222222222222222222222222",
+ "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ pktline.FlushString,
+ }
+ ur := testDecodeOK(c, payloads)
+
+ expectedWants := []plumbing.Hash{
+ plumbing.NewHash("1111111111111111111111111111111111111111"),
+ plumbing.NewHash("2222222222222222222222222222222222222222"),
+ plumbing.NewHash("3333333333333333333333333333333333333333"),
+ plumbing.NewHash("4444444444444444444444444444444444444444"),
+ }
+ sort.Sort(byHash(expectedWants))
+
+ expectedShallows := []plumbing.Hash{
+ plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
+ }
+
+ sort.Sort(byHash(ur.Wants))
+ c.Assert(ur.Wants, DeepEquals, expectedWants)
+ c.Assert(ur.Capabilities.Supports("ofs-delta"), Equals, true)
+ c.Assert(ur.Capabilities.Supports("multi_ack"), Equals, true)
+
+ c.Assert(ur.Shallows, DeepEquals, expectedShallows)
+}
+
+func (s *SuiteDecoder) TestManyShallowSingleWant(c *C) {
+ payloads := []string{
+ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
+ "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ "shallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
+ "shallow cccccccccccccccccccccccccccccccccccccccc",
+ "shallow dddddddddddddddddddddddddddddddddddddddd",
+ pktline.FlushString,
+ }
+ ur := testDecodeOK(c, payloads)
+
+ expectedWants := []plumbing.Hash{
+ plumbing.NewHash("3333333333333333333333333333333333333333"),
+ }
+
+ expectedShallows := []plumbing.Hash{
+ plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
+ plumbing.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"),
+ plumbing.NewHash("cccccccccccccccccccccccccccccccccccccccc"),
+ plumbing.NewHash("dddddddddddddddddddddddddddddddddddddddd"),
+ }
+ sort.Sort(byHash(expectedShallows))
+
+ c.Assert(ur.Wants, DeepEquals, expectedWants)
+ c.Assert(ur.Capabilities.Supports("ofs-delta"), Equals, true)
+ c.Assert(ur.Capabilities.Supports("multi_ack"), Equals, true)
+
+ sort.Sort(byHash(ur.Shallows))
+ c.Assert(ur.Shallows, DeepEquals, expectedShallows)
+}
+
+func (s *SuiteDecoder) TestManyShallowManyWants(c *C) {
+ payloads := []string{
+ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
+ "want 4444444444444444444444444444444444444444",
+ "want 1111111111111111111111111111111111111111",
+ "want 2222222222222222222222222222222222222222",
+ "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ "shallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
+ "shallow cccccccccccccccccccccccccccccccccccccccc",
+ "shallow dddddddddddddddddddddddddddddddddddddddd",
+ pktline.FlushString,
+ }
+ ur := testDecodeOK(c, payloads)
+
+ expectedWants := []plumbing.Hash{
+ plumbing.NewHash("1111111111111111111111111111111111111111"),
+ plumbing.NewHash("2222222222222222222222222222222222222222"),
+ plumbing.NewHash("3333333333333333333333333333333333333333"),
+ plumbing.NewHash("4444444444444444444444444444444444444444"),
+ }
+ sort.Sort(byHash(expectedWants))
+
+ expectedShallows := []plumbing.Hash{
+ plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
+ plumbing.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"),
+ plumbing.NewHash("cccccccccccccccccccccccccccccccccccccccc"),
+ plumbing.NewHash("dddddddddddddddddddddddddddddddddddddddd"),
+ }
+ sort.Sort(byHash(expectedShallows))
+
+ sort.Sort(byHash(ur.Wants))
+ c.Assert(ur.Wants, DeepEquals, expectedWants)
+ c.Assert(ur.Capabilities.Supports("ofs-delta"), Equals, true)
+ c.Assert(ur.Capabilities.Supports("multi_ack"), Equals, true)
+
+ sort.Sort(byHash(ur.Shallows))
+ c.Assert(ur.Shallows, DeepEquals, expectedShallows)
+}
+
+func (s *SuiteDecoder) TestMalformedShallow(c *C) {
+ payloads := []string{
+ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
+ "shalow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ pktline.FlushString,
+ }
+ r := toPktLines(c, payloads)
+ testDecoderErrorMatches(c, r, ".*unexpected payload.*")
+}
+
+func (s *SuiteDecoder) TestMalformedShallowHash(c *C) {
+ payloads := []string{
+ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
+ "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ pktline.FlushString,
+ }
+ r := toPktLines(c, payloads)
+ testDecoderErrorMatches(c, r, ".*malformed hash.*")
+}
+
+func (s *SuiteDecoder) TestMalformedShallowManyShallows(c *C) {
+ payloads := []string{
+ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
+ "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ "shalow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
+ "shallow cccccccccccccccccccccccccccccccccccccccc",
+ pktline.FlushString,
+ }
+ r := toPktLines(c, payloads)
+ testDecoderErrorMatches(c, r, ".*unexpected payload.*")
+}
+
+func (s *SuiteDecoder) TestMalformedDeepenSpec(c *C) {
+ payloads := []string{
+ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
+ "deepen-foo 34",
+ pktline.FlushString,
+ }
+ r := toPktLines(c, payloads)
+ testDecoderErrorMatches(c, r, ".*unexpected deepen.*")
+}
+
+func (s *SuiteDecoder) TestMalformedDeepenSingleWant(c *C) {
+ payloads := []string{
+ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
+ "depth 32",
+ pktline.FlushString,
+ }
+ r := toPktLines(c, payloads)
+ testDecoderErrorMatches(c, r, ".*unexpected payload.*")
+}
+
+func (s *SuiteDecoder) TestMalformedDeepenMultiWant(c *C) {
+ payloads := []string{
+ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
+ "want 2222222222222222222222222222222222222222",
+ "depth 32",
+ pktline.FlushString,
+ }
+ r := toPktLines(c, payloads)
+ testDecoderErrorMatches(c, r, ".*unexpected payload.*")
+}
+
+func (s *SuiteDecoder) TestMalformedDeepenWithSingleShallow(c *C) {
+ payloads := []string{
+ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
+ "shallow 2222222222222222222222222222222222222222",
+ "depth 32",
+ pktline.FlushString,
+ }
+ r := toPktLines(c, payloads)
+ testDecoderErrorMatches(c, r, ".*unexpected payload.*")
+}
+
+func (s *SuiteDecoder) TestMalformedDeepenWithMultiShallow(c *C) {
+ payloads := []string{
+ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
+ "shallow 2222222222222222222222222222222222222222",
+ "shallow 5555555555555555555555555555555555555555",
+ "depth 32",
+ pktline.FlushString,
+ }
+ r := toPktLines(c, payloads)
+ testDecoderErrorMatches(c, r, ".*unexpected payload.*")
+}
+
+func (s *SuiteDecoder) TestDeepenCommits(c *C) {
+ payloads := []string{
+ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
+ "deepen 1234",
+ pktline.FlushString,
+ }
+ ur := testDecodeOK(c, payloads)
+
+ c.Assert(ur.Depth, FitsTypeOf, DepthCommits(0))
+ commits, ok := ur.Depth.(DepthCommits)
+ c.Assert(ok, Equals, true)
+ c.Assert(int(commits), Equals, 1234)
+}
+
+func (s *SuiteDecoder) TestDeepenCommitsInfiniteExplicit(c *C) {
+ payloads := []string{
+ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
+ "deepen 0",
+ pktline.FlushString,
+ }
+ ur := testDecodeOK(c, payloads)
+
+ c.Assert(ur.Depth, FitsTypeOf, DepthCommits(0))
+ commits, ok := ur.Depth.(DepthCommits)
+ c.Assert(ok, Equals, true)
+ c.Assert(int(commits), Equals, 0)
+}
+
+func (s *SuiteDecoder) TestDeepenCommitsInfiniteImplicit(c *C) {
+ payloads := []string{
+ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
+ pktline.FlushString,
+ }
+ ur := testDecodeOK(c, payloads)
+
+ c.Assert(ur.Depth, FitsTypeOf, DepthCommits(0))
+ commits, ok := ur.Depth.(DepthCommits)
+ c.Assert(ok, Equals, true)
+ c.Assert(int(commits), Equals, 0)
+}
+
+func (s *SuiteDecoder) TestMalformedDeepenCommits(c *C) {
+ payloads := []string{
+ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
+ "deepen -32",
+ pktline.FlushString,
+ }
+ r := toPktLines(c, payloads)
+ testDecoderErrorMatches(c, r, ".*negative depth.*")
+}
+
+func (s *SuiteDecoder) TestDeepenCommitsEmpty(c *C) {
+ payloads := []string{
+ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
+ "deepen ",
+ pktline.FlushString,
+ }
+ r := toPktLines(c, payloads)
+ testDecoderErrorMatches(c, r, ".*invalid syntax.*")
+}
+
+func (s *SuiteDecoder) TestDeepenSince(c *C) {
+ payloads := []string{
+ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
+ "deepen-since 1420167845", // 2015-01-02T03:04:05+00:00
+ pktline.FlushString,
+ }
+ ur := testDecodeOK(c, payloads)
+
+ expected := time.Date(2015, time.January, 2, 3, 4, 5, 0, time.UTC)
+
+ c.Assert(ur.Depth, FitsTypeOf, DepthSince(time.Now()))
+ since, ok := ur.Depth.(DepthSince)
+ c.Assert(ok, Equals, true)
+ c.Assert(time.Time(since).Equal(expected), Equals, true,
+ Commentf("obtained=%s\nexpected=%s", time.Time(since), expected))
+}
+
+func (s *SuiteDecoder) TestDeepenReference(c *C) {
+ payloads := []string{
+ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
+ "deepen-not refs/heads/master",
+ pktline.FlushString,
+ }
+ ur := testDecodeOK(c, payloads)
+
+ expected := "refs/heads/master"
+
+ c.Assert(ur.Depth, FitsTypeOf, DepthReference(""))
+ reference, ok := ur.Depth.(DepthReference)
+ c.Assert(ok, Equals, true)
+ c.Assert(string(reference), Equals, expected)
+}
+
+func (s *SuiteDecoder) TestAll(c *C) {
+ payloads := []string{
+ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
+ "want 4444444444444444444444444444444444444444",
+ "want 1111111111111111111111111111111111111111",
+ "want 2222222222222222222222222222222222222222",
+ "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ "shallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
+ "shallow cccccccccccccccccccccccccccccccccccccccc",
+ "shallow dddddddddddddddddddddddddddddddddddddddd",
+ "deepen 1234",
+ pktline.FlushString,
+ }
+ ur := testDecodeOK(c, payloads)
+
+ expectedWants := []plumbing.Hash{
+ plumbing.NewHash("1111111111111111111111111111111111111111"),
+ plumbing.NewHash("2222222222222222222222222222222222222222"),
+ plumbing.NewHash("3333333333333333333333333333333333333333"),
+ plumbing.NewHash("4444444444444444444444444444444444444444"),
+ }
+ sort.Sort(byHash(expectedWants))
+ sort.Sort(byHash(ur.Wants))
+ c.Assert(ur.Wants, DeepEquals, expectedWants)
+
+ c.Assert(ur.Capabilities.Supports("ofs-delta"), Equals, true)
+ c.Assert(ur.Capabilities.Supports("multi_ack"), Equals, true)
+
+ expectedShallows := []plumbing.Hash{
+ plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
+ plumbing.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"),
+ plumbing.NewHash("cccccccccccccccccccccccccccccccccccccccc"),
+ plumbing.NewHash("dddddddddddddddddddddddddddddddddddddddd"),
+ }
+ sort.Sort(byHash(expectedShallows))
+ sort.Sort(byHash(ur.Shallows))
+ c.Assert(ur.Shallows, DeepEquals, expectedShallows)
+
+ c.Assert(ur.Depth, FitsTypeOf, DepthCommits(0))
+ commits, ok := ur.Depth.(DepthCommits)
+ c.Assert(ok, Equals, true)
+ c.Assert(int(commits), Equals, 1234)
+}
+
+func (s *SuiteDecoder) TestExtraData(c *C) {
+ payloads := []string{
+ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack",
+ "deepen 32",
+ "foo",
+ pktline.FlushString,
+ }
+ r := toPktLines(c, payloads)
+ testDecoderErrorMatches(c, r, ".*unexpected payload.*")
+}
diff --git a/plumbing/format/packp/ulreq/encoder.go b/plumbing/format/packp/ulreq/encoder.go
new file mode 100644
index 0000000..1264e0e
--- /dev/null
+++ b/plumbing/format/packp/ulreq/encoder.go
@@ -0,0 +1,140 @@
+package ulreq
+
+import (
+ "fmt"
+ "io"
+ "sort"
+ "time"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packp/pktline"
+)
+
+// An Encoder writes UlReq values to an output stream.
+type Encoder struct {
+ pe *pktline.Encoder // where to write the encoded data
+ data *UlReq // the data to encode
+ sortedWants []string
+ err error // sticky error
+}
+
+// NewEncoder returns a new encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ pe: pktline.NewEncoder(w),
+ }
+}
+
+// Encode writes the UlReq encoding of v to the stream.
+//
+// All the payloads will end with a newline character. Wants and
+// shallows are sorted alphabetically. A depth of 0 means no depth
+// request is sent.
+func (e *Encoder) Encode(v *UlReq) error {
+ if len(v.Wants) == 0 {
+ return fmt.Errorf("empty wants provided")
+ }
+
+ e.data = v
+ e.sortedWants = sortHashes(v.Wants)
+
+ for state := encodeFirstWant; state != nil; {
+ state = state(e)
+ }
+
+ return e.err
+}
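
A minimal usage sketch of the encoder (hashes illustrative); note how the wants come out sorted and the stream ends with a flush-pkt:

package main

import (
	"bytes"
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/format/packp/ulreq"
)

func main() {
	ur := ulreq.New()
	ur.Wants = append(ur.Wants,
		plumbing.NewHash("2222222222222222222222222222222222222222"),
		plumbing.NewHash("1111111111111111111111111111111111111111"),
	)

	var buf bytes.Buffer
	if err := ulreq.NewEncoder(&buf).Encode(ur); err != nil {
		fmt.Println("encode error:", err)
		return
	}

	// Wants are emitted in sorted order, one pkt-line each, plus "0000".
	fmt.Printf("%q\n", buf.String())
}
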
+
+type encoderStateFn func(*Encoder) encoderStateFn
+
+func sortHashes(list []plumbing.Hash) []string {
+ sorted := make([]string, len(list))
+ for i, hash := range list {
+ sorted[i] = hash.String()
+ }
+ sort.Strings(sorted)
+
+ return sorted
+}
+
+func encodeFirstWant(e *Encoder) encoderStateFn {
+ var err error
+ if e.data.Capabilities.IsEmpty() {
+ err = e.pe.Encodef("want %s\n", e.sortedWants[0])
+ } else {
+ e.data.Capabilities.Sort()
+ err = e.pe.Encodef(
+ "want %s %s\n",
+ e.sortedWants[0],
+ e.data.Capabilities.String(),
+ )
+ }
+ if err != nil {
+ e.err = fmt.Errorf("encoding first want line: %s", err)
+ return nil
+ }
+
+ return encodeAdditionalWants
+}
+
+func encodeAdditionalWants(e *Encoder) encoderStateFn {
+ for _, w := range e.sortedWants[1:] {
+ if err := e.pe.Encodef("want %s\n", w); err != nil {
+ e.err = fmt.Errorf("encoding want %q: %s", w, err)
+ return nil
+ }
+ }
+
+ return encodeShallows
+}
+
+func encodeShallows(e *Encoder) encoderStateFn {
+ sorted := sortHashes(e.data.Shallows)
+ for _, s := range sorted {
+ if err := e.pe.Encodef("shallow %s\n", s); err != nil {
+ e.err = fmt.Errorf("encoding shallow %q: %s", s, err)
+ return nil
+ }
+ }
+
+ return encodeDepth
+}
+
+func encodeDepth(e *Encoder) encoderStateFn {
+ switch depth := e.data.Depth.(type) {
+ case DepthCommits:
+ if depth != 0 {
+ commits := int(depth)
+ if err := e.pe.Encodef("deepen %d\n", commits); err != nil {
+ e.err = fmt.Errorf("encoding depth %d: %s", depth, err)
+ return nil
+ }
+ }
+ case DepthSince:
+ when := time.Time(depth).UTC()
+ if err := e.pe.Encodef("deepen-since %d\n", when.Unix()); err != nil {
+ e.err = fmt.Errorf("encoding depth %s: %s", when, err)
+ return nil
+ }
+ case DepthReference:
+ reference := string(depth)
+ if err := e.pe.Encodef("deepen-not %s\n", reference); err != nil {
+ e.err = fmt.Errorf("encoding depth %s: %s", reference, err)
+ return nil
+ }
+ default:
+ e.err = fmt.Errorf("unsupported depth type")
+ return nil
+ }
+
+ return encodeFlush
+}
+
+func encodeFlush(e *Encoder) encoderStateFn {
+ if err := e.pe.Flush(); err != nil {
+ e.err = fmt.Errorf("encoding flush-pkt: %s", err)
+ return nil
+ }
+
+ return nil
+}
diff --git a/plumbing/format/packp/ulreq/encoder_test.go b/plumbing/format/packp/ulreq/encoder_test.go
new file mode 100644
index 0000000..44c6d26
--- /dev/null
+++ b/plumbing/format/packp/ulreq/encoder_test.go
@@ -0,0 +1,268 @@
+package ulreq
+
+import (
+ "bytes"
+ "time"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packp/pktline"
+
+ . "gopkg.in/check.v1"
+)
+
+type SuiteEncoder struct{}
+
+var _ = Suite(&SuiteEncoder{})
+
+// returns a byte slice with the pkt-lines for the given payloads.
+func pktlines(c *C, payloads ...string) []byte {
+ var buf bytes.Buffer
+ e := pktline.NewEncoder(&buf)
+
+ err := e.EncodeString(payloads...)
+ c.Assert(err, IsNil, Commentf("building pktlines for %v\n", payloads))
+
+ return buf.Bytes()
+}
+
+func testEncode(c *C, ur *UlReq, expectedPayloads []string) {
+ var buf bytes.Buffer
+ e := NewEncoder(&buf)
+
+ err := e.Encode(ur)
+ c.Assert(err, IsNil)
+ obtained := buf.Bytes()
+
+ expected := pktlines(c, expectedPayloads...)
+
+ comment := Commentf("\nobtained = %s\nexpected = %s\n", string(obtained), string(expected))
+
+ c.Assert(obtained, DeepEquals, expected, comment)
+}
+
+func testEncodeError(c *C, ur *UlReq, expectedErrorRegEx string) {
+ var buf bytes.Buffer
+ e := NewEncoder(&buf)
+
+ err := e.Encode(ur)
+ c.Assert(err, ErrorMatches, expectedErrorRegEx)
+}
+
+func (s *SuiteEncoder) TestZeroValue(c *C) {
+ ur := New()
+ expectedErrorRegEx := ".*empty wants.*"
+
+ testEncodeError(c, ur, expectedErrorRegEx)
+}
+
+func (s *SuiteEncoder) TestOneWant(c *C) {
+ ur := New()
+ ur.Wants = append(ur.Wants, plumbing.NewHash("1111111111111111111111111111111111111111"))
+
+ expected := []string{
+ "want 1111111111111111111111111111111111111111\n",
+ pktline.FlushString,
+ }
+
+ testEncode(c, ur, expected)
+}
+
+func (s *SuiteEncoder) TestOneWantWithCapabilities(c *C) {
+ ur := New()
+ ur.Wants = append(ur.Wants, plumbing.NewHash("1111111111111111111111111111111111111111"))
+ ur.Capabilities.Add("sysref", "HEAD:/refs/heads/master")
+ ur.Capabilities.Add("multi_ack")
+ ur.Capabilities.Add("thin-pack")
+ ur.Capabilities.Add("side-band")
+ ur.Capabilities.Add("ofs-delta")
+
+ expected := []string{
+ "want 1111111111111111111111111111111111111111 multi_ack ofs-delta side-band sysref=HEAD:/refs/heads/master thin-pack\n",
+ pktline.FlushString,
+ }
+
+ testEncode(c, ur, expected)
+}
+
+func (s *SuiteEncoder) TestWants(c *C) {
+ ur := New()
+ ur.Wants = append(ur.Wants, plumbing.NewHash("4444444444444444444444444444444444444444"))
+ ur.Wants = append(ur.Wants, plumbing.NewHash("1111111111111111111111111111111111111111"))
+ ur.Wants = append(ur.Wants, plumbing.NewHash("3333333333333333333333333333333333333333"))
+ ur.Wants = append(ur.Wants, plumbing.NewHash("2222222222222222222222222222222222222222"))
+ ur.Wants = append(ur.Wants, plumbing.NewHash("5555555555555555555555555555555555555555"))
+
+ expected := []string{
+ "want 1111111111111111111111111111111111111111\n",
+ "want 2222222222222222222222222222222222222222\n",
+ "want 3333333333333333333333333333333333333333\n",
+ "want 4444444444444444444444444444444444444444\n",
+ "want 5555555555555555555555555555555555555555\n",
+ pktline.FlushString,
+ }
+
+ testEncode(c, ur, expected)
+}
+
+func (s *SuiteEncoder) TestWantsWithCapabilities(c *C) {
+ ur := New()
+ ur.Wants = append(ur.Wants, plumbing.NewHash("4444444444444444444444444444444444444444"))
+ ur.Wants = append(ur.Wants, plumbing.NewHash("1111111111111111111111111111111111111111"))
+ ur.Wants = append(ur.Wants, plumbing.NewHash("3333333333333333333333333333333333333333"))
+ ur.Wants = append(ur.Wants, plumbing.NewHash("2222222222222222222222222222222222222222"))
+ ur.Wants = append(ur.Wants, plumbing.NewHash("5555555555555555555555555555555555555555"))
+
+ ur.Capabilities.Add("sysref", "HEAD:/refs/heads/master")
+ ur.Capabilities.Add("multi_ack")
+ ur.Capabilities.Add("thin-pack")
+ ur.Capabilities.Add("side-band")
+ ur.Capabilities.Add("ofs-delta")
+
+ expected := []string{
+ "want 1111111111111111111111111111111111111111 multi_ack ofs-delta side-band sysref=HEAD:/refs/heads/master thin-pack\n",
+ "want 2222222222222222222222222222222222222222\n",
+ "want 3333333333333333333333333333333333333333\n",
+ "want 4444444444444444444444444444444444444444\n",
+ "want 5555555555555555555555555555555555555555\n",
+ pktline.FlushString,
+ }
+
+ testEncode(c, ur, expected)
+}
+
+func (s *SuiteEncoder) TestShallow(c *C) {
+ ur := New()
+ ur.Wants = append(ur.Wants, plumbing.NewHash("1111111111111111111111111111111111111111"))
+ ur.Capabilities.Add("multi_ack")
+ ur.Shallows = append(ur.Shallows, plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"))
+
+ expected := []string{
+ "want 1111111111111111111111111111111111111111 multi_ack\n",
+ "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n",
+ pktline.FlushString,
+ }
+
+ testEncode(c, ur, expected)
+}
+
+func (s *SuiteEncoder) TestManyShallows(c *C) {
+ ur := New()
+ ur.Wants = append(ur.Wants, plumbing.NewHash("1111111111111111111111111111111111111111"))
+ ur.Capabilities.Add("multi_ack")
+ ur.Shallows = append(ur.Shallows, plumbing.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"))
+ ur.Shallows = append(ur.Shallows, plumbing.NewHash("dddddddddddddddddddddddddddddddddddddddd"))
+ ur.Shallows = append(ur.Shallows, plumbing.NewHash("cccccccccccccccccccccccccccccccccccccccc"))
+ ur.Shallows = append(ur.Shallows, plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"))
+
+ expected := []string{
+ "want 1111111111111111111111111111111111111111 multi_ack\n",
+ "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n",
+ "shallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n",
+ "shallow cccccccccccccccccccccccccccccccccccccccc\n",
+ "shallow dddddddddddddddddddddddddddddddddddddddd\n",
+ pktline.FlushString,
+ }
+
+ testEncode(c, ur, expected)
+}
+
+func (s *SuiteEncoder) TestDepthCommits(c *C) {
+ ur := New()
+ ur.Wants = append(ur.Wants, plumbing.NewHash("1111111111111111111111111111111111111111"))
+ ur.Depth = DepthCommits(1234)
+
+ expected := []string{
+ "want 1111111111111111111111111111111111111111\n",
+ "deepen 1234\n",
+ pktline.FlushString,
+ }
+
+ testEncode(c, ur, expected)
+}
+
+func (s *SuiteEncoder) TestDepthSinceUTC(c *C) {
+ ur := New()
+ ur.Wants = append(ur.Wants, plumbing.NewHash("1111111111111111111111111111111111111111"))
+ since := time.Date(2015, time.January, 2, 3, 4, 5, 0, time.UTC)
+ ur.Depth = DepthSince(since)
+
+ expected := []string{
+ "want 1111111111111111111111111111111111111111\n",
+ "deepen-since 1420167845\n",
+ pktline.FlushString,
+ }
+
+ testEncode(c, ur, expected)
+}
+
+func (s *SuiteEncoder) TestDepthSinceNonUTC(c *C) {
+ ur := New()
+ ur.Wants = append(ur.Wants, plumbing.NewHash("1111111111111111111111111111111111111111"))
+ berlin, err := time.LoadLocation("Europe/Berlin")
+ c.Assert(err, IsNil)
+ since := time.Date(2015, time.January, 2, 3, 4, 5, 0, berlin)
+ // since is 2015-01-02 03:04:05 +0100 (Europe/Berlin), i.e.
+ // 2015-01-02 02:04:05 +0000 UTC, which is 1420164245 Unix seconds.
+ ur.Depth = DepthSince(since)
+
+ expected := []string{
+ "want 1111111111111111111111111111111111111111\n",
+ "deepen-since 1420164245\n",
+ pktline.FlushString,
+ }
+
+ testEncode(c, ur, expected)
+}
+
+func (s *SuiteEncoder) TestDepthReference(c *C) {
+ ur := New()
+ ur.Wants = append(ur.Wants, plumbing.NewHash("1111111111111111111111111111111111111111"))
+ ur.Depth = DepthReference("refs/heads/feature-foo")
+
+ expected := []string{
+ "want 1111111111111111111111111111111111111111\n",
+ "deepen-not refs/heads/feature-foo\n",
+ pktline.FlushString,
+ }
+
+ testEncode(c, ur, expected)
+}
+
+func (s *SuiteEncoder) TestAll(c *C) {
+ ur := New()
+ ur.Wants = append(ur.Wants, plumbing.NewHash("4444444444444444444444444444444444444444"))
+ ur.Wants = append(ur.Wants, plumbing.NewHash("1111111111111111111111111111111111111111"))
+ ur.Wants = append(ur.Wants, plumbing.NewHash("3333333333333333333333333333333333333333"))
+ ur.Wants = append(ur.Wants, plumbing.NewHash("2222222222222222222222222222222222222222"))
+ ur.Wants = append(ur.Wants, plumbing.NewHash("5555555555555555555555555555555555555555"))
+
+ ur.Capabilities.Add("sysref", "HEAD:/refs/heads/master")
+ ur.Capabilities.Add("multi_ack")
+ ur.Capabilities.Add("thin-pack")
+ ur.Capabilities.Add("side-band")
+ ur.Capabilities.Add("ofs-delta")
+
+ ur.Shallows = append(ur.Shallows, plumbing.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"))
+ ur.Shallows = append(ur.Shallows, plumbing.NewHash("dddddddddddddddddddddddddddddddddddddddd"))
+ ur.Shallows = append(ur.Shallows, plumbing.NewHash("cccccccccccccccccccccccccccccccccccccccc"))
+ ur.Shallows = append(ur.Shallows, plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"))
+
+ since := time.Date(2015, time.January, 2, 3, 4, 5, 0, time.UTC)
+ ur.Depth = DepthSince(since)
+
+ expected := []string{
+ "want 1111111111111111111111111111111111111111 multi_ack ofs-delta side-band sysref=HEAD:/refs/heads/master thin-pack\n",
+ "want 2222222222222222222222222222222222222222\n",
+ "want 3333333333333333333333333333333333333333\n",
+ "want 4444444444444444444444444444444444444444\n",
+ "want 5555555555555555555555555555555555555555\n",
+ "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n",
+ "shallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n",
+ "shallow cccccccccccccccccccccccccccccccccccccccc\n",
+ "shallow dddddddddddddddddddddddddddddddddddddddd\n",
+ "deepen-since 1420167845\n",
+ pktline.FlushString,
+ }
+
+ testEncode(c, ur, expected)
+}
diff --git a/plumbing/format/packp/ulreq/ulreq.go b/plumbing/format/packp/ulreq/ulreq.go
new file mode 100644
index 0000000..d2cc7c0
--- /dev/null
+++ b/plumbing/format/packp/ulreq/ulreq.go
@@ -0,0 +1,56 @@
+// Package ulreq implements encoding and decoding of upload-request
+// messages, as sent to a git-upload-pack command.
+package ulreq
+
+import (
+ "time"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packp"
+)
+
+// UlReq values represent the information transmitted in an
+// upload-request message. Values of this type are not zero-value
+// safe; use the New function instead.
+type UlReq struct {
+ Capabilities *packp.Capabilities
+ Wants []plumbing.Hash
+ Shallows []plumbing.Hash
+ Depth Depth
+}
+
+// Depth values store the desired depth of the requested packfile: see
+// DepthCommits, DepthSince and DepthReference.
+type Depth interface {
+ isDepth()
+}
+
+// DepthCommits values store the maximum number of requested commits in
+// the packfile. Zero means infinite. A negative value will have
+// undefined consequences.
+type DepthCommits int
+
+func (d DepthCommits) isDepth() {}
+
+// DepthSince values request only commits newer than the specified time.
+type DepthSince time.Time
+
+func (d DepthSince) isDepth() {}
+
+// DepthReference requests only commits not found in the specified reference.
+type DepthReference string
+
+func (d DepthReference) isDepth() {}
+
+// New returns a pointer to a new UlReq value, ready to be used. It has
+// no capabilities, wants or shallows and an infinite depth. Note that
+// an upload-request needs at least one wanted hash in order to be
+// encoded.
+func New() *UlReq {
+ return &UlReq{
+ Capabilities: packp.NewCapabilities(),
+ Wants: []plumbing.Hash{},
+ Shallows: []plumbing.Hash{},
+ Depth: DepthCommits(0),
+ }
+}
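
To make the three Depth variants concrete, a small sketch (each assignment overwrites the previous one; values are illustrative):

package main

import (
	"fmt"
	"time"

	"gopkg.in/src-d/go-git.v4/plumbing/format/packp/ulreq"
)

func main() {
	ur := ulreq.New()

	ur.Depth = ulreq.DepthCommits(10)                     // encodes as "deepen 10"
	ur.Depth = ulreq.DepthSince(time.Unix(1420167845, 0)) // "deepen-since 1420167845"
	ur.Depth = ulreq.DepthReference("refs/heads/master")  // "deepen-not refs/heads/master"

	fmt.Printf("%T\n", ur.Depth) // ulreq.DepthReference
}
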
diff --git a/plumbing/format/packp/ulreq/ulreq_test.go b/plumbing/format/packp/ulreq/ulreq_test.go
new file mode 100644
index 0000000..06963ff
--- /dev/null
+++ b/plumbing/format/packp/ulreq/ulreq_test.go
@@ -0,0 +1,91 @@
+package ulreq
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+ "time"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/packp/pktline"
+
+ . "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+func ExampleEncoder_Encode() {
+ // Create an empty UlReq with the contents you want...
+ ur := New()
+
+ // Add a couple of wants
+ ur.Wants = append(ur.Wants, plumbing.NewHash("3333333333333333333333333333333333333333"))
+ ur.Wants = append(ur.Wants, plumbing.NewHash("1111111111111111111111111111111111111111"))
+ ur.Wants = append(ur.Wants, plumbing.NewHash("2222222222222222222222222222222222222222"))
+
+ // And some capabilities you would like the server to use
+ ur.Capabilities.Add("sysref", "HEAD:/refs/heads/master")
+ ur.Capabilities.Add("ofs-delta")
+
+ // Add a couple of shallows
+ ur.Shallows = append(ur.Shallows, plumbing.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"))
+ ur.Shallows = append(ur.Shallows, plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"))
+
+ // And restrict the server's answer to commits newer than "2015-01-02 03:04:05 UTC"
+ since := time.Date(2015, time.January, 2, 3, 4, 5, 0, time.UTC)
+ ur.Depth = DepthSince(since)
+
+ // Create a new Encoder writing to stdout...
+ e := NewEncoder(os.Stdout)
+ // ...and encode the upload-request to it.
+ _ = e.Encode(ur) // ignoring errors for brevity
+ // Output:
+ // 005bwant 1111111111111111111111111111111111111111 ofs-delta sysref=HEAD:/refs/heads/master
+ // 0032want 2222222222222222222222222222222222222222
+ // 0032want 3333333333333333333333333333333333333333
+ // 0035shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ // 0035shallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
+ // 001cdeepen-since 1420167845
+ // 0000
+}
+
+func ExampleDecoder_Decode() {
+ // Here is a raw upload-request message.
+ raw := "" +
+ "005bwant 1111111111111111111111111111111111111111 ofs-delta sysref=HEAD:/refs/heads/master\n" +
+ "0032want 2222222222222222222222222222222222222222\n" +
+ "0032want 3333333333333333333333333333333333333333\n" +
+ "0035shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n" +
+ "0035shallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n" +
+ "001cdeepen-since 1420167845\n" + // 2015-01-02 03:04:05 +0000 UTC
+ pktline.FlushString
+
+ // Use the raw message as our input.
+ input := strings.NewReader(raw)
+
+ // Create the Decoder reading from our input.
+ d := NewDecoder(input)
+
+ // Decode the input into a newly allocated UlReq value.
+ ur := New()
+ _ = d.Decode(ur) // error check ignored for brevity
+
+ // Do something interesting with the UlReq, e.g. print its contents.
+ fmt.Println("capabilities =", ur.Capabilities.String())
+ fmt.Println("wants =", ur.Wants)
+ fmt.Println("shallows =", ur.Shallows)
+ switch depth := ur.Depth.(type) {
+ case DepthCommits:
+ fmt.Println("depth =", int(depth))
+ case DepthSince:
+ fmt.Println("depth =", time.Time(depth))
+ case DepthReference:
+ fmt.Println("depth =", string(depth))
+ }
+ // Output:
+ // capabilities = ofs-delta sysref=HEAD:/refs/heads/master
+ // wants = [1111111111111111111111111111111111111111 2222222222222222222222222222222222222222 3333333333333333333333333333333333333333]
+ // shallows = [aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb]
+ // depth = 2015-01-02 03:04:05 +0000 UTC
+}
diff --git a/plumbing/hash.go b/plumbing/hash.go
new file mode 100644
index 0000000..7fa953d
--- /dev/null
+++ b/plumbing/hash.go
@@ -0,0 +1,58 @@
+package plumbing
+
+import (
+ "crypto/sha1"
+ "encoding/hex"
+ "hash"
+ "strconv"
+)
+
+// Hash represents the SHA-1 hash of some content
+type Hash [20]byte
+
+// ZeroHash is a Hash with the zero value
+var ZeroHash Hash
+
+// ComputeHash computes the hash for a given ObjectType and content
+func ComputeHash(t ObjectType, content []byte) Hash {
+ h := NewHasher(t, int64(len(content)))
+ h.Write(content)
+ return h.Sum()
+}
+
+// NewHash returns a new Hash from a hexadecimal hash representation
+func NewHash(s string) Hash {
+ b, _ := hex.DecodeString(s)
+
+ var h Hash
+ copy(h[:], b)
+
+ return h
+}
+
+func (h Hash) IsZero() bool {
+ var empty Hash
+ return h == empty
+}
+
+func (h Hash) String() string {
+ return hex.EncodeToString(h[:])
+}
+
+type Hasher struct {
+ hash.Hash
+}
+
+func NewHasher(t ObjectType, size int64) Hasher {
+ h := Hasher{sha1.New()}
+ h.Write(t.Bytes())
+ h.Write([]byte(" "))
+ h.Write([]byte(strconv.FormatInt(size, 10)))
+ h.Write([]byte{0})
+ return h
+}
+
+func (h Hasher) Sum() (hash Hash) {
+ copy(hash[:], h.Hash.Sum(nil))
+ return
+}
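
A short sketch of hashing content the way git does (type, size, NUL byte, then content, as Hasher writes above); the expected output matches the blob test vector in hash_test.go below:

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing"
)

func main() {
	// Hashes "blob 14\x00Hello, World!\n" under SHA-1.
	h := plumbing.ComputeHash(plumbing.BlobObject, []byte("Hello, World!\n"))
	fmt.Println(h) // 8ab686eafeb1f44702738c8b0f24f2567c36da6d
}
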
diff --git a/plumbing/hash_test.go b/plumbing/hash_test.go
new file mode 100644
index 0000000..370811e
--- /dev/null
+++ b/plumbing/hash_test.go
@@ -0,0 +1,42 @@
+package plumbing
+
+import (
+ "testing"
+
+ . "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type HashSuite struct{}
+
+var _ = Suite(&HashSuite{})
+
+func (s *HashSuite) TestComputeHash(c *C) {
+ hash := ComputeHash(BlobObject, []byte(""))
+ c.Assert(hash.String(), Equals, "e69de29bb2d1d6434b8b29ae775ad8c2e48c5391")
+
+ hash = ComputeHash(BlobObject, []byte("Hello, World!\n"))
+ c.Assert(hash.String(), Equals, "8ab686eafeb1f44702738c8b0f24f2567c36da6d")
+}
+
+func (s *HashSuite) TestNewHash(c *C) {
+ hash := ComputeHash(BlobObject, []byte("Hello, World!\n"))
+
+ c.Assert(hash, Equals, NewHash(hash.String()))
+}
+
+func (s *HashSuite) TestIsZero(c *C) {
+ hash := NewHash("foo")
+ c.Assert(hash.IsZero(), Equals, true)
+
+ hash = NewHash("8ab686eafeb1f44702738c8b0f24f2567c36da6d")
+ c.Assert(hash.IsZero(), Equals, false)
+}
+
+func (s *HashSuite) TestNewHasher(c *C) {
+ content := "hasher test sample"
+ hasher := NewHasher(BlobObject, int64(len(content)))
+ hasher.Write([]byte(content))
+ c.Assert(hasher.Sum().String(), Equals, "dc42c3cc80028d0ec61f0a6b24cadd1c195c4dfc")
+}
diff --git a/plumbing/memory.go b/plumbing/memory.go
new file mode 100644
index 0000000..c65ce1f
--- /dev/null
+++ b/plumbing/memory.go
@@ -0,0 +1,59 @@
+package plumbing
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+)
+
+// MemoryObject is an in-memory Object implementation
+type MemoryObject struct {
+ t ObjectType
+ h Hash
+ cont []byte
+ sz int64
+}
+
+// Hash returns the object Hash. The hash is calculated on the fly the
+// first time this method is called; subsequent calls return the same
+// Hash even if the type or the content have changed. The Hash is only
+// generated if the size of the content is exactly Object.Size
+func (o *MemoryObject) Hash() Hash {
+ if o.h == ZeroHash && int64(len(o.cont)) == o.sz {
+ o.h = ComputeHash(o.t, o.cont)
+ }
+
+ return o.h
+}
+
+// Type returns the ObjectType
+func (o *MemoryObject) Type() ObjectType { return o.t }
+
+// SetType sets the ObjectType
+func (o *MemoryObject) SetType(t ObjectType) { o.t = t }
+
+// Size returns the size of the object
+func (o *MemoryObject) Size() int64 { return o.sz }
+
+// SetSize sets the object size; content of the given size should be
+// written afterwards
+func (o *MemoryObject) SetSize(s int64) { o.sz = s }
+
+// Reader returns an io.ReadCloser used to read the object's content.
+func (o *MemoryObject) Reader() (io.ReadCloser, error) {
+ return ioutil.NopCloser(bytes.NewBuffer(o.cont)), nil
+}
+
+// Writer returns an io.WriteCloser used to write the object's content.
+func (o *MemoryObject) Writer() (io.WriteCloser, error) {
+ return o, nil
+}
+
+func (o *MemoryObject) Write(p []byte) (n int, err error) {
+ o.cont = append(o.cont, p...)
+ return len(p), nil
+}
+
+// Close releases any resources consumed by the object when it is acting
+// as a writer.
+func (o *MemoryObject) Close() error { return nil }
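
A minimal sketch of filling a MemoryObject through its Writer; the hash only becomes available once the written content reaches the declared size:

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing"
)

func main() {
	o := &plumbing.MemoryObject{}
	o.SetType(plumbing.BlobObject)
	o.SetSize(14) // must match the content length for Hash() to compute

	w, _ := o.Writer() // never fails for MemoryObject
	w.Write([]byte("Hello, World!\n"))
	w.Close()

	fmt.Println(o.Hash()) // 8ab686eafeb1f44702738c8b0f24f2567c36da6d
}
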
diff --git a/plumbing/memory_test.go b/plumbing/memory_test.go
new file mode 100644
index 0000000..879ed37
--- /dev/null
+++ b/plumbing/memory_test.go
@@ -0,0 +1,71 @@
+package plumbing
+
+import (
+ "io/ioutil"
+
+ . "gopkg.in/check.v1"
+)
+
+type MemoryObjectSuite struct{}
+
+var _ = Suite(&MemoryObjectSuite{})
+
+func (s *MemoryObjectSuite) TestHash(c *C) {
+ o := &MemoryObject{}
+ o.SetType(BlobObject)
+ o.SetSize(14)
+
+ _, err := o.Write([]byte("Hello, World!\n"))
+ c.Assert(err, IsNil)
+
+ c.Assert(o.Hash().String(), Equals, "8ab686eafeb1f44702738c8b0f24f2567c36da6d")
+
+ o.SetType(CommitObject)
+ c.Assert(o.Hash().String(), Equals, "8ab686eafeb1f44702738c8b0f24f2567c36da6d")
+}
+
+func (s *MemoryObjectSuite) TestHashNotFilled(c *C) {
+ o := &MemoryObject{}
+ o.SetType(BlobObject)
+ o.SetSize(14)
+
+ c.Assert(o.Hash(), Equals, ZeroHash)
+}
+
+func (s *MemoryObjectSuite) TestType(c *C) {
+ o := &MemoryObject{}
+ o.SetType(BlobObject)
+ c.Assert(o.Type(), Equals, BlobObject)
+}
+
+func (s *MemoryObjectSuite) TestSize(c *C) {
+ o := &MemoryObject{}
+ o.SetSize(42)
+ c.Assert(o.Size(), Equals, int64(42))
+}
+
+func (s *MemoryObjectSuite) TestReader(c *C) {
+ o := &MemoryObject{cont: []byte("foo")}
+
+ reader, err := o.Reader()
+ c.Assert(err, IsNil)
+ defer func() { c.Assert(reader.Close(), IsNil) }()
+
+ b, err := ioutil.ReadAll(reader)
+ c.Assert(err, IsNil)
+ c.Assert(b, DeepEquals, []byte("foo"))
+}
+
+func (s *MemoryObjectSuite) TestWriter(c *C) {
+ o := &MemoryObject{}
+
+ writer, err := o.Writer()
+ c.Assert(err, IsNil)
+ defer func() { c.Assert(writer.Close(), IsNil) }()
+
+ n, err := writer.Write([]byte("foo"))
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 3)
+
+ c.Assert(o.cont, DeepEquals, []byte("foo"))
+}
diff --git a/plumbing/object.go b/plumbing/object.go
new file mode 100644
index 0000000..23abd4f
--- /dev/null
+++ b/plumbing/object.go
@@ -0,0 +1,94 @@
+// Package plumbing implements the core interfaces and structs used by go-git
+package plumbing
+
+import (
+ "errors"
+ "io"
+)
+
+var (
+ ErrObjectNotFound = errors.New("object not found")
+ // ErrInvalidType is returned when an invalid object type is provided.
+ ErrInvalidType = errors.New("invalid object type")
+)
+
+// Object is a generic representation of any git object
+type Object interface {
+ Hash() Hash
+ Type() ObjectType
+ SetType(ObjectType)
+ Size() int64
+ SetSize(int64)
+ Reader() (io.ReadCloser, error)
+ Writer() (io.WriteCloser, error)
+}
+
+// ObjectType represents the internal object type.
+// Integer values from 0 to 7 map to those exposed by git.
+// AnyObject is used to represent any of them.
+type ObjectType int8
+
+const (
+ InvalidObject ObjectType = 0
+ CommitObject ObjectType = 1
+ TreeObject ObjectType = 2
+ BlobObject ObjectType = 3
+ TagObject ObjectType = 4
+ // 5 reserved for future expansion
+ OFSDeltaObject ObjectType = 6
+ REFDeltaObject ObjectType = 7
+
+ AnyObject ObjectType = -127
+)
+
+func (t ObjectType) String() string {
+ switch t {
+ case CommitObject:
+ return "commit"
+ case TreeObject:
+ return "tree"
+ case BlobObject:
+ return "blob"
+ case TagObject:
+ return "tag"
+ case OFSDeltaObject:
+ return "ofs-delta"
+ case REFDeltaObject:
+ return "ref-delta"
+ case AnyObject:
+ return "any"
+ default:
+ return "unknown"
+ }
+}
+
+func (t ObjectType) Bytes() []byte {
+ return []byte(t.String())
+}
+
+// Valid returns true if t is a valid ObjectType.
+func (t ObjectType) Valid() bool {
+ return t >= CommitObject && t <= REFDeltaObject
+}
+
+// ParseObjectType parses a string representation of ObjectType. It returns an
+// error on parse failure.
+func ParseObjectType(value string) (typ ObjectType, err error) {
+ switch value {
+ case "commit":
+ typ = CommitObject
+ case "tree":
+ typ = TreeObject
+ case "blob":
+ typ = BlobObject
+ case "tag":
+ typ = TagObject
+ case "ofs-delta":
+ typ = OFSDeltaObject
+ case "ref-delta":
+ typ = REFDeltaObject
+ default:
+ err = ErrInvalidType
+ }
+ return
+}
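
ParseObjectType and ObjectType.String are inverses for the valid types; a quick sketch:

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing"
)

func main() {
	t, err := plumbing.ParseObjectType("commit")
	fmt.Println(t, err, t == plumbing.CommitObject) // commit <nil> true

	_, err = plumbing.ParseObjectType("foo")
	fmt.Println(err) // invalid object type
}
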
diff --git a/plumbing/object_test.go b/plumbing/object_test.go
new file mode 100644
index 0000000..4d2dbe2
--- /dev/null
+++ b/plumbing/object_test.go
@@ -0,0 +1,46 @@
+package plumbing
+
+import . "gopkg.in/check.v1"
+
+type ObjectSuite struct{}
+
+var _ = Suite(&ObjectSuite{})
+
+func (s *ObjectSuite) TestObjectTypeString(c *C) {
+ c.Assert(CommitObject.String(), Equals, "commit")
+ c.Assert(TreeObject.String(), Equals, "tree")
+ c.Assert(BlobObject.String(), Equals, "blob")
+ c.Assert(TagObject.String(), Equals, "tag")
+ c.Assert(REFDeltaObject.String(), Equals, "ref-delta")
+ c.Assert(OFSDeltaObject.String(), Equals, "ofs-delta")
+ c.Assert(AnyObject.String(), Equals, "any")
+ c.Assert(ObjectType(42).String(), Equals, "unknown")
+}
+
+func (s *ObjectSuite) TestObjectTypeBytes(c *C) {
+ c.Assert(CommitObject.Bytes(), DeepEquals, []byte("commit"))
+}
+
+func (s *ObjectSuite) TestObjectTypeValid(c *C) {
+ c.Assert(CommitObject.Valid(), Equals, true)
+ c.Assert(ObjectType(42).Valid(), Equals, false)
+}
+
+func (s *ObjectSuite) TestParseObjectType(c *C) {
+ for s, e := range map[string]ObjectType{
+ "commit": CommitObject,
+ "tree": TreeObject,
+ "blob": BlobObject,
+ "tag": TagObject,
+ "ref-delta": REFDeltaObject,
+ "ofs-delta": OFSDeltaObject,
+ } {
+ t, err := ParseObjectType(s)
+ c.Assert(err, IsNil)
+ c.Assert(e, Equals, t)
+ }
+
+ t, err := ParseObjectType("foo")
+ c.Assert(err, Equals, ErrInvalidType)
+ c.Assert(t, Equals, InvalidObject)
+}
diff --git a/plumbing/reference.go b/plumbing/reference.go
new file mode 100644
index 0000000..98516c7
--- /dev/null
+++ b/plumbing/reference.go
@@ -0,0 +1,146 @@
+package plumbing
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+const (
+ refPrefix = "refs/"
+ refHeadPrefix = refPrefix + "heads/"
+ refTagPrefix = refPrefix + "tags/"
+ refRemotePrefix = refPrefix + "remotes/"
+ refNotePrefix = refPrefix + "notes/"
+ symrefPrefix = "ref: "
+)
+
+var (
+ ErrReferenceNotFound = errors.New("reference not found")
+)
+
+// ReferenceType describes the type of a reference
+type ReferenceType int8
+
+const (
+ InvalidReference ReferenceType = 0
+ HashReference ReferenceType = 1
+ SymbolicReference ReferenceType = 2
+)
+
+// ReferenceName is the name of a reference
+type ReferenceName string
+
+func (r ReferenceName) String() string {
+ return string(r)
+}
+
+// Short returns the short name of a ReferenceName
+func (r ReferenceName) Short() string {
+ parts := strings.Split(string(r), "/")
+ return parts[len(parts)-1]
+}
+
+const (
+ HEAD ReferenceName = "HEAD"
+)
+
+// Reference is a representation of a git reference
+type Reference struct {
+ t ReferenceType
+ n ReferenceName
+ h Hash
+ target ReferenceName
+}
+
+// NewReferenceFromStrings creates a reference from a name and a target as
+// strings; the resulting reference can be a SymbolicReference or a
+// HashReference, depending on the target provided
+func NewReferenceFromStrings(name, target string) *Reference {
+ n := ReferenceName(name)
+
+ if strings.HasPrefix(target, symrefPrefix) {
+ target := ReferenceName(target[len(symrefPrefix):])
+ return NewSymbolicReference(n, target)
+ }
+
+ return NewHashReference(n, NewHash(target))
+}
+
+// NewSymbolicReference creates a new SymbolicReference reference
+func NewSymbolicReference(n, target ReferenceName) *Reference {
+ return &Reference{
+ t: SymbolicReference,
+ n: n,
+ target: target,
+ }
+}
+
+// NewHashReference creates a new HashReference reference
+func NewHashReference(n ReferenceName, h Hash) *Reference {
+ return &Reference{
+ t: HashReference,
+ n: n,
+ h: h,
+ }
+}
+
+// Type returns the type of a reference
+func (r *Reference) Type() ReferenceType {
+ return r.t
+}
+
+// Name returns the name of a reference
+func (r *Reference) Name() ReferenceName {
+ return r.n
+}
+
+// Hash returns the hash of a hash reference
+func (r *Reference) Hash() Hash {
+ return r.h
+}
+
+// Target returns the target of a symbolic reference
+func (r *Reference) Target() ReferenceName {
+ return r.target
+}
+
+// IsBranch checks if a reference is a branch
+func (r *Reference) IsBranch() bool {
+ return strings.HasPrefix(string(r.n), refHeadPrefix)
+}
+
+// IsNote checks if a reference is a note
+func (r *Reference) IsNote() bool {
+ return strings.HasPrefix(string(r.n), refNotePrefix)
+}
+
+// IsRemote checks if a reference is a remote
+func (r *Reference) IsRemote() bool {
+ return strings.HasPrefix(string(r.n), refRemotePrefix)
+}
+
+// IsTag checks if a reference is a tag
+func (r *Reference) IsTag() bool {
+ return strings.HasPrefix(string(r.n), refTagPrefix)
+}
+
+// Strings dumps the reference as a [2]string
+func (r *Reference) Strings() [2]string {
+ var o [2]string
+ o[0] = r.Name().String()
+
+ switch r.Type() {
+ case HashReference:
+ o[1] = r.Hash().String()
+ case SymbolicReference:
+ o[1] = symrefPrefix + r.Target().String()
+ }
+
+ return o
+}
+
+func (r *Reference) String() string {
+ s := r.Strings()
+ return fmt.Sprintf("%s %s", s[1], s[0])
+}
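
A small sketch contrasting the two reference flavours built from raw strings (values illustrative):

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing"
)

func main() {
	branch := plumbing.NewReferenceFromStrings(
		"refs/heads/v4", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
	head := plumbing.NewReferenceFromStrings("HEAD", "ref: refs/heads/v4")

	fmt.Println(branch.IsBranch(), branch.Hash()) // true 6ecf0ef2c2dffb796033e5a02219af86ec6584e5
	fmt.Println(head.Type() == plumbing.SymbolicReference, head.Target()) // true refs/heads/v4
}
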
diff --git a/plumbing/reference_test.go b/plumbing/reference_test.go
new file mode 100644
index 0000000..4d9b393
--- /dev/null
+++ b/plumbing/reference_test.go
@@ -0,0 +1,61 @@
+package plumbing
+
+import . "gopkg.in/check.v1"
+
+type ReferenceSuite struct{}
+
+var _ = Suite(&ReferenceSuite{})
+
+const (
+ ExampleReferenceName ReferenceName = "refs/heads/v4"
+)
+
+func (s *ReferenceSuite) TestReferenceNameShort(c *C) {
+ c.Assert(ExampleReferenceName.Short(), Equals, "v4")
+}
+
+func (s *ReferenceSuite) TestNewReferenceFromStrings(c *C) {
+ r := NewReferenceFromStrings("refs/heads/v4", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
+ c.Assert(r.Type(), Equals, HashReference)
+ c.Assert(r.Name(), Equals, ExampleReferenceName)
+ c.Assert(r.Hash(), Equals, NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"))
+
+ r = NewReferenceFromStrings("HEAD", "ref: refs/heads/v4")
+ c.Assert(r.Type(), Equals, SymbolicReference)
+ c.Assert(r.Name(), Equals, HEAD)
+ c.Assert(r.Target(), Equals, ExampleReferenceName)
+}
+
+func (s *ReferenceSuite) TestNewSymbolicReference(c *C) {
+ r := NewSymbolicReference(HEAD, ExampleReferenceName)
+ c.Assert(r.Type(), Equals, SymbolicReference)
+ c.Assert(r.Name(), Equals, HEAD)
+ c.Assert(r.Target(), Equals, ExampleReferenceName)
+}
+
+func (s *ReferenceSuite) TestNewHashReference(c *C) {
+ r := NewHashReference(ExampleReferenceName, NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"))
+ c.Assert(r.Type(), Equals, HashReference)
+ c.Assert(r.Name(), Equals, ExampleReferenceName)
+ c.Assert(r.Hash(), Equals, NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"))
+}
+
+func (s *ReferenceSuite) TestIsBranch(c *C) {
+ r := NewHashReference(ExampleReferenceName, ZeroHash)
+ c.Assert(r.IsBranch(), Equals, true)
+}
+
+func (s *ReferenceSuite) TestIsNote(c *C) {
+ r := NewHashReference(ReferenceName("refs/notes/foo"), ZeroHash)
+ c.Assert(r.IsNote(), Equals, true)
+}
+
+func (s *ReferenceSuite) TestIsRemote(c *C) {
+ r := NewHashReference(ReferenceName("refs/remotes/origin/master"), ZeroHash)
+ c.Assert(r.IsRemote(), Equals, true)
+}
+
+func (s *ReferenceSuite) TestIsTag(c *C) {
+ r := NewHashReference(ReferenceName("refs/tags/v3.1."), ZeroHash)
+ c.Assert(r.IsTag(), Equals, true)
+}
diff --git a/plumbing/storer/index.go b/plumbing/storer/index.go
new file mode 100644
index 0000000..e087296
--- /dev/null
+++ b/plumbing/storer/index.go
@@ -0,0 +1,9 @@
+package storer
+
+import "gopkg.in/src-d/go-git.v4/plumbing/format/index"
+
+// IndexStorer is a generic storer of index.Index
+type IndexStorer interface {
+ SetIndex(*index.Index) error
+ Index() (*index.Index, error)
+}
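
A hypothetical in-memory implementation, just to show the shape of the interface (memoryIndexStorer is not part of the patch):

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/format/index"
	"gopkg.in/src-d/go-git.v4/plumbing/storer"
)

// memoryIndexStorer keeps the last index set, with no persistence.
type memoryIndexStorer struct {
	idx *index.Index
}

func (s *memoryIndexStorer) SetIndex(i *index.Index) error { s.idx = i; return nil }
func (s *memoryIndexStorer) Index() (*index.Index, error)  { return s.idx, nil }

// Compile-time check that the interface is satisfied.
var _ storer.IndexStorer = (*memoryIndexStorer)(nil)

func main() {
	var s memoryIndexStorer
	idx := &index.Index{}
	s.SetIndex(idx)

	got, _ := s.Index()
	fmt.Println(got == idx) // true
}
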
diff --git a/plumbing/storer/object.go b/plumbing/storer/object.go
new file mode 100644
index 0000000..c7841b6
--- /dev/null
+++ b/plumbing/storer/object.go
@@ -0,0 +1,241 @@
+package storer
+
+import (
+ "errors"
+ "io"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+)
+
+var (
+ // ErrStop is used to stop a ForEach function in an Iter
+ ErrStop = errors.New("stop iter")
+)
+
+// ObjectStorer is a generic storer of objects
+type ObjectStorer interface {
+ // NewObject returns a new plumbing.Object; the real type of the object
+ // can be a custom implementation or the default one, plumbing.MemoryObject
+ NewObject() plumbing.Object
+ // SetObject saves an object into the storage; the object should be
+ // created with the NewObject method, and it will fail if the type is
+ // not supported.
+ SetObject(plumbing.Object) (plumbing.Hash, error)
+ // Object get an object by hash with the given plumbing.ObjectType.
+ // Implementors should return (nil, plumbing.ErrObjectNotFound) if an object
+ // doesn't exist with both the given hash and object type.
+ //
+ // Valid plumbing.ObjectType values are CommitObject, BlobObject, TagObject,
+ // TreeObject and AnyObject. If plumbing.AnyObject is given, the object must
+ // be looked up regardless of its type.
+ Object(plumbing.ObjectType, plumbing.Hash) (plumbing.Object, error)
+ // IterObjects returns a custom ObjectIter over all the objects in the
+ // storage.
+ //
+ // Valid plumbing.ObjectType values are CommitObject, BlobObject,
+ // TagObject, TreeObject and AnyObject.
+ IterObjects(plumbing.ObjectType) (ObjectIter, error)
+}
+
+// Transactioner is an optional interface for ObjectStorer; it enables
+// transaction-based write and read operations in the storage
+type Transactioner interface {
+ // Begin starts a transaction.
+ Begin() Transaction
+}
+
+// PackfileWriter is an optional interface for ObjectStorer; it enables
+// writing a packfile directly to the storage
+type PackfileWriter interface {
+ // PackfileWriter returns a writer for writing a packfile to the storage
+ //
+ // If the Storer does not implement PackfileWriter, the objects should be
+ // written using the SetObject method.
+ PackfileWriter() (io.WriteCloser, error)
+}
+
+// ObjectIter is a generic closable interface for iterating over objects.
+type ObjectIter interface {
+ Next() (plumbing.Object, error)
+ ForEach(func(plumbing.Object) error) error
+ Close()
+}
+
+// Transaction is an in-progress storage transaction. A transaction must end
+// with a call to Commit or Rollback.
+type Transaction interface {
+ SetObject(plumbing.Object) (plumbing.Hash, error)
+ Object(plumbing.ObjectType, plumbing.Hash) (plumbing.Object, error)
+ Commit() error
+ Rollback() error
+}
+
+// ObjectLookupIter implements ObjectIter. It iterates over a series of object
+// hashes and yields their associated objects by retrieving each one from
+// object storage. The retrievals are lazy and only occur when the iterator
+// moves forward with a call to Next().
+//
+// The ObjectLookupIter must be closed with a call to Close() when it is no
+// longer needed.
+type ObjectLookupIter struct {
+ storage ObjectStorer
+ series []plumbing.Hash
+ t plumbing.ObjectType
+ pos int
+}
+
+// NewObjectLookupIter returns an object iterator given an object storage and
+// a slice of object hashes.
+func NewObjectLookupIter(
+ storage ObjectStorer, t plumbing.ObjectType, series []plumbing.Hash) *ObjectLookupIter {
+ return &ObjectLookupIter{
+ storage: storage,
+ series: series,
+ t: t,
+ }
+}
+
+// Next returns the next object from the iterator. If the iterator has reached
+// the end it will return io.EOF as an error. If the object can't be found in
+// the object storage, it will return plumbing.ErrObjectNotFound as an error.
+// If the object is retrieved successfully, the error will be nil.
+func (iter *ObjectLookupIter) Next() (plumbing.Object, error) {
+ if iter.pos >= len(iter.series) {
+ return nil, io.EOF
+ }
+
+ hash := iter.series[iter.pos]
+ obj, err := iter.storage.Object(iter.t, hash)
+ if err == nil {
+ iter.pos++
+ }
+
+ return obj, err
+}
+
+// ForEach calls the cb function for each object contained in this iter until
+// an error happens or the end of the iter is reached. If ErrStop is returned
+// by cb, the iteration stops and no error is returned. The iterator is closed
+// in either case.
+func (iter *ObjectLookupIter) ForEach(cb func(plumbing.Object) error) error {
+ return ForEachIterator(iter, cb)
+}
+
+// Close releases any resources used by the iterator.
+func (iter *ObjectLookupIter) Close() {
+ iter.pos = len(iter.series)
+}
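+
+// A brief usage sketch (editorial addition, not part of this patch): only
+// hashes that the consumer actually advances past are looked up in storage,
+// so stopping early avoids paying for the remaining retrievals.
+//
+//	iter := NewObjectLookupIter(storage, plumbing.CommitObject, hashes)
+//	defer iter.Close()
+//
+//	obj, err := iter.Next() // only the first hash has been retrieved so far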
+
+// ObjectSliceIter implements ObjectIter. It iterates over a series of objects
+// stored in a slice and yields each one in turn when Next() is called.
+//
+// The ObjectSliceIter must be closed with a call to Close() when it is no
+// longer needed.
+type ObjectSliceIter struct {
+	series []plumbing.Object
+}
+
+// NewObjectSliceIter returns an object iterator for the given slice of objects.
+func NewObjectSliceIter(series []plumbing.Object) *ObjectSliceIter {
+ return &ObjectSliceIter{
+ series: series,
+ }
+}
+
+// Next returns the next object from the iterator. If the iterator has reached
+// the end it will return io.EOF as an error. If the object is retrieved
+// successfully, the error will be nil.
+func (iter *ObjectSliceIter) Next() (plumbing.Object, error) {
+ if len(iter.series) == 0 {
+ return nil, io.EOF
+ }
+
+ obj := iter.series[0]
+ iter.series = iter.series[1:]
+
+ return obj, nil
+}
+
+// ForEach calls the cb function for each object contained in this iter until
+// an error happens or the end of the iter is reached. If ErrStop is returned
+// by cb, the iteration stops and no error is returned. The iterator is closed
+// in either case.
+func (iter *ObjectSliceIter) ForEach(cb func(plumbing.Object) error) error {
+ return ForEachIterator(iter, cb)
+}
+
+// Close releases any resources used by the iterator.
+func (iter *ObjectSliceIter) Close() {
+ iter.series = []plumbing.Object{}
+}
+
+// MultiObjectIter implements ObjectIter. It iterates over several ObjectIter
+// values one at a time, advancing to the next iterator once the current one
+// is exhausted.
+//
+// The MultiObjectIter must be closed with a call to Close() when it is no
+// longer needed.
+type MultiObjectIter struct {
+	iters []ObjectIter
+}
+
+// NewMultiObjectIter returns an object iterator for the given slice of
+// ObjectIters.
+func NewMultiObjectIter(iters []ObjectIter) ObjectIter {
+ return &MultiObjectIter{iters: iters}
+}
+
+// Next returns the next object from the iterator. When the current ObjectIter
+// reaches io.EOF, it is closed and removed, and iteration continues with the
+// next one.
+func (iter *MultiObjectIter) Next() (plumbing.Object, error) {
+ if len(iter.iters) == 0 {
+ return nil, io.EOF
+ }
+
+ obj, err := iter.iters[0].Next()
+ if err == io.EOF {
+ iter.iters[0].Close()
+ iter.iters = iter.iters[1:]
+ return iter.Next()
+ }
+
+ return obj, err
+}
+
+// ForEach calls the cb function for each object contained in this iter until
+// an error happens or the end of the iter is reached. If ErrStop is returned
+// by cb, the iteration stops and no error is returned. The iterator is closed
+// in either case.
+func (iter *MultiObjectIter) ForEach(cb func(plumbing.Object) error) error {
+ return ForEachIterator(iter, cb)
+}
+
+// Close releases any resources used by the iterator.
+func (iter *MultiObjectIter) Close() {
+ for _, i := range iter.iters {
+ i.Close()
+ }
+}
+
+type bareIterator interface {
+ Next() (plumbing.Object, error)
+ Close()
+}
+
+// ForEachIterator is a helper function for building iterators without the
+// need to rewrite the same ForEach function each time.
+func ForEachIterator(iter bareIterator, cb func(plumbing.Object) error) error {
+ defer iter.Close()
+ for {
+ obj, err := iter.Next()
+ if err != nil {
+ if err == io.EOF {
+ return nil
+ }
+
+ return err
+ }
+
+ if err := cb(obj); err != nil {
+ if err == ErrStop {
+ return nil
+ }
+
+ return err
+ }
+ }
+}
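+
+// For instance (editorial sketch, assuming plumbing.Object exposes its type
+// via Type()), ErrStop turns ForEach into a "find first" operation over any
+// of the iterators above:
+//
+//	var found plumbing.Object
+//	err := iter.ForEach(func(o plumbing.Object) error {
+//		if o.Type() == plumbing.CommitObject {
+//			found = o
+//			return ErrStop // stops the iteration; ForEach returns nil
+//		}
+//
+//		return nil
+//	})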
diff --git a/plumbing/storer/object_test.go b/plumbing/storer/object_test.go
new file mode 100644
index 0000000..a0a7755
--- /dev/null
+++ b/plumbing/storer/object_test.go
@@ -0,0 +1,150 @@
+package storer
+
+import (
+ "fmt"
+ "testing"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/src-d/go-git.v4/plumbing"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type ObjectSuite struct {
+ Objects []plumbing.Object
+ Hash []plumbing.Hash
+}
+
+var _ = Suite(&ObjectSuite{})
+
+func (s *ObjectSuite) SetUpSuite(c *C) {
+ s.Objects = []plumbing.Object{
+ s.buildObject([]byte("foo")),
+ s.buildObject([]byte("bar")),
+ }
+
+ for _, o := range s.Objects {
+ s.Hash = append(s.Hash, o.Hash())
+ }
+}
+
+func (s *ObjectSuite) TestMultiObjectIterNext(c *C) {
+ expected := []plumbing.Object{
+ &plumbing.MemoryObject{},
+ &plumbing.MemoryObject{},
+ &plumbing.MemoryObject{},
+ &plumbing.MemoryObject{},
+ &plumbing.MemoryObject{},
+ &plumbing.MemoryObject{},
+ }
+
+ iter := NewMultiObjectIter([]ObjectIter{
+ NewObjectSliceIter(expected[0:2]),
+ NewObjectSliceIter(expected[2:4]),
+		NewObjectSliceIter(expected[4:6]),
+ })
+
+ var i int
+ iter.ForEach(func(o plumbing.Object) error {
+ c.Assert(o, Equals, expected[i])
+ i++
+ return nil
+ })
+
+ iter.Close()
+}
+
+func (s *ObjectSuite) buildObject(content []byte) plumbing.Object {
+ o := &plumbing.MemoryObject{}
+ o.Write(content)
+
+ return o
+}
+
+func (s *ObjectSuite) TestObjectLookupIter(c *C) {
+ var count int
+
+ storage := &MockObjectStorage{s.Objects}
+ i := NewObjectLookupIter(storage, plumbing.CommitObject, s.Hash)
+ err := i.ForEach(func(o plumbing.Object) error {
+ c.Assert(o, NotNil)
+ c.Assert(o.Hash().String(), Equals, s.Hash[count].String())
+ count++
+ return nil
+ })
+
+ c.Assert(err, IsNil)
+ i.Close()
+}
+
+func (s *ObjectSuite) TestObjectSliceIter(c *C) {
+ var count int
+
+ i := NewObjectSliceIter(s.Objects)
+ err := i.ForEach(func(o plumbing.Object) error {
+ c.Assert(o, NotNil)
+ c.Assert(o.Hash().String(), Equals, s.Hash[count].String())
+ count++
+ return nil
+ })
+
+ c.Assert(count, Equals, 2)
+ c.Assert(err, IsNil)
+ c.Assert(i.series, HasLen, 0)
+}
+
+func (s *ObjectSuite) TestObjectSliceIterStop(c *C) {
+ i := NewObjectSliceIter(s.Objects)
+
+ var count = 0
+ err := i.ForEach(func(o plumbing.Object) error {
+ c.Assert(o, NotNil)
+ c.Assert(o.Hash().String(), Equals, s.Hash[count].String())
+ count++
+ return ErrStop
+ })
+
+ c.Assert(count, Equals, 1)
+ c.Assert(err, IsNil)
+}
+
+func (s *ObjectSuite) TestObjectSliceIterError(c *C) {
+ i := NewObjectSliceIter([]plumbing.Object{
+ s.buildObject([]byte("foo")),
+ })
+
+ err := i.ForEach(func(plumbing.Object) error {
+ return fmt.Errorf("a random error")
+ })
+
+ c.Assert(err, NotNil)
+}
+
+type MockObjectStorage struct {
+ db []plumbing.Object
+}
+
+func (o *MockObjectStorage) NewObject() plumbing.Object {
+ return nil
+}
+
+func (o *MockObjectStorage) SetObject(obj plumbing.Object) (plumbing.Hash, error) {
+ return plumbing.ZeroHash, nil
+}
+
+func (o *MockObjectStorage) Object(t plumbing.ObjectType, h plumbing.Hash) (plumbing.Object, error) {
+ for _, o := range o.db {
+ if o.Hash() == h {
+ return o, nil
+ }
+ }
+ return nil, plumbing.ErrObjectNotFound
+}
+
+func (o *MockObjectStorage) IterObjects(t plumbing.ObjectType) (ObjectIter, error) {
+ return nil, nil
+}
+
+func (o *MockObjectStorage) Begin() Transaction {
+ return nil
+}
diff --git a/plumbing/storer/reference.go b/plumbing/storer/reference.go
new file mode 100644
index 0000000..5e818c6
--- /dev/null
+++ b/plumbing/storer/reference.go
@@ -0,0 +1,109 @@
+package storer
+
+import (
+ "errors"
+ "io"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+)
+
+// MaxResolveRecursion is the maximum number of symbolic references that
+// ResolveReference will follow before returning ErrMaxResolveRecursion.
+const MaxResolveRecursion = 1024
+
+// ErrMaxResolveRecursion is returned by ResolveReference if MaxResolveRecursion
+// is exceeded.
+var ErrMaxResolveRecursion = errors.New("max. recursion level reached")
+
+// ReferenceStorer is a generic storer of references.
+type ReferenceStorer interface {
+ SetReference(*plumbing.Reference) error
+ Reference(plumbing.ReferenceName) (*plumbing.Reference, error)
+ IterReferences() (ReferenceIter, error)
+}
+
+// ReferenceIter is a generic closable interface for iterating over references.
+type ReferenceIter interface {
+ Next() (*plumbing.Reference, error)
+ ForEach(func(*plumbing.Reference) error) error
+ Close()
+}
+
+// ReferenceSliceIter implements ReferenceIter. It iterates over a series of
+// references stored in a slice and yields each one in turn when Next() is
+// called.
+//
+// The ReferenceSliceIter must be closed with a call to Close() when it is no
+// longer needed.
+type ReferenceSliceIter struct {
+ series []*plumbing.Reference
+ pos int
+}
+
+// NewReferenceSliceIter returns a reference iterator for the given slice of
+// objects.
+func NewReferenceSliceIter(series []*plumbing.Reference) *ReferenceSliceIter {
+ return &ReferenceSliceIter{
+ series: series,
+ }
+}
+
+// Next returns the next reference from the iterator. If the iterator has
+// reached the end it will return io.EOF as an error.
+func (iter *ReferenceSliceIter) Next() (*plumbing.Reference, error) {
+ if iter.pos >= len(iter.series) {
+ return nil, io.EOF
+ }
+
+ obj := iter.series[iter.pos]
+ iter.pos++
+ return obj, nil
+}
+
+// ForEach calls the cb function for each reference contained in this iter
+// until an error happens or the end of the iter is reached. If ErrStop is
+// returned by cb, the iteration stops and no error is returned. The iterator
+// is closed in either case.
+func (iter *ReferenceSliceIter) ForEach(cb func(*plumbing.Reference) error) error {
+ defer iter.Close()
+ for _, r := range iter.series {
+ if err := cb(r); err != nil {
+ if err == ErrStop {
+ return nil
+ }
+
+			return err
+ }
+ }
+
+ return nil
+}
+
+// Close releases any resources used by the iterator.
+func (iter *ReferenceSliceIter) Close() {
+ iter.pos = len(iter.series)
+}
+
+// ResolveReference resolves a SymbolicReference to a HashReference by
+// following symbolic targets until a hash reference is found.
+func ResolveReference(s ReferenceStorer, n plumbing.ReferenceName) (*plumbing.Reference, error) {
+ r, err := s.Reference(n)
+ if err != nil || r == nil {
+ return r, err
+ }
+ return resolveReference(s, r, 0)
+}
+
+func resolveReference(s ReferenceStorer, r *plumbing.Reference, recursion int) (*plumbing.Reference, error) {
+ if r.Type() != plumbing.SymbolicReference {
+ return r, nil
+ }
+
+ if recursion > MaxResolveRecursion {
+ return nil, ErrMaxResolveRecursion
+ }
+
+ t, err := s.Reference(r.Target())
+ if err != nil {
+ return nil, err
+ }
+
+ recursion++
+ return resolveReference(s, t, recursion)
+}
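+
+// An editorial sketch of the usual call (not part of this patch), assuming a
+// storer s that holds a symbolic HEAD pointing at refs/heads/master:
+//
+//	// HEAD -> refs/heads/master -> <hash> (two lookups in s)
+//	head, err := ResolveReference(s, plumbing.ReferenceName("HEAD"))
+//	if err == nil {
+//		fmt.Println(head.Hash()) // hash of the resolved reference
+//	}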
diff --git a/plumbing/storer/reference_test.go b/plumbing/storer/reference_test.go
new file mode 100644
index 0000000..3014df5
--- /dev/null
+++ b/plumbing/storer/reference_test.go
@@ -0,0 +1,67 @@
+package storer
+
+import (
+ "io"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/src-d/go-git.v4/plumbing"
+)
+
+type ReferenceSuite struct{}
+
+var _ = Suite(&ReferenceSuite{})
+
+func (s *ReferenceSuite) TestReferenceSliceIterNext(c *C) {
+ slice := []*plumbing.Reference{
+ plumbing.NewReferenceFromStrings("foo", "foo"),
+ plumbing.NewReferenceFromStrings("bar", "bar"),
+ }
+
+ i := NewReferenceSliceIter(slice)
+ foo, err := i.Next()
+ c.Assert(err, IsNil)
+ c.Assert(foo == slice[0], Equals, true)
+
+ bar, err := i.Next()
+ c.Assert(err, IsNil)
+ c.Assert(bar == slice[1], Equals, true)
+
+ empty, err := i.Next()
+ c.Assert(err, Equals, io.EOF)
+ c.Assert(empty, IsNil)
+}
+
+func (s *ReferenceSuite) TestReferenceSliceIterForEach(c *C) {
+ slice := []*plumbing.Reference{
+ plumbing.NewReferenceFromStrings("foo", "foo"),
+ plumbing.NewReferenceFromStrings("bar", "bar"),
+ }
+
+ i := NewReferenceSliceIter(slice)
+ var count int
+ i.ForEach(func(r *plumbing.Reference) error {
+ c.Assert(r == slice[count], Equals, true)
+ count++
+ return nil
+ })
+
+ c.Assert(count, Equals, 2)
+}
+
+func (s *ReferenceSuite) TestReferenceSliceIterForEachStop(c *C) {
+ slice := []*plumbing.Reference{
+ plumbing.NewReferenceFromStrings("foo", "foo"),
+ plumbing.NewReferenceFromStrings("bar", "bar"),
+ }
+
+ i := NewReferenceSliceIter(slice)
+
+ var count int
+ i.ForEach(func(r *plumbing.Reference) error {
+ c.Assert(r == slice[count], Equals, true)
+ count++
+ return ErrStop
+ })
+
+ c.Assert(count, Equals, 1)
+}