Diffstat:
-rw-r--r--  go.mod                                     |  22
-rw-r--r--  go.sum                                     |  52
-rw-r--r--  plumbing/format/idxfile/idxfile.go         |  30
-rw-r--r--  plumbing/format/index/decoder.go           |  37
-rw-r--r--  plumbing/format/packfile/common.go         |  10
-rw-r--r--  plumbing/format/packfile/packfile.go       |   5
-rw-r--r--  plumbing/format/packfile/scanner.go        | 189
-rw-r--r--  plumbing/format/packfile/scanner_test.go   |  49
-rw-r--r--  plumbing/object/commit.go                  |   4
-rw-r--r--  plumbing/object/common.go                  |  12
-rw-r--r--  plumbing/object/tag.go                     |   6
-rw-r--r--  plumbing/object/tag_test.go                |  72
-rw-r--r--  plumbing/object/tree.go                    |  23
-rw-r--r--  storage/filesystem/dotgit/dotgit.go        |  20
-rw-r--r--  storage/filesystem/index.go                |   3
-rw-r--r--  storage/filesystem/object.go               | 145
-rw-r--r--  storage/filesystem/object_test.go          |  18
-rw-r--r--  storage/filesystem/storage.go              |   4
-rw-r--r--  utils/binary/read.go                       |  15
-rw-r--r--  utils/binary/read_test.go                  |  10
20 files changed, 506 insertions, 220 deletions
diff --git a/go.mod b/go.mod
index 36a1bed..1326235 100644
--- a/go.mod
+++ b/go.mod
@@ -4,26 +4,26 @@ require (
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 // indirect
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/emirpasic/gods v1.9.0
+ github.com/emirpasic/gods v1.12.0
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
- github.com/gliderlabs/ssh v0.1.1
+ github.com/gliderlabs/ssh v0.1.3
github.com/google/go-cmp v0.2.0
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99
github.com/jessevdk/go-flags v1.4.0
github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e
- github.com/mitchellh/go-homedir v1.0.0
+ github.com/mitchellh/go-homedir v1.1.0
github.com/pelletier/go-buffruneio v0.2.0 // indirect
- github.com/pkg/errors v0.8.0 // indirect
- github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/pkg/errors v0.8.1 // indirect
github.com/sergi/go-diff v1.0.0
github.com/src-d/gcfg v1.4.0
- github.com/stretchr/testify v1.2.2 // indirect
- github.com/xanzy/ssh-agent v0.2.0
- golang.org/x/crypto v0.0.0-20180904163835-0709b304e793
- golang.org/x/net v0.0.0-20180906233101-161cd47e91fd // indirect
+ github.com/stretchr/testify v1.3.0 // indirect
+ github.com/xanzy/ssh-agent v0.2.1
+ golang.org/x/crypto v0.0.0-20190422183909-d864b10871cd
+ golang.org/x/net v0.0.0-20190420063019-afa5a82059c6 // indirect
+ golang.org/x/sys v0.0.0-20190422165155-953cdadca894 // indirect
golang.org/x/text v0.3.0
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127
- gopkg.in/src-d/go-billy.v4 v4.2.1
- gopkg.in/src-d/go-git-fixtures.v3 v3.1.1
+ gopkg.in/src-d/go-billy.v4 v4.3.0
+ gopkg.in/src-d/go-git-fixtures.v3 v3.5.0
gopkg.in/warnings.v0 v0.1.2 // indirect
)
diff --git a/go.sum b/go.sum
index 98ba1d4..e944896 100644
--- a/go.sum
+++ b/go.sum
@@ -2,14 +2,15 @@ github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBb
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/emirpasic/gods v1.9.0 h1:rUF4PuzEjMChMiNsVjdI+SyLu7rEqpQ5reNFnhC7oFo=
-github.com/emirpasic/gods v1.9.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
+github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=
+github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
-github.com/gliderlabs/ssh v0.1.1 h1:j3L6gSLQalDETeEg/Jg0mGY0/y/N6zI2xX1978P0Uqw=
-github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
+github.com/gliderlabs/ssh v0.1.3 h1:cBU46h1lYQk5f2Z+jZbewFKy+1zzE2aUX/ilcPDAm9M=
+github.com/gliderlabs/ssh v0.1.3/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
@@ -23,37 +24,44 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/pelletier/go-buffruneio v0.2.0 h1:U4t4R6YkofJ5xHm3dJzuRpPZ0mr5MMCoAWooScCR7aA=
github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
-github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
-github.com/src-d/gcfg v1.3.0 h1:2BEDr8r0I0b8h/fOqwtxCEiq2HJu8n2JGZJQFGXWLjg=
-github.com/src-d/gcfg v1.3.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4=
github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
-github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/xanzy/ssh-agent v0.2.0 h1:Adglfbi5p9Z0BmK2oKU9nTG+zKfniSfnaMYB+ULd+Ro=
-github.com/xanzy/ssh-agent v0.2.0/go.mod h1:0NyE30eGUDliuLEHJgYte/zncp2zdTStcOnWhgSqHD8=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70=
+github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=
+golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190422183909-d864b10871cd h1:sMHc2rZHuzQmrbVoSpt9HgerkXPyIeCSO6k0zUMGfFk=
+golang.org/x/crypto v0.0.0-20190422183909-d864b10871cd/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190420063019-afa5a82059c6 h1:HdqqaWmYAUI7/dmByKKEw+yxDksGSo+9GjkUc9Zp34E=
+golang.org/x/net v0.0.0-20190420063019-afa5a82059c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9 h1:lkiLiLBHGoH3XnqSLUIaBsilGMUjI+Uy2Xu2JLUtTas=
golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/src-d/go-billy.v4 v4.2.1 h1:omN5CrMrMcQ+4I8bJ0wEhOBPanIRWzFC953IiXKdYzo=
-gopkg.in/src-d/go-billy.v4 v4.2.1/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk=
-gopkg.in/src-d/go-git-fixtures.v3 v3.1.1 h1:XWW/s5W18RaJpmo1l0IYGqXKuJITWRFuA45iOf1dKJs=
-gopkg.in/src-d/go-git-fixtures.v3 v3.1.1/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
+gopkg.in/src-d/go-billy.v4 v4.3.0 h1:KtlZ4c1OWbIs4jCv5ZXrTqG8EQocr0g/d4DjNg70aek=
+gopkg.in/src-d/go-billy.v4 v4.3.0/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk=
+gopkg.in/src-d/go-git-fixtures.v3 v3.5.0 h1:ivZFOIltbce2Mo8IjzUHAFoq/IylO9WHhNOAJK+LsJg=
+gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
diff --git a/plumbing/format/idxfile/idxfile.go b/plumbing/format/idxfile/idxfile.go
index 5fed278..26cca59 100644
--- a/plumbing/format/idxfile/idxfile.go
+++ b/plumbing/format/idxfile/idxfile.go
@@ -55,7 +55,8 @@ type MemoryIndex struct {
PackfileChecksum [20]byte
IdxChecksum [20]byte
- offsetHash map[int64]plumbing.Hash
+ offsetHash map[int64]plumbing.Hash
+ offsetHashIsFull bool
}
var _ Index = (*MemoryIndex)(nil)
@@ -121,7 +122,17 @@ func (idx *MemoryIndex) FindOffset(h plumbing.Hash) (int64, error) {
return 0, plumbing.ErrObjectNotFound
}
- return idx.getOffset(k, i)
+ offset, err := idx.getOffset(k, i)
+
+ if !idx.offsetHashIsFull {
+ // Save the offset for reverse lookup
+ if idx.offsetHash == nil {
+ idx.offsetHash = make(map[int64]plumbing.Hash)
+ }
+ idx.offsetHash[offset] = h
+ }
+
+ return offset, err
}
const isO64Mask = uint64(1) << 31
@@ -167,14 +178,24 @@ func (idx *MemoryIndex) getCRC32(firstLevel, secondLevel int) (uint32, error) {
// FindHash implements the Index interface.
func (idx *MemoryIndex) FindHash(o int64) (plumbing.Hash, error) {
+ var hash plumbing.Hash
+ var ok bool
+
+ if idx.offsetHash != nil {
+ if hash, ok = idx.offsetHash[o]; ok {
+ return hash, nil
+ }
+ }
+
// Lazily generate the reverse offset/hash map if required.
- if idx.offsetHash == nil {
+ if !idx.offsetHashIsFull || idx.offsetHash == nil {
if err := idx.genOffsetHash(); err != nil {
return plumbing.ZeroHash, err
}
+
+ hash, ok = idx.offsetHash[o]
}
- hash, ok := idx.offsetHash[o]
if !ok {
return plumbing.ZeroHash, plumbing.ErrObjectNotFound
}
@@ -190,6 +211,7 @@ func (idx *MemoryIndex) genOffsetHash() error {
}
idx.offsetHash = make(map[int64]plumbing.Hash, count)
+ idx.offsetHashIsFull = true
iter, err := idx.Entries()
if err != nil {
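
The idxfile change above makes FindOffset memoize each offset/hash pair it resolves, so a later FindHash for one of those offsets is served from that small map instead of generating the full reverse table. A minimal usage sketch of that round trip, assuming go-git v4 import paths; the .idx path and the object hash are placeholders, not values taken from this patch:

package main

import (
	"fmt"
	"os"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
)

func main() {
	// Placeholder path: any pack index belonging to the repository.
	f, err := os.Open("/path/to/repo/.git/objects/pack/pack-XXXX.idx")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	idx := idxfile.NewMemoryIndex()
	if err := idxfile.NewDecoder(f).Decode(idx); err != nil {
		fmt.Println(err)
		return
	}

	// Placeholder hash: any object known to live in this pack.
	h := plumbing.NewHash("0123456789abcdef0123456789abcdef01234567")

	// FindOffset now records the offset/hash pair it resolves...
	offset, err := idx.FindOffset(h)
	if err != nil {
		fmt.Println(err)
		return
	}

	// ...so this reverse lookup is answered from the memoized map instead of
	// forcing the whole offset-to-hash table to be generated.
	hash, err := idx.FindHash(offset)
	fmt.Println(offset, hash, err)
}
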
diff --git a/plumbing/format/index/decoder.go b/plumbing/format/index/decoder.go
index ac57d08..98f92fd 100644
--- a/plumbing/format/index/decoder.go
+++ b/plumbing/format/index/decoder.go
@@ -1,6 +1,7 @@
package index
import (
+ "bufio"
"bytes"
"crypto/sha1"
"errors"
@@ -42,14 +43,17 @@ type Decoder struct {
r io.Reader
hash hash.Hash
lastEntry *Entry
+
+ extReader *bufio.Reader
}
// NewDecoder returns a new decoder that reads from r.
func NewDecoder(r io.Reader) *Decoder {
h := sha1.New()
return &Decoder{
- r: io.TeeReader(r, h),
- hash: h,
+ r: io.TeeReader(r, h),
+ hash: h,
+ extReader: bufio.NewReader(nil),
}
}
@@ -184,11 +188,9 @@ func (d *Decoder) doReadEntryNameV4() (string, error) {
func (d *Decoder) doReadEntryName(len uint16) (string, error) {
name := make([]byte, len)
- if err := binary.Read(d.r, &name); err != nil {
- return "", err
- }
+ _, err := io.ReadFull(d.r, name[:])
- return string(name), nil
+ return string(name), err
}
// Index entries are padded out to the next 8 byte alignment
@@ -279,20 +281,21 @@ func (d *Decoder) readExtension(idx *Index, header []byte) error {
return nil
}
-func (d *Decoder) getExtensionReader() (io.Reader, error) {
+func (d *Decoder) getExtensionReader() (*bufio.Reader, error) {
len, err := binary.ReadUint32(d.r)
if err != nil {
return nil, err
}
- return &io.LimitedReader{R: d.r, N: int64(len)}, nil
+ d.extReader.Reset(&io.LimitedReader{R: d.r, N: int64(len)})
+ return d.extReader, nil
}
func (d *Decoder) readChecksum(expected []byte, alreadyRead [4]byte) error {
var h plumbing.Hash
copy(h[:4], alreadyRead[:])
- if err := binary.Read(d.r, h[4:]); err != nil {
+ if _, err := io.ReadFull(d.r, h[4:]); err != nil {
return err
}
@@ -326,7 +329,7 @@ func validateHeader(r io.Reader) (version uint32, err error) {
}
type treeExtensionDecoder struct {
- r io.Reader
+ r *bufio.Reader
}
func (d *treeExtensionDecoder) Decode(t *Tree) error {
@@ -386,16 +389,13 @@ func (d *treeExtensionDecoder) readEntry() (*TreeEntry, error) {
}
e.Trees = i
-
- if err := binary.Read(d.r, &e.Hash); err != nil {
- return nil, err
- }
+ _, err = io.ReadFull(d.r, e.Hash[:])
return e, nil
}
type resolveUndoDecoder struct {
- r io.Reader
+ r *bufio.Reader
}
func (d *resolveUndoDecoder) Decode(ru *ResolveUndo) error {
@@ -433,7 +433,7 @@ func (d *resolveUndoDecoder) readEntry() (*ResolveUndoEntry, error) {
for s := range e.Stages {
var hash plumbing.Hash
- if err := binary.Read(d.r, hash[:]); err != nil {
+ if _, err := io.ReadFull(d.r, hash[:]); err != nil {
return nil, err
}
@@ -462,7 +462,7 @@ func (d *resolveUndoDecoder) readStage(e *ResolveUndoEntry, s Stage) error {
}
type endOfIndexEntryDecoder struct {
- r io.Reader
+ r *bufio.Reader
}
func (d *endOfIndexEntryDecoder) Decode(e *EndOfIndexEntry) error {
@@ -472,5 +472,6 @@ func (d *endOfIndexEntryDecoder) Decode(e *EndOfIndexEntry) error {
return err
}
- return binary.Read(d.r, &e.Hash)
+ _, err = io.ReadFull(d.r, e.Hash[:])
+ return err
}
diff --git a/plumbing/format/packfile/common.go b/plumbing/format/packfile/common.go
index 0d9ed54..f82c1ab 100644
--- a/plumbing/format/packfile/common.go
+++ b/plumbing/format/packfile/common.go
@@ -2,6 +2,7 @@ package packfile
import (
"bytes"
+ "compress/zlib"
"io"
"sync"
@@ -66,3 +67,12 @@ var bufPool = sync.Pool{
return bytes.NewBuffer(nil)
},
}
+
+var zlibInitBytes = []byte{0x78, 0x9c, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01}
+
+var zlibReaderPool = sync.Pool{
+ New: func() interface{} {
+ r, _ := zlib.NewReader(bytes.NewReader(zlibInitBytes))
+ return r
+ },
+}
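
The pool above is primed with zlibInitBytes, a tiny but valid zlib stream, so that zlib.NewReader never fails inside New; callers then point the pooled reader at the real deflate stream through zlib.Resetter, as copyObject does in scanner.go below. A self-contained sketch of that Get/Reset/Put cycle; the decompress helper and its input are illustrative, not part of the patch:

package main

import (
	"bytes"
	"compress/zlib"
	"fmt"
	"io"
	"sync"
)

// A tiny but valid zlib stream, mirroring zlibInitBytes above; it exists only
// so pooled readers can be constructed without error.
var zlibInit = []byte{0x78, 0x9c, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01}

var zlibPool = sync.Pool{
	New: func() interface{} {
		r, _ := zlib.NewReader(bytes.NewReader(zlibInit))
		return r
	},
}

// decompress shows the pattern: take a reader from the pool, Reset it onto
// the real stream, and return it to the pool when done.
func decompress(compressed []byte) ([]byte, error) {
	zr := zlibPool.Get().(io.ReadCloser)
	defer zlibPool.Put(zr)

	// Readers returned by zlib.NewReader also implement zlib.Resetter.
	if err := zr.(zlib.Resetter).Reset(bytes.NewReader(compressed), nil); err != nil {
		return nil, err
	}
	defer zr.Close()

	var out bytes.Buffer
	if _, err := io.Copy(&out, zr); err != nil {
		return nil, err
	}
	return out.Bytes(), nil
}

func main() {
	var buf bytes.Buffer
	zw := zlib.NewWriter(&buf)
	zw.Write([]byte("hello packfile"))
	zw.Close()

	out, err := decompress(buf.Bytes())
	fmt.Println(string(out), err)
}
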
diff --git a/plumbing/format/packfile/packfile.go b/plumbing/format/packfile/packfile.go
index def6e99..f528073 100644
--- a/plumbing/format/packfile/packfile.go
+++ b/plumbing/format/packfile/packfile.go
@@ -448,6 +448,11 @@ func (p *Packfile) ID() (plumbing.Hash, error) {
return hash, nil
}
+// Scanner returns the packfile's Scanner
+func (p *Packfile) Scanner() *Scanner {
+ return p.s
+}
+
// Close the packfile and its resources.
func (p *Packfile) Close() error {
closer, ok := p.file.(io.Closer)
diff --git a/plumbing/format/packfile/scanner.go b/plumbing/format/packfile/scanner.go
index 614b0d1..7b44192 100644
--- a/plumbing/format/packfile/scanner.go
+++ b/plumbing/format/packfile/scanner.go
@@ -39,8 +39,7 @@ type ObjectHeader struct {
}
type Scanner struct {
- r reader
- zr readerResetter
+ r *scannerReader
crc hash.Hash32
// pendingObject is used to detect if an object has been read, or still
@@ -56,19 +55,27 @@ type Scanner struct {
// NewScanner returns a new Scanner based on a reader. If the given reader
// implements io.ReadSeeker, the Scanner will also be seekable.
func NewScanner(r io.Reader) *Scanner {
- seeker, ok := r.(io.ReadSeeker)
- if !ok {
- seeker = &trackableReader{Reader: r}
- }
+ _, ok := r.(io.ReadSeeker)
crc := crc32.NewIEEE()
return &Scanner{
- r: newTeeReader(newByteReadSeeker(seeker), crc),
+ r: newScannerReader(r, crc),
crc: crc,
IsSeekable: ok,
}
}
+func (s *Scanner) Reset(r io.Reader) {
+ _, ok := r.(io.ReadSeeker)
+
+ s.r.Reset(r)
+ s.crc.Reset()
+ s.IsSeekable = ok
+ s.pendingObject = nil
+ s.version = 0
+ s.objects = 0
+}
+
// Header reads the whole packfile header (signature, version and object count).
// It returns the version and the object count and performs checks on the
// validity of the signature and the version fields.
@@ -182,8 +189,7 @@ func (s *Scanner) NextObjectHeader() (*ObjectHeader, error) {
// nextObjectHeader returns the ObjectHeader for the next object in the reader
// without the Offset field
func (s *Scanner) nextObjectHeader() (*ObjectHeader, error) {
- defer s.Flush()
-
+ s.r.Flush()
s.crc.Reset()
h := &ObjectHeader{}
@@ -304,35 +310,29 @@ func (s *Scanner) readLength(first byte) (int64, error) {
// NextObject writes the content of the next object into the reader, returns
// the number of bytes written, the CRC32 of the content and an error, if any
func (s *Scanner) NextObject(w io.Writer) (written int64, crc32 uint32, err error) {
- defer s.crc.Reset()
-
s.pendingObject = nil
written, err = s.copyObject(w)
- s.Flush()
+
+ s.r.Flush()
crc32 = s.crc.Sum32()
+ s.crc.Reset()
+
return
}
// copyObject reads and writes a non-deltified object
// from its zlib stream in an object entry in the packfile.
func (s *Scanner) copyObject(w io.Writer) (n int64, err error) {
- if s.zr == nil {
- var zr io.ReadCloser
- zr, err = zlib.NewReader(s.r)
- if err != nil {
- return 0, fmt.Errorf("zlib initialization error: %s", err)
- }
+ zr := zlibReaderPool.Get().(io.ReadCloser)
+ defer zlibReaderPool.Put(zr)
- s.zr = zr.(readerResetter)
- } else {
- if err = s.zr.Reset(s.r, nil); err != nil {
- return 0, fmt.Errorf("zlib reset error: %s", err)
- }
+ if err = zr.(zlib.Resetter).Reset(s.r, nil); err != nil {
+ return 0, fmt.Errorf("zlib reset error: %s", err)
}
- defer ioutil.CheckClose(s.zr, &err)
+ defer ioutil.CheckClose(zr, &err)
buf := byteSlicePool.Get().([]byte)
- n, err = io.CopyBuffer(w, s.zr, buf)
+ n, err = io.CopyBuffer(w, zr, buf)
byteSlicePool.Put(buf)
return
}
@@ -378,110 +378,89 @@ func (s *Scanner) Close() error {
return err
}
-// Flush finishes writing the buffer to crc hasher in case we are using
-// a teeReader. Otherwise it is a no-op.
+// Flush is a no-op (deprecated)
func (s *Scanner) Flush() error {
- tee, ok := s.r.(*teeReader)
- if ok {
- return tee.Flush()
- }
return nil
}
-type trackableReader struct {
- count int64
- io.Reader
+// scannerReader has the following characteristics:
+// - Provides an io.ReadSeeker implementation for bufio.Reader, when the
+// underlying reader supports it.
+// - Keeps track of the current read position, for when the underlying reader
+// isn't an io.ReadSeeker, but we still want to know the current offset.
+// - Writes to the hash writer what it reads, with the aid of a smaller buffer.
+// The buffer helps avoid a performance penalty for performing small writes
+// to the crc32 hash writer.
+type scannerReader struct {
+ reader io.Reader
+ crc io.Writer
+ rbuf *bufio.Reader
+ wbuf *bufio.Writer
+ offset int64
}
-// Read reads up to len(p) bytes into p.
-func (r *trackableReader) Read(p []byte) (n int, err error) {
- n, err = r.Reader.Read(p)
- r.count += int64(n)
-
- return
-}
-
-// Seek only supports io.SeekCurrent, any other operation fails
-func (r *trackableReader) Seek(offset int64, whence int) (int64, error) {
- if whence != io.SeekCurrent {
- return -1, ErrSeekNotSupported
+func newScannerReader(r io.Reader, h io.Writer) *scannerReader {
+ sr := &scannerReader{
+ rbuf: bufio.NewReader(nil),
+ wbuf: bufio.NewWriterSize(nil, 64),
+ crc: h,
}
+ sr.Reset(r)
- return r.count, nil
+ return sr
}
-func newByteReadSeeker(r io.ReadSeeker) *bufferedSeeker {
- return &bufferedSeeker{
- r: r,
- Reader: *bufio.NewReader(r),
- }
-}
+func (r *scannerReader) Reset(reader io.Reader) {
+ r.reader = reader
+ r.rbuf.Reset(r.reader)
+ r.wbuf.Reset(r.crc)
-type bufferedSeeker struct {
- r io.ReadSeeker
- bufio.Reader
-}
-
-func (r *bufferedSeeker) Seek(offset int64, whence int) (int64, error) {
- if whence == io.SeekCurrent && offset == 0 {
- current, err := r.r.Seek(offset, whence)
- if err != nil {
- return current, err
- }
-
- return current - int64(r.Buffered()), nil
+ r.offset = 0
+ if seeker, ok := r.reader.(io.ReadSeeker); ok {
+ r.offset, _ = seeker.Seek(0, io.SeekCurrent)
}
-
- defer r.Reader.Reset(r.r)
- return r.r.Seek(offset, whence)
}
-type readerResetter interface {
- io.ReadCloser
- zlib.Resetter
-}
+func (r *scannerReader) Read(p []byte) (n int, err error) {
+ n, err = r.rbuf.Read(p)
-type reader interface {
- io.Reader
- io.ByteReader
- io.Seeker
+ r.offset += int64(n)
+ if _, err := r.wbuf.Write(p[:n]); err != nil {
+ return n, err
+ }
+ return
}
-type teeReader struct {
- reader
- w hash.Hash32
- bufWriter *bufio.Writer
+func (r *scannerReader) ReadByte() (b byte, err error) {
+ b, err = r.rbuf.ReadByte()
+ if err == nil {
+ r.offset++
+ return b, r.wbuf.WriteByte(b)
+ }
+ return
}
-func newTeeReader(r reader, h hash.Hash32) *teeReader {
- return &teeReader{
- reader: r,
- w: h,
- bufWriter: bufio.NewWriter(h),
- }
+func (r *scannerReader) Flush() error {
+ return r.wbuf.Flush()
}
-func (r *teeReader) Read(p []byte) (n int, err error) {
- r.Flush()
+// Seek seeks to a location. If the underlying reader is not an io.ReadSeeker,
+// then only whence=io.SeekCurrent is supported; any other operation fails.
+func (r *scannerReader) Seek(offset int64, whence int) (int64, error) {
+ var err error
- n, err = r.reader.Read(p)
- if n > 0 {
- if n, err := r.w.Write(p[:n]); err != nil {
- return n, err
+ if seeker, ok := r.reader.(io.ReadSeeker); !ok {
+ if whence != io.SeekCurrent || offset != 0 {
+ return -1, ErrSeekNotSupported
+ }
+ } else {
+ if whence == io.SeekCurrent && offset == 0 {
+ return r.offset, nil
}
- }
- return
-}
-func (r *teeReader) ReadByte() (b byte, err error) {
- b, err = r.reader.ReadByte()
- if err == nil {
- return b, r.bufWriter.WriteByte(b)
+ r.offset, err = seeker.Seek(offset, whence)
+ r.rbuf.Reset(r.reader)
}
- return
-}
-
-func (r *teeReader) Flush() (err error) {
- return r.bufWriter.Flush()
+ return r.offset, err
}
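
Scanner.Reset above lets a single Scanner, together with its buffered scannerReader and CRC hasher, be reused across packfile streams instead of allocating a new Scanner per pack. A sketch of the intended call pattern under go-git v4 import paths; countObjects is a hypothetical helper and the pack path is a placeholder:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"

	"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
)

// countObjects walks every object in a packfile stream. Because it starts by
// calling Reset, the same Scanner (and its internal buffers and CRC hasher)
// can be handed to it repeatedly for different packs.
func countObjects(s *packfile.Scanner, r io.Reader) (uint32, error) {
	s.Reset(r)

	_, objects, err := s.Header()
	if err != nil {
		return 0, err
	}

	for i := uint32(0); i < objects; i++ {
		if _, err := s.NextObjectHeader(); err != nil {
			return 0, err
		}
		// Discard the (possibly deltified) object body to advance the stream.
		if _, _, err := s.NextObject(ioutil.Discard); err != nil {
			return 0, err
		}
	}

	return objects, nil
}

func main() {
	// Placeholder path: any packfile on disk.
	f, err := os.Open("/path/to/repo/.git/objects/pack/pack-XXXX.pack")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	s := packfile.NewScanner(f)
	n, err := countObjects(s, f)
	fmt.Println(n, err)
}
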
diff --git a/plumbing/format/packfile/scanner_test.go b/plumbing/format/packfile/scanner_test.go
index 091b457..a401d6d 100644
--- a/plumbing/format/packfile/scanner_test.go
+++ b/plumbing/format/packfile/scanner_test.go
@@ -135,6 +135,55 @@ func (s *ScannerSuite) TestSeekObjectHeaderNonSeekable(c *C) {
c.Assert(err, Equals, ErrSeekNotSupported)
}
+func (s *ScannerSuite) TestReaderReset(c *C) {
+ r := fixtures.Basic().One().Packfile()
+ p := NewScanner(r)
+
+ version, objects, err := p.Header()
+ c.Assert(version, Equals, VersionSupported)
+ c.Assert(objects, Equals, uint32(31))
+
+ h, err := p.SeekObjectHeader(expectedHeadersOFS[0].Offset)
+ c.Assert(err, IsNil)
+ c.Assert(h, DeepEquals, &expectedHeadersOFS[0])
+
+ p.Reset(r)
+ c.Assert(p.pendingObject, IsNil)
+ c.Assert(p.version, Equals, uint32(0))
+ c.Assert(p.objects, Equals, uint32(0))
+ c.Assert(p.r.reader, Equals, r)
+ c.Assert(p.r.offset > expectedHeadersOFS[0].Offset, Equals, true)
+
+ p.Reset(bytes.NewReader(nil))
+ c.Assert(p.r.offset, Equals, int64(0))
+}
+
+func (s *ScannerSuite) TestReaderResetSeeks(c *C) {
+ r := fixtures.Basic().One().Packfile()
+
+ // seekable
+ p := NewScanner(r)
+ c.Assert(p.IsSeekable, Equals, true)
+ h, err := p.SeekObjectHeader(expectedHeadersOFS[0].Offset)
+ c.Assert(err, IsNil)
+ c.Assert(h, DeepEquals, &expectedHeadersOFS[0])
+
+ // reset with seekable
+ p.Reset(r)
+ c.Assert(p.IsSeekable, Equals, true)
+ h, err = p.SeekObjectHeader(expectedHeadersOFS[1].Offset)
+ c.Assert(err, IsNil)
+ c.Assert(h, DeepEquals, &expectedHeadersOFS[1])
+
+ // reset with non-seekable
+ f := fixtures.Basic().ByTag("ref-delta").One()
+ p.Reset(io.MultiReader(f.Packfile()))
+ c.Assert(p.IsSeekable, Equals, false)
+
+ _, err = p.SeekObjectHeader(expectedHeadersOFS[4].Offset)
+ c.Assert(err, Equals, ErrSeekNotSupported)
+}
+
var expectedHeadersOFS = []ObjectHeader{
{Type: plumbing.CommitObject, Offset: 12, Length: 254},
{Type: plumbing.OFSDeltaObject, Offset: 186, Length: 93, OffsetReference: 12},
diff --git a/plumbing/object/commit.go b/plumbing/object/commit.go
index b569d3c..511242d 100644
--- a/plumbing/object/commit.go
+++ b/plumbing/object/commit.go
@@ -171,7 +171,9 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
}
defer ioutil.CheckClose(reader, &err)
- r := bufio.NewReader(reader)
+ r := bufPool.Get().(*bufio.Reader)
+ defer bufPool.Put(r)
+ r.Reset(reader)
var message bool
var pgpsig bool
diff --git a/plumbing/object/common.go b/plumbing/object/common.go
new file mode 100644
index 0000000..3591f5f
--- /dev/null
+++ b/plumbing/object/common.go
@@ -0,0 +1,12 @@
+package object
+
+import (
+ "bufio"
+ "sync"
+)
+
+var bufPool = sync.Pool{
+ New: func() interface{} {
+ return bufio.NewReader(nil)
+ },
+}
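
This pool backs the Decode changes in commit.go above and in tag.go and tree.go below: each decoder takes a *bufio.Reader from the pool, points it at the object's reader with Reset, and returns it when done, instead of allocating a fresh bufio.Reader per object. A self-contained sketch of that pattern; the firstLine helper and its input are illustrative only:

package main

import (
	"bufio"
	"fmt"
	"strings"
	"sync"
)

// Same shape as the pool added above: pooled *bufio.Reader values that are
// re-pointed at each new source via Reset instead of being reallocated.
var bufPool = sync.Pool{
	New: func() interface{} {
		return bufio.NewReader(nil)
	},
}

// firstLine shows the Get/Reset/Put cycle the Decode methods now follow.
func firstLine(src string) (string, error) {
	r := bufPool.Get().(*bufio.Reader)
	defer bufPool.Put(r)
	r.Reset(strings.NewReader(src))

	line, err := r.ReadString('\n')
	return strings.TrimRight(line, "\n"), err
}

func main() {
	line, _ := firstLine("tree 7dba2f128d1298e385b28b56a7e1c579779eac82\nauthor ...\n")
	fmt.Println(line)
}
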
diff --git a/plumbing/object/tag.go b/plumbing/object/tag.go
index 03749f9..bc03477 100644
--- a/plumbing/object/tag.go
+++ b/plumbing/object/tag.go
@@ -93,7 +93,9 @@ func (t *Tag) Decode(o plumbing.EncodedObject) (err error) {
}
defer ioutil.CheckClose(reader, &err)
- r := bufio.NewReader(reader)
+ r := bufPool.Get().(*bufio.Reader)
+ defer bufPool.Put(r)
+ r.Reset(reader)
for {
var line []byte
line, err = r.ReadBytes('\n')
@@ -141,7 +143,7 @@ func (t *Tag) Decode(o plumbing.EncodedObject) (err error) {
if pgpsig {
if bytes.Contains(l, []byte(endpgp)) {
t.PGPSignature += endpgp + "\n"
- pgpsig = false
+ break
} else {
t.PGPSignature += string(l) + "\n"
}
diff --git a/plumbing/object/tag_test.go b/plumbing/object/tag_test.go
index 59c28b0..0ef7136 100644
--- a/plumbing/object/tag_test.go
+++ b/plumbing/object/tag_test.go
@@ -375,3 +375,75 @@ sYyf9RfOnw/KUFAQbdtvLx3ikODQC+D3KBtuKI9ISHQfgw==
_, ok := e.Identities["Sunny <me@darkowlzz.space>"]
c.Assert(ok, Equals, true)
}
+
+func (s *TagSuite) TestDecodeAndVerify(c *C) {
+ objectText := `object 7dba2f128d1298e385b28b56a7e1c579779eac82
+type commit
+tag v1.6
+tagger Filip Navara <filip.navara@gmail.com> 1555269936 +0200
+
+Hello
+
+world
+
+boo
+-----BEGIN PGP SIGNATURE-----
+
+iQEzBAABCAAdFiEEdRIEYXeoLk1t7PBDqeqoMkraaZ4FAlyziT4ACgkQqeqoMkra
+aZ502wgAxG4+69l8PYfq45u1R3CCf4x0m5WwcYwvaa4ang0S9mExh/C32NHnpM/V
+DbqMpAlFvBlixOsZ8FNWaM8VXnvRWyx64E6WnInxjx9+Wgv2fy5P1N5rtpvi+S2V
+iGc0RQJlIloqXr7qPYDrwcbgg6AFg9EPhgJxLyizglu9nYvNsH1InaPXMjzgGX8+
+3irnIYEMIrLcKPrCyHo4Q6gdBjEEBF8hFclPJ8OwXBPc6uNYjnDYx0me9TTQYqoG
+oGgO/rADU9fy4c/Q1ZQpocba/ca6abRJ9LAx9VXFOSlQrMKLgHCYfqU/MAZXKcZM
+6XXOL4+8Z3FJN6CapZKX7cdYB8LJnw==
+=t5Px
+-----END PGP SIGNATURE-----
+
+`
+
+ armoredKeyRing := `
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mQENBFyzedYBCADN3lVNUNkrjn0kfwKAxGQOI8a1977UaIq9ktFg+Uv4Jyq2Y59L
+ZVx2WYk1iDaRhxhv203HV//CA/Hr4IoPjK53qAkg2bPyi8UuDbL+gU+4Z+IiSeXd
+18ZcAbcYt188PWoUq9/82ofO8EiaBbUEEZJjEegLDtX8gxBDG0aI3Yj4Txj73mno
+w6+E5HDkgPElmH3oNQcr8iK9U2Kuj+ZAHkzbWL++gDCPiLl2eWf0Cr1nlVsv6YLa
+Fsn5vjMGT3dMJFc78ZqCHOeyYK7KHjW1EjzgqeG2eJVay+ZQ5zEx4Fp/dL0RdUSV
+U7zslRiraaPxshdhYOjQ0o72RpSkP1G6+8OhABEBAAG0JUZpbGlwIE5hdmFyYSA8
+ZmlsaXAubmF2YXJhQGdtYWlsLmNvbT6JAVQEEwEIAD4WIQR1EgRhd6guTW3s8EOp
+6qgyStppngUCXLN51gIbAwUJA8JnAAULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAK
+CRCp6qgyStppnlzjB/sFu7HqJrTRsnHsoWo2+nDeicXnR0VAhiLvv7uRRw4i90FJ
+0zDwjAmIH+po6vPffWRMcWOFVvAwZCX7/XcvDNF9OupFj/aold334+VVN0ha47IQ
+g44bJZie9mvLagEsqUXggpKQjd414Tk08aUucfaN9RFJIOGCwF05j2eXOBGR2HTe
+FLq3obeObryEPf0c8N/nw4RQ8OOcq98gxiHx5Gk+nLCcJCTvOlc9ULqpJ2a6cZry
+kxgSOI9dd74ilRQdpfPvoEeEGSqkY+daf+dhgSMT2mII0UJ6qQeY0DpCZZNsL8dr
+PxR4SPRlzLBuJIpnHY21ebOqwOPOLjzR+J2RBufkuQENBFyzedYBCADTCglXrST6
+DRz7Uq3zrrrzdCchHH0/+LgYOEoGs82UvdFfigQYGTydmXz27bHKfWNfGIa9IlLF
+MhasFueCnKnmfVxnlINRdyAXv7Tmx4mSjuCEmGkvM1nPpdhxWXptnVMqhQMddiMO
+N55bElDK2ftPc2s4dBmTItXXbet2kFZiv7MZBZpA4eRAHj5DDSwl8pnQArU50RDZ
+q3qYKvAP/z2SLjekcOFtMhZ9BXMvwAW4FWV0ztpfP3LvUUb0T7fSo5cXlm/0eqwa
+MUrUlbbwJMDg1/wJ3pbKhZlP+xXNLj5UE86TtfqNqaohOcIBdCsdTUQgbkLVlibP
+JmZH7lGDhvi3ABEBAAGJATwEGAEIACYWIQR1EgRhd6guTW3s8EOp6qgyStppngUC
+XLN51gIbDAUJA8JnAAAKCRCp6qgyStppntq1B/9bmw4XjEm5KyXwWnlAVGr8skXY
+KIJr6drUOOwQzl7rxsJRjUsFdX0IjaZwx303G/23eQMIvVkoaWpHrT0Y7EsTQ55x
++GSuANhEzobks4spzQ66VW9FHRlRr5wg5PTwWnGtV/5QVSTY/zeC9R/AFUJFsDWe
+tgHlNrb6MWx5EtypZDpAkubAMvD/QoZHX0oPXYAA2CugD4uSdzjf6Ys3xUuwjKKG
+5hvimAg1/Hympq71Znb6Ec1m4ZM22Br7dcWHIX2GWfDPyRG+rYPu4Fk9KKAD4FRz
+HdzbB2ak/HxIeCqmHVlmUqa+WfTMUJcsgOm3/ZFPCSoL6l0bz9Z1XVbiyD03
+=+gC9
+-----END PGP PUBLIC KEY BLOCK-----
+`
+
+ tagEncodedObject := &plumbing.MemoryObject{}
+
+ _, err := tagEncodedObject.Write([]byte(objectText))
+ tagEncodedObject.SetType(plumbing.TagObject)
+ c.Assert(err, IsNil)
+
+ tag := &Tag{}
+ err = tag.Decode(tagEncodedObject)
+ c.Assert(err, IsNil)
+
+ _, err = tag.Verify(armoredKeyRing)
+ c.Assert(err, IsNil)
+}
diff --git a/plumbing/object/tree.go b/plumbing/object/tree.go
index 1f9ea26..d30cf6e 100644
--- a/plumbing/object/tree.go
+++ b/plumbing/object/tree.go
@@ -230,7 +230,9 @@ func (t *Tree) Decode(o plumbing.EncodedObject) (err error) {
}
defer ioutil.CheckClose(reader, &err)
- r := bufio.NewReader(reader)
+ r := bufPool.Get().(*bufio.Reader)
+ defer bufPool.Put(r)
+ r.Reset(reader)
for {
str, err := r.ReadString(' ')
if err != nil {
@@ -383,7 +385,7 @@ func NewTreeWalker(t *Tree, recursive bool, seen map[plumbing.Hash]bool) *TreeWa
// underlying repository will be skipped automatically. It is possible that this
// may change in future versions.
func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
- var obj Object
+ var obj *Tree
for {
current := len(w.stack) - 1
if current < 0 {
@@ -403,7 +405,7 @@ func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
// Finished with the current tree, move back up to the parent
w.stack = w.stack[:current]
w.base, _ = path.Split(w.base)
- w.base = path.Clean(w.base) // Remove trailing slash
+ w.base = strings.TrimSuffix(w.base, "/")
continue
}
@@ -419,7 +421,7 @@ func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
obj, err = GetTree(w.s, entry.Hash)
}
- name = path.Join(w.base, entry.Name)
+ name = simpleJoin(w.base, entry.Name)
if err != nil {
err = io.EOF
@@ -433,9 +435,9 @@ func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
return
}
- if t, ok := obj.(*Tree); ok {
- w.stack = append(w.stack, &treeEntryIter{t, 0})
- w.base = path.Join(w.base, entry.Name)
+ if obj != nil {
+ w.stack = append(w.stack, &treeEntryIter{obj, 0})
+ w.base = simpleJoin(w.base, entry.Name)
}
return
@@ -509,3 +511,10 @@ func (iter *TreeIter) ForEach(cb func(*Tree) error) error {
return cb(t)
})
}
+
+func simpleJoin(parent, child string) string {
+ if len(parent) > 0 {
+ return parent + "/" + child
+ }
+ return child
+}
\ No newline at end of file
diff --git a/storage/filesystem/dotgit/dotgit.go b/storage/filesystem/dotgit/dotgit.go
index ba9667e..111769b 100644
--- a/storage/filesystem/dotgit/dotgit.go
+++ b/storage/filesystem/dotgit/dotgit.go
@@ -83,7 +83,7 @@ type DotGit struct {
packList []plumbing.Hash
packMap map[plumbing.Hash]struct{}
- files map[string]billy.File
+ files map[plumbing.Hash]billy.File
}
// New returns a DotGit value ready to be used. The path argument must
@@ -245,8 +245,15 @@ func (d *DotGit) objectPackPath(hash plumbing.Hash, extension string) string {
}
func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.File, error) {
- if d.files == nil {
- d.files = make(map[string]billy.File)
+ if d.options.KeepDescriptors && extension == "pack" {
+ if d.files == nil {
+ d.files = make(map[plumbing.Hash]billy.File)
+ }
+
+ f, ok := d.files[hash]
+ if ok {
+ return f, nil
+ }
}
err := d.hasPack(hash)
@@ -255,11 +262,6 @@ func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.Fil
}
path := d.objectPackPath(hash, extension)
- f, ok := d.files[path]
- if ok {
- return f, nil
- }
-
pack, err := d.fs.Open(path)
if err != nil {
if os.IsNotExist(err) {
@@ -270,7 +272,7 @@ func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.Fil
}
if d.options.KeepDescriptors && extension == "pack" {
- d.files[path] = pack
+ d.files[hash] = pack
}
return pack, nil
diff --git a/storage/filesystem/index.go b/storage/filesystem/index.go
index 2ebf57e..d04195c 100644
--- a/storage/filesystem/index.go
+++ b/storage/filesystem/index.go
@@ -1,6 +1,7 @@
package filesystem
import (
+ "bufio"
"os"
"gopkg.in/src-d/go-git.v4/plumbing/format/index"
@@ -41,7 +42,7 @@ func (s *IndexStorage) Index() (i *index.Index, err error) {
defer ioutil.CheckClose(f, &err)
- d := index.NewDecoder(f)
+ d := index.NewDecoder(bufio.NewReader(f))
err = d.Decode(idx)
return idx, err
}
diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go
index 3eb62a2..ad5d8d0 100644
--- a/storage/filesystem/object.go
+++ b/storage/filesystem/object.go
@@ -26,6 +26,10 @@ type ObjectStorage struct {
dir *dotgit.DotGit
index map[plumbing.Hash]idxfile.Index
+
+ packList []plumbing.Hash
+ packListIdx int
+ packfiles map[plumbing.Hash]*packfile.Packfile
}
// NewObjectStorage creates a new ObjectStorage with the given .git directory and cache.
@@ -187,6 +191,73 @@ func (s *ObjectStorage) encodedObjectSizeFromUnpacked(h plumbing.Hash) (
return size, err
}
+func (s *ObjectStorage) packfile(idx idxfile.Index, pack plumbing.Hash) (*packfile.Packfile, error) {
+ if p := s.packfileFromCache(pack); p != nil {
+ return p, nil
+ }
+
+ f, err := s.dir.ObjectPack(pack)
+ if err != nil {
+ return nil, err
+ }
+
+ var p *packfile.Packfile
+ if s.objectCache != nil {
+ p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
+ } else {
+ p = packfile.NewPackfile(idx, s.dir.Fs(), f)
+ }
+
+ return p, s.storePackfileInCache(pack, p)
+}
+
+func (s *ObjectStorage) packfileFromCache(hash plumbing.Hash) *packfile.Packfile {
+ if s.packfiles == nil {
+ if s.options.KeepDescriptors {
+ s.packfiles = make(map[plumbing.Hash]*packfile.Packfile)
+ } else if s.options.MaxOpenDescriptors > 0 {
+ s.packList = make([]plumbing.Hash, s.options.MaxOpenDescriptors)
+ s.packfiles = make(map[plumbing.Hash]*packfile.Packfile, s.options.MaxOpenDescriptors)
+ }
+ }
+
+ return s.packfiles[hash]
+}
+
+func (s *ObjectStorage) storePackfileInCache(hash plumbing.Hash, p *packfile.Packfile) error {
+ if s.options.KeepDescriptors {
+ s.packfiles[hash] = p
+ return nil
+ }
+
+ if s.options.MaxOpenDescriptors <= 0 {
+ return nil
+ }
+
+ // start over as the limit of packList is hit
+ if s.packListIdx >= len(s.packList) {
+ s.packListIdx = 0
+ }
+
+ // close the existing packfile if open
+ if next := s.packList[s.packListIdx]; !next.IsZero() {
+ open := s.packfiles[next]
+ delete(s.packfiles, next)
+ if open != nil {
+ if err := open.Close(); err != nil {
+ return err
+ }
+ }
+ }
+
+ // cache newly open packfile
+ s.packList[s.packListIdx] = hash
+ s.packfiles[hash] = p
+ s.packListIdx++
+
+ return nil
+}
+
func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
size int64, err error) {
if err := s.requireIndex(); err != nil {
@@ -198,12 +269,6 @@ func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
return 0, plumbing.ErrObjectNotFound
}
- f, err := s.dir.ObjectPack(pack)
- if err != nil {
- return 0, err
- }
- defer ioutil.CheckClose(f, &err)
-
idx := s.index[pack]
hash, err := idx.FindHash(offset)
if err == nil {
@@ -215,11 +280,13 @@ func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
return 0, err
}
- var p *packfile.Packfile
- if s.objectCache != nil {
- p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
- } else {
- p = packfile.NewPackfile(idx, s.dir.Fs(), f)
+ p, err := s.packfile(idx, pack)
+ if err != nil {
+ return 0, err
+ }
+
+ if !s.options.KeepDescriptors && s.options.MaxOpenDescriptors == 0 {
+ defer ioutil.CheckClose(p, &err)
}
return p.GetSizeByOffset(offset)
@@ -361,29 +428,28 @@ func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) (
return nil, plumbing.ErrObjectNotFound
}
- f, err := s.dir.ObjectPack(pack)
+ idx := s.index[pack]
+ p, err := s.packfile(idx, pack)
if err != nil {
return nil, err
}
- if !s.options.KeepDescriptors {
- defer ioutil.CheckClose(f, &err)
+ if !s.options.KeepDescriptors && s.options.MaxOpenDescriptors == 0 {
+ defer ioutil.CheckClose(p, &err)
}
- idx := s.index[pack]
if canBeDelta {
- return s.decodeDeltaObjectAt(f, idx, offset, hash)
+ return s.decodeDeltaObjectAt(p, offset, hash)
}
- return s.decodeObjectAt(f, idx, offset)
+ return s.decodeObjectAt(p, offset)
}
func (s *ObjectStorage) decodeObjectAt(
- f billy.File,
- idx idxfile.Index,
+ p *packfile.Packfile,
offset int64,
) (plumbing.EncodedObject, error) {
- hash, err := idx.FindHash(offset)
+ hash, err := p.FindHash(offset)
if err == nil {
obj, ok := s.objectCache.Get(hash)
if ok {
@@ -395,28 +461,16 @@ func (s *ObjectStorage) decodeObjectAt(
return nil, err
}
- var p *packfile.Packfile
- if s.objectCache != nil {
- p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
- } else {
- p = packfile.NewPackfile(idx, s.dir.Fs(), f)
- }
-
return p.GetByOffset(offset)
}
func (s *ObjectStorage) decodeDeltaObjectAt(
- f billy.File,
- idx idxfile.Index,
+ p *packfile.Packfile,
offset int64,
hash plumbing.Hash,
) (plumbing.EncodedObject, error) {
- if _, err := f.Seek(0, io.SeekStart); err != nil {
- return nil, err
- }
-
- p := packfile.NewScanner(f)
- header, err := p.SeekObjectHeader(offset)
+ scan := p.Scanner()
+ header, err := scan.SeekObjectHeader(offset)
if err != nil {
return nil, err
}
@@ -429,12 +483,12 @@ func (s *ObjectStorage) decodeDeltaObjectAt(
case plumbing.REFDeltaObject:
base = header.Reference
case plumbing.OFSDeltaObject:
- base, err = idx.FindHash(header.OffsetReference)
+ base, err = p.FindHash(header.OffsetReference)
if err != nil {
return nil, err
}
default:
- return s.decodeObjectAt(f, idx, offset)
+ return s.decodeObjectAt(p, offset)
}
obj := &plumbing.MemoryObject{}
@@ -444,7 +498,7 @@ func (s *ObjectStorage) decodeDeltaObjectAt(
return nil, err
}
- if _, _, err := p.NextObject(w); err != nil {
+ if _, _, err := scan.NextObject(w); err != nil {
return nil, err
}
@@ -515,7 +569,20 @@ func (s *ObjectStorage) buildPackfileIters(
// Close closes all opened files.
func (s *ObjectStorage) Close() error {
- return s.dir.Close()
+ var firstError error
+ if s.options.KeepDescriptors || s.options.MaxOpenDescriptors > 0 {
+ for _, packfile := range s.packfiles {
+ err := packfile.Close()
+ if firstError == nil && err != nil {
+ firstError = err
+ }
+ }
+ }
+
+ s.packfiles = nil
+ s.dir.Close()
+
+ return firstError
}
type lazyPackfilesIter struct {
diff --git a/storage/filesystem/object_test.go b/storage/filesystem/object_test.go
index 5cfb227..c2461db 100644
--- a/storage/filesystem/object_test.go
+++ b/storage/filesystem/object_test.go
@@ -86,6 +86,24 @@ func (s *FsSuite) TestGetFromPackfileKeepDescriptors(c *C) {
})
}
+func (s *FsSuite) TestGetFromPackfileMaxOpenDescriptors(c *C) {
+ fs := fixtures.ByTag(".git").ByTag("multi-packfile").One().DotGit()
+ o := NewObjectStorageWithOptions(dotgit.New(fs), cache.NewObjectLRUDefault(), Options{MaxOpenDescriptors: 1})
+
+ expected := plumbing.NewHash("8d45a34641d73851e01d3754320b33bb5be3c4d3")
+ obj, err := o.getFromPackfile(expected, false)
+ c.Assert(err, IsNil)
+ c.Assert(obj.Hash(), Equals, expected)
+
+ expected = plumbing.NewHash("e9cfa4c9ca160546efd7e8582ec77952a27b17db")
+ obj, err = o.getFromPackfile(expected, false)
+ c.Assert(err, IsNil)
+ c.Assert(obj.Hash(), Equals, expected)
+
+ err = o.Close()
+ c.Assert(err, IsNil)
+}
+
func (s *FsSuite) TestGetSizeOfObjectFile(c *C) {
fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit()
o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault())
diff --git a/storage/filesystem/storage.go b/storage/filesystem/storage.go
index 370f7bd..88d1ed4 100644
--- a/storage/filesystem/storage.go
+++ b/storage/filesystem/storage.go
@@ -31,6 +31,9 @@ type Options struct {
// KeepDescriptors makes the file descriptors be reused, but they will
// need to be manually closed by calling Close().
KeepDescriptors bool
+ // MaxOpenDescriptors is the max number of file descriptors to keep
+ // open. If KeepDescriptors is true, all file descriptors will remain open.
+ MaxOpenDescriptors int
}
// NewStorage returns a new Storage backed by a given `fs.Filesystem` and cache.
@@ -43,7 +46,6 @@ func NewStorage(fs billy.Filesystem, cache cache.Object) *Storage {
func NewStorageWithOptions(fs billy.Filesystem, cache cache.Object, ops Options) *Storage {
dirOps := dotgit.Options{
ExclusiveAccess: ops.ExclusiveAccess,
- KeepDescriptors: ops.KeepDescriptors,
}
dir := dotgit.NewWithOptions(fs, dirOps)
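
The new MaxOpenDescriptors option sits between the defaults (a pack is opened and closed per operation) and KeepDescriptors (every pack stays open until Close): it keeps a bounded set of packfiles open and closes the oldest when the limit is hit. A hedged usage sketch; the repository path is a placeholder and error handling is minimal:

package main

import (
	"fmt"

	"gopkg.in/src-d/go-billy.v4/osfs"
	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing/cache"
	"gopkg.in/src-d/go-git.v4/storage/filesystem"
)

func main() {
	// Placeholder path: point this at an existing .git directory.
	fs := osfs.New("/path/to/repo/.git")

	// Keep at most two packfile descriptors open; older descriptors are
	// closed as new packs are opened (see storePackfileInCache above).
	st := filesystem.NewStorageWithOptions(fs, cache.NewObjectLRUDefault(), filesystem.Options{
		MaxOpenDescriptors: 2,
	})

	// nil worktree: the repository is treated as bare, which is enough for reads.
	repo, err := git.Open(st, nil)
	if err != nil {
		fmt.Println(err)
		return
	}

	head, err := repo.Head()
	fmt.Println(head, err)
}
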
diff --git a/utils/binary/read.go b/utils/binary/read.go
index 50da1ff..12e57c3 100644
--- a/utils/binary/read.go
+++ b/utils/binary/read.go
@@ -25,6 +25,10 @@ func Read(r io.Reader, data ...interface{}) error {
// ReadUntil reads from r until delim is found
func ReadUntil(r io.Reader, delim byte) ([]byte, error) {
+ if bufr, ok := r.(*bufio.Reader); ok {
+ return ReadUntilFromBufioReader(bufr, delim)
+ }
+
var buf [1]byte
value := make([]byte, 0, 16)
for {
@@ -44,6 +48,17 @@ func ReadUntil(r io.Reader, delim byte) ([]byte, error) {
}
}
+// ReadUntilFromBufioReader is like bufio.ReadBytes but drops the delimiter
+// from the result.
+func ReadUntilFromBufioReader(r *bufio.Reader, delim byte) ([]byte, error) {
+ value, err := r.ReadBytes(delim)
+ if err != nil || len(value) == 0 {
+ return nil, err
+ }
+
+ return value[:len(value)-1], nil
+}
+
// ReadVariableWidthInt reads and returns an int in Git VLQ special format:
//
// Ordinary VLQ has some redundancies, example: the number 358 can be
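
With this change, ReadUntil takes a buffered fast path whenever the caller already hands it a *bufio.Reader, and otherwise falls back to the old byte-at-a-time loop. A tiny sketch; the NUL-delimited input is arbitrary and not tied to a particular go-git call site:

package main

import (
	"bufio"
	"bytes"
	"fmt"

	"gopkg.in/src-d/go-git.v4/utils/binary"
)

func main() {
	data := []byte("refs/heads/master\x00rest")

	// Wrapping the source in a *bufio.Reader makes ReadUntil use the new
	// ReadUntilFromBufioReader path; a plain reader still works, one byte
	// at a time.
	r := bufio.NewReader(bytes.NewReader(data))

	name, err := binary.ReadUntil(r, 0)
	fmt.Printf("%q %v\n", name, err) // "refs/heads/master" <nil>
}
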
diff --git a/utils/binary/read_test.go b/utils/binary/read_test.go
index 5674653..22867c2 100644
--- a/utils/binary/read_test.go
+++ b/utils/binary/read_test.go
@@ -1,6 +1,7 @@
package binary
import (
+ "bufio"
"bytes"
"encoding/binary"
"testing"
@@ -39,6 +40,15 @@ func (s *BinarySuite) TestReadUntil(c *C) {
c.Assert(string(b), Equals, "foo")
}
+func (s *BinarySuite) TestReadUntilFromBufioReader(c *C) {
+ buf := bufio.NewReader(bytes.NewBuffer([]byte("foo bar")))
+
+ b, err := ReadUntilFromBufioReader(buf, ' ')
+ c.Assert(err, IsNil)
+ c.Assert(b, HasLen, 3)
+ c.Assert(string(b), Equals, "foo")
+}
+
func (s *BinarySuite) TestReadVariableWidthInt(c *C) {
buf := bytes.NewBuffer([]byte{129, 110})