aboutsummaryrefslogtreecommitdiffstats
path: root/formats
diff options
context:
space:
mode:
authorMáximo Cuadros <mcuadros@gmail.com>2016-08-11 18:07:29 +0200
committerMáximo Cuadros <mcuadros@gmail.com>2016-08-11 18:07:29 +0200
commit1f64d789038594098ea2c9cf796391f101d0bea5 (patch)
tree50fb530fc2e48560e70489dc81758f54822dcf50 /formats
parentc1e277a7ca75ff84741d75ad45e29a2ff3e633e3 (diff)
downloadgo-git-1f64d789038594098ea2c9cf796391f101d0bea5.tar.gz
core: new MemoryObject, move from memory.Object, packfile.Parser, base on new ObjectStorage interface
Diffstat (limited to 'formats')
-rw-r--r--formats/packfile/decoder.go27
-rw-r--r--formats/packfile/delta.go20
-rw-r--r--formats/packfile/parser.go89
-rw-r--r--formats/packfile/parser_test.go24
-rw-r--r--formats/packfile/read_recaller_impl_test.go25
-rw-r--r--formats/packfile/seekable.go3
6 files changed, 91 insertions, 97 deletions
diff --git a/formats/packfile/decoder.go b/formats/packfile/decoder.go
index 2b5ef88..6d0cd8b 100644
--- a/formats/packfile/decoder.go
+++ b/formats/packfile/decoder.go
@@ -35,21 +35,8 @@ var (
ErrZLib = NewError("zlib reading error")
)
-const (
- // DefaultMaxObjectsLimit is the maximum amount of objects the
- // decoder will decode before returning ErrMaxObjectsLimitReached.
- DefaultMaxObjectsLimit = 1 << 20
-)
-
// Decoder reads and decodes packfiles from an input stream.
type Decoder struct {
- // MaxObjectsLimit is the limit of objects to be load in the packfile, if
- // a packfile excess this number an error is throw, the default value
- // is defined by DefaultMaxObjectsLimit, usually the default limit is more
- // than enough to work with any repository, with higher values and huge
- // repositories you can run out of memory.
- MaxObjectsLimit uint32
-
p *Parser
s core.ObjectStorage
}
@@ -57,8 +44,6 @@ type Decoder struct {
// NewDecoder returns a new Decoder that reads from r.
func NewDecoder(r ReadRecaller) *Decoder {
return &Decoder{
- MaxObjectsLimit: DefaultMaxObjectsLimit,
-
p: NewParser(r),
}
}
@@ -72,13 +57,7 @@ func (d *Decoder) Decode(s core.ObjectStorage) error {
return err
}
- if count > d.MaxObjectsLimit {
- return ErrMaxObjectsLimitReached.AddDetails("%d", count)
- }
-
- err = d.readObjects(count)
-
- return err
+ return d.readObjects(count)
}
func (d *Decoder) readObjects(count uint32) error {
@@ -92,8 +71,8 @@ func (d *Decoder) readObjects(count uint32) error {
return err
}
- obj, err := d.p.ReadObject()
- if err != nil {
+ obj := d.s.NewObject()
+ if err := d.p.FillObject(obj); err != nil {
if err == io.EOF {
break
}
diff --git a/formats/packfile/delta.go b/formats/packfile/delta.go
index e0bbb65..95c13d5 100644
--- a/formats/packfile/delta.go
+++ b/formats/packfile/delta.go
@@ -1,5 +1,7 @@
package packfile
+import "gopkg.in/src-d/go-git.v4/core"
+
// See https://github.com/git/git/blob/49fa3dc76179e04b0833542fa52d0f287a4955ac/delta.h
// https://github.com/git/git/blob/c2c5f6b1e479f2c38e0e01345350620944e3527f/patch-delta.c,
// and https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js
@@ -7,6 +9,24 @@ package packfile
const deltaSizeMin = 4
+// ApplyDelta writes to target the result of applying the modification deltas in delta to base.
+func ApplyDelta(target, base core.Object, delta []byte) error {
+ src := base.Content()
+ w, err := target.Writer()
+ if err != nil {
+ return err
+ }
+
+ dst := PatchDelta(src, delta)
+ target.SetSize(int64(len(dst)))
+
+ if _, err := w.Write(dst); err != nil {
+ return err
+ }
+
+ return nil
+}
+
// PatchDelta returns the result of applying the modification deltas in delta to src.
func PatchDelta(src, delta []byte) []byte {
if len(delta) < deltaSizeMin {
diff --git a/formats/packfile/parser.go b/formats/packfile/parser.go
index a7c4047..8b7a692 100644
--- a/formats/packfile/parser.go
+++ b/formats/packfile/parser.go
@@ -8,7 +8,6 @@ import (
"io"
"gopkg.in/src-d/go-git.v4/core"
- "gopkg.in/src-d/go-git.v4/storage/memory"
)
var (
@@ -30,6 +29,7 @@ const (
// Values from this type are not zero-value safe. See the NewParser function bellow.
type Parser struct {
ReadRecaller
+ ObjectFactory func() core.Object
}
// NewParser returns a new Parser that reads from the packfile represented by r.
@@ -174,47 +174,43 @@ func moreBytesInLength(c byte) bool {
// ReadObject reads and returns a git object from an object entry in the packfile.
// Non-deltified and deltified objects are supported.
-func (p Parser) ReadObject() (core.Object, error) {
+func (p Parser) FillObject(obj core.Object) error {
start, err := p.Offset()
if err != nil {
- return nil, err
+ return err
}
- var typ core.ObjectType
- typ, _, err = p.ReadObjectTypeAndLength()
+ t, l, err := p.ReadObjectTypeAndLength()
if err != nil {
- return nil, err
+ return err
}
- var cont []byte
- switch typ {
+ obj.SetSize(l)
+
+ switch t {
case core.CommitObject, core.TreeObject, core.BlobObject, core.TagObject:
- cont, err = p.ReadNonDeltaObjectContent()
+ obj.SetType(t)
+ err = p.ReadNonDeltaObjectContent(obj)
case core.REFDeltaObject:
- cont, typ, err = p.ReadREFDeltaObjectContent()
+ err = p.ReadREFDeltaObjectContent(obj)
case core.OFSDeltaObject:
- cont, typ, err = p.ReadOFSDeltaObjectContent(start)
+ err = p.ReadOFSDeltaObjectContent(obj, start)
default:
- err = ErrInvalidObject.AddDetails("tag %q", typ)
- }
- if err != nil {
- return nil, err
+ err = ErrInvalidObject.AddDetails("tag %q", t)
}
- return memory.NewObject(typ, int64(len(cont)), cont), nil
+ return err
}
// ReadNonDeltaObjectContent reads and returns a non-deltified object
// from it zlib stream in an object entry in the packfile.
-func (p Parser) ReadNonDeltaObjectContent() ([]byte, error) {
- return p.readZip()
-}
-
-func (p Parser) readZip() ([]byte, error) {
- buf := bytes.NewBuffer(nil)
- err := p.inflate(buf)
+func (p Parser) ReadNonDeltaObjectContent(obj core.Object) error {
+ w, err := obj.Writer()
+ if err != nil {
+ return err
+ }
- return buf.Bytes(), err
+ return p.inflate(w)
}
func (p Parser) inflate(w io.Writer) (err error) {
@@ -239,23 +235,23 @@ func (p Parser) inflate(w io.Writer) (err error) {
// ReadREFDeltaObjectContent reads and returns an object specified by a
// REF-Delta entry in the packfile, form the hash onwards.
-func (p Parser) ReadREFDeltaObjectContent() ([]byte, core.ObjectType, error) {
+func (p Parser) ReadREFDeltaObjectContent(obj core.Object) error {
refHash, err := p.ReadHash()
if err != nil {
- return nil, core.ObjectType(0), err
+ return err
}
- refObj, err := p.RecallByHash(refHash)
+ base, err := p.RecallByHash(refHash)
if err != nil {
- return nil, core.ObjectType(0), err
+ return err
}
- content, err := p.ReadSolveDelta(refObj.Content())
- if err != nil {
- return nil, refObj.Type(), err
+ obj.SetType(base.Type())
+ if err := p.ReadAndApplyDelta(obj, base); err != nil {
+ return err
}
- return content, refObj.Type(), nil
+ return nil
}
// ReadHash reads a hash.
@@ -268,41 +264,40 @@ func (p Parser) ReadHash() (core.Hash, error) {
return h, nil
}
-// ReadSolveDelta reads and returns the base patched with the contents
+// ReadAndApplyDelta reads and returns the base patched with the contents
// of a zlib compressed diff data in the delta portion of an object
// entry in the packfile.
-func (p Parser) ReadSolveDelta(base []byte) ([]byte, error) {
- diff, err := p.readZip()
- if err != nil {
- return nil, err
+func (p Parser) ReadAndApplyDelta(target, base core.Object) error {
+ buf := bytes.NewBuffer(nil)
+ if err := p.inflate(buf); err != nil {
+ return err
}
- return PatchDelta(base, diff), nil
+ return ApplyDelta(target, base, buf.Bytes())
}
// ReadOFSDeltaObjectContent reads an returns an object specified by an
// OFS-delta entry in the packfile from it negative offset onwards. The
// start parameter is the offset of this particular object entry (the
// current offset minus the already processed type and length).
-func (p Parser) ReadOFSDeltaObjectContent(start int64) (
- []byte, core.ObjectType, error) {
+func (p Parser) ReadOFSDeltaObjectContent(obj core.Object, start int64) error {
jump, err := p.ReadNegativeOffset()
if err != nil {
- return nil, core.ObjectType(0), err
+ return err
}
- ref, err := p.RecallByOffset(start + jump)
+ base, err := p.RecallByOffset(start + jump)
if err != nil {
- return nil, core.ObjectType(0), err
+ return err
}
- content, err := p.ReadSolveDelta(ref.Content())
- if err != nil {
- return nil, ref.Type(), err
+ obj.SetType(base.Type())
+ if err := p.ReadAndApplyDelta(obj, base); err != nil {
+ return err
}
- return content, ref.Type(), nil
+ return nil
}
// ReadNegativeOffset reads and returns an offset from a OFS DELTA
diff --git a/formats/packfile/parser_test.go b/formats/packfile/parser_test.go
index ec9b19a..c2b99f1 100644
--- a/formats/packfile/parser_test.go
+++ b/formats/packfile/parser_test.go
@@ -8,7 +8,6 @@ import (
. "gopkg.in/check.v1"
"gopkg.in/src-d/go-git.v4/core"
- "gopkg.in/src-d/go-git.v4/storage/memory"
)
const (
@@ -238,9 +237,10 @@ func (s *ParserSuite) TestReadNonDeltaObjectContent(c *C) {
_, _, err = p.ReadObjectTypeAndLength()
c.Assert(err, IsNil, com)
- cont, err := p.ReadNonDeltaObjectContent()
+ obj := &core.MemoryObject{}
+ err = p.ReadNonDeltaObjectContent(obj)
c.Assert(err, IsNil, com)
- c.Assert(cont, DeepEquals, test.expected, com)
+ c.Assert(obj.Content(), DeepEquals, test.expected, com)
}
}
@@ -293,10 +293,11 @@ func (s *ParserSuite) TestReadOFSDeltaObjectContent(c *C) {
err = fix.seek(beforeJumpSize)
c.Assert(err, IsNil, com)
- cont, typ, err := p.ReadOFSDeltaObjectContent(test.offset)
+ obj := &core.MemoryObject{}
+ err = p.ReadOFSDeltaObjectContent(obj, test.offset)
c.Assert(err, IsNil, com)
- c.Assert(typ, Equals, test.expType, com)
- c.Assert(cont, DeepEquals, test.expContent, com)
+ c.Assert(obj.Type(), Equals, test.expType, com)
+ c.Assert(obj.Content(), DeepEquals, test.expContent, com)
}
}
@@ -356,17 +357,18 @@ func (s *ParserSuite) TestReadREFDeltaObjectContent(c *C) {
err = fix.seek(beforeHash)
c.Assert(err, IsNil, com)
- cont, typ, err := p.ReadREFDeltaObjectContent()
+ obj := &core.MemoryObject{}
+ err = p.ReadREFDeltaObjectContent(obj)
c.Assert(err, IsNil, com)
- c.Assert(typ, Equals, test.expType, com)
- c.Assert(cont, DeepEquals, test.expContent, com)
+ c.Assert(obj.Type(), Equals, test.expType, com)
+ c.Assert(obj.Content(), DeepEquals, test.expContent, com)
p.ForgetAll()
}
}
-func newObject(t core.ObjectType, c []byte) *memory.Object {
- return memory.NewObject(t, int64(len(c)), c)
+func newObject(t core.ObjectType, c []byte) core.Object {
+ return core.NewMemoryObject(t, int64(len(c)), c)
}
func (s *ParserSuite) TestReadHeaderBadSignatureError(c *C) {
diff --git a/formats/packfile/read_recaller_impl_test.go b/formats/packfile/read_recaller_impl_test.go
index 8de7e2a..f89171d 100644
--- a/formats/packfile/read_recaller_impl_test.go
+++ b/formats/packfile/read_recaller_impl_test.go
@@ -7,7 +7,6 @@ import (
"os"
"gopkg.in/src-d/go-git.v4/core"
- "gopkg.in/src-d/go-git.v4/storage/memory"
. "gopkg.in/check.v1"
)
@@ -152,20 +151,20 @@ func (s *ReadRecallerImplSuite) TestRememberRecall(c *C) {
}{
{
off: 12,
- obj: newObj(core.CommitObject, []byte("tree 44a1cdf21c791867c51caad8f1b77e6baee6f462\nparent 87fe6e7c6b1b89519fe3a03a8961c5aa14d4cc68\nparent 9244ee648182b91a63d8cc4cbe4b9ac2a27c0492\nauthor Matt Duftler <duftler@google.com> 1448290941 -0500\ncommitter Matt Duftler <duftler@google.com> 1448290941 -0500\n\nMerge pull request #615 from ewiseblatt/create_dev\n\nPreserve original credentials of spinnaker-local.yml when transforming it.")),
+ obj: newObject(core.CommitObject, []byte("tree 44a1cdf21c791867c51caad8f1b77e6baee6f462\nparent 87fe6e7c6b1b89519fe3a03a8961c5aa14d4cc68\nparent 9244ee648182b91a63d8cc4cbe4b9ac2a27c0492\nauthor Matt Duftler <duftler@google.com> 1448290941 -0500\ncommitter Matt Duftler <duftler@google.com> 1448290941 -0500\n\nMerge pull request #615 from ewiseblatt/create_dev\n\nPreserve original credentials of spinnaker-local.yml when transforming it.")),
}, {
off: 3037,
- obj: newObj(core.TagObject, []byte("object e0005f50e22140def60260960b21667f1fdfff80\ntype commit\ntag v0.10.0\ntagger cfieber <cfieber@netflix.com> 1447687536 -0800\n\nRelease of 0.10.0\n\n- e0005f50e22140def60260960b21667f1fdfff80: Merge pull request #553 from ewiseblatt/rendezvous\n- e1a2b26b784179e6903a7ae967c037c721899eba: Wait for cassandra before starting spinnaker\n- c756e09461d071e98b8660818cf42d90c90f2854: Merge pull request #552 from duftler/google-c2d-tweaks\n- 0777fadf4ca6f458d7071de414f9bd5417911037: Fix incorrect config prop names: s/SPINNAKER_GOOGLE_PROJECT_DEFAULT_REGION/SPINNAKER_GOOGLE_DEFAULT_REGION s/SPINNAKER_GOOGLE_PROJECT_DEFAULT_ZONE/SPINNAKER_GOOGLE_DEFAULT_ZONE Hardcode profile name in generated ~/.aws/credentials to [default]. Restart all of spinnaker after updating cassandra and reconfiguring spinnaker, instead of just restarting clouddriver.\n- d8d031c1ac45801074418c43424a6f2c0dff642c: Merge pull request #551 from kenzanmedia/fixGroup\n- 626d23075f9e92aad19015f2964c95d45f41fa3a: Put in correct block for public image. Delineate cloud provider.\n")),
+ obj: newObject(core.TagObject, []byte("object e0005f50e22140def60260960b21667f1fdfff80\ntype commit\ntag v0.10.0\ntagger cfieber <cfieber@netflix.com> 1447687536 -0800\n\nRelease of 0.10.0\n\n- e0005f50e22140def60260960b21667f1fdfff80: Merge pull request #553 from ewiseblatt/rendezvous\n- e1a2b26b784179e6903a7ae967c037c721899eba: Wait for cassandra before starting spinnaker\n- c756e09461d071e98b8660818cf42d90c90f2854: Merge pull request #552 from duftler/google-c2d-tweaks\n- 0777fadf4ca6f458d7071de414f9bd5417911037: Fix incorrect config prop names: s/SPINNAKER_GOOGLE_PROJECT_DEFAULT_REGION/SPINNAKER_GOOGLE_DEFAULT_REGION s/SPINNAKER_GOOGLE_PROJECT_DEFAULT_ZONE/SPINNAKER_GOOGLE_DEFAULT_ZONE Hardcode profile name in generated ~/.aws/credentials to [default]. Restart all of spinnaker after updating cassandra and reconfiguring spinnaker, instead of just restarting clouddriver.\n- d8d031c1ac45801074418c43424a6f2c0dff642c: Merge pull request #551 from kenzanmedia/fixGroup\n- 626d23075f9e92aad19015f2964c95d45f41fa3a: Put in correct block for public image. Delineate cloud provider.\n")),
}, {
off: 157625,
- obj: newObj(core.BlobObject, []byte(".gradle\nbuild/\n*.iml\n.idea\n*.pyc\n*~\n#*\nconfig/spinnaker-local.yml\n.DS_Store\npacker/ami_table.md\npacker/ami_table.json\npacker/example_output.txt")),
+ obj: newObject(core.BlobObject, []byte(".gradle\nbuild/\n*.iml\n.idea\n*.pyc\n*~\n#*\nconfig/spinnaker-local.yml\n.DS_Store\npacker/ami_table.md\npacker/ami_table.json\npacker/example_output.txt")),
}, {
off: 1234,
- obj: newObj(core.BlobObject, []byte(".gradle\nbuild/\n*.iml\n.idea\n*.pyc\n*~\n#*\nconfig/spinnaker-local.yml\n.DS_Store\npacker/ami_table.md\npacker/ami_table.json\npacker/example_output.txt")),
+ obj: newObject(core.BlobObject, []byte(".gradle\nbuild/\n*.iml\n.idea\n*.pyc\n*~\n#*\nconfig/spinnaker-local.yml\n.DS_Store\npacker/ami_table.md\npacker/ami_table.json\npacker/example_output.txt")),
err: "duplicated object: with hash .*",
}, {
off: 3037,
- obj: newObj(core.BlobObject, []byte("")),
+ obj: newObject(core.BlobObject, []byte("")),
err: "duplicated object: with offset 3037",
ignore: "seekable",
// seekable can not check if the offset has already been added
@@ -186,19 +185,17 @@ func (s *ReadRecallerImplSuite) TestRememberRecall(c *C) {
result, err := sr.RecallByHash(test.obj.Hash())
c.Assert(err, IsNil, com)
+ c.Assert(result.Hash(), Equals, test.obj.Hash())
c.Assert(result, DeepEquals, test.obj, com)
result, err = sr.RecallByOffset(test.off)
c.Assert(err, IsNil, com)
+ c.Assert(result.Hash(), Equals, test.obj.Hash())
c.Assert(result, DeepEquals, test.obj, com)
}
}
}
-func newObj(typ core.ObjectType, cont []byte) core.Object {
- return memory.NewObject(typ, int64(len(cont)), cont)
-}
-
func (s *ReadRecallerImplSuite) TestRecallByHashErrors(c *C) {
for _, impl := range []struct {
id string
@@ -209,7 +206,7 @@ func (s *ReadRecallerImplSuite) TestRecallByHashErrors(c *C) {
} {
com := Commentf("implementation %s", impl.id)
sr := impl.newFn([]byte{})
- obj := newObj(core.CommitObject, []byte{})
+ obj := newObject(core.CommitObject, []byte{})
_, err := sr.RecallByHash(obj.Hash())
c.Assert(err, ErrorMatches, ErrCannotRecall.Error()+".*", com)
@@ -249,9 +246,9 @@ func rememberSomeObjects(sr ReadRecaller) error {
off int64
obj core.Object
}{
- {off: 0, obj: newObj(core.CommitObject, []byte{'a'})}, // 93114cce67ec23976d15199514399203f69cc676
- {off: 10, obj: newObj(core.CommitObject, []byte{'b'})}, // 2bb767097e479f668f0ebdabe88df11337bd8f19
- {off: 20, obj: newObj(core.CommitObject, []byte{'c'})}, // 2f8096005677370e6446541a50e074299d43d468
+ {off: 0, obj: newObject(core.CommitObject, []byte{'a'})}, // 93114cce67ec23976d15199514399203f69cc676
+ {off: 10, obj: newObject(core.CommitObject, []byte{'b'})}, // 2bb767097e479f668f0ebdabe88df11337bd8f19
+ {off: 20, obj: newObject(core.CommitObject, []byte{'c'})}, // 2f8096005677370e6446541a50e074299d43d468
} {
err := sr.Remember(init.off, init.obj)
if err != nil {
diff --git a/formats/packfile/seekable.go b/formats/packfile/seekable.go
index 37b4ee9..65c8a69 100644
--- a/formats/packfile/seekable.go
+++ b/formats/packfile/seekable.go
@@ -104,5 +104,6 @@ func (r *Seekable) RecallByOffset(o int64) (obj core.Object, err error) {
return nil, err
}
- return NewParser(r).ReadObject()
+ obj = &core.MemoryObject{}
+ return obj, NewParser(r).FillObject(obj)
}