author     Máximo Cuadros <mcuadros@gmail.com>  2016-08-11 18:07:29 +0200
committer  Máximo Cuadros <mcuadros@gmail.com>  2016-08-11 18:07:29 +0200
commit     1f64d789038594098ea2c9cf796391f101d0bea5
tree       50fb530fc2e48560e70489dc81758f54822dcf50
parent     c1e277a7ca75ff84741d75ad45e29a2ff3e633e3
download   go-git-1f64d789038594098ea2c9cf796391f101d0bea5.tar.gz
core: new MemoryObject, moved from memory.Object; packfile.Parser based on new ObjectStorage interface
-rw-r--r--  clients/http/common.go                                             1
-rw-r--r--  core/memory.go                                                    65
-rw-r--r--  core/memory_test.go (renamed from storage/memory/object_test.go)  46
-rw-r--r--  core/object.go                                                     1
-rw-r--r--  formats/packfile/decoder.go                                       27
-rw-r--r--  formats/packfile/delta.go                                         20
-rw-r--r--  formats/packfile/parser.go                                        89
-rw-r--r--  formats/packfile/parser_test.go                                   24
-rw-r--r--  formats/packfile/read_recaller_impl_test.go                       25
-rw-r--r--  formats/packfile/seekable.go                                       3
-rw-r--r--  objects_test.go                                                    3
-rw-r--r--  storage/memory/object.go                                          72
-rw-r--r--  storage/memory/storage.go                                          5
-rw-r--r--  storage/memory/storage_test.go                                     8
-rw-r--r--  storage/seekable/internal/index/index.go                           4
-rw-r--r--  storage/seekable/storage.go                                        7
-rw-r--r--  storage/seekable/storage_test.go                                   2
17 files changed, 199 insertions, 203 deletions
diff --git a/clients/http/common.go b/clients/http/common.go
index d375e62..e7c43d5 100644
--- a/clients/http/common.go
+++ b/clients/http/common.go
@@ -7,7 +7,6 @@ import (
"net/http"
"gopkg.in/src-d/go-git.v4/clients/common"
-
"gopkg.in/src-d/go-git.v4/core"
)
diff --git a/core/memory.go b/core/memory.go
new file mode 100644
index 0000000..6477f74
--- /dev/null
+++ b/core/memory.go
@@ -0,0 +1,65 @@
+package core
+
+import (
+ "bytes"
+ "io/ioutil"
+)
+
+// MemoryObject is an in-memory Object implementation
+type MemoryObject struct {
+ t ObjectType
+ h Hash
+ cont []byte
+ sz int64
+}
+
+// NewMemoryObject creates a new MemoryObject
+func NewMemoryObject(t ObjectType, len int64, cont []byte) *MemoryObject {
+ return &MemoryObject{t: t, sz: len, cont: cont}
+}
+
+// Hash returns the object Hash. The hash is calculated on-the-fly the first
+// time it is called; subsequent calls return the same Hash even if the type
+// or the content has changed. The Hash is only generated if the size of
+// the content is exactly the Object.Size.
+func (o *MemoryObject) Hash() Hash {
+ if o.h == ZeroHash && int64(len(o.cont)) == o.sz {
+ o.h = ComputeHash(o.t, o.cont)
+ }
+
+ return o.h
+}
+
+// Type returns the ObjectType
+func (o *MemoryObject) Type() ObjectType { return o.t }
+
+// SetType sets the ObjectType
+func (o *MemoryObject) SetType(t ObjectType) { o.t = t }
+
+// Size returns the size of the object
+func (o *MemoryObject) Size() int64 { return o.sz }
+
+// SetSize sets the object size; content of that size should be written afterwards
+func (o *MemoryObject) SetSize(s int64) { o.sz = s }
+
+// Content returns the contents of the object
+func (o *MemoryObject) Content() []byte { return o.cont }
+
+// Reader returns an ObjectReader used to read the object's content.
+func (o *MemoryObject) Reader() (ObjectReader, error) {
+ return ioutil.NopCloser(bytes.NewBuffer(o.cont)), nil
+}
+
+// Writer returns an ObjectWriter used to write the object's content.
+func (o *MemoryObject) Writer() (ObjectWriter, error) {
+ return o, nil
+}
+
+func (o *MemoryObject) Write(p []byte) (n int, err error) {
+ o.cont = append(o.cont, p...)
+ return len(p), nil
+}
+
+// Close releases any resources consumed by the object when it is acting as
+// an ObjectWriter.
+func (o *MemoryObject) Close() error { return nil }
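
The new core.MemoryObject is what the rest of this change builds on. A minimal usage sketch (not part of this commit; it only assumes the API added above and the gopkg.in/src-d/go-git.v4 import path):

package main

import (
	"fmt"
	"io/ioutil"

	"gopkg.in/src-d/go-git.v4/core"
)

func main() {
	// Declare type and size first, then stream the content in through the
	// ObjectWriter interface implemented by MemoryObject itself.
	o := &core.MemoryObject{}
	o.SetType(core.BlobObject)
	o.SetSize(14)

	w, err := o.Writer()
	if err != nil {
		panic(err)
	}

	w.Write([]byte("Hello, World!\n"))
	w.Close()

	// The hash is computed lazily, and only once the written content length
	// matches the declared size (see Hash above).
	fmt.Println(o.Hash()) // 8ab686eafeb1f44702738c8b0f24f2567c36da6d

	// Reading back goes through an ObjectReader.
	r, _ := o.Reader()
	b, _ := ioutil.ReadAll(r)
	fmt.Printf("%s", b)
}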
diff --git a/storage/memory/object_test.go b/core/memory_test.go
index c252626..f1c3f64 100644
--- a/storage/memory/object_test.go
+++ b/core/memory_test.go
@@ -1,22 +1,18 @@
-package memory
+package core
import (
"io/ioutil"
- "testing"
. "gopkg.in/check.v1"
- "gopkg.in/src-d/go-git.v4/core"
)
-func Test(t *testing.T) { TestingT(t) }
+type MemoryObjectSuite struct{}
-type ObjectSuite struct{}
+var _ = Suite(&MemoryObjectSuite{})
-var _ = Suite(&ObjectSuite{})
-
-func (s *ObjectSuite) TestHash(c *C) {
- o := &Object{}
- o.SetType(core.BlobObject)
+func (s *MemoryObjectSuite) TestHash(c *C) {
+ o := &MemoryObject{}
+ o.SetType(BlobObject)
o.SetSize(14)
_, err := o.Write([]byte("Hello, World!\n"))
@@ -24,32 +20,32 @@ func (s *ObjectSuite) TestHash(c *C) {
c.Assert(o.Hash().String(), Equals, "8ab686eafeb1f44702738c8b0f24f2567c36da6d")
- o.SetType(core.CommitObject)
+ o.SetType(CommitObject)
c.Assert(o.Hash().String(), Equals, "8ab686eafeb1f44702738c8b0f24f2567c36da6d")
}
-func (s *ObjectSuite) TestHashNotFilled(c *C) {
- o := &Object{}
- o.SetType(core.BlobObject)
+func (s *MemoryObjectSuite) TestHashNotFilled(c *C) {
+ o := &MemoryObject{}
+ o.SetType(BlobObject)
o.SetSize(14)
- c.Assert(o.Hash(), Equals, core.ZeroHash)
+ c.Assert(o.Hash(), Equals, ZeroHash)
}
-func (s *ObjectSuite) TestType(c *C) {
- o := &Object{}
- o.SetType(core.BlobObject)
- c.Assert(o.Type(), Equals, core.BlobObject)
+func (s *MemoryObjectSuite) TestType(c *C) {
+ o := &MemoryObject{}
+ o.SetType(BlobObject)
+ c.Assert(o.Type(), Equals, BlobObject)
}
-func (s *ObjectSuite) TestSize(c *C) {
- o := &Object{}
+func (s *MemoryObjectSuite) TestSize(c *C) {
+ o := &MemoryObject{}
o.SetSize(42)
c.Assert(o.Size(), Equals, int64(42))
}
-func (s *ObjectSuite) TestReader(c *C) {
- o := &Object{cont: []byte("foo")}
+func (s *MemoryObjectSuite) TestReader(c *C) {
+ o := &MemoryObject{cont: []byte("foo")}
reader, err := o.Reader()
c.Assert(err, IsNil)
@@ -60,8 +56,8 @@ func (s *ObjectSuite) TestReader(c *C) {
c.Assert(b, DeepEquals, []byte("foo"))
}
-func (s *ObjectSuite) TestWriter(c *C) {
- o := &Object{}
+func (s *MemoryObjectSuite) TestWriter(c *C) {
+ o := &MemoryObject{}
writer, err := o.Writer()
c.Assert(err, IsNil)
diff --git a/core/object.go b/core/object.go
index 4610c45..01dd660 100644
--- a/core/object.go
+++ b/core/object.go
@@ -41,6 +41,7 @@ type Object interface {
// ObjectStorage generic storage of objects
type ObjectStorage interface {
+ NewObject() Object
Set(Object) (Hash, error)
Get(Hash) (Object, error)
Iter(ObjectType) (ObjectIter, error)
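
With NewObject on the interface, each storage backend decides how its objects are materialised; the packfile decoder below only fills whatever the backend hands out. A hypothetical backend sketch (names are illustrative, not from this commit) that simply reuses core.MemoryObject:

package custom

import "gopkg.in/src-d/go-git.v4/core"

// Storage is a hypothetical backend; only the methods relevant to this
// change are sketched, the full core.ObjectStorage interface also requires
// Get and Iter.
type Storage struct {
	objects map[core.Hash]core.Object
}

// NewStorage creates an empty storage.
func NewStorage() *Storage {
	return &Storage{objects: make(map[core.Hash]core.Object)}
}

// NewObject hands the decoder an empty object to fill. A backend with its
// own object representation would return that type here instead.
func (s *Storage) NewObject() core.Object {
	return &core.MemoryObject{}
}

// Set stores a filled object under its hash.
func (s *Storage) Set(o core.Object) (core.Hash, error) {
	h := o.Hash()
	s.objects[h] = o

	return h, nil
}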
diff --git a/formats/packfile/decoder.go b/formats/packfile/decoder.go
index 2b5ef88..6d0cd8b 100644
--- a/formats/packfile/decoder.go
+++ b/formats/packfile/decoder.go
@@ -35,21 +35,8 @@ var (
ErrZLib = NewError("zlib reading error")
)
-const (
- // DefaultMaxObjectsLimit is the maximum amount of objects the
- // decoder will decode before returning ErrMaxObjectsLimitReached.
- DefaultMaxObjectsLimit = 1 << 20
-)
-
// Decoder reads and decodes packfiles from an input stream.
type Decoder struct {
- // MaxObjectsLimit is the limit of objects to be load in the packfile, if
- // a packfile excess this number an error is throw, the default value
- // is defined by DefaultMaxObjectsLimit, usually the default limit is more
- // than enough to work with any repository, with higher values and huge
- // repositories you can run out of memory.
- MaxObjectsLimit uint32
-
p *Parser
s core.ObjectStorage
}
@@ -57,8 +44,6 @@ type Decoder struct {
// NewDecoder returns a new Decoder that reads from r.
func NewDecoder(r ReadRecaller) *Decoder {
return &Decoder{
- MaxObjectsLimit: DefaultMaxObjectsLimit,
-
p: NewParser(r),
}
}
@@ -72,13 +57,7 @@ func (d *Decoder) Decode(s core.ObjectStorage) error {
return err
}
- if count > d.MaxObjectsLimit {
- return ErrMaxObjectsLimitReached.AddDetails("%d", count)
- }
-
- err = d.readObjects(count)
-
- return err
+ return d.readObjects(count)
}
func (d *Decoder) readObjects(count uint32) error {
@@ -92,8 +71,8 @@ func (d *Decoder) readObjects(count uint32) error {
return err
}
- obj, err := d.p.ReadObject()
- if err != nil {
+ obj := d.s.NewObject()
+ if err := d.p.FillObject(obj); err != nil {
if err == io.EOF {
break
}
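
The decoder no longer enforces a hard object-count limit and no longer builds memory.Object values itself; it asks the target storage for empty objects and fills them. A minimal end-to-end sketch, assuming a ReadRecaller positioned at the start of a packfile:

package example

import (
	"gopkg.in/src-d/go-git.v4/formats/packfile"
	"gopkg.in/src-d/go-git.v4/storage/memory"
)

// decodePackfile decodes every object in the packfile behind r into an
// in-memory storage and returns it.
func decodePackfile(r packfile.ReadRecaller) (*memory.ObjectStorage, error) {
	s := memory.NewObjectStorage()

	d := packfile.NewDecoder(r)
	if err := d.Decode(s); err != nil {
		return nil, err
	}

	return s, nil
}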
diff --git a/formats/packfile/delta.go b/formats/packfile/delta.go
index e0bbb65..95c13d5 100644
--- a/formats/packfile/delta.go
+++ b/formats/packfile/delta.go
@@ -1,5 +1,7 @@
package packfile
+import "gopkg.in/src-d/go-git.v4/core"
+
// See https://github.com/git/git/blob/49fa3dc76179e04b0833542fa52d0f287a4955ac/delta.h
// https://github.com/git/git/blob/c2c5f6b1e479f2c38e0e01345350620944e3527f/patch-delta.c,
// and https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js
@@ -7,6 +9,24 @@ package packfile
const deltaSizeMin = 4
+// ApplyDelta writes to target the result of applying the modification deltas in delta to base.
+func ApplyDelta(target, base core.Object, delta []byte) error {
+ src := base.Content()
+ w, err := target.Writer()
+ if err != nil {
+ return err
+ }
+
+ dst := PatchDelta(src, delta)
+ target.SetSize(int64(len(dst)))
+
+ if _, err := w.Write(dst); err != nil {
+ return err
+ }
+
+ return nil
+}
+
// PatchDelta returns the result of applying the modification deltas in delta to src.
func PatchDelta(src, delta []byte) []byte {
if len(delta) < deltaSizeMin {
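
ApplyDelta is the object-level counterpart of PatchDelta: the caller allocates the target, and ApplyDelta sets its size and writes the patched content through the target's Writer. A short sketch of the intended calling convention (the wrapper function is illustrative, not part of this commit):

package example

import (
	"gopkg.in/src-d/go-git.v4/core"
	"gopkg.in/src-d/go-git.v4/formats/packfile"
)

// applyRawDelta patches base with the raw (already inflated) delta bytes and
// returns the resulting object.
func applyRawDelta(base core.Object, delta []byte) (core.Object, error) {
	target := &core.MemoryObject{}
	target.SetType(base.Type()) // deltified entries keep the type of their base

	if err := packfile.ApplyDelta(target, base, delta); err != nil {
		return nil, err
	}

	return target, nil
}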
diff --git a/formats/packfile/parser.go b/formats/packfile/parser.go
index a7c4047..8b7a692 100644
--- a/formats/packfile/parser.go
+++ b/formats/packfile/parser.go
@@ -8,7 +8,6 @@ import (
"io"
"gopkg.in/src-d/go-git.v4/core"
- "gopkg.in/src-d/go-git.v4/storage/memory"
)
var (
@@ -30,6 +29,7 @@ const (
// Values from this type are not zero-value safe. See the NewParser function below.
type Parser struct {
ReadRecaller
+ ObjectFactory func() core.Object
}
// NewParser returns a new Parser that reads from the packfile represented by r.
@@ -174,47 +174,43 @@ func moreBytesInLength(c byte) bool {
// FillObject reads a git object from an object entry in the packfile and fills obj with it.
// Non-deltified and deltified objects are supported.
-func (p Parser) ReadObject() (core.Object, error) {
+func (p Parser) FillObject(obj core.Object) error {
start, err := p.Offset()
if err != nil {
- return nil, err
+ return err
}
- var typ core.ObjectType
- typ, _, err = p.ReadObjectTypeAndLength()
+ t, l, err := p.ReadObjectTypeAndLength()
if err != nil {
- return nil, err
+ return err
}
- var cont []byte
- switch typ {
+ obj.SetSize(l)
+
+ switch t {
case core.CommitObject, core.TreeObject, core.BlobObject, core.TagObject:
- cont, err = p.ReadNonDeltaObjectContent()
+ obj.SetType(t)
+ err = p.ReadNonDeltaObjectContent(obj)
case core.REFDeltaObject:
- cont, typ, err = p.ReadREFDeltaObjectContent()
+ err = p.ReadREFDeltaObjectContent(obj)
case core.OFSDeltaObject:
- cont, typ, err = p.ReadOFSDeltaObjectContent(start)
+ err = p.ReadOFSDeltaObjectContent(obj, start)
default:
- err = ErrInvalidObject.AddDetails("tag %q", typ)
- }
- if err != nil {
- return nil, err
+ err = ErrInvalidObject.AddDetails("tag %q", t)
}
- return memory.NewObject(typ, int64(len(cont)), cont), nil
+ return err
}
// ReadNonDeltaObjectContent reads and returns a non-deltified object
// from its zlib stream in an object entry in the packfile.
-func (p Parser) ReadNonDeltaObjectContent() ([]byte, error) {
- return p.readZip()
-}
-
-func (p Parser) readZip() ([]byte, error) {
- buf := bytes.NewBuffer(nil)
- err := p.inflate(buf)
+func (p Parser) ReadNonDeltaObjectContent(obj core.Object) error {
+ w, err := obj.Writer()
+ if err != nil {
+ return err
+ }
- return buf.Bytes(), err
+ return p.inflate(w)
}
func (p Parser) inflate(w io.Writer) (err error) {
@@ -239,23 +235,23 @@ func (p Parser) inflate(w io.Writer) (err error) {
// ReadREFDeltaObjectContent reads and returns an object specified by a
// REF-Delta entry in the packfile, from the hash onwards.
-func (p Parser) ReadREFDeltaObjectContent() ([]byte, core.ObjectType, error) {
+func (p Parser) ReadREFDeltaObjectContent(obj core.Object) error {
refHash, err := p.ReadHash()
if err != nil {
- return nil, core.ObjectType(0), err
+ return err
}
- refObj, err := p.RecallByHash(refHash)
+ base, err := p.RecallByHash(refHash)
if err != nil {
- return nil, core.ObjectType(0), err
+ return err
}
- content, err := p.ReadSolveDelta(refObj.Content())
- if err != nil {
- return nil, refObj.Type(), err
+ obj.SetType(base.Type())
+ if err := p.ReadAndApplyDelta(obj, base); err != nil {
+ return err
}
- return content, refObj.Type(), nil
+ return nil
}
// ReadHash reads a hash.
@@ -268,41 +264,40 @@ func (p Parser) ReadHash() (core.Hash, error) {
return h, nil
}
-// ReadSolveDelta reads and returns the base patched with the contents
+// ReadAndApplyDelta patches the target object with the base and the contents
// of a zlib compressed diff data in the delta portion of an object
// entry in the packfile.
-func (p Parser) ReadSolveDelta(base []byte) ([]byte, error) {
- diff, err := p.readZip()
- if err != nil {
- return nil, err
+func (p Parser) ReadAndApplyDelta(target, base core.Object) error {
+ buf := bytes.NewBuffer(nil)
+ if err := p.inflate(buf); err != nil {
+ return err
}
- return PatchDelta(base, diff), nil
+ return ApplyDelta(target, base, buf.Bytes())
}
// ReadOFSDeltaObjectContent reads and returns an object specified by an
// OFS-delta entry in the packfile from its negative offset onwards. The
// start parameter is the offset of this particular object entry (the
// current offset minus the already processed type and length).
-func (p Parser) ReadOFSDeltaObjectContent(start int64) (
- []byte, core.ObjectType, error) {
+func (p Parser) ReadOFSDeltaObjectContent(obj core.Object, start int64) error {
jump, err := p.ReadNegativeOffset()
if err != nil {
- return nil, core.ObjectType(0), err
+ return err
}
- ref, err := p.RecallByOffset(start + jump)
+ base, err := p.RecallByOffset(start + jump)
if err != nil {
- return nil, core.ObjectType(0), err
+ return err
}
- content, err := p.ReadSolveDelta(ref.Content())
- if err != nil {
- return nil, ref.Type(), err
+ obj.SetType(base.Type())
+ if err := p.ReadAndApplyDelta(obj, base); err != nil {
+ return err
}
- return content, ref.Type(), nil
+ return nil
}
// ReadNegativeOffset reads and returns an offset from an OFS DELTA
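
FillObject replaces ReadObject and completes the inversion: the parser never allocates objects, it only fills whatever the caller provides. A simplified sketch of the caller-side loop (the real Decoder above does its own bookkeeping), assuming the packfile header has already been read:

package example

import (
	"io"

	"gopkg.in/src-d/go-git.v4/core"
	"gopkg.in/src-d/go-git.v4/formats/packfile"
)

// readObjects fills count objects from the packfile behind r into s. Objects
// are allocated by the storage (NewObject) and filled by the parser
// (FillObject); Remember makes each one recallable as a delta base.
func readObjects(r packfile.ReadRecaller, s core.ObjectStorage, count uint32) error {
	p := packfile.NewParser(r)

	for i := uint32(0); i < count; i++ {
		start, err := p.Offset()
		if err != nil {
			return err
		}

		obj := s.NewObject()
		if err := p.FillObject(obj); err != nil {
			if err == io.EOF {
				break
			}

			return err
		}

		if err := p.Remember(start, obj); err != nil {
			return err
		}

		if _, err := s.Set(obj); err != nil {
			return err
		}
	}

	return nil
}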
diff --git a/formats/packfile/parser_test.go b/formats/packfile/parser_test.go
index ec9b19a..c2b99f1 100644
--- a/formats/packfile/parser_test.go
+++ b/formats/packfile/parser_test.go
@@ -8,7 +8,6 @@ import (
. "gopkg.in/check.v1"
"gopkg.in/src-d/go-git.v4/core"
- "gopkg.in/src-d/go-git.v4/storage/memory"
)
const (
@@ -238,9 +237,10 @@ func (s *ParserSuite) TestReadNonDeltaObjectContent(c *C) {
_, _, err = p.ReadObjectTypeAndLength()
c.Assert(err, IsNil, com)
- cont, err := p.ReadNonDeltaObjectContent()
+ obj := &core.MemoryObject{}
+ err = p.ReadNonDeltaObjectContent(obj)
c.Assert(err, IsNil, com)
- c.Assert(cont, DeepEquals, test.expected, com)
+ c.Assert(obj.Content(), DeepEquals, test.expected, com)
}
}
@@ -293,10 +293,11 @@ func (s *ParserSuite) TestReadOFSDeltaObjectContent(c *C) {
err = fix.seek(beforeJumpSize)
c.Assert(err, IsNil, com)
- cont, typ, err := p.ReadOFSDeltaObjectContent(test.offset)
+ obj := &core.MemoryObject{}
+ err = p.ReadOFSDeltaObjectContent(obj, test.offset)
c.Assert(err, IsNil, com)
- c.Assert(typ, Equals, test.expType, com)
- c.Assert(cont, DeepEquals, test.expContent, com)
+ c.Assert(obj.Type(), Equals, test.expType, com)
+ c.Assert(obj.Content(), DeepEquals, test.expContent, com)
}
}
@@ -356,17 +357,18 @@ func (s *ParserSuite) TestReadREFDeltaObjectContent(c *C) {
err = fix.seek(beforeHash)
c.Assert(err, IsNil, com)
- cont, typ, err := p.ReadREFDeltaObjectContent()
+ obj := &core.MemoryObject{}
+ err = p.ReadREFDeltaObjectContent(obj)
c.Assert(err, IsNil, com)
- c.Assert(typ, Equals, test.expType, com)
- c.Assert(cont, DeepEquals, test.expContent, com)
+ c.Assert(obj.Type(), Equals, test.expType, com)
+ c.Assert(obj.Content(), DeepEquals, test.expContent, com)
p.ForgetAll()
}
}
-func newObject(t core.ObjectType, c []byte) *memory.Object {
- return memory.NewObject(t, int64(len(c)), c)
+func newObject(t core.ObjectType, c []byte) core.Object {
+ return core.NewMemoryObject(t, int64(len(c)), c)
}
func (s *ParserSuite) TestReadHeaderBadSignatureError(c *C) {
diff --git a/formats/packfile/read_recaller_impl_test.go b/formats/packfile/read_recaller_impl_test.go
index 8de7e2a..f89171d 100644
--- a/formats/packfile/read_recaller_impl_test.go
+++ b/formats/packfile/read_recaller_impl_test.go
@@ -7,7 +7,6 @@ import (
"os"
"gopkg.in/src-d/go-git.v4/core"
- "gopkg.in/src-d/go-git.v4/storage/memory"
. "gopkg.in/check.v1"
)
@@ -152,20 +151,20 @@ func (s *ReadRecallerImplSuite) TestRememberRecall(c *C) {
}{
{
off: 12,
- obj: newObj(core.CommitObject, []byte("tree 44a1cdf21c791867c51caad8f1b77e6baee6f462\nparent 87fe6e7c6b1b89519fe3a03a8961c5aa14d4cc68\nparent 9244ee648182b91a63d8cc4cbe4b9ac2a27c0492\nauthor Matt Duftler <duftler@google.com> 1448290941 -0500\ncommitter Matt Duftler <duftler@google.com> 1448290941 -0500\n\nMerge pull request #615 from ewiseblatt/create_dev\n\nPreserve original credentials of spinnaker-local.yml when transforming it.")),
+ obj: newObject(core.CommitObject, []byte("tree 44a1cdf21c791867c51caad8f1b77e6baee6f462\nparent 87fe6e7c6b1b89519fe3a03a8961c5aa14d4cc68\nparent 9244ee648182b91a63d8cc4cbe4b9ac2a27c0492\nauthor Matt Duftler <duftler@google.com> 1448290941 -0500\ncommitter Matt Duftler <duftler@google.com> 1448290941 -0500\n\nMerge pull request #615 from ewiseblatt/create_dev\n\nPreserve original credentials of spinnaker-local.yml when transforming it.")),
}, {
off: 3037,
- obj: newObj(core.TagObject, []byte("object e0005f50e22140def60260960b21667f1fdfff80\ntype commit\ntag v0.10.0\ntagger cfieber <cfieber@netflix.com> 1447687536 -0800\n\nRelease of 0.10.0\n\n- e0005f50e22140def60260960b21667f1fdfff80: Merge pull request #553 from ewiseblatt/rendezvous\n- e1a2b26b784179e6903a7ae967c037c721899eba: Wait for cassandra before starting spinnaker\n- c756e09461d071e98b8660818cf42d90c90f2854: Merge pull request #552 from duftler/google-c2d-tweaks\n- 0777fadf4ca6f458d7071de414f9bd5417911037: Fix incorrect config prop names: s/SPINNAKER_GOOGLE_PROJECT_DEFAULT_REGION/SPINNAKER_GOOGLE_DEFAULT_REGION s/SPINNAKER_GOOGLE_PROJECT_DEFAULT_ZONE/SPINNAKER_GOOGLE_DEFAULT_ZONE Hardcode profile name in generated ~/.aws/credentials to [default]. Restart all of spinnaker after updating cassandra and reconfiguring spinnaker, instead of just restarting clouddriver.\n- d8d031c1ac45801074418c43424a6f2c0dff642c: Merge pull request #551 from kenzanmedia/fixGroup\n- 626d23075f9e92aad19015f2964c95d45f41fa3a: Put in correct block for public image. Delineate cloud provider.\n")),
+ obj: newObject(core.TagObject, []byte("object e0005f50e22140def60260960b21667f1fdfff80\ntype commit\ntag v0.10.0\ntagger cfieber <cfieber@netflix.com> 1447687536 -0800\n\nRelease of 0.10.0\n\n- e0005f50e22140def60260960b21667f1fdfff80: Merge pull request #553 from ewiseblatt/rendezvous\n- e1a2b26b784179e6903a7ae967c037c721899eba: Wait for cassandra before starting spinnaker\n- c756e09461d071e98b8660818cf42d90c90f2854: Merge pull request #552 from duftler/google-c2d-tweaks\n- 0777fadf4ca6f458d7071de414f9bd5417911037: Fix incorrect config prop names: s/SPINNAKER_GOOGLE_PROJECT_DEFAULT_REGION/SPINNAKER_GOOGLE_DEFAULT_REGION s/SPINNAKER_GOOGLE_PROJECT_DEFAULT_ZONE/SPINNAKER_GOOGLE_DEFAULT_ZONE Hardcode profile name in generated ~/.aws/credentials to [default]. Restart all of spinnaker after updating cassandra and reconfiguring spinnaker, instead of just restarting clouddriver.\n- d8d031c1ac45801074418c43424a6f2c0dff642c: Merge pull request #551 from kenzanmedia/fixGroup\n- 626d23075f9e92aad19015f2964c95d45f41fa3a: Put in correct block for public image. Delineate cloud provider.\n")),
}, {
off: 157625,
- obj: newObj(core.BlobObject, []byte(".gradle\nbuild/\n*.iml\n.idea\n*.pyc\n*~\n#*\nconfig/spinnaker-local.yml\n.DS_Store\npacker/ami_table.md\npacker/ami_table.json\npacker/example_output.txt")),
+ obj: newObject(core.BlobObject, []byte(".gradle\nbuild/\n*.iml\n.idea\n*.pyc\n*~\n#*\nconfig/spinnaker-local.yml\n.DS_Store\npacker/ami_table.md\npacker/ami_table.json\npacker/example_output.txt")),
}, {
off: 1234,
- obj: newObj(core.BlobObject, []byte(".gradle\nbuild/\n*.iml\n.idea\n*.pyc\n*~\n#*\nconfig/spinnaker-local.yml\n.DS_Store\npacker/ami_table.md\npacker/ami_table.json\npacker/example_output.txt")),
+ obj: newObject(core.BlobObject, []byte(".gradle\nbuild/\n*.iml\n.idea\n*.pyc\n*~\n#*\nconfig/spinnaker-local.yml\n.DS_Store\npacker/ami_table.md\npacker/ami_table.json\npacker/example_output.txt")),
err: "duplicated object: with hash .*",
}, {
off: 3037,
- obj: newObj(core.BlobObject, []byte("")),
+ obj: newObject(core.BlobObject, []byte("")),
err: "duplicated object: with offset 3037",
ignore: "seekable",
// seekable can not check if the offset has already been added
@@ -186,19 +185,17 @@ func (s *ReadRecallerImplSuite) TestRememberRecall(c *C) {
result, err := sr.RecallByHash(test.obj.Hash())
c.Assert(err, IsNil, com)
+ c.Assert(result.Hash(), Equals, test.obj.Hash())
c.Assert(result, DeepEquals, test.obj, com)
result, err = sr.RecallByOffset(test.off)
c.Assert(err, IsNil, com)
+ c.Assert(result.Hash(), Equals, test.obj.Hash())
c.Assert(result, DeepEquals, test.obj, com)
}
}
}
-func newObj(typ core.ObjectType, cont []byte) core.Object {
- return memory.NewObject(typ, int64(len(cont)), cont)
-}
-
func (s *ReadRecallerImplSuite) TestRecallByHashErrors(c *C) {
for _, impl := range []struct {
id string
@@ -209,7 +206,7 @@ func (s *ReadRecallerImplSuite) TestRecallByHashErrors(c *C) {
} {
com := Commentf("implementation %s", impl.id)
sr := impl.newFn([]byte{})
- obj := newObj(core.CommitObject, []byte{})
+ obj := newObject(core.CommitObject, []byte{})
_, err := sr.RecallByHash(obj.Hash())
c.Assert(err, ErrorMatches, ErrCannotRecall.Error()+".*", com)
@@ -249,9 +246,9 @@ func rememberSomeObjects(sr ReadRecaller) error {
off int64
obj core.Object
}{
- {off: 0, obj: newObj(core.CommitObject, []byte{'a'})}, // 93114cce67ec23976d15199514399203f69cc676
- {off: 10, obj: newObj(core.CommitObject, []byte{'b'})}, // 2bb767097e479f668f0ebdabe88df11337bd8f19
- {off: 20, obj: newObj(core.CommitObject, []byte{'c'})}, // 2f8096005677370e6446541a50e074299d43d468
+ {off: 0, obj: newObject(core.CommitObject, []byte{'a'})}, // 93114cce67ec23976d15199514399203f69cc676
+ {off: 10, obj: newObject(core.CommitObject, []byte{'b'})}, // 2bb767097e479f668f0ebdabe88df11337bd8f19
+ {off: 20, obj: newObject(core.CommitObject, []byte{'c'})}, // 2f8096005677370e6446541a50e074299d43d468
} {
err := sr.Remember(init.off, init.obj)
if err != nil {
diff --git a/formats/packfile/seekable.go b/formats/packfile/seekable.go
index 37b4ee9..65c8a69 100644
--- a/formats/packfile/seekable.go
+++ b/formats/packfile/seekable.go
@@ -104,5 +104,6 @@ func (r *Seekable) RecallByOffset(o int64) (obj core.Object, err error) {
return nil, err
}
- return NewParser(r).ReadObject()
+ obj = &core.MemoryObject{}
+ return obj, NewParser(r).FillObject(obj)
}
diff --git a/objects_test.go b/objects_test.go
index 4aa835d..80ebf7f 100644
--- a/objects_test.go
+++ b/objects_test.go
@@ -5,7 +5,6 @@ import (
"time"
"gopkg.in/src-d/go-git.v4/core"
- "gopkg.in/src-d/go-git.v4/storage/memory"
. "gopkg.in/check.v1"
)
@@ -83,7 +82,7 @@ func (s *ObjectsSuite) TestParseTree(c *C) {
}
func (s *ObjectsSuite) TestBlobHash(c *C) {
- o := &memory.Object{}
+ o := &core.MemoryObject{}
o.SetType(core.BlobObject)
o.SetSize(3)
diff --git a/storage/memory/object.go b/storage/memory/object.go
deleted file mode 100644
index 8c35360..0000000
--- a/storage/memory/object.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package memory
-
-import (
- "bytes"
- "io/ioutil"
-
- "gopkg.in/src-d/go-git.v4/core"
-)
-
-// Object on memory core.Object implementation
-type Object struct {
- t core.ObjectType
- h core.Hash
- cont []byte
- sz int64
-}
-
-// NewObject creates a new object with the given type and content
-func NewObject(typ core.ObjectType, size int64, cont []byte) *Object {
- return &Object{
- t: typ,
- h: core.ComputeHash(typ, cont),
- cont: cont,
- sz: int64(len(cont)),
- }
-}
-
-// Hash return the object Hash, the hash is calculated on-the-fly the first
-// time is called, the subsequent calls the same Hash is returned even if the
-// type or the content has changed. The Hash is only generated if the size of
-// the content is exactly the Object.Size
-func (o *Object) Hash() core.Hash {
- if o.h == core.ZeroHash && int64(len(o.cont)) == o.sz {
- o.h = core.ComputeHash(o.t, o.cont)
- }
-
- return o.h
-}
-
-// Type return the core.ObjectType
-func (o *Object) Type() core.ObjectType { return o.t }
-
-// SetType sets the core.ObjectType
-func (o *Object) SetType(t core.ObjectType) { o.t = t }
-
-// Size return the size of the object
-func (o *Object) Size() int64 { return o.sz }
-
-// SetSize set the object size, the given size should be written afterwards
-func (o *Object) SetSize(s int64) { o.sz = s }
-
-// Content returns the contents of the object
-func (o *Object) Content() []byte { return o.cont }
-
-// Reader returns a core.ObjectReader used to read the object's content.
-func (o *Object) Reader() (core.ObjectReader, error) {
- return ioutil.NopCloser(bytes.NewBuffer(o.cont)), nil
-}
-
-// Writer returns a core.ObjectWriter used to write the object's content.
-func (o *Object) Writer() (core.ObjectWriter, error) {
- return o, nil
-}
-
-func (o *Object) Write(p []byte) (n int, err error) {
- o.cont = append(o.cont, p...)
- return len(p), nil
-}
-
-// Close releases any resources consumed by the object when it is acting as a
-// core.ObjectWriter.
-func (o *Object) Close() error { return nil }
diff --git a/storage/memory/storage.go b/storage/memory/storage.go
index 62fd1b4..c827ce0 100644
--- a/storage/memory/storage.go
+++ b/storage/memory/storage.go
@@ -28,6 +28,11 @@ func NewObjectStorage() *ObjectStorage {
}
}
+// NewObject creates a new MemoryObject
+func (o *ObjectStorage) NewObject() core.Object {
+ return &core.MemoryObject{}
+}
+
// Set stores an object; the object should be properly filled before setting it.
func (o *ObjectStorage) Set(obj core.Object) (core.Hash, error) {
h := obj.Hash()
diff --git a/storage/memory/storage_test.go b/storage/memory/storage_test.go
index 200dbba..3b6c994 100644
--- a/storage/memory/storage_test.go
+++ b/storage/memory/storage_test.go
@@ -1,10 +1,14 @@
package memory
import (
+ "testing"
+
. "gopkg.in/check.v1"
"gopkg.in/src-d/go-git.v4/core"
)
+func Test(t *testing.T) { TestingT(t) }
+
type ObjectStorageSuite struct{}
var _ = Suite(&ObjectStorageSuite{})
@@ -12,7 +16,7 @@ var _ = Suite(&ObjectStorageSuite{})
func (s *ObjectStorageSuite) TestSet(c *C) {
os := NewObjectStorage()
- o := &Object{}
+ o := &core.MemoryObject{}
o.SetType(core.CommitObject)
o.SetSize(3)
@@ -30,7 +34,7 @@ func (s *ObjectStorageSuite) TestSet(c *C) {
func (s *ObjectStorageSuite) TestGet(c *C) {
os := NewObjectStorage()
- o := &Object{}
+ o := &core.MemoryObject{}
o.SetType(core.CommitObject)
o.SetSize(3)
diff --git a/storage/seekable/internal/index/index.go b/storage/seekable/internal/index/index.go
index 4282f3e..737aca6 100644
--- a/storage/seekable/internal/index/index.go
+++ b/storage/seekable/internal/index/index.go
@@ -51,8 +51,8 @@ func NewFromPackfile(rs io.ReadSeeker) (Index, error) {
return nil, err
}
- obj, err := p.ReadObject()
- if err != nil {
+ obj := &core.MemoryObject{}
+ if err := p.FillObject(obj); err != nil {
return nil, err
}
diff --git a/storage/seekable/storage.go b/storage/seekable/storage.go
index 9cc37ba..e056c54 100644
--- a/storage/seekable/storage.go
+++ b/storage/seekable/storage.go
@@ -90,6 +90,10 @@ func buildIndexFromIdxfile(fs fs.FS, path string) (index.Index, error) {
return index.NewFromIdx(f)
}
+func (s *ObjectStorage) NewObject() core.Object {
+ return &core.MemoryObject{}
+}
+
// Set adds a new object to the storage. As this functionality is not
// yet supported, this method always returns a "not implemented yet"
// error and a zero hash.
@@ -131,7 +135,8 @@ func (s *ObjectStorage) Get(h core.Hash) (core.Object, error) {
r.HashToOffset = map[core.Hash]int64(s.index)
p := packfile.NewParser(r)
- return p.ReadObject()
+ obj := s.NewObject()
+ return obj, p.FillObject(obj)
}
// Iter returns an iterator for all the objects in the packfile with the
diff --git a/storage/seekable/storage_test.go b/storage/seekable/storage_test.go
index 2002d2b..bd12ed1 100644
--- a/storage/seekable/storage_test.go
+++ b/storage/seekable/storage_test.go
@@ -321,6 +321,6 @@ func (s *FsSuite) TestSet(c *C) {
sto, err := seekable.New(fs, gitPath)
c.Assert(err, IsNil)
- _, err = sto.Set(&memory.Object{})
+ _, err = sto.Set(&core.MemoryObject{})
c.Assert(err, ErrorMatches, "not implemented yet")
}