aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.travis/deploy.pem.encbin1680 -> 0 bytes
-rwxr-xr-x.travis/install_key.sh9
-rw-r--r--DCO36
-rw-r--r--plumbing/cache/common.go2
-rw-r--r--plumbing/cache/object_lru.go5
-rw-r--r--plumbing/cache/object_test.go101
-rw-r--r--plumbing/format/packfile/decoder.go33
-rw-r--r--plumbing/format/packfile/decoder_test.go23
-rw-r--r--plumbing/format/packfile/delta_selector.go5
-rw-r--r--plumbing/format/packfile/encoder.go57
-rw-r--r--plumbing/format/packfile/encoder_advanced_test.go6
-rw-r--r--plumbing/format/packfile/encoder_test.go81
-rw-r--r--plumbing/format/packfile/object_pack.go32
-rw-r--r--storage/filesystem/object.go17
14 files changed, 301 insertions, 106 deletions
diff --git a/.travis/deploy.pem.enc b/.travis/deploy.pem.enc
deleted file mode 100644
index 0584013..0000000
--- a/.travis/deploy.pem.enc
+++ /dev/null
Binary files differ
diff --git a/.travis/install_key.sh b/.travis/install_key.sh
deleted file mode 100755
index 95a6571..0000000
--- a/.travis/install_key.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-openssl aes-256-cbc \
- -K $encrypted_1477e58fe67a_key \
- -iv $encrypted_1477e58fe67a_iv \
- -in .travis/deploy.pem.enc \
- -out $HOME/.travis/deploy.pem -d
-
-chmod 600 $HOME/.travis/deploy.pem
-
diff --git a/DCO b/DCO
new file mode 100644
index 0000000..3aca339
--- /dev/null
+++ b/DCO
@@ -0,0 +1,36 @@
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved. \ No newline at end of file
diff --git a/plumbing/cache/common.go b/plumbing/cache/common.go
index 9efc26c..e77baf0 100644
--- a/plumbing/cache/common.go
+++ b/plumbing/cache/common.go
@@ -11,6 +11,8 @@ const (
type FileSize int64
+const DefaultMaxSize FileSize = 96 * MiByte
+
// Object is an interface to a object cache.
type Object interface {
// Put puts the given object into the cache. Whether this object will
diff --git a/plumbing/cache/object_lru.go b/plumbing/cache/object_lru.go
index e8414ab..d99a5c9 100644
--- a/plumbing/cache/object_lru.go
+++ b/plumbing/cache/object_lru.go
@@ -24,6 +24,11 @@ func NewObjectLRU(maxSize FileSize) *ObjectLRU {
return &ObjectLRU{MaxSize: maxSize}
}
+// NewObjectLRUDefault creates a new ObjectLRU with the default cache size.
+func NewObjectLRUDefault() *ObjectLRU {
+ return &ObjectLRU{MaxSize: DefaultMaxSize}
+}
+
// Put puts an object into the cache. If the object is already in the cache, it
// will be marked as used. Otherwise, it will be inserted. A single object might
// be evicted to make room for the new object.
diff --git a/plumbing/cache/object_test.go b/plumbing/cache/object_test.go
index b38272f..ec01d60 100644
--- a/plumbing/cache/object_test.go
+++ b/plumbing/cache/object_test.go
@@ -14,7 +14,7 @@ import (
func Test(t *testing.T) { TestingT(t) }
type ObjectSuite struct {
- c Object
+ c map[string]Object
aObject plumbing.EncodedObject
bObject plumbing.EncodedObject
cObject plumbing.EncodedObject
@@ -29,70 +29,89 @@ func (s *ObjectSuite) SetUpTest(c *C) {
s.cObject = newObject("cccccccccccccccccccccccccccccccccccccccc", 1*Byte)
s.dObject = newObject("dddddddddddddddddddddddddddddddddddddddd", 1*Byte)
- s.c = NewObjectLRU(2 * Byte)
+ s.c = make(map[string]Object)
+ s.c["two_bytes"] = NewObjectLRU(2 * Byte)
+ s.c["default_lru"] = NewObjectLRUDefault()
}
func (s *ObjectSuite) TestPutSameObject(c *C) {
- s.c.Put(s.aObject)
- s.c.Put(s.aObject)
- _, ok := s.c.Get(s.aObject.Hash())
- c.Assert(ok, Equals, true)
+ for _, o := range s.c {
+ o.Put(s.aObject)
+ o.Put(s.aObject)
+ _, ok := o.Get(s.aObject.Hash())
+ c.Assert(ok, Equals, true)
+ }
}
func (s *ObjectSuite) TestPutBigObject(c *C) {
- s.c.Put(s.bObject)
- _, ok := s.c.Get(s.aObject.Hash())
- c.Assert(ok, Equals, false)
+ for _, o := range s.c {
+ o.Put(s.bObject)
+ _, ok := o.Get(s.aObject.Hash())
+ c.Assert(ok, Equals, false)
+ }
}
func (s *ObjectSuite) TestPutCacheOverflow(c *C) {
- s.c.Put(s.aObject)
- s.c.Put(s.cObject)
- s.c.Put(s.dObject)
+ // this test only works with a specific size
+ o := s.c["two_bytes"]
+
+ o.Put(s.aObject)
+ o.Put(s.cObject)
+ o.Put(s.dObject)
- obj, ok := s.c.Get(s.aObject.Hash())
+ obj, ok := o.Get(s.aObject.Hash())
c.Assert(ok, Equals, false)
c.Assert(obj, IsNil)
- obj, ok = s.c.Get(s.cObject.Hash())
+ obj, ok = o.Get(s.cObject.Hash())
c.Assert(ok, Equals, true)
c.Assert(obj, NotNil)
- obj, ok = s.c.Get(s.dObject.Hash())
+ obj, ok = o.Get(s.dObject.Hash())
c.Assert(ok, Equals, true)
c.Assert(obj, NotNil)
}
func (s *ObjectSuite) TestClear(c *C) {
- s.c.Put(s.aObject)
- s.c.Clear()
- obj, ok := s.c.Get(s.aObject.Hash())
- c.Assert(ok, Equals, false)
- c.Assert(obj, IsNil)
+ for _, o := range s.c {
+ o.Put(s.aObject)
+ o.Clear()
+ obj, ok := o.Get(s.aObject.Hash())
+ c.Assert(ok, Equals, false)
+ c.Assert(obj, IsNil)
+ }
}
func (s *ObjectSuite) TestConcurrentAccess(c *C) {
- var wg sync.WaitGroup
-
- for i := 0; i < 1000; i++ {
- wg.Add(3)
- go func(i int) {
- s.c.Put(newObject(fmt.Sprint(i), FileSize(i)))
- wg.Done()
- }(i)
-
- go func(i int) {
- if i%30 == 0 {
- s.c.Clear()
- }
- wg.Done()
- }(i)
-
- go func(i int) {
- s.c.Get(plumbing.NewHash(fmt.Sprint(i)))
- wg.Done()
- }(i)
+ for _, o := range s.c {
+ var wg sync.WaitGroup
+
+ for i := 0; i < 1000; i++ {
+ wg.Add(3)
+ go func(i int) {
+ o.Put(newObject(fmt.Sprint(i), FileSize(i)))
+ wg.Done()
+ }(i)
+
+ go func(i int) {
+ if i%30 == 0 {
+ o.Clear()
+ }
+ wg.Done()
+ }(i)
+
+ go func(i int) {
+ o.Get(plumbing.NewHash(fmt.Sprint(i)))
+ wg.Done()
+ }(i)
+ }
+
+ wg.Wait()
}
+}
+
+func (s *ObjectSuite) TestDefaultLRU(c *C) {
+ defaultLRU := s.c["default_lru"].(*ObjectLRU)
- wg.Wait()
+ c.Assert(defaultLRU.MaxSize, Equals, DefaultMaxSize)
}
type dummyObject struct {
diff --git a/plumbing/format/packfile/decoder.go b/plumbing/format/packfile/decoder.go
index ad72ea0..cb78701 100644
--- a/plumbing/format/packfile/decoder.go
+++ b/plumbing/format/packfile/decoder.go
@@ -52,7 +52,7 @@ var (
// is destroyed. The Offsets and CRCs are calculated whether an
// ObjectStorer was provided or not.
type Decoder struct {
- DeltaBaseCache cache.Object
+ deltaBaseCache cache.Object
s *Scanner
o storer.EncodedObjectStorer
@@ -80,15 +80,27 @@ type Decoder struct {
// If the ObjectStorer implements storer.Transactioner, a transaction is created
// during the Decode execution. If anything fails, Rollback is called
func NewDecoder(s *Scanner, o storer.EncodedObjectStorer) (*Decoder, error) {
- return NewDecoderForType(s, o, plumbing.AnyObject)
+ return NewDecoderForType(s, o, plumbing.AnyObject,
+ cache.NewObjectLRUDefault())
+}
+
+// NewDecoderWithCache is a version of NewDecoder where cache can be specified.
+func NewDecoderWithCache(s *Scanner, o storer.EncodedObjectStorer,
+ cacheObject cache.Object) (*Decoder, error) {
+
+ return NewDecoderForType(s, o, plumbing.AnyObject, cacheObject)
}
// NewDecoderForType returns a new Decoder but in this case for a specific object type.
// When an object is read using this Decoder instance and it is not of the same type of
// the specified one, nil will be returned. This is intended to avoid the content
-// deserialization of all the objects
+// deserialization of all the objects.
+//
+// cacheObject is a cache.Object implementation that is used to speed up the
+// process. If cache is not needed you can pass nil. To create an LRU cache
+// object with the default size you can use the helper cache.NewObjectLRUDefault().
func NewDecoderForType(s *Scanner, o storer.EncodedObjectStorer,
- t plumbing.ObjectType) (*Decoder, error) {
+ t plumbing.ObjectType, cacheObject cache.Object) (*Decoder, error) {
if t == plumbing.OFSDeltaObject ||
t == plumbing.REFDeltaObject ||
@@ -101,8 +113,9 @@ func NewDecoderForType(s *Scanner, o storer.EncodedObjectStorer,
}
return &Decoder{
- s: s,
- o: o,
+ s: s,
+ o: o,
+ deltaBaseCache: cacheObject,
idx: NewIndex(0),
offsetToType: make(map[int64]plumbing.ObjectType),
@@ -404,19 +417,19 @@ func (d *Decoder) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset i
}
func (d *Decoder) cacheGet(h plumbing.Hash) (plumbing.EncodedObject, bool) {
- if d.DeltaBaseCache == nil {
+ if d.deltaBaseCache == nil {
return nil, false
}
- return d.DeltaBaseCache.Get(h)
+ return d.deltaBaseCache.Get(h)
}
func (d *Decoder) cachePut(obj plumbing.EncodedObject) {
- if d.DeltaBaseCache == nil {
+ if d.deltaBaseCache == nil {
return
}
- d.DeltaBaseCache.Put(obj)
+ d.deltaBaseCache.Put(obj)
}
func (d *Decoder) recallByOffset(o int64) (plumbing.EncodedObject, error) {
diff --git a/plumbing/format/packfile/decoder_test.go b/plumbing/format/packfile/decoder_test.go
index 1a1a74a..b5bc7b7 100644
--- a/plumbing/format/packfile/decoder_test.go
+++ b/plumbing/format/packfile/decoder_test.go
@@ -4,6 +4,7 @@ import (
"io"
"gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/cache"
"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
@@ -51,7 +52,8 @@ func (s *ReaderSuite) TestDecodeByTypeRefDelta(c *C) {
storage := memory.NewStorage()
scanner := packfile.NewScanner(f.Packfile())
- d, err := packfile.NewDecoderForType(scanner, storage, plumbing.CommitObject)
+ d, err := packfile.NewDecoderForType(scanner, storage, plumbing.CommitObject,
+ cache.NewObjectLRUDefault())
c.Assert(err, IsNil)
// Index required to decode by ref-delta.
@@ -77,7 +79,8 @@ func (s *ReaderSuite) TestDecodeByTypeRefDeltaError(c *C) {
fixtures.Basic().ByTag("ref-delta").Test(c, func(f *fixtures.Fixture) {
storage := memory.NewStorage()
scanner := packfile.NewScanner(f.Packfile())
- d, err := packfile.NewDecoderForType(scanner, storage, plumbing.CommitObject)
+ d, err := packfile.NewDecoderForType(scanner, storage,
+ plumbing.CommitObject, cache.NewObjectLRUDefault())
c.Assert(err, IsNil)
defer d.Close()
@@ -111,7 +114,8 @@ func (s *ReaderSuite) TestDecodeByType(c *C) {
for _, t := range ts {
storage := memory.NewStorage()
scanner := packfile.NewScanner(f.Packfile())
- d, err := packfile.NewDecoderForType(scanner, storage, t)
+ d, err := packfile.NewDecoderForType(scanner, storage, t,
+ cache.NewObjectLRUDefault())
c.Assert(err, IsNil)
// when the packfile is ref-delta based, the offsets are required
@@ -141,13 +145,17 @@ func (s *ReaderSuite) TestDecodeByTypeConstructor(c *C) {
storage := memory.NewStorage()
scanner := packfile.NewScanner(f.Packfile())
- _, err := packfile.NewDecoderForType(scanner, storage, plumbing.OFSDeltaObject)
+ _, err := packfile.NewDecoderForType(scanner, storage,
+ plumbing.OFSDeltaObject, cache.NewObjectLRUDefault())
c.Assert(err, Equals, plumbing.ErrInvalidType)
- _, err = packfile.NewDecoderForType(scanner, storage, plumbing.REFDeltaObject)
+ _, err = packfile.NewDecoderForType(scanner, storage,
+ plumbing.REFDeltaObject, cache.NewObjectLRUDefault())
+
c.Assert(err, Equals, plumbing.ErrInvalidType)
- _, err = packfile.NewDecoderForType(scanner, storage, plumbing.InvalidObject)
+ _, err = packfile.NewDecoderForType(scanner, storage, plumbing.InvalidObject,
+ cache.NewObjectLRUDefault())
c.Assert(err, Equals, plumbing.ErrInvalidType)
}
@@ -313,7 +321,8 @@ func (s *ReaderSuite) TestDecodeObjectAt(c *C) {
func (s *ReaderSuite) TestDecodeObjectAtForType(c *C) {
f := fixtures.Basic().One()
scanner := packfile.NewScanner(f.Packfile())
- d, err := packfile.NewDecoderForType(scanner, nil, plumbing.TreeObject)
+ d, err := packfile.NewDecoderForType(scanner, nil, plumbing.TreeObject,
+ cache.NewObjectLRUDefault())
c.Assert(err, IsNil)
// when the packfile is ref-delta based, the offsets are required
diff --git a/plumbing/format/packfile/delta_selector.go b/plumbing/format/packfile/delta_selector.go
index 51adcdf..8792574 100644
--- a/plumbing/format/packfile/delta_selector.go
+++ b/plumbing/format/packfile/delta_selector.go
@@ -174,11 +174,6 @@ func (dw *deltaSelector) fixAndBreakChainsOne(objectsToPack map[plumbing.Hash]*O
return dw.undeltify(otp)
}
- if base.Size() <= otp.Size() {
- // Bases should be bigger
- return dw.undeltify(otp)
- }
-
if err := dw.fixAndBreakChainsOne(objectsToPack, base); err != nil {
return err
}
diff --git a/plumbing/format/packfile/encoder.go b/plumbing/format/packfile/encoder.go
index 7ee6546..6686dd5 100644
--- a/plumbing/format/packfile/encoder.go
+++ b/plumbing/format/packfile/encoder.go
@@ -18,10 +18,7 @@ type Encoder struct {
w *offsetWriter
zw *zlib.Writer
hasher plumbing.Hasher
- // offsets is a map of object hashes to corresponding offsets in the packfile.
- // It is used to determine offset of the base of a delta when a OFS_DELTA is
- // used.
- offsets map[plumbing.Hash]int64
+
useRefDeltas bool
}
@@ -40,7 +37,6 @@ func NewEncoder(w io.Writer, s storer.EncodedObjectStorer, useRefDeltas bool) *E
w: ow,
zw: zw,
hasher: h,
- offsets: make(map[plumbing.Hash]int64),
useRefDeltas: useRefDeltas,
}
}
@@ -85,11 +81,34 @@ func (e *Encoder) head(numEntries int) error {
}
func (e *Encoder) entry(o *ObjectToPack) error {
- offset := e.w.Offset()
- e.offsets[o.Hash()] = offset
+ if o.WantWrite() {
+ // A cycle exists in this delta chain. This should only occur if a
+ // selected object representation disappeared during writing
+ // (for example due to a concurrent repack) and a different base
+ // was chosen, forcing a cycle. Select something other than a
+ // delta, and write this object.
+ o.BackToOriginal()
+ }
+
+ if o.IsWritten() {
+ return nil
+ }
+
+ o.MarkWantWrite()
+
+ if err := e.writeBaseIfDelta(o); err != nil {
+ return err
+ }
+
+ // We need to check if we already wrote that object due to a cyclic delta chain
+ if o.IsWritten() {
+ return nil
+ }
+
+ o.Offset = e.w.Offset()
if o.IsDelta() {
- if err := e.writeDeltaHeader(o, offset); err != nil {
+ if err := e.writeDeltaHeader(o); err != nil {
return err
}
} else {
@@ -112,7 +131,16 @@ func (e *Encoder) entry(o *ObjectToPack) error {
return e.zw.Close()
}
-func (e *Encoder) writeDeltaHeader(o *ObjectToPack, offset int64) error {
+func (e *Encoder) writeBaseIfDelta(o *ObjectToPack) error {
+ if o.IsDelta() && !o.Base.IsWritten() {
+ // We must write base first
+ return e.entry(o.Base)
+ }
+
+ return nil
+}
+
+func (e *Encoder) writeDeltaHeader(o *ObjectToPack) error {
// Write offset deltas by default
t := plumbing.OFSDeltaObject
if e.useRefDeltas {
@@ -126,7 +154,7 @@ func (e *Encoder) writeDeltaHeader(o *ObjectToPack, offset int64) error {
if e.useRefDeltas {
return e.writeRefDeltaHeader(o.Base.Hash())
} else {
- return e.writeOfsDeltaHeader(offset, o.Base.Hash())
+ return e.writeOfsDeltaHeader(o)
}
}
@@ -134,15 +162,10 @@ func (e *Encoder) writeRefDeltaHeader(base plumbing.Hash) error {
return binary.Write(e.w, base)
}
-func (e *Encoder) writeOfsDeltaHeader(deltaOffset int64, base plumbing.Hash) error {
- baseOffset, ok := e.offsets[base]
- if !ok {
- return fmt.Errorf("base for delta not found, base hash: %v", base)
- }
-
+func (e *Encoder) writeOfsDeltaHeader(o *ObjectToPack) error {
// for OFS_DELTA, offset of the base is interpreted as negative offset
// relative to the type-byte of the header of the ofs-delta entry.
- relativeOffset := deltaOffset - baseOffset
+ relativeOffset := o.Offset - o.Base.Offset
if relativeOffset <= 0 {
return fmt.Errorf("bad offset for OFS_DELTA entry: %d", relativeOffset)
}
diff --git a/plumbing/format/packfile/encoder_advanced_test.go b/plumbing/format/packfile/encoder_advanced_test.go
index 69d6b39..1075875 100644
--- a/plumbing/format/packfile/encoder_advanced_test.go
+++ b/plumbing/format/packfile/encoder_advanced_test.go
@@ -68,16 +68,18 @@ func (s *EncoderAdvancedSuite) testEncodeDecode(c *C, storage storer.Storer, pac
buf := bytes.NewBuffer(nil)
enc := NewEncoder(buf, storage, false)
- _, err = enc.Encode(hashes, packWindow)
+ encodeHash, err := enc.Encode(hashes, packWindow)
c.Assert(err, IsNil)
scanner := NewScanner(buf)
storage = memory.NewStorage()
d, err := NewDecoder(scanner, storage)
c.Assert(err, IsNil)
- _, err = d.Decode()
+ decodeHash, err := d.Decode()
c.Assert(err, IsNil)
+ c.Assert(encodeHash, Equals, decodeHash)
+
objIter, err = storage.IterEncodedObjects(plumbing.AnyObject)
c.Assert(err, IsNil)
obtainedObjects := map[plumbing.Hash]bool{}
diff --git a/plumbing/format/packfile/encoder_test.go b/plumbing/format/packfile/encoder_test.go
index f40517d..320036b 100644
--- a/plumbing/format/packfile/encoder_test.go
+++ b/plumbing/format/packfile/encoder_test.go
@@ -106,6 +106,16 @@ func (s *EncoderSuite) TestDecodeEncodeWithDeltasDecodeOFS(c *C) {
s.deltaOverDeltaTest(c)
}
+func (s *EncoderSuite) TestDecodeEncodeWithCycleREF(c *C) {
+ s.enc = NewEncoder(s.buf, s.store, true)
+ s.deltaOverDeltaCyclicTest(c)
+}
+
+func (s *EncoderSuite) TestDecodeEncodeWithCycleOFS(c *C) {
+ s.enc = NewEncoder(s.buf, s.store, false)
+ s.deltaOverDeltaCyclicTest(c)
+}
+
func (s *EncoderSuite) simpleDeltaTest(c *C) {
srcObject := newObject(plumbing.BlobObject, []byte("0"))
targetObject := newObject(plumbing.BlobObject, []byte("01"))
@@ -114,7 +124,7 @@ func (s *EncoderSuite) simpleDeltaTest(c *C) {
c.Assert(err, IsNil)
srcToPack := newObjectToPack(srcObject)
- _, err = s.enc.encode([]*ObjectToPack{
+ encHash, err := s.enc.encode([]*ObjectToPack{
srcToPack,
newDeltaObjectToPack(srcToPack, targetObject, deltaObject),
})
@@ -126,9 +136,11 @@ func (s *EncoderSuite) simpleDeltaTest(c *C) {
d, err := NewDecoder(scanner, storage)
c.Assert(err, IsNil)
- _, err = d.Decode()
+ decHash, err := d.Decode()
c.Assert(err, IsNil)
+ c.Assert(encHash, Equals, decHash)
+
decSrc, err := storage.EncodedObject(srcObject.Type(), srcObject.Hash())
c.Assert(err, IsNil)
c.Assert(decSrc, DeepEquals, srcObject)
@@ -153,7 +165,8 @@ func (s *EncoderSuite) deltaOverDeltaTest(c *C) {
srcToPack := newObjectToPack(srcObject)
targetToPack := newObjectToPack(targetObject)
- _, err = s.enc.encode([]*ObjectToPack{
+ encHash, err := s.enc.encode([]*ObjectToPack{
+ targetToPack,
srcToPack,
newDeltaObjectToPack(srcToPack, targetObject, deltaObject),
newDeltaObjectToPack(targetToPack, otherTargetObject, otherDeltaObject),
@@ -165,9 +178,11 @@ func (s *EncoderSuite) deltaOverDeltaTest(c *C) {
d, err := NewDecoder(scanner, storage)
c.Assert(err, IsNil)
- _, err = d.Decode()
+ decHash, err := d.Decode()
c.Assert(err, IsNil)
+ c.Assert(encHash, Equals, decHash)
+
decSrc, err := storage.EncodedObject(srcObject.Type(), srcObject.Hash())
c.Assert(err, IsNil)
c.Assert(decSrc, DeepEquals, srcObject)
@@ -180,3 +195,61 @@ func (s *EncoderSuite) deltaOverDeltaTest(c *C) {
c.Assert(err, IsNil)
c.Assert(decOtherTarget, DeepEquals, otherTargetObject)
}
+
+func (s *EncoderSuite) deltaOverDeltaCyclicTest(c *C) {
+ o1 := newObject(plumbing.BlobObject, []byte("0"))
+ o2 := newObject(plumbing.BlobObject, []byte("01"))
+ o3 := newObject(plumbing.BlobObject, []byte("011111"))
+ o4 := newObject(plumbing.BlobObject, []byte("01111100000"))
+
+ d2, err := GetDelta(o1, o2)
+ c.Assert(err, IsNil)
+
+ d3, err := GetDelta(o4, o3)
+ c.Assert(err, IsNil)
+
+ d4, err := GetDelta(o3, o4)
+ c.Assert(err, IsNil)
+
+ po1 := newObjectToPack(o1)
+ pd2 := newDeltaObjectToPack(po1, o2, d2)
+ pd3 := newObjectToPack(o3)
+ pd4 := newObjectToPack(o4)
+
+ pd3.SetDelta(pd4, d3)
+ pd4.SetDelta(pd3, d4)
+
+ encHash, err := s.enc.encode([]*ObjectToPack{
+ po1,
+ pd2,
+ pd3,
+ pd4,
+ })
+ c.Assert(err, IsNil)
+
+ scanner := NewScanner(s.buf)
+ storage := memory.NewStorage()
+ d, err := NewDecoder(scanner, storage)
+ c.Assert(err, IsNil)
+
+ decHash, err := d.Decode()
+ c.Assert(err, IsNil)
+
+ c.Assert(encHash, Equals, decHash)
+
+ decSrc, err := storage.EncodedObject(o1.Type(), o1.Hash())
+ c.Assert(err, IsNil)
+ c.Assert(decSrc, DeepEquals, o1)
+
+ decTarget, err := storage.EncodedObject(o2.Type(), o2.Hash())
+ c.Assert(err, IsNil)
+ c.Assert(decTarget, DeepEquals, o2)
+
+ decOtherTarget, err := storage.EncodedObject(o3.Type(), o3.Hash())
+ c.Assert(err, IsNil)
+ c.Assert(decOtherTarget, DeepEquals, o3)
+
+ decAnotherTarget, err := storage.EncodedObject(o4.Type(), o4.Hash())
+ c.Assert(err, IsNil)
+ c.Assert(decAnotherTarget, DeepEquals, o4)
+}
diff --git a/plumbing/format/packfile/object_pack.go b/plumbing/format/packfile/object_pack.go
index e22e783..1563517 100644
--- a/plumbing/format/packfile/object_pack.go
+++ b/plumbing/format/packfile/object_pack.go
@@ -13,12 +13,16 @@ type ObjectToPack struct {
// If the main object is not a delta, Base will be null
Base *ObjectToPack
// Original is the object that we can generate applying the delta to
- // Base, or the same object as EncodedObject in the case of a non-delta
+ // Base, or the same object as Object in the case of a non-delta
// object.
Original plumbing.EncodedObject
// Depth is the amount of deltas needed to resolve to obtain Original
// (delta based on delta based on ...)
Depth int
+
+ // offset in pack when the object has already been written, or 0 if it
+ // has not been written yet
+ Offset int64
}
// newObjectToPack creates a correct ObjectToPack based on a non-delta object
@@ -41,6 +45,32 @@ func newDeltaObjectToPack(base *ObjectToPack, original, delta plumbing.EncodedOb
}
}
+// BackToOriginal converts that ObjectToPack to a non-deltified object if it was one
+func (o *ObjectToPack) BackToOriginal() {
+ if o.IsDelta() {
+ o.Object = o.Original
+ o.Base = nil
+ o.Depth = 0
+ }
+}
+
+// IsWritten reports whether this ObjectToPack was
+// already written into the packfile
+func (o *ObjectToPack) IsWritten() bool {
+ return o.Offset > 1
+}
+
+// MarkWantWrite marks this ObjectToPack as WantWrite
+// to avoid delta chain loops
+func (o *ObjectToPack) MarkWantWrite() {
+ o.Offset = 1
+}
+
+// WantWrite checks if this ObjectToPack was marked as WantWrite before
+func (o *ObjectToPack) WantWrite() bool {
+ return o.Offset == 1
+}
+
func (o *ObjectToPack) Type() plumbing.ObjectType {
if o.Original != nil {
return o.Original.Type()
diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go
index fd52ed5..3ec7304 100644
--- a/storage/filesystem/object.go
+++ b/storage/filesystem/object.go
@@ -18,11 +18,9 @@ import (
"gopkg.in/src-d/go-billy.v4"
)
-const DefaultMaxDeltaBaseCacheSize = 92 * cache.MiByte
-
type ObjectStorage struct {
- // DeltaBaseCache is an object cache uses to cache delta's bases when
- DeltaBaseCache cache.Object
+ // deltaBaseCache is an object cache used to cache delta bases.
+ deltaBaseCache cache.Object
dir *dotgit.DotGit
index map[plumbing.Hash]*packfile.Index
@@ -30,7 +28,7 @@ type ObjectStorage struct {
func newObjectStorage(dir *dotgit.DotGit) (ObjectStorage, error) {
s := ObjectStorage{
- DeltaBaseCache: cache.NewObjectLRU(DefaultMaxDeltaBaseCacheSize),
+ deltaBaseCache: cache.NewObjectLRUDefault(),
dir: dir,
}
@@ -287,13 +285,13 @@ func (s *ObjectStorage) decodeObjectAt(
p := packfile.NewScanner(f)
- d, err := packfile.NewDecoder(p, memory.NewStorage())
+ d, err := packfile.NewDecoderWithCache(p, memory.NewStorage(),
+ s.deltaBaseCache)
if err != nil {
return nil, err
}
d.SetIndex(idx)
- d.DeltaBaseCache = s.DeltaBaseCache
obj, err := d.DecodeObjectAt(offset)
return obj, err
}
@@ -400,7 +398,7 @@ func (s *ObjectStorage) buildPackfileIters(t plumbing.ObjectType, seen map[plumb
return nil, err
}
- iter, err := newPackfileIter(pack, t, seen, s.index[h], s.DeltaBaseCache)
+ iter, err := newPackfileIter(pack, t, seen, s.index[h], s.deltaBaseCache)
if err != nil {
return nil, err
}
@@ -433,13 +431,12 @@ func newPackfileIter(f billy.File, t plumbing.ObjectType, seen map[plumbing.Hash
return nil, err
}
- d, err := packfile.NewDecoderForType(s, memory.NewStorage(), t)
+ d, err := packfile.NewDecoderForType(s, memory.NewStorage(), t, cache)
if err != nil {
return nil, err
}
d.SetIndex(index)
- d.DeltaBaseCache = cache
return &packfileIter{
f: f,