Diffstat (limited to 'plumbing')
-rw-r--r-- | plumbing/cache/common.go | 2
-rw-r--r-- | plumbing/cache/object_lru.go | 5
-rw-r--r-- | plumbing/cache/object_test.go | 101
-rw-r--r-- | plumbing/format/packfile/decoder.go | 33
-rw-r--r-- | plumbing/format/packfile/decoder_test.go | 23
5 files changed, 106 insertions, 58 deletions
diff --git a/plumbing/cache/common.go b/plumbing/cache/common.go
index 9efc26c..e77baf0 100644
--- a/plumbing/cache/common.go
+++ b/plumbing/cache/common.go
@@ -11,6 +11,8 @@ const (
 
 type FileSize int64
 
+const DefaultMaxSize FileSize = 96 * MiByte
+
 // Object is an interface to a object cache.
 type Object interface {
 	// Put puts the given object into the cache. Whether this object will
diff --git a/plumbing/cache/object_lru.go b/plumbing/cache/object_lru.go
index e8414ab..d99a5c9 100644
--- a/plumbing/cache/object_lru.go
+++ b/plumbing/cache/object_lru.go
@@ -24,6 +24,11 @@ func NewObjectLRU(maxSize FileSize) *ObjectLRU {
 	return &ObjectLRU{MaxSize: maxSize}
 }
 
+// NewObjectLRUDefault creates a new ObjectLRU with the default cache size.
+func NewObjectLRUDefault() *ObjectLRU {
+	return &ObjectLRU{MaxSize: DefaultMaxSize}
+}
+
 // Put puts an object into the cache. If the object is already in the cache, it
 // will be marked as used. Otherwise, it will be inserted. A single object might
 // be evicted to make room for the new object.
diff --git a/plumbing/cache/object_test.go b/plumbing/cache/object_test.go
index b38272f..ec01d60 100644
--- a/plumbing/cache/object_test.go
+++ b/plumbing/cache/object_test.go
@@ -14,7 +14,7 @@ import (
 func Test(t *testing.T) { TestingT(t) }
 
 type ObjectSuite struct {
-	c       Object
+	c       map[string]Object
 	aObject plumbing.EncodedObject
 	bObject plumbing.EncodedObject
 	cObject plumbing.EncodedObject
@@ -29,70 +29,89 @@ func (s *ObjectSuite) SetUpTest(c *C) {
 	s.cObject = newObject("cccccccccccccccccccccccccccccccccccccccc", 1*Byte)
 	s.dObject = newObject("dddddddddddddddddddddddddddddddddddddddd", 1*Byte)
 
-	s.c = NewObjectLRU(2 * Byte)
+	s.c = make(map[string]Object)
+	s.c["two_bytes"] = NewObjectLRU(2 * Byte)
+	s.c["default_lru"] = NewObjectLRUDefault()
 }
 
 func (s *ObjectSuite) TestPutSameObject(c *C) {
-	s.c.Put(s.aObject)
-	s.c.Put(s.aObject)
-	_, ok := s.c.Get(s.aObject.Hash())
-	c.Assert(ok, Equals, true)
+	for _, o := range s.c {
+		o.Put(s.aObject)
+		o.Put(s.aObject)
+		_, ok := o.Get(s.aObject.Hash())
+		c.Assert(ok, Equals, true)
+	}
 }
 
 func (s *ObjectSuite) TestPutBigObject(c *C) {
-	s.c.Put(s.bObject)
-	_, ok := s.c.Get(s.aObject.Hash())
-	c.Assert(ok, Equals, false)
+	for _, o := range s.c {
+		o.Put(s.bObject)
+		_, ok := o.Get(s.aObject.Hash())
+		c.Assert(ok, Equals, false)
+	}
 }
 
 func (s *ObjectSuite) TestPutCacheOverflow(c *C) {
-	s.c.Put(s.aObject)
-	s.c.Put(s.cObject)
-	s.c.Put(s.dObject)
+	// this test only works with an specific size
+	o := s.c["two_bytes"]
+
+	o.Put(s.aObject)
+	o.Put(s.cObject)
+	o.Put(s.dObject)
 
-	obj, ok := s.c.Get(s.aObject.Hash())
+	obj, ok := o.Get(s.aObject.Hash())
 	c.Assert(ok, Equals, false)
 	c.Assert(obj, IsNil)
 
-	obj, ok = s.c.Get(s.cObject.Hash())
+	obj, ok = o.Get(s.cObject.Hash())
 	c.Assert(ok, Equals, true)
 	c.Assert(obj, NotNil)
 
-	obj, ok = s.c.Get(s.dObject.Hash())
+	obj, ok = o.Get(s.dObject.Hash())
 	c.Assert(ok, Equals, true)
 	c.Assert(obj, NotNil)
 }
 
 func (s *ObjectSuite) TestClear(c *C) {
-	s.c.Put(s.aObject)
-	s.c.Clear()
-	obj, ok := s.c.Get(s.aObject.Hash())
-	c.Assert(ok, Equals, false)
-	c.Assert(obj, IsNil)
+	for _, o := range s.c {
+		o.Put(s.aObject)
+		o.Clear()
+		obj, ok := o.Get(s.aObject.Hash())
+		c.Assert(ok, Equals, false)
+		c.Assert(obj, IsNil)
+	}
 }
 
 func (s *ObjectSuite) TestConcurrentAccess(c *C) {
-	var wg sync.WaitGroup
-
-	for i := 0; i < 1000; i++ {
-		wg.Add(3)
-		go func(i int) {
-			s.c.Put(newObject(fmt.Sprint(i), FileSize(i)))
-			wg.Done()
-		}(i)
-
-		go func(i int) {
-			if i%30 == 0 {
-				s.c.Clear()
-			}
-			wg.Done()
-		}(i)
-
-		go func(i int) {
-			s.c.Get(plumbing.NewHash(fmt.Sprint(i)))
-			wg.Done()
-		}(i)
+	for _, o := range s.c {
+		var wg sync.WaitGroup
+
+		for i := 0; i < 1000; i++ {
+			wg.Add(3)
+			go func(i int) {
+				o.Put(newObject(fmt.Sprint(i), FileSize(i)))
+				wg.Done()
+			}(i)
+
+			go func(i int) {
+				if i%30 == 0 {
+					o.Clear()
+				}
+				wg.Done()
+			}(i)
+
+			go func(i int) {
+				o.Get(plumbing.NewHash(fmt.Sprint(i)))
+				wg.Done()
+			}(i)
+		}
+
+		wg.Wait()
 	}
+}
+
+func (s *ObjectSuite) TestDefaultLRU(c *C) {
+	defaultLRU := s.c["default_lru"].(*ObjectLRU)
 
-	wg.Wait()
+	c.Assert(defaultLRU.MaxSize, Equals, DefaultMaxSize)
 }
 
 type dummyObject struct {
diff --git a/plumbing/format/packfile/decoder.go b/plumbing/format/packfile/decoder.go
index ad72ea0..cb78701 100644
--- a/plumbing/format/packfile/decoder.go
+++ b/plumbing/format/packfile/decoder.go
@@ -52,7 +52,7 @@ var (
 // is destroyed. The Offsets and CRCs are calculated whether an
 // ObjectStorer was provided or not.
 type Decoder struct {
-	DeltaBaseCache cache.Object
+	deltaBaseCache cache.Object
 
 	s *Scanner
 	o storer.EncodedObjectStorer
@@ -80,15 +80,27 @@ type Decoder struct {
 // If the ObjectStorer implements storer.Transactioner, a transaction is created
 // during the Decode execution. If anything fails, Rollback is called
 func NewDecoder(s *Scanner, o storer.EncodedObjectStorer) (*Decoder, error) {
-	return NewDecoderForType(s, o, plumbing.AnyObject)
+	return NewDecoderForType(s, o, plumbing.AnyObject,
+		cache.NewObjectLRUDefault())
+}
+
+// NewDecoderWithCache is a version of NewDecoder where cache can be specified.
+func NewDecoderWithCache(s *Scanner, o storer.EncodedObjectStorer,
+	cacheObject cache.Object) (*Decoder, error) {
+
+	return NewDecoderForType(s, o, plumbing.AnyObject, cacheObject)
 }
 
 // NewDecoderForType returns a new Decoder but in this case for a specific object type.
 // When an object is read using this Decoder instance and it is not of the same type of
 // the specified one, nil will be returned. This is intended to avoid the content
-// deserialization of all the objects
+// deserialization of all the objects.
+//
+// cacheObject is a cache.Object implementation that is used to speed up the
+// process. If cache is not needed you can pass nil. To create an LRU cache
+// object with the default size you can use the helper cache.ObjectLRUDefault().
 func NewDecoderForType(s *Scanner, o storer.EncodedObjectStorer,
-	t plumbing.ObjectType) (*Decoder, error) {
+	t plumbing.ObjectType, cacheObject cache.Object) (*Decoder, error) {
 
 	if t == plumbing.OFSDeltaObject ||
 		t == plumbing.REFDeltaObject ||
@@ -101,8 +113,9 @@ func NewDecoderForType(s *Scanner, o storer.EncodedObjectStorer,
 	}
 
 	return &Decoder{
-		s: s,
-		o: o,
+		s:              s,
+		o:              o,
+		deltaBaseCache: cacheObject,
 
 		idx:          NewIndex(0),
 		offsetToType: make(map[int64]plumbing.ObjectType),
@@ -404,19 +417,19 @@ func (d *Decoder) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset i
 }
 
 func (d *Decoder) cacheGet(h plumbing.Hash) (plumbing.EncodedObject, bool) {
-	if d.DeltaBaseCache == nil {
+	if d.deltaBaseCache == nil {
 		return nil, false
 	}
 
-	return d.DeltaBaseCache.Get(h)
+	return d.deltaBaseCache.Get(h)
 }
 
 func (d *Decoder) cachePut(obj plumbing.EncodedObject) {
-	if d.DeltaBaseCache == nil {
+	if d.deltaBaseCache == nil {
 		return
 	}
 
-	d.DeltaBaseCache.Put(obj)
+	d.deltaBaseCache.Put(obj)
 }
 
 func (d *Decoder) recallByOffset(o int64) (plumbing.EncodedObject, error) {
diff --git a/plumbing/format/packfile/decoder_test.go b/plumbing/format/packfile/decoder_test.go
index 1a1a74a..b5bc7b7 100644
--- a/plumbing/format/packfile/decoder_test.go
+++ b/plumbing/format/packfile/decoder_test.go
@@ -4,6 +4,7 @@ import (
 	"io"
 
 	"gopkg.in/src-d/go-git.v4/plumbing"
+	"gopkg.in/src-d/go-git.v4/plumbing/cache"
 	"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
 	"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
 	"gopkg.in/src-d/go-git.v4/plumbing/storer"
@@ -51,7 +52,8 @@ func (s *ReaderSuite) TestDecodeByTypeRefDelta(c *C) {
 	storage := memory.NewStorage()
 
 	scanner := packfile.NewScanner(f.Packfile())
-	d, err := packfile.NewDecoderForType(scanner, storage, plumbing.CommitObject)
+	d, err := packfile.NewDecoderForType(scanner, storage, plumbing.CommitObject,
+		cache.NewObjectLRUDefault())
 	c.Assert(err, IsNil)
 
 	// Index required to decode by ref-delta.
@@ -77,7 +79,8 @@ func (s *ReaderSuite) TestDecodeByTypeRefDeltaError(c *C) {
 	fixtures.Basic().ByTag("ref-delta").Test(c, func(f *fixtures.Fixture) {
 		storage := memory.NewStorage()
 		scanner := packfile.NewScanner(f.Packfile())
-		d, err := packfile.NewDecoderForType(scanner, storage, plumbing.CommitObject)
+		d, err := packfile.NewDecoderForType(scanner, storage,
+			plumbing.CommitObject, cache.NewObjectLRUDefault())
 		c.Assert(err, IsNil)
 		defer d.Close()
 
@@ -111,7 +114,8 @@ func (s *ReaderSuite) TestDecodeByType(c *C) {
 		for _, t := range ts {
 			storage := memory.NewStorage()
 			scanner := packfile.NewScanner(f.Packfile())
-			d, err := packfile.NewDecoderForType(scanner, storage, t)
+			d, err := packfile.NewDecoderForType(scanner, storage, t,
+				cache.NewObjectLRUDefault())
 			c.Assert(err, IsNil)
 
 			// when the packfile is ref-delta based, the offsets are required
@@ -141,13 +145,17 @@ func (s *ReaderSuite) TestDecodeByTypeConstructor(c *C) {
 	storage := memory.NewStorage()
 
 	scanner := packfile.NewScanner(f.Packfile())
 
-	_, err := packfile.NewDecoderForType(scanner, storage, plumbing.OFSDeltaObject)
+	_, err := packfile.NewDecoderForType(scanner, storage,
+		plumbing.OFSDeltaObject, cache.NewObjectLRUDefault())
	c.Assert(err, Equals, plumbing.ErrInvalidType)
 
-	_, err = packfile.NewDecoderForType(scanner, storage, plumbing.REFDeltaObject)
+	_, err = packfile.NewDecoderForType(scanner, storage,
+		plumbing.REFDeltaObject, cache.NewObjectLRUDefault())
+	c.Assert(err, Equals, plumbing.ErrInvalidType)
 
-	_, err = packfile.NewDecoderForType(scanner, storage, plumbing.InvalidObject)
+	_, err = packfile.NewDecoderForType(scanner, storage, plumbing.InvalidObject,
+		cache.NewObjectLRUDefault())
 	c.Assert(err, Equals, plumbing.ErrInvalidType)
 }
 
@@ -313,7 +321,8 @@ func (s *ReaderSuite) TestDecodeObjectAt(c *C) {
 func (s *ReaderSuite) TestDecodeObjectAtForType(c *C) {
 	f := fixtures.Basic().One()
 	scanner := packfile.NewScanner(f.Packfile())
-	d, err := packfile.NewDecoderForType(scanner, nil, plumbing.TreeObject)
+	d, err := packfile.NewDecoderForType(scanner, nil, plumbing.TreeObject,
+		cache.NewObjectLRUDefault())
 	c.Assert(err, IsNil)
 
 	// when the packfile is ref-delta based, the offsets are required
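
Usage sketch (not part of the commit): the snippet below shows how the constructors introduced above fit together. It relies only on the API visible in this diff (cache.NewObjectLRU, cache.NewObjectLRUDefault, packfile.NewDecoderWithCache) plus the standard go-git v4 packages; the packfile path is a hypothetical placeholder and error handling is kept minimal.

package main

import (
	"fmt"
	"os"

	"gopkg.in/src-d/go-git.v4/plumbing/cache"
	"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
	"gopkg.in/src-d/go-git.v4/storage/memory"
)

func main() {
	// Hypothetical packfile path, for illustration only.
	f, err := os.Open("/tmp/example.pack")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	storage := memory.NewStorage()
	scanner := packfile.NewScanner(f)

	// NewDecoder now builds its own delta-base cache via
	// cache.NewObjectLRUDefault() (DefaultMaxSize, 96 MiB). To bound memory
	// differently, pass an explicit cache.Object through NewDecoderWithCache.
	deltaCache := cache.NewObjectLRU(16 * cache.MiByte)
	d, err := packfile.NewDecoderWithCache(scanner, storage, deltaCache)
	if err != nil {
		panic(err)
	}
	defer d.Close()

	// Decode stores every object from the packfile and returns its checksum.
	checksum, err := d.Decode()
	if err != nil {
		panic(err)
	}
	fmt.Println("packfile checksum:", checksum)
}

Callers that decode a single object type go through NewDecoderForType and now supply the cache themselves, which is why every such call in decoder_test.go gains a cache.NewObjectLRUDefault() argument.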