From 6e15f9cb0dbac68992eb242282e725784fe72b32 Mon Sep 17 00:00:00 2001
From: Antonio Jesus Navarro Perez
Date: Tue, 21 Feb 2017 14:53:30 +0100
Subject: cache: move package to plumbing

Because cache package is only intended to be used at internal level, we
move it to the plumbing package.
---
 cache/common.go      | 16 ----------
 cache/object.go      | 68 -----------------------------------------
 cache/object_test.go | 85 ----------------------------------------------------
 cache/queue.go       | 46 ----------------------------
 4 files changed, 215 deletions(-)
 delete mode 100644 cache/common.go
 delete mode 100644 cache/object.go
 delete mode 100644 cache/object_test.go
 delete mode 100644 cache/queue.go

(limited to 'cache')

diff --git a/cache/common.go b/cache/common.go
deleted file mode 100644
index 33fb2bc..0000000
--- a/cache/common.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package cache
-
-import "srcd.works/go-git.v4/plumbing"
-
-const (
-	Byte = 1 << (iota * 10)
-	KiByte
-	MiByte
-	GiByte
-)
-
-type Object interface {
-	Add(o plumbing.EncodedObject)
-	Get(k plumbing.Hash) plumbing.EncodedObject
-	Clear()
-}
diff --git a/cache/object.go b/cache/object.go
deleted file mode 100644
index 47e390b..0000000
--- a/cache/object.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package cache
-
-import "srcd.works/go-git.v4/plumbing"
-
-const (
-	initialQueueSize = 20
-	MaxSize          = 10 * MiByte
-)
-
-type ObjectFIFO struct {
-	objects map[plumbing.Hash]plumbing.EncodedObject
-	order   *queue
-
-	maxSize    int64
-	actualSize int64
-}
-
-// NewObjectFIFO returns an Object cache that keeps the newest objects that fit
-// into the specific memory size
-func NewObjectFIFO(size int64) *ObjectFIFO {
-	return &ObjectFIFO{
-		objects: make(map[plumbing.Hash]plumbing.EncodedObject),
-		order:   newQueue(initialQueueSize),
-		maxSize: size,
-	}
-}
-
-// Add adds a new object to the cache. If the object size is greater than the
-// cache size, the object is not added.
-func (c *ObjectFIFO) Add(o plumbing.EncodedObject) {
-	// if the size of the object is bigger or equal than the cache size,
-	// skip it
-	if o.Size() >= c.maxSize {
-		return
-	}
-
-	// if the object is into the cache, do not add it again
-	if _, ok := c.objects[o.Hash()]; ok {
-		return
-	}
-
-	// delete the oldest object if cache is full
-	if c.actualSize >= c.maxSize {
-		h := c.order.Pop()
-		o := c.objects[h]
-		if o != nil {
-			c.actualSize -= o.Size()
-			delete(c.objects, h)
-		}
-	}
-
-	c.objects[o.Hash()] = o
-	c.order.Push(o.Hash())
-	c.actualSize += o.Size()
-}
-
-// Get returns an object by his hash. If the object is not found in the cache, it
-// returns nil
-func (c *ObjectFIFO) Get(k plumbing.Hash) plumbing.EncodedObject {
-	return c.objects[k]
-}
-
-// Clear the content of this object cache
-func (c *ObjectFIFO) Clear() {
-	c.objects = make(map[plumbing.Hash]plumbing.EncodedObject)
-	c.order = newQueue(initialQueueSize)
-	c.actualSize = 0
-}
diff --git a/cache/object_test.go b/cache/object_test.go
deleted file mode 100644
index 7d00970..0000000
--- a/cache/object_test.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package cache
-
-import (
-	"io"
-	"testing"
-
-	"srcd.works/go-git.v4/plumbing"
-
-	. "gopkg.in/check.v1"
-)
-
-func Test(t *testing.T) { TestingT(t) }
-
-type ObjectSuite struct {
-	c       *ObjectFIFO
-	aObject plumbing.EncodedObject
-	bObject plumbing.EncodedObject
-	cObject plumbing.EncodedObject
-	dObject plumbing.EncodedObject
-}
-
-var _ = Suite(&ObjectSuite{})
-
-func (s *ObjectSuite) SetUpTest(c *C) {
-	s.aObject = newObject("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", 1*Byte)
-	s.bObject = newObject("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", 3*Byte)
-	s.cObject = newObject("cccccccccccccccccccccccccccccccccccccccc", 1*Byte)
-	s.dObject = newObject("dddddddddddddddddddddddddddddddddddddddd", 1*Byte)
-
-	s.c = NewObjectFIFO(2 * Byte)
-}
-
-func (s *ObjectSuite) TestAdd_SameObject(c *C) {
-	s.c.Add(s.aObject)
-	c.Assert(s.c.actualSize, Equals, int64(1*Byte))
-	s.c.Add(s.aObject)
-	c.Assert(s.c.actualSize, Equals, int64(1*Byte))
-}
-
-func (s *ObjectSuite) TestAdd_BigObject(c *C) {
-	s.c.Add(s.bObject)
-	c.Assert(s.c.actualSize, Equals, int64(0))
-	c.Assert(len(s.c.objects), Equals, 0)
-}
-
-func (s *ObjectSuite) TestAdd_CacheOverflow(c *C) {
-	s.c.Add(s.aObject)
-	c.Assert(s.c.actualSize, Equals, int64(1*Byte))
-	s.c.Add(s.cObject)
-	c.Assert(len(s.c.objects), Equals, 2)
-	s.c.Add(s.dObject)
-	c.Assert(len(s.c.objects), Equals, 2)
-
-	c.Assert(s.c.Get(s.aObject.Hash()), IsNil)
-	c.Assert(s.c.Get(s.cObject.Hash()), NotNil)
-	c.Assert(s.c.Get(s.dObject.Hash()), NotNil)
-}
-
-func (s *ObjectSuite) TestClear(c *C) {
-	s.c.Add(s.aObject)
-	c.Assert(s.c.actualSize, Equals, int64(1*Byte))
-	s.c.Clear()
-	c.Assert(s.c.actualSize, Equals, int64(0))
-	c.Assert(s.c.Get(s.aObject.Hash()), IsNil)
-}
-
-type dummyObject struct {
-	hash plumbing.Hash
-	size int64
-}
-
-func newObject(hash string, size int64) plumbing.EncodedObject {
-	return &dummyObject{
-		hash: plumbing.NewHash(hash),
-		size: size,
-	}
-}
-
-func (d *dummyObject) Hash() plumbing.Hash           { return d.hash }
-func (*dummyObject) Type() plumbing.ObjectType       { return plumbing.InvalidObject }
-func (*dummyObject) SetType(plumbing.ObjectType)     {}
-func (d *dummyObject) Size() int64                   { return d.size }
-func (*dummyObject) SetSize(s int64)                 {}
-func (*dummyObject) Reader() (io.ReadCloser, error)  { return nil, nil }
-func (*dummyObject) Writer() (io.WriteCloser, error) { return nil, nil }
diff --git a/cache/queue.go b/cache/queue.go
deleted file mode 100644
index 8c6d7d3..0000000
--- a/cache/queue.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package cache
-
-import "srcd.works/go-git.v4/plumbing"
-
-// queue is a basic FIFO queue based on a circular list that resize as needed.
-type queue struct {
-	elements []plumbing.Hash
-	size     int
-	head     int
-	tail     int
-	count    int
-}
-
-// newQueue returns a queue with the specified initial size
-func newQueue(size int) *queue {
-	return &queue{
-		elements: make([]plumbing.Hash, size),
-		size:     size,
-	}
-}
-
-// Push adds a node to the queue.
-func (q *queue) Push(h plumbing.Hash) {
-	if q.head == q.tail && q.count > 0 {
-		elements := make([]plumbing.Hash, len(q.elements)+q.size)
-		copy(elements, q.elements[q.head:])
-		copy(elements[len(q.elements)-q.head:], q.elements[:q.head])
-		q.head = 0
-		q.tail = len(q.elements)
-		q.elements = elements
-	}
-	q.elements[q.tail] = h
-	q.tail = (q.tail + 1) % len(q.elements)
-	q.count++
-}
-
-// Pop removes and returns a Hash from the queue in first to last order.
-func (q *queue) Pop() plumbing.Hash {
-	if q.count == 0 {
-		return plumbing.ZeroHash
-	}
-	node := q.elements[q.head]
-	q.head = (q.head + 1) % len(q.elements)
-	q.count--
-	return node
-}
-- cgit