From b0d807a1ae0687ef3a01d78c1dc5e55f7217268f Mon Sep 17 00:00:00 2001 From: Antonio Jesus Navarro Perez Date: Tue, 5 Jun 2018 18:33:27 +0200 Subject: dotgit: Move package outside internal. Signed-off-by: Antonio Jesus Navarro Perez --- storage/filesystem/object.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'storage/filesystem/object.go') diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go index 26190fd..54f268a 100644 --- a/storage/filesystem/object.go +++ b/storage/filesystem/object.go @@ -11,7 +11,7 @@ import ( "gopkg.in/src-d/go-git.v4/plumbing/format/objfile" "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" "gopkg.in/src-d/go-git.v4/plumbing/storer" - "gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit" + "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit" "gopkg.in/src-d/go-git.v4/storage/memory" "gopkg.in/src-d/go-git.v4/utils/ioutil" -- cgit From ecd2bd553ce223252d9784572fd47bd9f597618e Mon Sep 17 00:00:00 2001 From: Miguel Molina Date: Fri, 8 Jun 2018 12:31:15 +0200 Subject: storage: filesystem, make ObjectStorage constructor public Signed-off-by: Miguel Molina --- storage/filesystem/object.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'storage/filesystem/object.go') diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go index 54f268a..9ffe4dc 100644 --- a/storage/filesystem/object.go +++ b/storage/filesystem/object.go @@ -26,7 +26,8 @@ type ObjectStorage struct { index map[plumbing.Hash]*packfile.Index } -func newObjectStorage(dir *dotgit.DotGit) (ObjectStorage, error) { +// NewObjectStorage creates a new ObjectStorage with the given .git directory. +func NewObjectStorage(dir *dotgit.DotGit) (ObjectStorage, error) { s := ObjectStorage{ deltaBaseCache: cache.NewObjectLRUDefault(), dir: dir, @@ -166,7 +167,7 @@ func (s *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (p // Create a new object storage with the DotGit(s) and check for the // required hash object. Skip when not found. for _, dg := range dotgits { - o, oe := newObjectStorage(dg) + o, oe := NewObjectStorage(dg) if oe != nil { continue } -- cgit From 009f1069a1248c1e9189a9e4c342f6d017156ec4 Mon Sep 17 00:00:00 2001 From: Miguel Molina Date: Thu, 19 Jul 2018 15:20:10 +0200 Subject: plumbing/format/idxfile: add new Index and MemoryIndex Signed-off-by: Miguel Molina --- storage/filesystem/object.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'storage/filesystem/object.go') diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go index 9ffe4dc..ef67f50 100644 --- a/storage/filesystem/object.go +++ b/storage/filesystem/object.go @@ -63,7 +63,7 @@ func (s *ObjectStorage) loadIdxFile(h plumbing.Hash) (err error) { } defer ioutil.CheckClose(f, &err) - idxf := idxfile.NewIdxfile() + idxf := idxfile.NewMemoryIndex() d := idxfile.NewDecoder(f) if err = d.Decode(idxf); err != nil { return err -- cgit From 79f249465b24104b73c9dc220d9098cecdab4d77 Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Thu, 26 Jul 2018 13:42:51 +0200 Subject: plumbing, storage: integrate new index Now dotgit.PackWriter uses the new packfile.Parser and index. 
Signed-off-by: Javi Fontan --- storage/filesystem/object.go | 46 ++++++++++++++++++++++++++++---------------- 1 file changed, 29 insertions(+), 17 deletions(-) (limited to 'storage/filesystem/object.go') diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go index ef67f50..b73b309 100644 --- a/storage/filesystem/object.go +++ b/storage/filesystem/object.go @@ -23,7 +23,7 @@ type ObjectStorage struct { deltaBaseCache cache.Object dir *dotgit.DotGit - index map[plumbing.Hash]*packfile.Index + index map[plumbing.Hash]idxfile.Index } // NewObjectStorage creates a new ObjectStorage with the given .git directory. @@ -41,7 +41,7 @@ func (s *ObjectStorage) requireIndex() error { return nil } - s.index = make(map[plumbing.Hash]*packfile.Index) + s.index = make(map[plumbing.Hash]idxfile.Index) packs, err := s.dir.ObjectPacks() if err != nil { return err @@ -69,7 +69,7 @@ func (s *ObjectStorage) loadIdxFile(h plumbing.Hash) (err error) { return err } - s.index[h] = packfile.NewIndexFromIdxFile(idxf) + s.index[h] = idxf return err } @@ -87,8 +87,11 @@ func (s *ObjectStorage) PackfileWriter() (io.WriteCloser, error) { return nil, err } - w.Notify = func(h plumbing.Hash, idx *packfile.Index) { - s.index[h] = idx + w.Notify = func(h plumbing.Hash, writer *idxfile.Writer) { + index, err := writer.Index() + if err == nil { + s.index[h] = index + } } return w, nil @@ -278,7 +281,7 @@ func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) ( func (s *ObjectStorage) decodeObjectAt( f billy.File, - idx *packfile.Index, + idx idxfile.Index, offset int64) (plumbing.EncodedObject, error) { if _, err := f.Seek(0, io.SeekStart); err != nil { return nil, err @@ -299,7 +302,7 @@ func (s *ObjectStorage) decodeObjectAt( func (s *ObjectStorage) decodeDeltaObjectAt( f billy.File, - idx *packfile.Index, + idx idxfile.Index, offset int64, hash plumbing.Hash) (plumbing.EncodedObject, error) { if _, err := f.Seek(0, io.SeekStart); err != nil { @@ -324,12 +327,10 @@ func (s *ObjectStorage) decodeDeltaObjectAt( case plumbing.REFDeltaObject: base = header.Reference case plumbing.OFSDeltaObject: - e, ok := idx.LookupOffset(uint64(header.OffsetReference)) - if !ok { - return nil, plumbing.ErrObjectNotFound + base, err = idx.FindHash(header.OffsetReference) + if err != nil { + return nil, err } - - base = e.Hash default: return s.decodeObjectAt(f, idx, offset) } @@ -350,8 +351,9 @@ func (s *ObjectStorage) decodeDeltaObjectAt( func (s *ObjectStorage) findObjectInPackfile(h plumbing.Hash) (plumbing.Hash, plumbing.Hash, int64) { for packfile, index := range s.index { - if e, ok := index.LookupHash(h); ok { - return packfile, e.Hash, int64(e.Offset) + offset, err := index.FindOffset(h) + if err == nil { + return packfile, h, offset } } @@ -460,12 +462,22 @@ type packfileIter struct { total uint32 } -func NewPackfileIter(f billy.File, t plumbing.ObjectType) (storer.EncodedObjectIter, error) { +// NewPackfileIter returns a new EncodedObjectIter for the provided packfile +// and object type. 
+func NewPackfileIter( + f billy.File, + t plumbing.ObjectType, +) (storer.EncodedObjectIter, error) { return newPackfileIter(f, t, make(map[plumbing.Hash]struct{}), nil, nil) } -func newPackfileIter(f billy.File, t plumbing.ObjectType, seen map[plumbing.Hash]struct{}, - index *packfile.Index, cache cache.Object) (storer.EncodedObjectIter, error) { +func newPackfileIter( + f billy.File, + t plumbing.ObjectType, + seen map[plumbing.Hash]struct{}, + index idxfile.Index, + cache cache.Object, +) (storer.EncodedObjectIter, error) { s := packfile.NewScanner(f) _, total, err := s.Header() if err != nil { -- cgit From 6a24b4c1f0cb9e5daf30fa7979f2643a967af1ad Mon Sep 17 00:00:00 2001 From: Miguel Molina Date: Tue, 7 Aug 2018 18:41:19 +0200 Subject: *: use parser to populate non writable storages and bug fixes Signed-off-by: Miguel Molina --- storage/filesystem/object.go | 77 +++++++++++++++++--------------------------- 1 file changed, 30 insertions(+), 47 deletions(-) (limited to 'storage/filesystem/object.go') diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go index b73b309..2032eac 100644 --- a/storage/filesystem/object.go +++ b/storage/filesystem/object.go @@ -12,7 +12,6 @@ import ( "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit" - "gopkg.in/src-d/go-git.v4/storage/memory" "gopkg.in/src-d/go-git.v4/utils/ioutil" "gopkg.in/src-d/go-billy.v4" @@ -282,29 +281,34 @@ func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) ( func (s *ObjectStorage) decodeObjectAt( f billy.File, idx idxfile.Index, - offset int64) (plumbing.EncodedObject, error) { - if _, err := f.Seek(0, io.SeekStart); err != nil { - return nil, err + offset int64, +) (plumbing.EncodedObject, error) { + hash, err := idx.FindHash(offset) + if err == nil { + obj, ok := s.deltaBaseCache.Get(hash) + if ok { + return obj, nil + } } - p := packfile.NewScanner(f) + if err != nil && err != plumbing.ErrObjectNotFound { + return nil, err + } - d, err := packfile.NewDecoderWithCache(p, memory.NewStorage(), - s.deltaBaseCache) + obj, err := packfile.NewPackfile(idx, f).GetByOffset(offset) if err != nil { return nil, err } - d.SetIndex(idx) - obj, err := d.DecodeObjectAt(offset) - return obj, err + return packfile.MemoryObjectFromDisk(obj) } func (s *ObjectStorage) decodeDeltaObjectAt( f billy.File, idx idxfile.Index, offset int64, - hash plumbing.Hash) (plumbing.EncodedObject, error) { + hash plumbing.Hash, +) (plumbing.EncodedObject, error) { if _, err := f.Seek(0, io.SeekStart); err != nil { return nil, err } @@ -453,22 +457,23 @@ func (it *lazyPackfilesIter) Close() { } type packfileIter struct { - f billy.File - d *packfile.Decoder - t plumbing.ObjectType - - seen map[plumbing.Hash]struct{} - position uint32 - total uint32 + iter storer.EncodedObjectIter + seen map[plumbing.Hash]struct{} } // NewPackfileIter returns a new EncodedObjectIter for the provided packfile // and object type. 
func NewPackfileIter( f billy.File, + idxFile billy.File, t plumbing.ObjectType, ) (storer.EncodedObjectIter, error) { - return newPackfileIter(f, t, make(map[plumbing.Hash]struct{}), nil, nil) + idx := idxfile.NewMemoryIndex() + if err := idxfile.NewDecoder(idxFile).Decode(idx); err != nil { + return nil, err + } + + return newPackfileIter(f, t, make(map[plumbing.Hash]struct{}), idx, nil) } func newPackfileIter( @@ -478,47 +483,26 @@ func newPackfileIter( index idxfile.Index, cache cache.Object, ) (storer.EncodedObjectIter, error) { - s := packfile.NewScanner(f) - _, total, err := s.Header() + iter, err := packfile.NewPackfile(index, f).GetByType(t) if err != nil { return nil, err } - d, err := packfile.NewDecoderForType(s, memory.NewStorage(), t, cache) - if err != nil { - return nil, err - } - - d.SetIndex(index) - return &packfileIter{ - f: f, - d: d, - t: t, - - total: total, - seen: seen, + iter: iter, + seen: seen, }, nil } func (iter *packfileIter) Next() (plumbing.EncodedObject, error) { for { - if iter.position >= iter.total { - return nil, io.EOF - } - - obj, err := iter.d.DecodeObject() + obj, err := iter.iter.Next() if err != nil { return nil, err } - iter.position++ - if obj == nil { - continue - } - if _, ok := iter.seen[obj.Hash()]; ok { - return iter.Next() + continue } return obj, nil @@ -531,8 +515,7 @@ func (iter *packfileIter) ForEach(cb func(plumbing.EncodedObject) error) error { } func (iter *packfileIter) Close() { - iter.f.Close() - iter.d.Close() + iter.iter.Close() } type objectsIter struct { -- cgit From 34cc506735ee0cd29362da51592b49a217df8159 Mon Sep 17 00:00:00 2001 From: Miguel Molina Date: Thu, 9 Aug 2018 12:16:57 +0200 Subject: storage: filesystem, benchmark PackfileIter Signed-off-by: Miguel Molina --- storage/filesystem/object.go | 30 ++++++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) (limited to 'storage/filesystem/object.go') diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go index 2032eac..4757938 100644 --- a/storage/filesystem/object.go +++ b/storage/filesystem/object.go @@ -509,9 +509,20 @@ func (iter *packfileIter) Next() (plumbing.EncodedObject, error) { } } -// ForEach is never called since is used inside of a MultiObjectIterator func (iter *packfileIter) ForEach(cb func(plumbing.EncodedObject) error) error { - return nil + for { + o, err := iter.Next() + if err != nil { + if err == io.EOF { + return nil + } + return err + } + + if err := cb(o); err != nil { + return err + } + } } func (iter *packfileIter) Close() { @@ -543,9 +554,20 @@ func (iter *objectsIter) Next() (plumbing.EncodedObject, error) { return obj, err } -// ForEach is never called since is used inside of a MultiObjectIterator func (iter *objectsIter) ForEach(cb func(plumbing.EncodedObject) error) error { - return nil + for { + o, err := iter.Next() + if err != nil { + if err == io.EOF { + return nil + } + return err + } + + if err := cb(o); err != nil { + return err + } + } } func (iter *objectsIter) Close() { -- cgit From 038cf238e6250094c7aeb387fd7ea92438719699 Mon Sep 17 00:00:00 2001 From: Miguel Molina Date: Thu, 9 Aug 2018 12:36:37 +0200 Subject: storage: filesystem, close Packfile after iterating objects Signed-off-by: Miguel Molina --- storage/filesystem/object.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'storage/filesystem/object.go') diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go index 4757938..86d0da9 100644 --- a/storage/filesystem/object.go +++ 
b/storage/filesystem/object.go @@ -457,12 +457,14 @@ func (it *lazyPackfilesIter) Close() { } type packfileIter struct { + pack billy.File iter storer.EncodedObjectIter seen map[plumbing.Hash]struct{} } // NewPackfileIter returns a new EncodedObjectIter for the provided packfile -// and object type. +// and object type. Packfile and index file will be closed after they're +// used. func NewPackfileIter( f billy.File, idxFile billy.File, @@ -473,6 +475,10 @@ func NewPackfileIter( return nil, err } + if err := idxFile.Close(); err != nil { + return nil, err + } + return newPackfileIter(f, t, make(map[plumbing.Hash]struct{}), idx, nil) } @@ -489,6 +495,7 @@ func newPackfileIter( } return &packfileIter{ + pack: f, iter: iter, seen: seen, }, nil @@ -514,6 +521,7 @@ func (iter *packfileIter) ForEach(cb func(plumbing.EncodedObject) error) error { o, err := iter.Next() if err != nil { if err == io.EOF { + iter.Close() return nil } return err @@ -527,6 +535,7 @@ func (iter *packfileIter) ForEach(cb func(plumbing.EncodedObject) error) error { func (iter *packfileIter) Close() { iter.iter.Close() + _ = iter.pack.Close() } type objectsIter struct { -- cgit From 56c5e91b158bc4569b38bfd5d27d4b4be5e06a27 Mon Sep 17 00:00:00 2001 From: Miguel Molina Date: Thu, 9 Aug 2018 16:53:00 +0200 Subject: plumbing: packfile, open and close packfile on FSObject reads Signed-off-by: Miguel Molina --- storage/filesystem/object.go | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) (limited to 'storage/filesystem/object.go') diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go index 86d0da9..6958e32 100644 --- a/storage/filesystem/object.go +++ b/storage/filesystem/object.go @@ -295,12 +295,7 @@ func (s *ObjectStorage) decodeObjectAt( return nil, err } - obj, err := packfile.NewPackfile(idx, f).GetByOffset(offset) - if err != nil { - return nil, err - } - - return packfile.MemoryObjectFromDisk(obj) + return packfile.NewPackfile(idx, s.dir.Fs(), f).GetByOffset(offset) } func (s *ObjectStorage) decodeDeltaObjectAt( @@ -404,7 +399,7 @@ func (s *ObjectStorage) buildPackfileIters(t plumbing.ObjectType, seen map[plumb if err != nil { return nil, err } - return newPackfileIter(pack, t, seen, s.index[h], s.deltaBaseCache) + return newPackfileIter(s.dir.Fs(), pack, t, seen, s.index[h], s.deltaBaseCache) }, }, nil } @@ -466,6 +461,7 @@ type packfileIter struct { // and object type. Packfile and index file will be closed after they're // used. func NewPackfileIter( + fs billy.Filesystem, f billy.File, idxFile billy.File, t plumbing.ObjectType, @@ -479,17 +475,18 @@ func NewPackfileIter( return nil, err } - return newPackfileIter(f, t, make(map[plumbing.Hash]struct{}), idx, nil) + return newPackfileIter(fs, f, t, make(map[plumbing.Hash]struct{}), idx, nil) } func newPackfileIter( + fs billy.Filesystem, f billy.File, t plumbing.ObjectType, seen map[plumbing.Hash]struct{}, index idxfile.Index, cache cache.Object, ) (storer.EncodedObjectIter, error) { - iter, err := packfile.NewPackfile(index, f).GetByType(t) + iter, err := packfile.NewPackfile(index, fs, f).GetByType(t) if err != nil { return nil, err } -- cgit From 790191ef92ec6382ce65cc30286c901863b3b7a3 Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Wed, 22 Aug 2018 16:46:50 +0200 Subject: plumbing, storage: add bases to the common cache After clone only resolved deltas were added to the cache. This caused slowdowns in small repositories where most objects can be held in cache. It also makes packfiles reuse delta cache from the store. 
Previously it created a new delta cache each time a packfile object was created. This also slowed down a bit accessing objects and had an impact on memory consumption when bases are added to the cache. Signed-off-by: Javi Fontan --- storage/filesystem/object.go | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) (limited to 'storage/filesystem/object.go') diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go index 6958e32..3a3a2bd 100644 --- a/storage/filesystem/object.go +++ b/storage/filesystem/object.go @@ -295,7 +295,14 @@ func (s *ObjectStorage) decodeObjectAt( return nil, err } - return packfile.NewPackfile(idx, s.dir.Fs(), f).GetByOffset(offset) + var p *packfile.Packfile + if s.deltaBaseCache != nil { + p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.deltaBaseCache) + } else { + p = packfile.NewPackfile(idx, s.dir.Fs(), f) + } + + return p.GetByOffset(offset) } func (s *ObjectStorage) decodeDeltaObjectAt( @@ -486,7 +493,14 @@ func newPackfileIter( index idxfile.Index, cache cache.Object, ) (storer.EncodedObjectIter, error) { - iter, err := packfile.NewPackfile(index, fs, f).GetByType(t) + var p *packfile.Packfile + if cache != nil { + p = packfile.NewPackfileWithCache(index, fs, f, cache) + } else { + p = packfile.NewPackfile(index, fs, f) + } + + iter, err := p.GetByType(t) if err != nil { return nil, err } -- cgit From 874f669becc25489081306bbbcbbc27b970f6295 Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Mon, 3 Sep 2018 19:40:22 +0200 Subject: storage/filesystem: move Options to filesytem and dotgit Signed-off-by: Javi Fontan --- storage/filesystem/object.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'storage/filesystem/object.go') diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go index 3a3a2bd..3519385 100644 --- a/storage/filesystem/object.go +++ b/storage/filesystem/object.go @@ -18,6 +18,8 @@ import ( ) type ObjectStorage struct { + options Options + // deltaBaseCache is an object cache uses to cache delta's bases when deltaBaseCache cache.Object @@ -27,7 +29,17 @@ type ObjectStorage struct { // NewObjectStorage creates a new ObjectStorage with the given .git directory. func NewObjectStorage(dir *dotgit.DotGit) (ObjectStorage, error) { + return NewObjectStorageWithOptions(dir, Options{}) +} + +// NewObjectStorageWithOptions creates a new ObjectStorage with the given .git +// directory and sets its options. +func NewObjectStorageWithOptions( + dir *dotgit.DotGit, + ops Options, +) (ObjectStorage, error) { s := ObjectStorage{ + options: ops, deltaBaseCache: cache.NewObjectLRUDefault(), dir: dir, } -- cgit From 6384ab93a2dbac9045ee19099455cbcfe82d0201 Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Thu, 30 Aug 2018 20:28:40 +0200 Subject: storage/dotgit: add KeepDescriptors option This option maintains packfile file descriptors opened after reading objects from them. It improves performance as it does not have to be opening packfiles each time an object is needed. Also adds Close to EncodedObjectStorer to close all the files manualy. 
Signed-off-by: Javi Fontan --- storage/filesystem/object.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'storage/filesystem/object.go') diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go index 3519385..3545e27 100644 --- a/storage/filesystem/object.go +++ b/storage/filesystem/object.go @@ -74,6 +74,7 @@ func (s *ObjectStorage) loadIdxFile(h plumbing.Hash) (err error) { } defer ioutil.CheckClose(f, &err) + idxf := idxfile.NewMemoryIndex() d := idxfile.NewDecoder(f) if err = d.Decode(idxf); err != nil { @@ -280,7 +281,9 @@ func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) ( return nil, err } - defer ioutil.CheckClose(f, &err) + if !s.options.KeepDescriptors { + defer ioutil.CheckClose(f, &err) + } idx := s.index[pack] if canBeDelta { @@ -423,6 +426,11 @@ func (s *ObjectStorage) buildPackfileIters(t plumbing.ObjectType, seen map[plumb }, nil } +// Close closes all opened files. +func (s *ObjectStorage) Close() error { + return s.dir.Close() +} + type lazyPackfilesIter struct { hashes []plumbing.Hash open func(h plumbing.Hash) (storer.EncodedObjectIter, error) -- cgit From 8f6b3127c1ff7661113fff2662416c328971a285 Mon Sep 17 00:00:00 2001 From: kuba-- Date: Fri, 7 Sep 2018 09:27:35 +0200 Subject: Expose Storage cache. Signed-off-by: kuba-- --- storage/filesystem/object.go | 29 ++++++++++------------------- 1 file changed, 10 insertions(+), 19 deletions(-) (limited to 'storage/filesystem/object.go') diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go index 3545e27..9eb085f 100644 --- a/storage/filesystem/object.go +++ b/storage/filesystem/object.go @@ -27,24 +27,18 @@ type ObjectStorage struct { index map[plumbing.Hash]idxfile.Index } -// NewObjectStorage creates a new ObjectStorage with the given .git directory. -func NewObjectStorage(dir *dotgit.DotGit) (ObjectStorage, error) { - return NewObjectStorageWithOptions(dir, Options{}) -} - -// NewObjectStorageWithOptions creates a new ObjectStorage with the given .git -// directory and sets its options. -func NewObjectStorageWithOptions( - dir *dotgit.DotGit, - ops Options, -) (ObjectStorage, error) { - s := ObjectStorage{ +// NewObjectStorage creates a new ObjectStorage with the given .git directory and cache. +func NewObjectStorage(dir *dotgit.DotGit, cache cache.Object) *ObjectStorage { + return NewObjectStorageWithOptions(dir, cache, Options{}) +} + +// NewObjectStorageWithOptions creates a new ObjectStorage with the given .git directory, cache and extra options +func NewObjectStorageWithOptions(dir *dotgit.DotGit, cache cache.Object, ops Options) *ObjectStorage { + return &ObjectStorage{ options: ops, - deltaBaseCache: cache.NewObjectLRUDefault(), + deltaBaseCache: cache, dir: dir, } - - return s, nil } func (s *ObjectStorage) requireIndex() error { @@ -182,10 +176,7 @@ func (s *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (p // Create a new object storage with the DotGit(s) and check for the // required hash object. Skip when not found. 
for _, dg := range dotgits { - o, oe := NewObjectStorage(dg) - if oe != nil { - continue - } + o := NewObjectStorage(dg, s.deltaBaseCache) enobj, enerr := o.EncodedObject(t, h) if enerr != nil { continue -- cgit From 82c7a306db75d083db50dbd41aebebd8bd55081b Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Thu, 20 Sep 2018 17:10:39 +0200 Subject: storage/filesystem: keep packs open in PackfileIter PackfileIter was not taking into account the option KeepDescriptors and was always closing the file. This caused "file already closed" errors when iterating packfiles in with KeepDescriptors active. Signed-off-by: Javi Fontan --- storage/filesystem/object.go | 33 +++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) (limited to 'storage/filesystem/object.go') diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go index 9eb085f..4aedacc 100644 --- a/storage/filesystem/object.go +++ b/storage/filesystem/object.go @@ -396,7 +396,10 @@ func (s *ObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (storer.Encode return storer.NewMultiEncodedObjectIter(iters), nil } -func (s *ObjectStorage) buildPackfileIters(t plumbing.ObjectType, seen map[plumbing.Hash]struct{}) (storer.EncodedObjectIter, error) { +func (s *ObjectStorage) buildPackfileIters( + t plumbing.ObjectType, + seen map[plumbing.Hash]struct{}, +) (storer.EncodedObjectIter, error) { if err := s.requireIndex(); err != nil { return nil, err } @@ -412,7 +415,10 @@ func (s *ObjectStorage) buildPackfileIters(t plumbing.ObjectType, seen map[plumb if err != nil { return nil, err } - return newPackfileIter(s.dir.Fs(), pack, t, seen, s.index[h], s.deltaBaseCache) + return newPackfileIter( + s.dir.Fs(), pack, t, seen, s.index[h], + s.deltaBaseCache, s.options.KeepDescriptors, + ) }, }, nil } @@ -470,9 +476,10 @@ func (it *lazyPackfilesIter) Close() { } type packfileIter struct { - pack billy.File - iter storer.EncodedObjectIter - seen map[plumbing.Hash]struct{} + pack billy.File + iter storer.EncodedObjectIter + seen map[plumbing.Hash]struct{} + keepPack bool } // NewPackfileIter returns a new EncodedObjectIter for the provided packfile @@ -483,6 +490,7 @@ func NewPackfileIter( f billy.File, idxFile billy.File, t plumbing.ObjectType, + keepPack bool, ) (storer.EncodedObjectIter, error) { idx := idxfile.NewMemoryIndex() if err := idxfile.NewDecoder(idxFile).Decode(idx); err != nil { @@ -493,7 +501,8 @@ func NewPackfileIter( return nil, err } - return newPackfileIter(fs, f, t, make(map[plumbing.Hash]struct{}), idx, nil) + seen := make(map[plumbing.Hash]struct{}) + return newPackfileIter(fs, f, t, seen, idx, nil, keepPack) } func newPackfileIter( @@ -503,6 +512,7 @@ func newPackfileIter( seen map[plumbing.Hash]struct{}, index idxfile.Index, cache cache.Object, + keepPack bool, ) (storer.EncodedObjectIter, error) { var p *packfile.Packfile if cache != nil { @@ -517,9 +527,10 @@ func newPackfileIter( } return &packfileIter{ - pack: f, - iter: iter, - seen: seen, + pack: f, + iter: iter, + seen: seen, + keepPack: keepPack, }, nil } @@ -557,7 +568,9 @@ func (iter *packfileIter) ForEach(cb func(plumbing.EncodedObject) error) error { func (iter *packfileIter) Close() { iter.iter.Close() - _ = iter.pack.Close() + if !iter.keepPack { + _ = iter.pack.Close() + } } type objectsIter struct { -- cgit From a7b0102b83aa86fd299623d35666bfee93fee0c6 Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Fri, 21 Sep 2018 10:43:43 +0200 Subject: storage/filesystem: add more doc to NewPackfileIter Signed-off-by: Javi Fontan --- 
storage/filesystem/object.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'storage/filesystem/object.go') diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go index 4aedacc..68bd140 100644 --- a/storage/filesystem/object.go +++ b/storage/filesystem/object.go @@ -476,15 +476,18 @@ func (it *lazyPackfilesIter) Close() { } type packfileIter struct { - pack billy.File - iter storer.EncodedObjectIter - seen map[plumbing.Hash]struct{} + pack billy.File + iter storer.EncodedObjectIter + seen map[plumbing.Hash]struct{} + + // tells whether the pack file should be left open after iteration or not keepPack bool } // NewPackfileIter returns a new EncodedObjectIter for the provided packfile // and object type. Packfile and index file will be closed after they're -// used. +// used. If keepPack is true the packfile won't be closed after the iteration +// finished. func NewPackfileIter( fs billy.Filesystem, f billy.File, -- cgit
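Taken together, the series above ends with an exported constructor that receives the delta-base cache from the caller, an Options struct carrying KeepDescriptors, and a Close method on the storage for releasing kept packfile descriptors. The sketch below wires those pieces up against a repository at a made-up path (/tmp/repo); it is an illustration of the API as it stands after these patches, not code taken from the series itself.

package main

import (
	"fmt"

	"gopkg.in/src-d/go-billy.v4/osfs"
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/cache"
	"gopkg.in/src-d/go-git.v4/storage/filesystem"
	"gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
)

func main() {
	// Point dotgit at the .git directory of an existing repository
	// (the path is illustrative).
	fs := osfs.New("/tmp/repo/.git")
	dir := dotgit.New(fs)

	// The delta-base cache is now provided by the caller, and
	// KeepDescriptors keeps packfiles open between reads until Close.
	s := filesystem.NewObjectStorageWithOptions(
		dir,
		cache.NewObjectLRUDefault(),
		filesystem.Options{KeepDescriptors: true},
	)
	defer s.Close()

	// Iterate every commit object reachable through loose objects and packs.
	iter, err := s.IterEncodedObjects(plumbing.CommitObject)
	if err != nil {
		panic(err)
	}
	defer iter.Close()

	err = iter.ForEach(func(obj plumbing.EncodedObject) error {
		fmt.Println(obj.Hash(), obj.Type(), obj.Size())
		return nil
	})
	if err != nil {
		panic(err)
	}
}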
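The exported NewPackfileIter also changed shape along the way, ending up taking the filesystem, the pack and idx files, the object type and a keepPack flag. A second sketch, with hypothetical pack file names, shows how a single pack could be read through it under those assumptions.

package main

import (
	"fmt"

	"gopkg.in/src-d/go-billy.v4"
	"gopkg.in/src-d/go-billy.v4/osfs"
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/storage/filesystem"
)

func printPackCommits(fs billy.Filesystem, packPath, idxPath string) error {
	pack, err := fs.Open(packPath)
	if err != nil {
		return err
	}

	idx, err := fs.Open(idxPath)
	if err != nil {
		return err
	}

	// The idx file is decoded and closed right away; with keepPack=false the
	// pack file is closed once iteration finishes or Close is called.
	iter, err := filesystem.NewPackfileIter(fs, pack, idx, plumbing.CommitObject, false)
	if err != nil {
		return err
	}

	return iter.ForEach(func(obj plumbing.EncodedObject) error {
		fmt.Println(obj.Hash())
		return nil
	})
}

func main() {
	fs := osfs.New("/tmp/repo/.git")
	// Pack names are illustrative; real ones carry the pack hash in the name.
	err := printPackCommits(fs,
		"objects/pack/pack-0000000000000000000000000000000000000000.pack",
		"objects/pack/pack-0000000000000000000000000000000000000000.idx",
	)
	if err != nil {
		fmt.Println("error:", err)
	}
}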