author | Arran Walker <arran.walker@fiveturns.org> | 2019-04-21 00:37:59 +0000 |
---|---|---|
committer | Arran Walker <arran.walker@fiveturns.org> | 2019-04-22 22:44:21 +0000 |
commit | f5c23dae1fc7508b2d5cbac5a2954203ea4c3ac2 | |
tree | cd0a254f9041344b0cb2d640d5f4b9ed01729cfd /storage/filesystem/object.go | |
parent | e5268e9c3c94f60e3c2008dc2ab4762c75352bfc | |
filesystem: ObjectStorage, MaxOpenDescriptors option
The MaxOpenDescriptors option provides a middle ground solution between keeping
all packfiles open (as offered by the KeepDescriptors option) and keeping none
open.
Signed-off-by: Arran Walker <arran.walker@fiveturns.org>
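
For illustration, a minimal sketch of how the new option might be configured, assuming go-git v4's `filesystem.NewStorageWithOptions` API and the `MaxOpenDescriptors` field this change introduces on `filesystem.Options`; the repository path is hypothetical. With the option set, at most that many packfile descriptors stay open between reads.

```go
package main

import (
	"fmt"

	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing/cache"
	"gopkg.in/src-d/go-git.v4/storage/filesystem"

	"gopkg.in/src-d/go-billy.v4/osfs"
)

func main() {
	// Hypothetical path to an existing repository's .git directory.
	fs := osfs.New("/path/to/repo/.git")

	// Keep at most two packfile descriptors open at any time: a middle ground
	// between KeepDescriptors (all stay open) and the default (none stay open).
	st := filesystem.NewStorageWithOptions(fs, cache.NewObjectLRUDefault(), filesystem.Options{
		MaxOpenDescriptors: 2,
	})

	repo, err := git.Open(st, nil) // nil worktree: open as a bare repository
	if err != nil {
		panic(err)
	}

	head, err := repo.Head()
	if err != nil {
		panic(err)
	}
	fmt.Println(head.Hash())
}
```

With `MaxOpenDescriptors` left at 0 and `KeepDescriptors` unset, behaviour is unchanged: each read still opens and closes its packfile, which is exactly the case the new `!s.options.KeepDescriptors && s.options.MaxOpenDescriptors == 0` guard in the diff below preserves.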
Diffstat (limited to 'storage/filesystem/object.go')
-rw-r--r-- | storage/filesystem/object.go | 145 |
1 file changed, 106 insertions(+), 39 deletions(-)
diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go
index 3eb62a2..ad5d8d0 100644
--- a/storage/filesystem/object.go
+++ b/storage/filesystem/object.go
@@ -26,6 +26,10 @@ type ObjectStorage struct {
 
 	dir   *dotgit.DotGit
 	index map[plumbing.Hash]idxfile.Index
+
+	packList    []plumbing.Hash
+	packListIdx int
+	packfiles   map[plumbing.Hash]*packfile.Packfile
 }
 
 // NewObjectStorage creates a new ObjectStorage with the given .git directory and cache.
@@ -187,6 +191,73 @@ func (s *ObjectStorage) encodedObjectSizeFromUnpacked(h plumbing.Hash) (
 	return size, err
 }
 
+func (s *ObjectStorage) packfile(idx idxfile.Index, pack plumbing.Hash) (*packfile.Packfile, error) {
+	if p := s.packfileFromCache(pack); p != nil {
+		return p, nil
+	}
+
+	f, err := s.dir.ObjectPack(pack)
+	if err != nil {
+		return nil, err
+	}
+
+	var p *packfile.Packfile
+	if s.objectCache != nil {
+		p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
+	} else {
+		p = packfile.NewPackfile(idx, s.dir.Fs(), f)
+	}
+
+	return p, s.storePackfileInCache(pack, p)
+}
+
+func (s *ObjectStorage) packfileFromCache(hash plumbing.Hash) *packfile.Packfile {
+	if s.packfiles == nil {
+		if s.options.KeepDescriptors {
+			s.packfiles = make(map[plumbing.Hash]*packfile.Packfile)
+		} else if s.options.MaxOpenDescriptors > 0 {
+			s.packList = make([]plumbing.Hash, s.options.MaxOpenDescriptors)
+			s.packfiles = make(map[plumbing.Hash]*packfile.Packfile, s.options.MaxOpenDescriptors)
+		}
+	}
+
+	return s.packfiles[hash]
+}
+
+func (s *ObjectStorage) storePackfileInCache(hash plumbing.Hash, p *packfile.Packfile) error {
+	if s.options.KeepDescriptors {
+		s.packfiles[hash] = p
+		return nil
+	}
+
+	if s.options.MaxOpenDescriptors <= 0 {
+		return nil
+	}
+
+	// start over as the limit of packList is hit
+	if s.packListIdx >= len(s.packList) {
+		s.packListIdx = 0
+	}
+
+	// close the existing packfile if open
+	if next := s.packList[s.packListIdx]; !next.IsZero() {
+		open := s.packfiles[next]
+		delete(s.packfiles, next)
+		if open != nil {
+			if err := open.Close(); err != nil {
+				return err
+			}
+		}
+	}
+
+	// cache newly open packfile
+	s.packList[s.packListIdx] = hash
+	s.packfiles[hash] = p
+	s.packListIdx++
+
+	return nil
+}
+
 func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
 	size int64, err error) {
 	if err := s.requireIndex(); err != nil {
@@ -198,12 +269,6 @@ func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
 		return 0, plumbing.ErrObjectNotFound
 	}
 
-	f, err := s.dir.ObjectPack(pack)
-	if err != nil {
-		return 0, err
-	}
-	defer ioutil.CheckClose(f, &err)
-
 	idx := s.index[pack]
 	hash, err := idx.FindHash(offset)
 	if err == nil {
@@ -215,11 +280,13 @@ func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
 		return 0, err
 	}
-	var p *packfile.Packfile
-	if s.objectCache != nil {
-		p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
-	} else {
-		p = packfile.NewPackfile(idx, s.dir.Fs(), f)
+	p, err := s.packfile(idx, pack)
+	if err != nil {
+		return 0, err
+	}
+
+	if !s.options.KeepDescriptors && s.options.MaxOpenDescriptors == 0 {
+		defer ioutil.CheckClose(p, &err)
 	}
 
 	return p.GetSizeByOffset(offset)
 }
@@ -361,29 +428,28 @@ func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) (
 		return nil, plumbing.ErrObjectNotFound
 	}
 
-	f, err := s.dir.ObjectPack(pack)
+	idx := s.index[pack]
+	p, err := s.packfile(idx, pack)
 	if err != nil {
 		return nil, err
 	}
 
-	if !s.options.KeepDescriptors {
-		defer ioutil.CheckClose(f, &err)
+	if !s.options.KeepDescriptors && s.options.MaxOpenDescriptors == 0 {
+		defer ioutil.CheckClose(p, &err)
 	}
 
-	idx := s.index[pack]
 	if canBeDelta {
-		return s.decodeDeltaObjectAt(f, idx, offset, hash)
+		return s.decodeDeltaObjectAt(p, offset, hash)
 	}
 
-	return s.decodeObjectAt(f, idx, offset)
+	return s.decodeObjectAt(p, offset)
 }
 
 func (s *ObjectStorage) decodeObjectAt(
-	f billy.File,
-	idx idxfile.Index,
+	p *packfile.Packfile,
 	offset int64,
 ) (plumbing.EncodedObject, error) {
-	hash, err := idx.FindHash(offset)
+	hash, err := p.FindHash(offset)
 	if err == nil {
 		obj, ok := s.objectCache.Get(hash)
 		if ok {
@@ -395,28 +461,16 @@ func (s *ObjectStorage) decodeObjectAt(
 		return nil, err
 	}
 
-	var p *packfile.Packfile
-	if s.objectCache != nil {
-		p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
-	} else {
-		p = packfile.NewPackfile(idx, s.dir.Fs(), f)
-	}
-
 	return p.GetByOffset(offset)
 }
 
 func (s *ObjectStorage) decodeDeltaObjectAt(
-	f billy.File,
-	idx idxfile.Index,
+	p *packfile.Packfile,
 	offset int64,
 	hash plumbing.Hash,
 ) (plumbing.EncodedObject, error) {
-	if _, err := f.Seek(0, io.SeekStart); err != nil {
-		return nil, err
-	}
-
-	p := packfile.NewScanner(f)
-	header, err := p.SeekObjectHeader(offset)
+	scan := p.Scanner()
+	header, err := scan.SeekObjectHeader(offset)
 	if err != nil {
 		return nil, err
 	}
@@ -429,12 +483,12 @@ func (s *ObjectStorage) decodeDeltaObjectAt(
 	case plumbing.REFDeltaObject:
 		base = header.Reference
 	case plumbing.OFSDeltaObject:
-		base, err = idx.FindHash(header.OffsetReference)
+		base, err = p.FindHash(header.OffsetReference)
 		if err != nil {
 			return nil, err
 		}
	default:
-		return s.decodeObjectAt(f, idx, offset)
+		return s.decodeObjectAt(p, offset)
 	}
 
 	obj := &plumbing.MemoryObject{}
@@ -444,7 +498,7 @@ func (s *ObjectStorage) decodeDeltaObjectAt(
 		return nil, err
 	}
 
-	if _, _, err := p.NextObject(w); err != nil {
+	if _, _, err := scan.NextObject(w); err != nil {
 		return nil, err
 	}
 
@@ -515,7 +569,20 @@ func (s *ObjectStorage) buildPackfileIters(
 
 // Close closes all opened files.
 func (s *ObjectStorage) Close() error {
-	return s.dir.Close()
+	var firstError error
+	if s.options.KeepDescriptors || s.options.MaxOpenDescriptors > 0 {
+		for _, packfile := range s.packfiles {
+			err := packfile.Close()
+			if firstError == nil && err != nil {
+				firstError = err
+			}
+		}
+	}
+
+	s.packfiles = nil
+	s.dir.Close()
+
+	return firstError
 }
 
 type lazyPackfilesIter struct {
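
A note on the reworked Close() at the end of the diff: with KeepDescriptors or MaxOpenDescriptors in effect, cached packfile descriptors survive individual reads and are only released when the object storage itself is closed. The descriptor cache evicts in insertion order: packList acts as a fixed-size ring, and once it wraps, the oldest open packfile is closed before the new one is stored. Below is a hedged sketch of the caller-side contract, assuming the same go-git v4 import paths as above; the helper name, counting logic, and repository path are illustrative only.

```go
package main

import (
	"fmt"

	"gopkg.in/src-d/go-billy.v4/osfs"
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/cache"
	"gopkg.in/src-d/go-git.v4/storage/filesystem"
)

// countObjects is an illustrative helper: it walks every encoded object and
// then closes the storage, which releases any packfiles still cached because
// of MaxOpenDescriptors (or KeepDescriptors).
func countObjects(st *filesystem.Storage) (count int, err error) {
	defer func() {
		// Close the storage when done; keep the first error encountered.
		if cerr := st.Close(); err == nil {
			err = cerr
		}
	}()

	iter, err := st.IterEncodedObjects(plumbing.AnyObject)
	if err != nil {
		return 0, err
	}
	defer iter.Close()

	err = iter.ForEach(func(plumbing.EncodedObject) error {
		count++
		return nil
	})
	return count, err
}

func main() {
	st := filesystem.NewStorageWithOptions(
		osfs.New("/path/to/repo/.git"), // hypothetical repository path
		cache.NewObjectLRUDefault(),
		filesystem.Options{MaxOpenDescriptors: 4},
	)

	n, err := countObjects(st)
	if err != nil {
		panic(err)
	}
	fmt.Println("objects:", n)
}
```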