Diffstat (limited to 'storage/filesystem')
-rw-r--r--  storage/filesystem/object.go       | 105
-rw-r--r--  storage/filesystem/object_test.go  |  82
2 files changed, 175 insertions, 12 deletions
diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go
index 9eb085f..6cd2d4c 100644
--- a/storage/filesystem/object.go
+++ b/storage/filesystem/object.go
@@ -160,6 +160,79 @@ func (s *ObjectStorage) HasEncodedObject(h plumbing.Hash) (err error) {
 	return nil
 }
 
+func (s *ObjectStorage) encodedObjectSizeFromUnpacked(h plumbing.Hash) (
+	size int64, err error) {
+	f, err := s.dir.Object(h)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return 0, plumbing.ErrObjectNotFound
+		}
+
+		return 0, err
+	}
+
+	r, err := objfile.NewReader(f)
+	if err != nil {
+		return 0, err
+	}
+	defer ioutil.CheckClose(r, &err)
+
+	_, size, err = r.Header()
+	return size, err
+}
+
+func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
+	size int64, err error) {
+	if err := s.requireIndex(); err != nil {
+		return 0, err
+	}
+
+	pack, _, offset := s.findObjectInPackfile(h)
+	if offset == -1 {
+		return 0, plumbing.ErrObjectNotFound
+	}
+
+	f, err := s.dir.ObjectPack(pack)
+	if err != nil {
+		return 0, err
+	}
+	defer ioutil.CheckClose(f, &err)
+
+	idx := s.index[pack]
+	hash, err := idx.FindHash(offset)
+	if err == nil {
+		obj, ok := s.deltaBaseCache.Get(hash)
+		if ok {
+			return obj.Size(), nil
+		}
+	} else if err != nil && err != plumbing.ErrObjectNotFound {
+		return 0, err
+	}
+
+	var p *packfile.Packfile
+	if s.deltaBaseCache != nil {
+		p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.deltaBaseCache)
+	} else {
+		p = packfile.NewPackfile(idx, s.dir.Fs(), f)
+	}
+
+	return p.GetSizeByOffset(offset)
+}
+
+// EncodedObjectSize returns the plaintext size of the given object,
+// without actually reading the full object data from storage.
+func (s *ObjectStorage) EncodedObjectSize(h plumbing.Hash) (
+	size int64, err error) {
+	size, err = s.encodedObjectSizeFromUnpacked(h)
+	if err != nil && err != plumbing.ErrObjectNotFound {
+		return 0, err
+	} else if err == nil {
+		return size, nil
+	}
+
+	return s.encodedObjectSizeFromPackfile(h)
+}
+
 // EncodedObject returns the object with the given hash, by searching for it in
 // the packfile and the git object directories.
 func (s *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) {
@@ -396,7 +469,10 @@ func (s *ObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (storer.Encode
 	return storer.NewMultiEncodedObjectIter(iters), nil
 }
 
-func (s *ObjectStorage) buildPackfileIters(t plumbing.ObjectType, seen map[plumbing.Hash]struct{}) (storer.EncodedObjectIter, error) {
+func (s *ObjectStorage) buildPackfileIters(
+	t plumbing.ObjectType,
+	seen map[plumbing.Hash]struct{},
+) (storer.EncodedObjectIter, error) {
 	if err := s.requireIndex(); err != nil {
 		return nil, err
 	}
@@ -412,7 +488,10 @@ func (s *ObjectStorage) buildPackfileIters(t plumbing.ObjectType, seen map[plumb
 			if err != nil {
 				return nil, err
 			}
-			return newPackfileIter(s.dir.Fs(), pack, t, seen, s.index[h], s.deltaBaseCache)
+			return newPackfileIter(
+				s.dir.Fs(), pack, t, seen, s.index[h],
+				s.deltaBaseCache, s.options.KeepDescriptors,
+			)
 		},
 	}, nil
 }
@@ -473,16 +552,21 @@ type packfileIter struct {
 	pack billy.File
 	iter storer.EncodedObjectIter
 	seen map[plumbing.Hash]struct{}
+
+	// tells whether the pack file should be left open after iteration or not
+	keepPack bool
 }
 
 // NewPackfileIter returns a new EncodedObjectIter for the provided packfile
 // and object type. Packfile and index file will be closed after they're
-// used.
+// used. If keepPack is true the packfile won't be closed after the iteration
+// finishes.
 func NewPackfileIter(
 	fs billy.Filesystem,
 	f billy.File,
 	idxFile billy.File,
 	t plumbing.ObjectType,
+	keepPack bool,
 ) (storer.EncodedObjectIter, error) {
 	idx := idxfile.NewMemoryIndex()
 	if err := idxfile.NewDecoder(idxFile).Decode(idx); err != nil {
@@ -493,7 +577,8 @@ func NewPackfileIter(
 		return nil, err
 	}
 
-	return newPackfileIter(fs, f, t, make(map[plumbing.Hash]struct{}), idx, nil)
+	seen := make(map[plumbing.Hash]struct{})
+	return newPackfileIter(fs, f, t, seen, idx, nil, keepPack)
 }
 
 func newPackfileIter(
@@ -503,6 +588,7 @@ func newPackfileIter(
 	seen map[plumbing.Hash]struct{},
 	index idxfile.Index,
 	cache cache.Object,
+	keepPack bool,
 ) (storer.EncodedObjectIter, error) {
 	var p *packfile.Packfile
 	if cache != nil {
@@ -517,9 +603,10 @@ func newPackfileIter(
 	}
 
 	return &packfileIter{
-		pack: f,
-		iter: iter,
-		seen: seen,
+		pack:     f,
+		iter:     iter,
+		seen:     seen,
+		keepPack: keepPack,
 	}, nil
 }
 
@@ -557,7 +644,9 @@ func (iter *packfileIter) ForEach(cb func(plumbing.EncodedObject) error) error {
 
 func (iter *packfileIter) Close() {
 	iter.iter.Close()
-	_ = iter.pack.Close()
+	if !iter.keepPack {
+		_ = iter.pack.Close()
+	}
 }
 
 type objectsIter struct {
diff --git a/storage/filesystem/object_test.go b/storage/filesystem/object_test.go
index bd4a94b..4e6bbfb 100644
--- a/storage/filesystem/object_test.go
+++ b/storage/filesystem/object_test.go
@@ -83,6 +83,44 @@ func (s *FsSuite) TestGetFromPackfileKeepDescriptors(c *C) {
 	})
 }
 
+func (s *FsSuite) TestGetSizeOfObjectFile(c *C) {
+	fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit()
+	o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault())
+
+	// Get the size of `tree_walker.go`.
+	expected := plumbing.NewHash("cbd81c47be12341eb1185b379d1c82675aeded6a")
+	size, err := o.EncodedObjectSize(expected)
+	c.Assert(err, IsNil)
+	c.Assert(size, Equals, int64(2412))
+}
+
+func (s *FsSuite) TestGetSizeFromPackfile(c *C) {
+	fixtures.Basic().ByTag(".git").Test(c, func(f *fixtures.Fixture) {
+		fs := f.DotGit()
+		o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault())
+
+		// Get the size of `binary.jpg`.
+		expected := plumbing.NewHash("d5c0f4ab811897cadf03aec358ae60d21f91c50d")
+		size, err := o.EncodedObjectSize(expected)
+		c.Assert(err, IsNil)
+		c.Assert(size, Equals, int64(76110))
+	})
+}
+
+func (s *FsSuite) TestGetSizeOfAllObjectFiles(c *C) {
+	fs := fixtures.ByTag(".git").One().DotGit()
+	o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault())
+
+	// Check that every object in the repository reports a non-zero size.
+	err := o.ForEachObjectHash(func(h plumbing.Hash) error {
+		size, err := o.EncodedObjectSize(h)
+		c.Assert(err, IsNil)
+		c.Assert(size, Not(Equals), int64(0))
+		return nil
+	})
+	c.Assert(err, IsNil)
+}
+
 func (s *FsSuite) TestGetFromPackfileMultiplePackfiles(c *C) {
 	fs := fixtures.ByTag(".git").ByTag("multi-packfile").One().DotGit()
 	o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault())
@@ -153,18 +191,54 @@ func (s *FsSuite) TestPackfileIter(c *C) {
 				idxf, err := dg.ObjectPackIdx(h)
 				c.Assert(err, IsNil)
 
-				iter, err := NewPackfileIter(fs, f, idxf, t)
+				iter, err := NewPackfileIter(fs, f, idxf, t, false)
 				c.Assert(err, IsNil)
+
 				err = iter.ForEach(func(o plumbing.EncodedObject) error {
 					c.Assert(o.Type(), Equals, t)
 					return nil
 				})
-				c.Assert(err, IsNil)
 			}
 		}
 	})
+}
+
+func (s *FsSuite) TestPackfileIterKeepDescriptors(c *C) {
+	fixtures.ByTag(".git").Test(c, func(f *fixtures.Fixture) {
+		fs := f.DotGit()
+		ops := dotgit.Options{KeepDescriptors: true}
+		dg := dotgit.NewWithOptions(fs, ops)
+
+		for _, t := range objectTypes {
+			ph, err := dg.ObjectPacks()
+			c.Assert(err, IsNil)
+
+			for _, h := range ph {
+				f, err := dg.ObjectPack(h)
+				c.Assert(err, IsNil)
+
+				idxf, err := dg.ObjectPackIdx(h)
+				c.Assert(err, IsNil)
+
+				iter, err := NewPackfileIter(fs, f, idxf, t, true)
+				c.Assert(err, IsNil)
+
+				err = iter.ForEach(func(o plumbing.EncodedObject) error {
+					c.Assert(o.Type(), Equals, t)
+					return nil
+				})
+				c.Assert(err, IsNil)
+
+				// test twice to check that packfiles are not closed
+				err = iter.ForEach(func(o plumbing.EncodedObject) error {
+					c.Assert(o.Type(), Equals, t)
+					return nil
+				})
+				c.Assert(err, IsNil)
+			}
+		}
+	})
 }
 
 func BenchmarkPackfileIter(b *testing.B) {
@@ -201,7 +275,7 @@ func BenchmarkPackfileIter(b *testing.B) {
 							b.Fatal(err)
 						}
 
-						iter, err := NewPackfileIter(fs, f, idxf, t)
+						iter, err := NewPackfileIter(fs, f, idxf, t, false)
 						if err != nil {
 							b.Fatal(err)
 						}
@@ -257,7 +331,7 @@ func BenchmarkPackfileIterReadContent(b *testing.B) {
 							b.Fatal(err)
 						}
 
-						iter, err := NewPackfileIter(fs, f, idxf, t)
+						iter, err := NewPackfileIter(fs, f, idxf, t, false)
 						if err != nil {
 							b.Fatal(err)
 						}
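Usage note: the new EncodedObjectSize method resolves a size by reading only the loose-object header (objfile.Reader.Header) or, for packed objects, the packfile index and offset, so callers never inflate the object body. The sketch below mirrors the calls made in TestGetSizeOfAllObjectFiles above; it assumes the same package context as object_test.go (NewObjectStorage, dotgit, cache, billy and plumbing in scope), and totalObjectSize is a hypothetical helper, not part of this change.

// Sketch (not part of this change): sum the sizes of every object in a
// repository without decompressing any object body. Assumes the same
// package context as object_test.go; the helper name is illustrative.
func totalObjectSize(fs billy.Filesystem) (int64, error) {
	o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault())

	var total int64
	err := o.ForEachObjectHash(func(h plumbing.Hash) error {
		// EncodedObjectSize tries the loose object first and falls back
		// to the packfile lookup, as implemented in object.go above.
		size, err := o.EncodedObjectSize(h)
		if err != nil {
			return err
		}
		total += size
		return nil
	})
	return total, err
}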
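Similarly, the keepPack flag added to NewPackfileIter shifts ownership of the pack descriptor to the caller: packfileIter.Close() skips pack.Close() when keepPack is true, which is what TestPackfileIterKeepDescriptors relies on when it runs ForEach twice over the same open file. A minimal sketch of that contract, again assuming the object_test.go package context; listPackObjects and its error handling are illustrative.

// Sketch (not part of this change): iterate one packfile while keeping its
// descriptor open, then release it explicitly. Assumes the same package
// context as object_test.go; the helper name is hypothetical.
func listPackObjects(fs billy.Filesystem, f, idxf billy.File, t plumbing.ObjectType) error {
	// keepPack=true: iter.Close() will leave f open.
	iter, err := NewPackfileIter(fs, f, idxf, t, true)
	if err != nil {
		return err
	}

	err = iter.ForEach(func(o plumbing.EncodedObject) error {
		fmt.Println(o.Hash(), o.Size())
		return nil
	})
	iter.Close()

	// The pack file is still open here and could be iterated again;
	// releasing it is now the caller's responsibility.
	closeErr := f.Close()
	if err != nil {
		return err
	}
	return closeErr
}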