Diffstat (limited to 'storage/filesystem')
-rw-r--r--  storage/filesystem/dotgit/dotgit.go       | 33
-rw-r--r--  storage/filesystem/dotgit/dotgit_test.go  | 85
-rw-r--r--  storage/filesystem/index.go               |  2
-rw-r--r--  storage/filesystem/object.go              |  4
-rw-r--r--  storage/filesystem/object_test.go         | 61
5 files changed, 166 insertions(+), 19 deletions(-)
diff --git a/storage/filesystem/dotgit/dotgit.go b/storage/filesystem/dotgit/dotgit.go
index 31c4694..72c9ccf 100644
--- a/storage/filesystem/dotgit/dotgit.go
+++ b/storage/filesystem/dotgit/dotgit.go
@@ -72,6 +72,9 @@ var (
// ErrIsDir is returned when a reference file is attempting to be read,
// but the path specified is a directory.
ErrIsDir = errors.New("reference path is a directory")
+ // ErrEmptyRefFile is returned when an attempt is made to read a reference file,
+ // but the file is empty.
+ ErrEmptyRefFile = errors.New("ref file is empty")
)
// Options holds configuration for the storage.
@@ -249,7 +252,7 @@ func (d *DotGit) objectPacks() ([]plumbing.Hash, error) {
continue
}
- h := plumbing.NewHash(n[5 : len(n)-5]) //pack-(hash).pack
+ h := plumbing.NewHash(n[5 : len(n)-5]) // pack-(hash).pack
if h.IsZero() {
// Ignore files with badly-formatted names.
continue
@@ -661,18 +664,33 @@ func (d *DotGit) readReferenceFrom(rd io.Reader, name string) (ref *plumbing.Ref
return nil, err
}
+ if len(b) == 0 {
+ return nil, ErrEmptyRefFile
+ }
+
line := strings.TrimSpace(string(b))
return plumbing.NewReferenceFromStrings(name, line), nil
}
+// checkReferenceAndTruncate reads the reference from the given file, or from the `packed-refs`
+// file if the given file is empty. It then checks that the old reference matches the stored
+// reference and truncates the file.
func (d *DotGit) checkReferenceAndTruncate(f billy.File, old *plumbing.Reference) error {
if old == nil {
return nil
}
+
ref, err := d.readReferenceFrom(f, old.Name().String())
+ if errors.Is(err, ErrEmptyRefFile) {
+ // This may happen if the reference is being read from a newly created file.
+ // In that case, try getting the reference from the packed refs file.
+ ref, err = d.packedRef(old.Name())
+ }
+
if err != nil {
return err
}
+
if ref.Hash() != old.Hash() {
return storage.ErrReferenceHasChanged
}
@@ -701,16 +719,16 @@ func (d *DotGit) SetRef(r, old *plumbing.Reference) error {
// Symbolic references are resolved and included in the output.
func (d *DotGit) Refs() ([]*plumbing.Reference, error) {
var refs []*plumbing.Reference
- var seen = make(map[plumbing.ReferenceName]bool)
- if err := d.addRefsFromRefDir(&refs, seen); err != nil {
+ seen := make(map[plumbing.ReferenceName]bool)
+ if err := d.addRefFromHEAD(&refs); err != nil {
return nil, err
}
- if err := d.addRefsFromPackedRefs(&refs, seen); err != nil {
+ if err := d.addRefsFromRefDir(&refs, seen); err != nil {
return nil, err
}
- if err := d.addRefFromHEAD(&refs); err != nil {
+ if err := d.addRefsFromPackedRefs(&refs, seen); err != nil {
return nil, err
}
@@ -815,7 +833,8 @@ func (d *DotGit) addRefsFromPackedRefsFile(refs *[]*plumbing.Reference, f billy.
}
func (d *DotGit) openAndLockPackedRefs(doCreate bool) (
- pr billy.File, err error) {
+ pr billy.File, err error,
+) {
var f billy.File
defer func() {
if err != nil && f != nil {
@@ -1020,7 +1039,7 @@ func (d *DotGit) readReferenceFile(path, name string) (ref *plumbing.Reference,
func (d *DotGit) CountLooseRefs() (int, error) {
var refs []*plumbing.Reference
- var seen = make(map[plumbing.ReferenceName]bool)
+ seen := make(map[plumbing.ReferenceName]bool)
if err := d.addRefsFromRefDir(&refs, seen); err != nil {
return 0, err
}
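
A minimal usage sketch (not part of the patch) of the behaviour the checkReferenceAndTruncate change targets: SetRef with a non-nil old reference acts as a compare-and-swap, and with this fix the comparison falls back to the packed-refs file when the loose ref file exists but is empty. The repository path and hashes below are hypothetical.

package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5/osfs"
	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/storage/filesystem/dotgit"
)

func main() {
	// Open an existing .git directory (hypothetical path).
	dir := dotgit.New(osfs.New("/path/to/repo/.git"))

	old := plumbing.NewReferenceFromStrings(
		"refs/heads/foo",
		"e8d3ffab552895c19b9fcf7aa264d277cde33881",
	)
	updated := plumbing.NewReferenceFromStrings(
		"refs/heads/foo",
		"b8d3ffab552895c19b9fcf7aa264d277cde33881",
	)

	// SetRef only writes `updated` if the currently stored reference still
	// matches `old`; when refs/heads/foo has been packed, the stored value is
	// now read from the packed-refs file instead of failing on an empty loose file.
	if err := dir.SetRef(updated, old); err != nil {
		fmt.Println("reference changed concurrently:", err)
	}
}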
diff --git a/storage/filesystem/dotgit/dotgit_test.go b/storage/filesystem/dotgit/dotgit_test.go
index 2cbdb0c..fdb8a57 100644
--- a/storage/filesystem/dotgit/dotgit_test.go
+++ b/storage/filesystem/dotgit/dotgit_test.go
@@ -16,6 +16,7 @@ import (
"github.com/go-git/go-billy/v5/util"
fixtures "github.com/go-git/go-git-fixtures/v4"
"github.com/go-git/go-git/v5/plumbing"
+ "github.com/go-git/go-git/v5/storage"
"github.com/stretchr/testify/assert"
. "gopkg.in/check.v1"
)
@@ -85,6 +86,15 @@ func (s *SuiteDotGit) TestSetRefsNorwfs(c *C) {
testSetRefs(c, dir)
}
+func (s *SuiteDotGit) TestRefsHeadFirst(c *C) {
+ fs := fixtures.Basic().ByTag(".git").One().DotGit()
+ dir := New(fs)
+ refs, err := dir.Refs()
+ c.Assert(err, IsNil)
+ c.Assert(len(refs), Not(Equals), 0)
+ c.Assert(refs[0].Name().String(), Equals, "HEAD")
+}
+
func testSetRefs(c *C, dir *DotGit) {
firstFoo := plumbing.NewReferenceFromStrings(
"refs/heads/foo",
@@ -175,7 +185,6 @@ func (s *SuiteDotGit) TestRefsFromPackedRefs(c *C) {
ref := findReference(refs, "refs/remotes/origin/branch")
c.Assert(ref, NotNil)
c.Assert(ref.Hash().String(), Equals, "e8d3ffab552895c19b9fcf7aa264d277cde33881")
-
}
func (s *SuiteDotGit) TestRefsFromReferenceFile(c *C) {
@@ -189,7 +198,6 @@ func (s *SuiteDotGit) TestRefsFromReferenceFile(c *C) {
c.Assert(ref, NotNil)
c.Assert(ref.Type(), Equals, plumbing.SymbolicReference)
c.Assert(string(ref.Target()), Equals, "refs/remotes/origin/master")
-
}
func BenchmarkRefMultipleTimes(b *testing.B) {
@@ -538,7 +546,6 @@ func (s *SuiteDotGit) TestObjectPackWithKeepDescriptors(c *C) {
err = dir.Close()
c.Assert(err, NotNil)
-
}
func (s *SuiteDotGit) TestObjectPackIdx(c *C) {
@@ -605,7 +612,7 @@ func (s *SuiteDotGit) TestObjectsExclusive(c *C) {
testObjectsWithPrefix(c, fs, dir)
}
-func testObjects(c *C, fs billy.Filesystem, dir *DotGit) {
+func testObjects(c *C, _ billy.Filesystem, dir *DotGit) {
hashes, err := dir.Objects()
c.Assert(err, IsNil)
c.Assert(hashes, HasLen, 187)
@@ -614,7 +621,7 @@ func testObjects(c *C, fs billy.Filesystem, dir *DotGit) {
c.Assert(hashes[2].String(), Equals, "03db8e1fbe133a480f2867aac478fd866686d69e")
}
-func testObjectsWithPrefix(c *C, fs billy.Filesystem, dir *DotGit) {
+func testObjectsWithPrefix(c *C, _ billy.Filesystem, dir *DotGit) {
prefix, _ := hex.DecodeString("01d5")
hashes, err := dir.ObjectsWithPrefix(prefix)
c.Assert(err, IsNil)
@@ -649,7 +656,7 @@ func (s *SuiteDotGit) TestObject(c *C) {
file.Name(), fs.Join("objects", "03", "db8e1fbe133a480f2867aac478fd866686d69e")),
Equals, true,
)
- incomingHash := "9d25e0f9bde9f82882b49fe29117b9411cb157b7" //made up hash
+ incomingHash := "9d25e0f9bde9f82882b49fe29117b9411cb157b7" // made up hash
incomingDirPath := fs.Join("objects", "tmp_objdir-incoming-123456")
incomingFilePath := fs.Join(incomingDirPath, incomingHash[0:2], incomingHash[2:40])
fs.MkdirAll(incomingDirPath, os.FileMode(0755))
@@ -670,7 +677,7 @@ func (s *SuiteDotGit) TestPreGit235Object(c *C) {
file.Name(), fs.Join("objects", "03", "db8e1fbe133a480f2867aac478fd866686d69e")),
Equals, true,
)
- incomingHash := "9d25e0f9bde9f82882b49fe29117b9411cb157b7" //made up hash
+ incomingHash := "9d25e0f9bde9f82882b49fe29117b9411cb157b7" // made up hash
incomingDirPath := fs.Join("objects", "incoming-123456")
incomingFilePath := fs.Join(incomingDirPath, incomingHash[0:2], incomingHash[2:40])
fs.MkdirAll(incomingDirPath, os.FileMode(0755))
@@ -687,7 +694,7 @@ func (s *SuiteDotGit) TestObjectStat(c *C) {
hash := plumbing.NewHash("03db8e1fbe133a480f2867aac478fd866686d69e")
_, err := dir.ObjectStat(hash)
c.Assert(err, IsNil)
- incomingHash := "9d25e0f9bde9f82882b49fe29117b9411cb157b7" //made up hash
+ incomingHash := "9d25e0f9bde9f82882b49fe29117b9411cb157b7" // made up hash
incomingDirPath := fs.Join("objects", "tmp_objdir-incoming-123456")
incomingFilePath := fs.Join(incomingDirPath, incomingHash[0:2], incomingHash[2:40])
fs.MkdirAll(incomingDirPath, os.FileMode(0755))
@@ -705,7 +712,7 @@ func (s *SuiteDotGit) TestObjectDelete(c *C) {
err := dir.ObjectDelete(hash)
c.Assert(err, IsNil)
- incomingHash := "9d25e0f9bde9f82882b49fe29117b9411cb157b7" //made up hash
+ incomingHash := "9d25e0f9bde9f82882b49fe29117b9411cb157b7" // made up hash
incomingDirPath := fs.Join("objects", "tmp_objdir-incoming-123456")
incomingSubDirPath := fs.Join(incomingDirPath, incomingHash[0:2])
incomingFilePath := fs.Join(incomingSubDirPath, incomingHash[2:40])
@@ -1040,3 +1047,63 @@ func (s *SuiteDotGit) TestDeletedRefs(c *C) {
c.Assert(refs, HasLen, 1)
c.Assert(refs[0].Name(), Equals, plumbing.ReferenceName("refs/heads/foo"))
}
+
+// Checks that setting a reference that has been packed, while verifying its old value, succeeds.
+func (s *SuiteDotGit) TestSetPackedRef(c *C) {
+ fs, clean := s.TemporalFilesystem()
+ defer clean()
+
+ dir := New(fs)
+
+ err := dir.SetRef(plumbing.NewReferenceFromStrings(
+ "refs/heads/foo",
+ "e8d3ffab552895c19b9fcf7aa264d277cde33881",
+ ), nil)
+ c.Assert(err, IsNil)
+
+ refs, err := dir.Refs()
+ c.Assert(err, IsNil)
+ c.Assert(refs, HasLen, 1)
+ looseCount, err := dir.CountLooseRefs()
+ c.Assert(err, IsNil)
+ c.Assert(looseCount, Equals, 1)
+
+ err = dir.PackRefs()
+ c.Assert(err, IsNil)
+
+ // Make sure the refs are still there, but no longer loose.
+ refs, err = dir.Refs()
+ c.Assert(err, IsNil)
+ c.Assert(refs, HasLen, 1)
+ looseCount, err = dir.CountLooseRefs()
+ c.Assert(err, IsNil)
+ c.Assert(looseCount, Equals, 0)
+
+ ref, err := dir.Ref("refs/heads/foo")
+ c.Assert(err, IsNil)
+ c.Assert(ref, NotNil)
+ c.Assert(ref.Hash().String(), Equals, "e8d3ffab552895c19b9fcf7aa264d277cde33881")
+
+ // Attempt to update the reference using an invalid old reference value
+ err = dir.SetRef(plumbing.NewReferenceFromStrings(
+ "refs/heads/foo",
+ "b8d3ffab552895c19b9fcf7aa264d277cde33881",
+ ), plumbing.NewReferenceFromStrings(
+ "refs/heads/foo",
+ "e8d3ffab552895c19b9fcf7aa264d277cde33882",
+ ))
+ c.Assert(err, Equals, storage.ErrReferenceHasChanged)
+
+ // Now update the reference using the correct old value; it should succeed
+ err = dir.SetRef(plumbing.NewReferenceFromStrings(
+ "refs/heads/foo",
+ "b8d3ffab552895c19b9fcf7aa264d277cde33881",
+ ), plumbing.NewReferenceFromStrings(
+ "refs/heads/foo",
+ "e8d3ffab552895c19b9fcf7aa264d277cde33881",
+ ))
+ c.Assert(err, IsNil)
+ looseCount, err = dir.CountLooseRefs()
+ c.Assert(err, IsNil)
+ c.Assert(looseCount, Equals, 1)
+}
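
A minimal sketch (not part of the patch) of the HEAD-first ordering that TestRefsHeadFirst above asserts; the repository path is hypothetical.

package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5/osfs"
	"github.com/go-git/go-git/v5/storage/filesystem/dotgit"
)

func main() {
	dir := dotgit.New(osfs.New("/path/to/repo/.git")) // hypothetical path

	refs, err := dir.Refs()
	if err != nil {
		panic(err)
	}
	if len(refs) > 0 {
		// With the reordered Refs(), HEAD is resolved before loose and packed
		// references, so it is the first element of the returned slice.
		fmt.Println(refs[0].Name()) // HEAD
	}
}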
diff --git a/storage/filesystem/index.go b/storage/filesystem/index.go
index a19176f..a86ef3e 100644
--- a/storage/filesystem/index.go
+++ b/storage/filesystem/index.go
@@ -48,7 +48,7 @@ func (s *IndexStorage) Index() (i *index.Index, err error) {
defer ioutil.CheckClose(f, &err)
- d := index.NewDecoder(bufio.NewReader(f))
+ d := index.NewDecoder(f)
err = d.Decode(idx)
return idx, err
}
diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go
index e812fe9..91b4ace 100644
--- a/storage/filesystem/object.go
+++ b/storage/filesystem/object.go
@@ -431,13 +431,13 @@ func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedOb
defer ioutil.CheckClose(w, &err)
- s.objectCache.Put(obj)
-
bufp := copyBufferPool.Get().(*[]byte)
buf := *bufp
_, err = io.CopyBuffer(w, r, buf)
copyBufferPool.Put(bufp)
+ s.objectCache.Put(obj)
+
return obj, err
}
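
A minimal sketch (not part of the patch) of the object-cache interaction the reordering above is about: an EncodedObject is published to the cache only once its body is complete, so readers never see a cached object with partially copied content. The blob content is made up.

package main

import (
	"fmt"
	"io"

	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/cache"
)

func main() {
	c := cache.NewObjectLRUDefault()

	// Build a complete in-memory object first...
	obj := &plumbing.MemoryObject{}
	obj.SetType(plumbing.BlobObject)
	if _, err := obj.Write([]byte("hello")); err != nil {
		panic(err)
	}

	// ...and only then publish it, mirroring how getFromUnpacked now calls
	// objectCache.Put after io.CopyBuffer has finished.
	c.Put(obj)

	if cached, ok := c.Get(obj.Hash()); ok {
		r, _ := cached.Reader()
		b, _ := io.ReadAll(r)
		fmt.Printf("cached blob: %s\n", b)
	}
}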
diff --git a/storage/filesystem/object_test.go b/storage/filesystem/object_test.go
index 251077a..4f98458 100644
--- a/storage/filesystem/object_test.go
+++ b/storage/filesystem/object_test.go
@@ -547,3 +547,64 @@ func BenchmarkGetObjectFromPackfile(b *testing.B) {
})
}
}
+
+func (s *FsSuite) TestGetFromUnpackedCachesObjects(c *C) {
+ fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit()
+ objectCache := cache.NewObjectLRUDefault()
+ objectStorage := NewObjectStorage(dotgit.New(fs), objectCache)
+ hash := plumbing.NewHash("f3dfe29d268303fc6e1bbce268605fc99573406e")
+
+ // Assert the cache is empty initially
+ _, ok := objectCache.Get(hash)
+ c.Assert(ok, Equals, false)
+
+ // Load the object
+ obj, err := objectStorage.EncodedObject(plumbing.AnyObject, hash)
+ c.Assert(err, IsNil)
+ c.Assert(obj.Hash(), Equals, hash)
+
+ // The object should've been cached during the load
+ cachedObj, ok := objectCache.Get(hash)
+ c.Assert(ok, Equals, true)
+ c.Assert(cachedObj, DeepEquals, obj)
+
+ // Assert that both objects can be read and that they both produce the same bytes
+
+ objReader, err := obj.Reader()
+ c.Assert(err, IsNil)
+ objBytes, err := io.ReadAll(objReader)
+ c.Assert(err, IsNil)
+ c.Assert(len(objBytes), Not(Equals), 0)
+ err = objReader.Close()
+ c.Assert(err, IsNil)
+
+ cachedObjReader, err := cachedObj.Reader()
+ c.Assert(err, IsNil)
+ cachedObjBytes, err := io.ReadAll(cachedObjReader)
+ c.Assert(len(cachedObjBytes), Not(Equals), 0)
+ c.Assert(err, IsNil)
+ err = cachedObjReader.Close()
+ c.Assert(err, IsNil)
+
+ c.Assert(cachedObjBytes, DeepEquals, objBytes)
+}
+
+func (s *FsSuite) TestGetFromUnpackedDoesNotCacheLargeObjects(c *C) {
+ fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit()
+ objectCache := cache.NewObjectLRUDefault()
+ objectStorage := NewObjectStorageWithOptions(dotgit.New(fs), objectCache, Options{LargeObjectThreshold: 1})
+ hash := plumbing.NewHash("f3dfe29d268303fc6e1bbce268605fc99573406e")
+
+ // Assert the cache is empty initially
+ _, ok := objectCache.Get(hash)
+ c.Assert(ok, Equals, false)
+
+ // Load the object
+ obj, err := objectStorage.EncodedObject(plumbing.AnyObject, hash)
+ c.Assert(err, IsNil)
+ c.Assert(obj.Hash(), Equals, hash)
+
+ // The object should not have been cached during the load
+ _, ok = objectCache.Get(hash)
+ c.Assert(ok, Equals, false)
+}
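
A minimal sketch (not part of the patch) of how the LargeObjectThreshold option exercised by TestGetFromUnpackedDoesNotCacheLargeObjects is wired up when constructing a filesystem storer; the path and threshold are hypothetical.

package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5/osfs"
	"github.com/go-git/go-git/v5/plumbing/cache"
	"github.com/go-git/go-git/v5/storage/filesystem"
)

func main() {
	fs := osfs.New("/path/to/repo/.git") // hypothetical path

	// Objects larger than the threshold (in bytes) are streamed from disk
	// and skipped by the LRU cache, as asserted in the test above.
	st := filesystem.NewStorageWithOptions(fs, cache.NewObjectLRUDefault(), filesystem.Options{
		LargeObjectThreshold: 10 * 1024 * 1024,
	})

	fmt.Println(st != nil)
}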