author     Javi Fontan <jfontan@gmail.com>    2018-07-26 13:42:51 +0200
committer  Javi Fontan <jfontan@gmail.com>    2018-07-26 14:17:56 +0200
commit     79f249465b24104b73c9dc220d9098cecdab4d77 (patch)
tree       8804820221e05096433b2e0cef5249e4a3e94c7b
parent     74f56f388bbe8072bfcd976add2373f9a7e20341 (diff)
download   go-git-79f249465b24104b73c9dc220d9098cecdab4d77.tar.gz
plumbing, storage: integrate new index
Now dotgit.PackWriter uses the new packfile.Parser and index.

Signed-off-by: Javi Fontan <jfontan@gmail.com>
-rw-r--r--  plumbing/format/packfile/decoder.go        |  9
-rw-r--r--  plumbing/format/packfile/parser.go         | 11
-rw-r--r--  storage/filesystem/dotgit/writers.go       | 33
-rw-r--r--  storage/filesystem/dotgit/writers_test.go  |  3
-rw-r--r--  storage/filesystem/object.go               | 46
5 files changed, 57 insertions(+), 45 deletions(-)
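
For context, the change replaces the packfile.Decoder/packfile.Index pair with a packfile.Parser observed by an idxfile.Writer. A minimal standalone sketch of that flow, with the call signatures taken from the writers.go hunk below (the program name, argument handling, and panics are illustrative only, not part of the patch):

package main

import (
	"os"

	"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
	"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
)

// Illustrative: read a packfile and write the corresponding idx file.
// Usage: packidx <in.pack> <out.idx>
func main() {
	pack, err := os.Open(os.Args[1])
	if err != nil {
		panic(err)
	}
	defer pack.Close()

	// The idxfile.Writer observes the parser and collects index entries
	// while the pack is being parsed.
	w := new(idxfile.Writer)
	p := packfile.NewParser(packfile.NewScanner(pack), w)
	if _, err := p.Parse(); err != nil {
		panic(err)
	}

	// After Parse the writer is finished and can produce an index.
	idx, err := w.Index()
	if err != nil {
		panic(err)
	}

	out, err := os.Create(os.Args[2])
	if err != nil {
		panic(err)
	}
	defer out.Close()

	if _, err := idxfile.NewEncoder(out).Encode(idx); err != nil {
		panic(err)
	}
}
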
diff --git a/plumbing/format/packfile/decoder.go b/plumbing/format/packfile/decoder.go
index 9bfd69b..69aef2d 100644
--- a/plumbing/format/packfile/decoder.go
+++ b/plumbing/format/packfile/decoder.go
@@ -447,11 +447,12 @@ func (d *Decoder) recallByOffset(o int64) (plumbing.EncodedObject, error) {
return d.DecodeObjectAt(o)
}
- // if e, ok := d.idx.LookupOffset(uint64(o)); ok {
- // return d.recallByHashNonSeekable(e.Hash)
- // }
+ hash, err := d.idx.FindHash(o)
+ if err != nil {
+ return nil, err
+ }
- return nil, plumbing.ErrObjectNotFound
+ return d.recallByHashNonSeekable(hash)
}
func (d *Decoder) recallByHash(h plumbing.Hash) (plumbing.EncodedObject, error) {
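
The recallByOffset change above swaps the commented-out LookupOffset path for idxfile.Index.FindHash, which maps a pack offset to its object hash. A hedged sketch of that lookup in isolation (hashAtOffset is a hypothetical helper, not part of the patch):

package sketch

import (
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
)

// hashAtOffset resolves a pack offset back to an object hash through the
// index, mirroring the FindHash call introduced in recallByOffset above.
func hashAtOffset(idx idxfile.Index, o int64) (plumbing.Hash, error) {
	hash, err := idx.FindHash(o)
	if err != nil {
		// lookup errors (e.g. an unknown offset) are propagated unchanged
		return plumbing.ZeroHash, err
	}
	return hash, nil
}
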
diff --git a/plumbing/format/packfile/parser.go b/plumbing/format/packfile/parser.go
index 460fc3f..696f5ba 100644
--- a/plumbing/format/packfile/parser.go
+++ b/plumbing/format/packfile/parser.go
@@ -311,11 +311,12 @@ func applyPatchBase(ota *objectInfo, data, base []byte) ([]byte, error) {
type objectInfo struct {
plumbing.Hasher
- Offset int64
- Length int64
- PackSize int64
- Type plumbing.ObjectType
- DiskType plumbing.ObjectType
+ Offset int64
+ Length int64
+ HeaderLength int64
+ PackSize int64
+ Type plumbing.ObjectType
+ DiskType plumbing.ObjectType
Crc32 uint32
diff --git a/storage/filesystem/dotgit/writers.go b/storage/filesystem/dotgit/writers.go
index c2b420f..e1ede3c 100644
--- a/storage/filesystem/dotgit/writers.go
+++ b/storage/filesystem/dotgit/writers.go
@@ -20,13 +20,14 @@ import (
// is renamed/moved (depends on the Filesystem implementation) to the final
// location, if the PackWriter is not used, nothing is written
type PackWriter struct {
- Notify func(plumbing.Hash, *packfile.Index)
+ Notify func(plumbing.Hash, *idxfile.Writer)
fs billy.Filesystem
fr, fw billy.File
synced *syncedReader
checksum plumbing.Hash
- index *packfile.Index
+ parser *packfile.Parser
+ writer *idxfile.Writer
result chan error
}
@@ -55,20 +56,16 @@ func newPackWrite(fs billy.Filesystem) (*PackWriter, error) {
func (w *PackWriter) buildIndex() {
s := packfile.NewScanner(w.synced)
- d, err := packfile.NewDecoder(s, nil)
- if err != nil {
- w.result <- err
- return
- }
+ w.writer = new(idxfile.Writer)
+ w.parser = packfile.NewParser(s, w.writer)
- checksum, err := d.Decode()
+ checksum, err := w.parser.Parse()
if err != nil {
w.result <- err
return
}
w.checksum = checksum
- w.index = d.Index()
w.result <- err
}
@@ -92,8 +89,8 @@ func (w *PackWriter) Write(p []byte) (int, error) {
// was written, the tempfiles are deleted without writing a packfile.
func (w *PackWriter) Close() error {
defer func() {
- if w.Notify != nil && w.index != nil && w.index.Size() > 0 {
- w.Notify(w.checksum, w.index)
+ if w.Notify != nil && w.writer != nil && w.writer.Finished() {
+ w.Notify(w.checksum, w.writer)
}
close(w.result)
@@ -115,7 +112,7 @@ func (w *PackWriter) Close() error {
return err
}
- if w.index == nil || w.index.Size() == 0 {
+ if w.writer == nil || !w.writer.Finished() {
return w.clean()
}
@@ -145,11 +142,13 @@ func (w *PackWriter) save() error {
}
func (w *PackWriter) encodeIdx(writer io.Writer) error {
- idx := w.index.ToIdxFile()
- idx.PackfileChecksum = w.checksum
- idx.Version = idxfile.VersionSupported
+ idx, err := w.writer.Index()
+ if err != nil {
+ return err
+ }
+
e := idxfile.NewEncoder(writer)
- _, err := e.Encode(idx)
+ _, err = e.Encode(idx)
return err
}
@@ -209,7 +208,6 @@ func (s *syncedReader) isBlocked() bool {
func (s *syncedReader) wake() {
if s.isBlocked() {
- // fmt.Println("wake")
atomic.StoreUint32(&s.blocked, 0)
s.news <- true
}
@@ -220,7 +218,6 @@ func (s *syncedReader) sleep() {
written := atomic.LoadUint64(&s.written)
if read >= written {
atomic.StoreUint32(&s.blocked, 1)
- // fmt.Println("sleep", read, written)
<-s.news
}
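
Under the new Notify signature the consumer receives the idxfile.Writer and materializes the index itself. A small sketch of a callback with that shape (notifyFor and the indexes map are illustrative; only the signature is taken from the change above):

package sketch

import (
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
)

// notifyFor returns a callback with the new Notify signature; it stores the
// finished index of every written pack into indexes.
func notifyFor(indexes map[plumbing.Hash]idxfile.Index) func(plumbing.Hash, *idxfile.Writer) {
	return func(h plumbing.Hash, w *idxfile.Writer) {
		// Close only invokes Notify once the writer is Finished(), so the
		// index can be materialized here.
		idx, err := w.Index()
		if err == nil {
			indexes[h] = idx
		}
	}
}
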
diff --git a/storage/filesystem/dotgit/writers_test.go b/storage/filesystem/dotgit/writers_test.go
index bf00762..5a5f7b4 100644
--- a/storage/filesystem/dotgit/writers_test.go
+++ b/storage/filesystem/dotgit/writers_test.go
@@ -9,6 +9,7 @@ import (
"strconv"
"gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
. "gopkg.in/check.v1"
@@ -148,7 +149,7 @@ func (s *SuiteDotGit) TestPackWriterUnusedNotify(c *C) {
w, err := newPackWrite(fs)
c.Assert(err, IsNil)
- w.Notify = func(h plumbing.Hash, idx *packfile.Index) {
+ w.Notify = func(h plumbing.Hash, idx *idxfile.Writer) {
c.Fatal("unexpected call to PackWriter.Notify")
}
diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go
index ef67f50..b73b309 100644
--- a/storage/filesystem/object.go
+++ b/storage/filesystem/object.go
@@ -23,7 +23,7 @@ type ObjectStorage struct {
deltaBaseCache cache.Object
dir *dotgit.DotGit
- index map[plumbing.Hash]*packfile.Index
+ index map[plumbing.Hash]idxfile.Index
}
// NewObjectStorage creates a new ObjectStorage with the given .git directory.
@@ -41,7 +41,7 @@ func (s *ObjectStorage) requireIndex() error {
return nil
}
- s.index = make(map[plumbing.Hash]*packfile.Index)
+ s.index = make(map[plumbing.Hash]idxfile.Index)
packs, err := s.dir.ObjectPacks()
if err != nil {
return err
@@ -69,7 +69,7 @@ func (s *ObjectStorage) loadIdxFile(h plumbing.Hash) (err error) {
return err
}
- s.index[h] = packfile.NewIndexFromIdxFile(idxf)
+ s.index[h] = idxf
return err
}
@@ -87,8 +87,11 @@ func (s *ObjectStorage) PackfileWriter() (io.WriteCloser, error) {
return nil, err
}
- w.Notify = func(h plumbing.Hash, idx *packfile.Index) {
- s.index[h] = idx
+ w.Notify = func(h plumbing.Hash, writer *idxfile.Writer) {
+ index, err := writer.Index()
+ if err == nil {
+ s.index[h] = index
+ }
}
return w, nil
@@ -278,7 +281,7 @@ func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) (
func (s *ObjectStorage) decodeObjectAt(
f billy.File,
- idx *packfile.Index,
+ idx idxfile.Index,
offset int64) (plumbing.EncodedObject, error) {
if _, err := f.Seek(0, io.SeekStart); err != nil {
return nil, err
@@ -299,7 +302,7 @@ func (s *ObjectStorage) decodeObjectAt(
func (s *ObjectStorage) decodeDeltaObjectAt(
f billy.File,
- idx *packfile.Index,
+ idx idxfile.Index,
offset int64,
hash plumbing.Hash) (plumbing.EncodedObject, error) {
if _, err := f.Seek(0, io.SeekStart); err != nil {
@@ -324,12 +327,10 @@ func (s *ObjectStorage) decodeDeltaObjectAt(
case plumbing.REFDeltaObject:
base = header.Reference
case plumbing.OFSDeltaObject:
- e, ok := idx.LookupOffset(uint64(header.OffsetReference))
- if !ok {
- return nil, plumbing.ErrObjectNotFound
+ base, err = idx.FindHash(header.OffsetReference)
+ if err != nil {
+ return nil, err
}
-
- base = e.Hash
default:
return s.decodeObjectAt(f, idx, offset)
}
@@ -350,8 +351,9 @@ func (s *ObjectStorage) decodeDeltaObjectAt(
func (s *ObjectStorage) findObjectInPackfile(h plumbing.Hash) (plumbing.Hash, plumbing.Hash, int64) {
for packfile, index := range s.index {
- if e, ok := index.LookupHash(h); ok {
- return packfile, e.Hash, int64(e.Offset)
+ offset, err := index.FindOffset(h)
+ if err == nil {
+ return packfile, h, offset
}
}
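
findObjectInPackfile now asks each idxfile.Index for the offset directly via FindOffset. The same lookup sketched outside ObjectStorage (packOf and the indexes map are hypothetical helpers):

package sketch

import (
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
)

// packOf scans per-pack indexes for h and reports the containing pack and
// the object's offset inside it, mirroring findObjectInPackfile above.
func packOf(indexes map[plumbing.Hash]idxfile.Index, h plumbing.Hash) (plumbing.Hash, int64, bool) {
	for pack, idx := range indexes {
		if offset, err := idx.FindOffset(h); err == nil {
			return pack, offset, true
		}
	}
	return plumbing.ZeroHash, 0, false
}
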
@@ -460,12 +462,22 @@ type packfileIter struct {
total uint32
}
-func NewPackfileIter(f billy.File, t plumbing.ObjectType) (storer.EncodedObjectIter, error) {
+// NewPackfileIter returns a new EncodedObjectIter for the provided packfile
+// and object type.
+func NewPackfileIter(
+ f billy.File,
+ t plumbing.ObjectType,
+) (storer.EncodedObjectIter, error) {
return newPackfileIter(f, t, make(map[plumbing.Hash]struct{}), nil, nil)
}
-func newPackfileIter(f billy.File, t plumbing.ObjectType, seen map[plumbing.Hash]struct{},
- index *packfile.Index, cache cache.Object) (storer.EncodedObjectIter, error) {
+func newPackfileIter(
+ f billy.File,
+ t plumbing.ObjectType,
+ seen map[plumbing.Hash]struct{},
+ index idxfile.Index,
+ cache cache.Object,
+) (storer.EncodedObjectIter, error) {
s := packfile.NewScanner(f)
_, total, err := s.Header()
if err != nil {